From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.5 commit in: /
Date: Thu, 02 Jun 2016 19:24:41
Message-Id: 1464894484.afed3274f4b915ecdbdb505c7255ab11f78a22d7.mpagano@gentoo
commit: afed3274f4b915ecdbdb505c7255ab11f78a22d7
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 2 19:08:04 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 2 19:08:04 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=afed3274

Linux patch 4.5.6

0000_README | 4 +
1005_linux-4.5.6.patch | 32192 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 32196 insertions(+)

diff --git a/0000_README b/0000_README
index f936e50..9a3ebeb 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-4.5.5.patch
From: http://www.kernel.org
Desc: Linux 4.5.5

+Patch: 1005_linux-4.5.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.5.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.5.6.patch b/1005_linux-4.5.6.patch
new file mode 100644
index 0000000..339ef1c
--- /dev/null
+++ b/1005_linux-4.5.6.patch
@@ -0,0 +1,32192 @@
+diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
+index ff49cf901148..81eb378210c6 100644
+--- a/Documentation/cgroup-v2.txt
++++ b/Documentation/cgroup-v2.txt
+@@ -1368,6 +1368,12 @@ system than killing the group. Otherwise, memory.max is there to
+ limit this type of spillover and ultimately contain buggy or even
+ malicious applications.
+
++Setting the original memory.limit_in_bytes below the current usage was
++subject to a race condition, where concurrent charges could cause the
++limit setting to fail. memory.max on the other hand will first set the
++limit to prevent new charges, and then reclaim and OOM kill until the
++new limit is met - or the task writing to memory.max is killed.
++
+ The combined memory+swap accounting and limiting is replaced by real
+ control over swap space.
+
+diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+index c2340eeeb97f..c000832a7fb9 100644
+--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+@@ -30,6 +30,10 @@ Optional properties:
+ - target-supply : regulator for SATA target power
+ - phys : reference to the SATA PHY node
+ - phy-names : must be "sata-phy"
++- ports-implemented : Mask that indicates which ports that the HBA supports
++ are available for software to use. Useful if PORTS_IMPL
++ is not programmed by the BIOS, which is true with
++ some embedded SOC's.
+
+ Required properties when using sub-nodes:
+ - #address-cells : number of cells to encode an address
+diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+index 08a4a32c8eb0..0326154c7925 100644
+--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+@@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
+ mfio81 dreq0, mips_trace_data, eth_debug
+ mfio82 dreq1, mips_trace_data, eth_debug
+ mfio83 mips_pll_lock, mips_trace_data, usb_debug
+-mfio84 sys_pll_lock, mips_trace_data, usb_debug
+-mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug
+-mfio86 bt_pll_lock, mips_trace_data, sdhost_debug
+-mfio87 rpu_v_pll_lock, dreq2, socif_debug
+-mfio88 rpu_l_pll_lock, dreq3, socif_debug
+-mfio89 audio_pll_lock, dreq4, dreq5
++mfio84 audio_pll_lock, mips_trace_data, usb_debug
++mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
++mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
++mfio87 sys_pll_lock, dreq2, socif_debug
++mfio88 wifi_pll_lock, dreq3, socif_debug
++mfio89 bt_pll_lock, dreq4, dreq5
+ tck
+ trstn
+ tdi
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 9a53c929f017..21e4b4861331 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -4016,6 +4016,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ sector if the number is odd);
+ i = IGNORE_DEVICE (don't bind to this
+ device);
++ j = NO_REPORT_LUNS (don't use report luns
++ command, uas only);
+ l = NOT_LOCKABLE (don't try to lock and
+ unlock ejectable media);
+ m = MAX_SECTORS_64 (don't transfer more
+diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
+index bc3842dc323a..e2dea3dc4307 100644
+--- a/Documentation/serial/tty.txt
++++ b/Documentation/serial/tty.txt
+@@ -213,9 +213,6 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
+
+ TTY_OTHER_CLOSED Device is a pty and the other side has closed.
+
+-TTY_OTHER_DONE Device is a pty and the other side has closed and
+- all pending input processing has been completed.
+-
+ TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
+ smaller chunks.
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 6ee06ea47be4..77e4c10b4c06 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -228,13 +228,13 @@ F: kernel/sys_ni.c
+
+ ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
+ M: Hans de Goede <hdegoede@××××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/abituguru.c
+
+ ABIT UGURU 3 HARDWARE MONITOR DRIVER
+ M: Alistair John Strachan <alistair@××××××××××.uk>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/abituguru3.c
+
+@@ -386,14 +386,14 @@ F: Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
+
+ ADM1025 HARDWARE MONITOR DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/adm1025
+ F: drivers/hwmon/adm1025.c
+
+ ADM1029 HARDWARE MONITOR DRIVER
+ M: Corentin Labbe <clabbe.montjoie@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/adm1029.c
+
+@@ -438,7 +438,7 @@ F: drivers/video/backlight/adp8860_bl.c
+
+ ADS1015 HARDWARE MONITOR DRIVER
+ M: Dirk Eibach <eibach@×××××.de>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/ads1015
+ F: drivers/hwmon/ads1015.c
+@@ -451,7 +451,7 @@ F: drivers/macintosh/therm_adt746x.c
+
+ ADT7475 HARDWARE MONITOR DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/adt7475
+ F: drivers/hwmon/adt7475.c
+@@ -628,7 +628,7 @@ F: include/linux/ccp.h
+
+ AMD FAM15H PROCESSOR POWER MONITORING DRIVER
+ M: Huang Rui <ray.huang@×××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Supported
+ F: Documentation/hwmon/fam15h_power
+ F: drivers/hwmon/fam15h_power.c
+@@ -786,7 +786,7 @@ F: drivers/input/mouse/bcm5974.c
+
+ APPLE SMC DRIVER
+ M: Henrik Rydberg <rydberg@×××××××.org>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Odd fixes
+ F: drivers/hwmon/applesmc.c
+
+@@ -1825,7 +1825,7 @@ F: include/media/i2c/as3645a.h
+
+ ASC7621 HARDWARE MONITOR DRIVER
+ M: George Joseph <george.joseph@×××××××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/asc7621
+ F: drivers/hwmon/asc7621.c
+@@ -1918,7 +1918,7 @@ F: drivers/net/wireless/ath/carl9170/
+
+ ATK0110 HWMON DRIVER
+ M: Luca Tettamanti <kronos.it@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/asus_atk0110.c
+
+@@ -3037,7 +3037,7 @@ F: mm/swap_cgroup.c
+
+ CORETEMP HARDWARE MONITORING DRIVER
+ M: Fenghua Yu <fenghua.yu@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/coretemp
+ F: drivers/hwmon/coretemp.c
+@@ -3625,7 +3625,7 @@ T: git git://git.infradead.org/users/vkoul/slave-dma.git
+
+ DME1737 HARDWARE MONITOR DRIVER
+ M: Juerg Haefliger <juergh@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/dme1737
+ F: drivers/hwmon/dme1737.c
+@@ -4163,8 +4163,8 @@ F: Documentation/efi-stub.txt
+ F: arch/ia64/kernel/efi.c
+ F: arch/x86/boot/compressed/eboot.[ch]
+ F: arch/x86/include/asm/efi.h
+-F: arch/x86/platform/efi/*
+-F: drivers/firmware/efi/*
++F: arch/x86/platform/efi/
++F: drivers/firmware/efi/
+ F: include/linux/efi*.h
+
+ EFI VARIABLE FILESYSTEM
+@@ -4322,7 +4322,7 @@ F: include/video/exynos_mipi*
+
+ F71805F HARDWARE MONITORING DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/f71805f
+ F: drivers/hwmon/f71805f.c
+@@ -4401,7 +4401,7 @@ F: fs/*
+
+ FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
+ M: Riku Voipio <riku.voipio@×××.fi>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/f75375s.c
+ F: include/linux/f75375s.h
+@@ -4958,8 +4958,8 @@ F: drivers/media/usb/hackrf/
+ HARDWARE MONITORING
+ M: Jean Delvare <jdelvare@××××.com>
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
+-W: http://www.lm-sensors.org/
++L: linux-hwmon@×××××××××××.org
++W: http://hwmon.wiki.kernel.org/
+ T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+ S: Maintained
+@@ -5484,7 +5484,7 @@ F: drivers/usb/atm/ueagle-atm.c
+
+ INA209 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/ina209
+ F: Documentation/devicetree/bindings/i2c/ina209.txt
+@@ -5492,7 +5492,7 @@ F: drivers/hwmon/ina209.c
+
+ INA2XX HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/ina2xx
+ F: drivers/hwmon/ina2xx.c
+@@ -5985,7 +5985,7 @@ F: drivers/isdn/hardware/eicon/
+
+ IT87 HARDWARE MONITORING DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/it87
+ F: drivers/hwmon/it87.c
+@@ -6021,7 +6021,7 @@ F: drivers/media/dvb-frontends/ix2505v*
+
+ JC42.4 TEMPERATURE SENSOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/jc42.c
+ F: Documentation/hwmon/jc42
+@@ -6071,14 +6071,14 @@ F: drivers/tty/serial/jsm/
+
+ K10TEMP HARDWARE MONITORING DRIVER
+ M: Clemens Ladisch <clemens@×××××××.de>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/k10temp
+ F: drivers/hwmon/k10temp.c
+
+ K8TEMP HARDWARE MONITORING DRIVER
+ M: Rudolf Marek <r.marek@×××××××××.cz>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/k8temp
+ F: drivers/hwmon/k8temp.c
+@@ -6605,27 +6605,27 @@ F: net/llc/
+
+ LM73 HARDWARE MONITOR DRIVER
+ M: Guillaume Ligneul <guillaume.ligneul@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/lm73.c
+
+ LM78 HARDWARE MONITOR DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/lm78
+ F: drivers/hwmon/lm78.c
+
+ LM83 HARDWARE MONITOR DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/lm83
+ F: drivers/hwmon/lm83.c
+
+ LM90 HARDWARE MONITOR DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/lm90
+ F: Documentation/devicetree/bindings/hwmon/lm90.txt
+@@ -6633,7 +6633,7 @@ F: drivers/hwmon/lm90.c
+
+ LM95234 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/lm95234
+ F: drivers/hwmon/lm95234.c
+@@ -6700,7 +6700,7 @@ F: drivers/scsi/sym53c8xx_2/
+
+ LTC4261 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/ltc4261
+ F: drivers/hwmon/ltc4261.c
+@@ -6870,28 +6870,28 @@ F: include/uapi/linux/matroxfb.h
+
+ MAX16065 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/max16065
+ F: drivers/hwmon/max16065.c
+
+ MAX20751 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/max20751
+ F: drivers/hwmon/max20751.c
+
+ MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
+ M: "Hans J. Koch" <hjk@×××××××××.de>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/max6650
+ F: drivers/hwmon/max6650.c
+
+ MAX6697 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/max6697
+ F: Documentation/devicetree/bindings/i2c/max6697.txt
+@@ -7455,7 +7455,7 @@ F: drivers/scsi/NCR_D700.*
+
+ NCT6775 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/nct6775
+ F: drivers/hwmon/nct6775.c
+@@ -8235,7 +8235,7 @@ F: drivers/video/logo/logo_parisc*
+
+ PC87360 HARDWARE MONITORING DRIVER
+ M: Jim Cromie <jim.cromie@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/pc87360
+ F: drivers/hwmon/pc87360.c
+@@ -8247,7 +8247,7 @@ F: drivers/char/pc8736x_gpio.c
+
+ PC87427 HARDWARE MONITORING DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/pc87427
+ F: drivers/hwmon/pc87427.c
+@@ -8601,8 +8601,8 @@ F: drivers/rtc/rtc-puv3.c
+
+ PMBUS HARDWARE MONITORING DRIVERS
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
+-W: http://www.lm-sensors.org/
++L: linux-hwmon@×××××××××××.org
++W: http://hwmon.wiki.kernel.org/
+ W: http://www.roeck-us.net/linux/drivers/
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+ S: Maintained
+@@ -8807,7 +8807,7 @@ F: drivers/media/usb/pwc/*
+
+ PWM FAN DRIVER
+ M: Kamil Debski <k.debski@×××××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Supported
+ F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+ F: Documentation/hwmon/pwm-fan
+@@ -10113,28 +10113,28 @@ F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+
+ SMM665 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/smm665
+ F: drivers/hwmon/smm665.c
+
+ SMSC EMC2103 HARDWARE MONITOR DRIVER
+ M: Steve Glendinning <steve.glendinning@×××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/emc2103
+ F: drivers/hwmon/emc2103.c
+
+ SMSC SCH5627 HARDWARE MONITOR DRIVER
+ M: Hans de Goede <hdegoede@××××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Supported
+ F: Documentation/hwmon/sch5627
+ F: drivers/hwmon/sch5627.c
+
+ SMSC47B397 HARDWARE MONITOR DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/smsc47b397
+ F: drivers/hwmon/smsc47b397.c
+@@ -11067,7 +11067,7 @@ F: include/linux/mmc/sh_mobile_sdhi.h
+
+ TMP401 HARDWARE MONITOR DRIVER
+ M: Guenter Roeck <linux@××××××××.net>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/tmp401
+ F: drivers/hwmon/tmp401.c
+@@ -11812,14 +11812,14 @@ F: Documentation/networking/vrf.txt
+
+ VT1211 HARDWARE MONITOR DRIVER
+ M: Juerg Haefliger <juergh@×××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/vt1211
+ F: drivers/hwmon/vt1211.c
+
+ VT8231 HARDWARE MONITOR DRIVER
+ M: Roger Lucas <vt8231@×××××××××××××××.uk>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/vt8231.c
+
+@@ -11838,21 +11838,21 @@ F: drivers/w1/
+
+ W83791D HARDWARE MONITORING DRIVER
+ M: Marc Hulsman <m.hulsman@×××××××.nl>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/w83791d
+ F: drivers/hwmon/w83791d.c
+
+ W83793 HARDWARE MONITORING DRIVER
+ M: Rudolf Marek <r.marek@×××××××××.cz>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: Documentation/hwmon/w83793
+ F: drivers/hwmon/w83793.c
+
+ W83795 HARDWARE MONITORING DRIVER
+ M: Jean Delvare <jdelvare@××××.com>
+-L: lm-sensors@××××××××××.org
++L: linux-hwmon@×××××××××××.org
+ S: Maintained
+ F: drivers/hwmon/w83795.c
+
+diff --git a/Makefile b/Makefile
+index 7b3ecdcdc6c1..07a1786f695a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 0
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+@@ -688,9 +688,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+ KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+ else
+
+-# This warning generated too much noise in a regular build.
+-# Use make W=1 to enable this warning (see scripts/Makefile.build)
++# These warnings generated too much noise in a regular build.
++# Use make W=1 to enable them (see scripts/Makefile.build)
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+ endif
+
+ ifdef CONFIG_FRAME_POINTER
+diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
+index 44a578c10732..ab5d5701e11d 100644
+--- a/arch/arc/boot/dts/axs10x_mb.dtsi
++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
+@@ -47,6 +47,14 @@
+ clocks = <&apbclk>;
+ clock-names = "stmmaceth";
+ max-speed = <100>;
++ mdio0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "snps,dwmac-mdio";
++ phy1: ethernet-phy@1 {
++ reg = <1>;
++ };
++ };
+ };
+
+ ehci@0x40000 {
+diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
+index 57c1f33844d4..0352fb8d21b9 100644
+--- a/arch/arc/include/asm/bitops.h
++++ b/arch/arc/include/asm/bitops.h
+@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+ \
+ m += nr >> 5; \
+ \
+- /* \
+- * ARC ISA micro-optimization: \
+- * \
+- * Instructions dealing with bitpos only consider lower 5 bits \
+- * e.g (x << 33) is handled like (x << 1) by ASL instruction \
+- * (mem pointer still needs adjustment to point to next word) \
+- * \
+- * Hence the masking to clamp @nr arg can be elided in general. \
+- * \
+- * However if @nr is a constant (above assumed in a register), \
+- * and greater than 31, gcc can optimize away (x << 33) to 0, \
+- * as overflow, given the 32-bit ISA. Thus masking needs to be \
+- * done for const @nr, but no code is generated due to gcc \
+- * const prop. \
+- */ \
+ nr &= 0x1f; \
+ \
+ __asm__ __volatile__( \
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index 694ece8a0243..cb69299a492e 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -13,6 +13,15 @@
+ #include <asm/byteorder.h>
+ #include <asm/page.h>
+
++#ifdef CONFIG_ISA_ARCV2
++#include <asm/barrier.h>
++#define __iormb() rmb()
++#define __iowmb() wmb()
++#else
++#define __iormb() do { } while (0)
++#define __iowmb() do { } while (0)
++#endif
++
+ extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+ extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
+ #define ioremap_wc(phy, sz) ioremap(phy, sz)
+ #define ioremap_wt(phy, sz) ioremap(phy, sz)
+
++/*
++ * io{read,write}{16,32}be() macros
++ */
++#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
++#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
++
++#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
++#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
++
+ /* Change struct page to physical address */
+ #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+
+ }
+
+-#ifdef CONFIG_ISA_ARCV2
+-#include <asm/barrier.h>
+-#define __iormb() rmb()
+-#define __iowmb() wmb()
+-#else
+-#define __iormb() do { } while (0)
+-#define __iowmb() do { } while (0)
+-#endif
+-
+ /*
+ * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+ * Based on ARM model for the typical use case
+@@ -129,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+
+ /*
+- * Relaxed API for drivers which can handle any ordering themselves
++ * Relaxed API for drivers which can handle barrier ordering themselves
++ *
++ * Also these are defined to perform little endian accesses.
++ * To provide the typical device register semantics of fixed endian,
++ * swap the byte order for Big Endian
++ *
++ * http://lkml.kernel.org/r/201603100845.30602.arnd@×××××.de
+ */
+ #define readb_relaxed(c) __raw_readb(c)
+-#define readw_relaxed(c) __raw_readw(c)
+-#define readl_relaxed(c) __raw_readl(c)
++#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
++ __raw_readw(c)); __r; })
++#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
++ __raw_readl(c)); __r; })
+
+ #define writeb_relaxed(v,c) __raw_writeb(v,c)
+-#define writew_relaxed(v,c) __raw_writew(v,c)
+-#define writel_relaxed(v,c) __raw_writel(v,c)
++#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
++#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+
+ #include <asm-generic/io.h>
+
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 1fafaad516ba..97471d62d5e4 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -860,7 +860,7 @@
+ ti,no-idle-on-init;
+ reg = <0x50000000 0x2000>;
+ interrupts = <100>;
+- dmas = <&edma 52>;
++ dmas = <&edma 52 0>;
+ dma-names = "rxtx";
+ gpmc,num-cs = <7>;
+ gpmc,num-waitpins = <2>;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 92068fbf8b57..6bd38a28e26c 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -207,7 +207,7 @@
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ <&edma_tptc2 0>;
+
+- ti,edma-memcpy-channels = <32 33>;
++ ti,edma-memcpy-channels = <58 59>;
+ };
+
+ edma_tptc0: tptc@49800000 {
+@@ -884,7 +884,7 @@
+ gpmc: gpmc@50000000 {
+ compatible = "ti,am3352-gpmc";
+ ti,hwmods = "gpmc";
+- dmas = <&edma 52>;
++ dmas = <&edma 52 0>;
+ dma-names = "rxtx";
+ clocks = <&l3s_gclk>;
+ clock-names = "fck";
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index d580e2b70f9a..637dc5dbc8ac 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -792,3 +792,8 @@
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ };
++
++&synctimer_32kclk {
++ assigned-clocks = <&mux_synctimer32k_ck>;
++ assigned-clock-parents = <&clkdiv32k_ick>;
++};
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 7ccce7529b0c..cc952cf8ec30 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -529,7 +529,7 @@
+ };
+
+ sata@a0000 {
+- compatible = "marvell,orion-sata";
++ compatible = "marvell,armada-370-sata";
+ reg = <0xa0000 0x5000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 3710755c6d76..85d2c377c332 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -117,7 +117,7 @@
+ };
+
+ /* USB part of the eSATA/USB 2.0 port */
+- usb@50000 {
++ usb@58000 {
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index ff888d21c786..f3e2b96c06a3 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -303,6 +303,7 @@
+ regulator-name = "mmc0-card-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
+ };
+
+ gpio_keys {
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index 569026e8f96c..da84e65b56ef 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -268,5 +268,6 @@
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_reg>;
++ regulator-always-on;
+ };
+ };
+diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
+index 0827d594b1f0..cd0cd5fd09a3 100644
+--- a/arch/arm/boot/dts/at91sam9x5.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
+@@ -106,7 +106,7 @@
+
+ pmc: pmc@fffffc00 {
+ compatible = "atmel,at91sam9x5-pmc", "syscon";
+- reg = <0xfffffc00 0x100>;
++ reg = <0xfffffc00 0x200>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ interrupt-controller;
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index cf6998a0804d..564341af7e97 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -30,7 +30,7 @@
+ reg = <0x43100000 90>;
+ interrupts = <45>;
+ clocks = <&clks CLK_NAND>;
+- dmas = <&pdma 97>;
++ dmas = <&pdma 97 3>;
+ dma-names = "data";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index ed521e85e208..e8bc7e8bedd2 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -665,7 +665,7 @@
+ };
+
+ sata0: sata@29000000 {
+- compatible = "generic-ahci";
++ compatible = "qcom,apq8064-ahci", "generic-ahci";
+ status = "disabled";
+ reg = <0x29000000 0x180>;
+ interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>;
+@@ -687,6 +687,7 @@
+
+ phys = <&sata_phy0>;
+ phy-names = "sata-phy";
++ ports-implemented = <0x1>;
+ };
+
+ /* Temporary fixed regulator */
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 7d0cba6f1cc5..c86ea8aac203 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
+ pr_info("CPU: div instructions available: patching division code\n");
+
+ fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
++ asm ("" : "+g" (fn_addr));
+ ((u32 *)fn_addr)[0] = udiv_instruction();
+ ((u32 *)fn_addr)[1] = bx_lr_instruction();
+ flush_icache_range(fn_addr, fn_addr + 8);
+
+ fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
++ asm ("" : "+g" (fn_addr));
+ ((u32 *)fn_addr)[0] = sdiv_instruction();
+ ((u32 *)fn_addr)[1] = bx_lr_instruction();
+ flush_icache_range(fn_addr, fn_addr + 8);
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index aba61fd3697a..88323ffb1ee0 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -886,11 +886,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+ VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+ old_pmd = *pmd;
+- kvm_set_pmd(pmd, *new_pmd);
+- if (pmd_present(old_pmd))
++ if (pmd_present(old_pmd)) {
++ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+- else
++ } else {
+ get_page(virt_to_page(pmd));
++ }
++
++ kvm_set_pmd(pmd, *new_pmd);
+ return 0;
+ }
+
+@@ -939,12 +942,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+
+ /* Create 2nd stage page table mapping - Level 3 */
+ old_pte = *pte;
+- kvm_set_pte(pte, *new_pte);
+- if (pte_present(old_pte))
++ if (pte_present(old_pte)) {
++ kvm_set_pte(pte, __pte(0));
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+- else
++ } else {
+ get_page(virt_to_page(pte));
++ }
+
++ kvm_set_pte(pte, *new_pte);
+ return 0;
+ }
+
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 47905a50e075..318394ed5c7a 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
+ u32 mask = (0x1ull << (size * 8)) - 1;
+ int shift = (where % 4) * 8;
+
+- v = readl_relaxed(base + (where & 0xffc));
++ v = readl_relaxed(base);
+
+ v &= ~(mask << shift);
+ v |= (val & mask) << shift;
+
+- writel_relaxed(v, base + (where & 0xffc));
+- readl_relaxed(base + (where & 0xffc));
++ writel_relaxed(v, base);
++ readl_relaxed(base);
+ }
+
+ static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 652a0bb11578..5189bcecad12 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
+ select S5P_DEV_MFC
+ select SRAM
+ select THERMAL
++ select THERMAL_OF
+ select MFD_SYSCON
+ select CLKSRC_EXYNOS_MCT
+ select POWER_RESET
+diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
+index 7c21760f590f..875a2bab64f6 100644
+--- a/arch/arm/mach-exynos/pm_domains.c
++++ b/arch/arm/mach-exynos/pm_domains.c
+@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+ if (IS_ERR(pd->clk[i]))
+ break;
+
+- if (IS_ERR(pd->clk[i]))
++ if (IS_ERR(pd->pclk[i]))
+ continue; /* Skip on first power up */
+ if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+ pr_err("%s: error setting parent to clock%d\n",
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ .safe_state_index = 0,
+ };
+
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++ .name = "omap3430_idle",
++ .owner = THIS_MODULE,
++ .states = {
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 110 + 162,
++ .target_residency = 5,
++ .name = "C1",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 106 + 180,
++ .target_residency = 309,
++ .name = "C2",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 107 + 410,
++ .target_residency = 46057,
++ .name = "C3",
++ .desc = "MPU RET + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 121 + 3374,
++ .target_residency = 46057,
++ .name = "C4",
++ .desc = "MPU OFF + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 855 + 1146,
++ .target_residency = 46057,
++ .name = "C5",
++ .desc = "MPU RET + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7580 + 4134,
++ .target_residency = 484329,
++ .name = "C6",
++ .desc = "MPU OFF + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7505 + 15274,
++ .target_residency = 484329,
++ .name = "C7",
++ .desc = "MPU OFF + CORE OFF",
++ },
++ },
++ .state_count = ARRAY_SIZE(omap3_idle_data),
++ .safe_state_index = 0,
++};
++
+ /* Public functions */
+
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ return -ENODEV;
+
+- return cpuidle_register(&omap3_idle_driver, NULL);
++ if (cpu_is_omap3430())
++ return cpuidle_register(&omap3430_idle_driver, NULL);
++ else
++ return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 3c87e40650cf..9821be6dfd5e 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
+ void __init dra7xx_map_io(void)
+ {
+ iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
++ omap_barriers_init();
+ }
+ #endif
+ /*
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index b6d62e4cdfdd..2af6ff63e3b4 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ (sf & SYSC_HAS_CLOCKACTIVITY))
+ _set_clockactivity(oh, oh->class->sysc->clockact, &v);
+
+- /* If the cached value is the same as the new value, skip the write */
+- if (oh->_sysc_cache != v)
+- _write_sysconfig(v, oh);
++ _write_sysconfig(v, oh);
+
+ /*
+ * Set the autoidle bit only after setting the smartidle bit
+@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ _set_master_standbymode(oh, idlemode, &v);
+ }
+
+- _write_sysconfig(v, oh);
++ /* If the cached value is the same as the new value, skip the write */
++ if (oh->_sysc_cache != v)
++ _write_sysconfig(v, oh);
+ }
+
+ /**
+diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
+index f998eb1c698e..0cf4426183cf 100644
+--- a/arch/arm/mach-prima2/Kconfig
++++ b/arch/arm/mach-prima2/Kconfig
+@@ -2,6 +2,7 @@ menuconfig ARCH_SIRF
+ bool "CSR SiRF"
+ depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
++ select RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_IRQ_CHIP
+ select NO_IOPORT_MAP
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index 5d94b7a2fb10..c160fa3007e9 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -13,6 +13,7 @@
+ #include <asm/assembler.h>
+
+ .arch armv7-a
++ .arm
+
+ ENTRY(secondary_trampoline)
+ /* CPU1 will always fetch from 0x0 when it is brought out of reset.
+diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
+index 4e603ea36ad3..123f45d92cd1 100644
+--- a/arch/arm64/include/asm/opcodes.h
++++ b/arch/arm64/include/asm/opcodes.h
+@@ -1 +1,5 @@
++#ifdef CONFIG_CPU_BIG_ENDIAN
++#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
++#endif
++
+ #include <../../arm/include/asm/opcodes.h>
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index 5c25b831273d..9786f770088d 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -133,7 +133,6 @@
+ * Section
+ */
+ #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+ #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+ #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
+ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 819aff5d593f..137d3bf88e2a 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -279,7 +279,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+ {
+- if (pte_valid(pte)) {
++ if (pte_present(pte)) {
+ if (pte_sw_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+@@ -356,6 +356,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
+ #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
++#define pmd_present(pmd) pte_present(pmd_pte(pmd))
+ #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
+ #define pmd_young(pmd) pte_young(pmd_pte(pmd))
+ #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+@@ -364,7 +365,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
+ #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
+ #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+ #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+
+ #define __HAVE_ARCH_PMD_WRITE
+ #define pmd_write(pmd) pte_write(pmd_pte(pmd))
+@@ -403,7 +404,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+
+ #define pmd_none(pmd) (!pmd_val(pmd))
+-#define pmd_present(pmd) (pmd_val(pmd))
+
+ #define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+
+@@ -547,6 +547,21 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ }
+
+ #ifdef CONFIG_ARM64_HW_AFDBM
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++extern int ptep_set_access_flags(struct vm_area_struct *vma,
++ unsigned long address, pte_t *ptep,
++ pte_t entry, int dirty);
++
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
++static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
++ unsigned long address, pmd_t *pmdp,
++ pmd_t entry, int dirty)
++{
++ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
++}
++#endif
++
+ /*
+ * Atomic pte/pmd modifications.
+ */
+@@ -599,9 +614,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+- unsigned long address, pmd_t *pmdp)
++#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
++static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
++ unsigned long address, pmd_t *pmdp)
+ {
+ return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+ }
+@@ -649,6 +664,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+ * bits 0-1: present (must be zero)
+ * bits 2-7: swap type
+ * bits 8-57: swap offset
++ * bit 58: PTE_PROT_NONE (must be zero)
+ */
+ #define __SWP_TYPE_SHIFT 2
+ #define __SWP_TYPE_BITS 6
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index 212ae6361d8b..a5f234039616 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -85,7 +85,8 @@ static const char *const compat_hwcap_str[] = {
+ "idivt",
+ "vfpd32",
+ "lpae",
+- "evtstrm"
++ "evtstrm",
++ NULL
+ };
+
+ static const char *const compat_hwcap2_str[] = {
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index c536c9e307b9..0931155f6406 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
+
+ /* EL1 Single Step Handler hooks */
+ static LIST_HEAD(step_hook);
+-static DEFINE_RWLOCK(step_hook_lock);
++static DEFINE_SPINLOCK(step_hook_lock);
+
+ void register_step_hook(struct step_hook *hook)
+ {
+- write_lock(&step_hook_lock);
+- list_add(&hook->node, &step_hook);
+- write_unlock(&step_hook_lock);
++ spin_lock(&step_hook_lock);
++ list_add_rcu(&hook->node, &step_hook);
++ spin_unlock(&step_hook_lock);
+ }
+
+ void unregister_step_hook(struct step_hook *hook)
+ {
+- write_lock(&step_hook_lock);
+- list_del(&hook->node);
+- write_unlock(&step_hook_lock);
++ spin_lock(&step_hook_lock);
++ list_del_rcu(&hook->node);
++ spin_unlock(&step_hook_lock);
++ synchronize_rcu();
+ }
+
+ /*
+@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+- read_lock(&step_hook_lock);
++ rcu_read_lock();
+
+- list_for_each_entry(hook, &step_hook, node) {
++ list_for_each_entry_rcu(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+- read_unlock(&step_hook_lock);
++ rcu_read_unlock();
+
+ return retval;
+ }
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 4d1ac81870d2..e9e0e6db73f6 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -162,7 +162,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
+ esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
+
+ if (!is_iabt)
+- esr |= ESR_ELx_EC_DABT_LOW;
++ esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
+
+ vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
+ }
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index abe2a9542b3a..a26e3acea6a9 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -81,6 +81,56 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
+ printk("\n");
+ }
+
++#ifdef CONFIG_ARM64_HW_AFDBM
++/*
++ * This function sets the access flags (dirty, accessed), as well as write
++ * permission, and only to a more permissive setting.
++ *
++ * It needs to cope with hardware update of the accessed/dirty state by other
++ * agents in the system and can safely skip the __sync_icache_dcache() call as,
++ * like set_pte_at(), the PTE is never changed from no-exec to exec here.
++ *
++ * Returns whether or not the PTE actually changed.
++ */
++int ptep_set_access_flags(struct vm_area_struct *vma,
++ unsigned long address, pte_t *ptep,
++ pte_t entry, int dirty)
++{
++ pteval_t old_pteval;
++ unsigned int tmp;
++
++ if (pte_same(*ptep, entry))
++ return 0;
++
++ /* only preserve the access flags and write permission */
++ pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
++
++ /*
++ * PTE_RDONLY is cleared by default in the asm below, so set it in
++ * back if necessary (read-only or clean PTE).
++ */
++ if (!pte_write(entry) || !dirty)
++ pte_val(entry) |= PTE_RDONLY;
++
++ /*
++ * Setting the flags must be done atomically to avoid racing with the
++ * hardware update of the access/dirty state.
++ */
++ asm volatile("// ptep_set_access_flags\n"
++ " prfm pstl1strm, %2\n"
++ "1: ldxr %0, %2\n"
++ " and %0, %0, %3 // clear PTE_RDONLY\n"
++ " orr %0, %0, %4 // set flags\n"
++ " stxr %w1, %0, %2\n"
++ " cbnz %w1, 1b\n"
++ : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
++ : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
++
++ flush_tlb_fix_spurious_fault(vma, address);
++ return 1;
++}
++#endif
++
+ /*
+ * The kernel tried to access some page that wasn't present.
+ */
+diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
+index a865d2a04f75..5de673ac9cb1 100644
+--- a/arch/ia64/include/asm/io.h
++++ b/arch/ia64/include/asm/io.h
+@@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
+ return ioremap(phys_addr, size);
+ }
+ #define ioremap_cache ioremap_cache
++#define ioremap_uc ioremap_nocache
+
+
+ /*
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index bdeed9d13c6f..433c4b9a9f0a 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
+ if (board == BCSR_WHOAMI_DB1500) {
+ c0 = AU1500_GPIO2_INT;
+ c1 = AU1500_GPIO5_INT;
+- d0 = AU1500_GPIO0_INT;
+- d1 = AU1500_GPIO3_INT;
++ d0 = 0; /* GPIO number, NOT irq! */
++ d1 = 3; /* GPIO number, NOT irq! */
+ s0 = AU1500_GPIO1_INT;
+ s1 = AU1500_GPIO4_INT;
+ } else if (board == BCSR_WHOAMI_DB1100) {
+ c0 = AU1100_GPIO2_INT;
+ c1 = AU1100_GPIO5_INT;
+- d0 = AU1100_GPIO0_INT;
+- d1 = AU1100_GPIO3_INT;
++ d0 = 0; /* GPIO number, NOT irq! */
++ d1 = 3; /* GPIO number, NOT irq! */
+ s0 = AU1100_GPIO1_INT;
+ s1 = AU1100_GPIO4_INT;
+
+@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
+ } else if (board == BCSR_WHOAMI_DB1000) {
+ c0 = AU1000_GPIO2_INT;
+ c1 = AU1000_GPIO5_INT;
+- d0 = AU1000_GPIO0_INT;
+- d1 = AU1000_GPIO3_INT;
++ d0 = 0; /* GPIO number, NOT irq! */
++ d1 = 3; /* GPIO number, NOT irq! */
+ s0 = AU1000_GPIO1_INT;
+ s1 = AU1000_GPIO4_INT;
+ platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
+ } else if ((board == BCSR_WHOAMI_PB1500) ||
+ (board == BCSR_WHOAMI_PB1500R2)) {
+ c0 = AU1500_GPIO203_INT;
+- d0 = AU1500_GPIO201_INT;
++ d0 = 1; /* GPIO number, NOT irq! */
+ s0 = AU1500_GPIO202_INT;
+ twosocks = 0;
+ flashsize = 64;
+@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
+ */
+ } else if (board == BCSR_WHOAMI_PB1100) {
+ c0 = AU1100_GPIO11_INT;
+- d0 = AU1100_GPIO9_INT;
++ d0 = 9; /* GPIO number, NOT irq! */
+ s0 = AU1100_GPIO10_INT;
+ twosocks = 0;
+ flashsize = 64;
+@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
+ } else
+ return 0; /* unknown board, no further dev setup to do */
+
+- irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
+
+@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
+ c0, d0, /*s0*/0, 0, 0);
+
+ if (twosocks) {
+- irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
+
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index b518f029f5e7..1c01d6eadb08 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
+- AU1550_GPIO3_INT, AU1550_GPIO0_INT,
++ AU1550_GPIO3_INT, 0,
+ /*AU1550_GPIO21_INT*/0, 0, 0);
+
+ db1x_register_pcmcia_socket(
+@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
+- AU1550_GPIO5_INT, AU1550_GPIO1_INT,
++ AU1550_GPIO5_INT, 1,
+ /*AU1550_GPIO22_INT*/0, 0, 1);
+
+ platform_device_register(&db1550_nand_dev);
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index f6b12790716c..942b8f6bf35b 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -747,7 +747,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+
+ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
+ void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
+-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
+ void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+ int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index 490cea569d57..5c62065cbf22 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ {
+ union mips_instruction insn;
+ unsigned long value;
+- unsigned int res;
++ unsigned int res, preempted;
+ unsigned long origpc;
+ unsigned long orig31;
+ void __user *fault_addr = NULL;
+@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+ goto sigbus;
+
+- /*
+- * Disable preemption to avoid a race between copying
+- * state from userland, migrating to another CPU and
+- * updating the hardware vector register below.
+- */
+- preempt_disable();
+-
+- res = __copy_from_user_inatomic(fpr, addr,
+- sizeof(*fpr));
+- if (res)
+- goto fault;
+-
+- /*
+- * Update the hardware register if it is in use by the
+- * task in this quantum, in order to avoid having to
+- * save & restore the whole vector context.
+- */
+- if (test_thread_flag(TIF_USEDMSA))
+- write_msa_wr(wd, fpr, df);
++ do {
++ /*
++ * If we have live MSA context keep track of
++ * whether we get preempted in order to avoid
++ * the register context we load being clobbered
++ * by the live context as it's saved during
++ * preemption. If we don't have live context
++ * then it can't be saved to clobber the value
++ * we load.
++ */
++ preempted = test_thread_flag(TIF_USEDMSA);
++
++ res = __copy_from_user_inatomic(fpr, addr,
++ sizeof(*fpr));
++ if (res)
++ goto fault;
+
+- preempt_enable();
++ /*
++ * Update the hardware register if it is in use
++ * by the task in this quantum, in order to
++ * avoid having to save & restore the whole
++ * vector context.
++ */
++ preempt_disable();
++ if (test_thread_flag(TIF_USEDMSA)) {
++ write_msa_wr(wd, fpr, df);
++ preempted = 0;
++ }
++ preempt_enable();
++ } while (preempted);
+ break;
+
+ case msa_st_op:
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index b37954cc880d..b8b7860ec1a8 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+ */
+ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ {
+- ktime_t expires;
++ struct mips_coproc *cop0 = vcpu->arch.cop0;
++ ktime_t expires, threshold;
++ uint32_t count, compare;
+ int running;
+
+- /* Is the hrtimer pending? */
++ /* Calculate the biased and scaled guest CP0_Count */
++ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
++ compare = kvm_read_c0_guest_compare(cop0);
++
++ /*
++ * Find whether CP0_Count has reached the closest timer interrupt. If
++ * not, we shouldn't inject it.
++ */
++ if ((int32_t)(count - compare) < 0)
++ return count;
++
++ /*
++ * The CP0_Count we're going to return has already reached the closest
++ * timer interrupt. Quickly check if it really is a new interrupt by
++ * looking at whether the interval until the hrtimer expiry time is
++ * less than 1/4 of the timer period.
++ */
+ expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+- if (ktime_compare(now, expires) >= 0) {
++ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
++ if (ktime_before(expires, threshold)) {
+ /*
+ * Cancel it while we handle it so there's no chance of
+ * interference with the timeout handler.
+@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ }
+ }
+
+- /* Return the biased and scaled guest CP0_Count */
+- return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
++ return count;
+ }
+
+ /**
+@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+ }
+
+ /**
+- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
+- * @vcpu: Virtual CPU.
+- *
+- * Recalculates and updates the expiry time of the hrtimer. This can be used
+- * after timer parameters have been altered which do not depend on the time that
+- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
+- * kvm_mips_resume_hrtimer() are used directly).
+- *
+- * It is guaranteed that no timer interrupts will be lost in the process.
+- *
+- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+- */
+-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
+-{
+- ktime_t now;
+- uint32_t count;
+-
+- /*
+- * freeze_hrtimer takes care of a timer interrupts <= count, and
+- * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
+- */
+- now = kvm_mips_freeze_hrtimer(vcpu, &count);
+- kvm_mips_resume_hrtimer(vcpu, now, count);
+-}
+-
+-/**
+ * kvm_mips_write_count() - Modify the count and update timer.
+ * @vcpu: Virtual CPU.
+ * @count: Guest CP0_Count value to set.
+@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+ * kvm_mips_write_compare() - Modify compare and update timer.
+ * @vcpu: Virtual CPU.
+ * @compare: New CP0_Compare value.
++ * @ack: Whether to acknowledge timer interrupt.
+ *
+ * Update CP0_Compare to a new value and update the timeout.
++ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
++ * any pending timer interrupt is preserved.
+ */
+-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+ {
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
++ int dc;
++ u32 old_compare = kvm_read_c0_guest_compare(cop0);
++ ktime_t now;
++ uint32_t count;
+
+ /* if unchanged, must just be an ack */
+- if (kvm_read_c0_guest_compare(cop0) == compare)
++ if (old_compare == compare) {
++ if (!ack)
++ return;
++ kvm_mips_callbacks->dequeue_timer_int(vcpu);
++ kvm_write_c0_guest_compare(cop0, compare);
+ return;
++ }
1625 ++
1626 ++ /* freeze_hrtimer() takes care of timer interrupts <= count */
1627 ++ dc = kvm_mips_count_disabled(vcpu);
1628 ++ if (!dc)
1629 ++ now = kvm_mips_freeze_hrtimer(vcpu, &count);
1630 ++
1631 ++ if (ack)
1632 ++ kvm_mips_callbacks->dequeue_timer_int(vcpu);
1633 +
1634 +- /* Update compare */
1635 + kvm_write_c0_guest_compare(cop0, compare);
1636 +
1637 +- /* Update timeout if count enabled */
1638 +- if (!kvm_mips_count_disabled(vcpu))
1639 +- kvm_mips_update_hrtimer(vcpu);
1640 ++ /* resume_hrtimer() takes care of timer interrupts > count */
1641 ++ if (!dc)
1642 ++ kvm_mips_resume_hrtimer(vcpu, now, count);
1643 + }
1644 +
1645 + /**
1646 +@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
1647 +
1648 + /* If we are writing to COMPARE */
1649 + /* Clear pending timer interrupt, if any */
1650 +- kvm_mips_callbacks->dequeue_timer_int(vcpu);
1651 + kvm_mips_write_compare(vcpu,
1652 +- vcpu->arch.gprs[rt]);
1653 ++ vcpu->arch.gprs[rt],
1654 ++ true);
1655 + } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1656 + unsigned int old_val, val, change;
1657 +
1658 +diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
1659 +index ad988000563f..1664589d4746 100644
1660 +--- a/arch/mips/kvm/trap_emul.c
1661 ++++ b/arch/mips/kvm/trap_emul.c
1662 +@@ -546,7 +546,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
1663 + kvm_mips_write_count(vcpu, v);
1664 + break;
1665 + case KVM_REG_MIPS_CP0_COMPARE:
1666 +- kvm_mips_write_compare(vcpu, v);
1667 ++ kvm_mips_write_compare(vcpu, v, false);
1668 + break;
1669 + case KVM_REG_MIPS_CP0_CAUSE:
1670 + /*
1671 +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
1672 +index 14f655cf542e..86ed37671ef5 100644
1673 +--- a/arch/parisc/Kconfig
1674 ++++ b/arch/parisc/Kconfig
1675 +@@ -29,6 +29,7 @@ config PARISC
1676 + select TTY # Needed for pdc_cons.c
1677 + select HAVE_DEBUG_STACKOVERFLOW
1678 + select HAVE_ARCH_AUDITSYSCALL
1679 ++ select HAVE_ARCH_SECCOMP_FILTER
1680 + select ARCH_NO_COHERENT_DMA_MMAP
1681 +
1682 + help
1683 +diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
1684 +index 0448a2c8eafb..3387307cc33e 100644
1685 +--- a/arch/parisc/include/asm/compat.h
1686 ++++ b/arch/parisc/include/asm/compat.h
1687 +@@ -183,6 +183,13 @@ typedef struct compat_siginfo {
1688 + int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
1689 + int _fd;
1690 + } _sigpoll;
1691 ++
1692 ++ /* SIGSYS */
1693 ++ struct {
1694 ++ compat_uptr_t _call_addr; /* calling user insn */
1695 ++ int _syscall; /* triggering system call number */
1696 ++ compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
1697 ++ } _sigsys;
1698 + } _sifields;
1699 + } compat_siginfo_t;
1700 +
1701 +diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
1702 +index a5eba95d87fe..637ce8d6f375 100644
1703 +--- a/arch/parisc/include/asm/syscall.h
1704 ++++ b/arch/parisc/include/asm/syscall.h
1705 +@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
1706 + }
1707 + }
1708 +
1709 ++static inline void syscall_set_return_value(struct task_struct *task,
1710 ++ struct pt_regs *regs,
1711 ++ int error, long val)
1712 ++{
1713 ++ regs->gr[28] = error ? error : val;
1714 ++}
1715 ++
1716 ++static inline void syscall_rollback(struct task_struct *task,
1717 ++ struct pt_regs *regs)
1718 ++{
1719 ++ /* do nothing */
1720 ++}
1721 ++
1722 + static inline int syscall_get_arch(void)
1723 + {
1724 + int arch = AUDIT_ARCH_PARISC;
1725 +diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
1726 +index 0abdd4c607ed..1960b87c1c8b 100644
1727 +--- a/arch/parisc/include/asm/uaccess.h
1728 ++++ b/arch/parisc/include/asm/uaccess.h
1729 +@@ -76,6 +76,7 @@ struct exception_table_entry {
1730 + */
1731 + struct exception_data {
1732 + unsigned long fault_ip;
1733 ++ unsigned long fault_gp;
1734 + unsigned long fault_space;
1735 + unsigned long fault_addr;
1736 + };
1737 +diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
1738 +index d2f62570a7b1..78d30d2ea2d8 100644
1739 +--- a/arch/parisc/kernel/asm-offsets.c
1740 ++++ b/arch/parisc/kernel/asm-offsets.c
1741 +@@ -299,6 +299,7 @@ int main(void)
1742 + #endif
1743 + BLANK();
1744 + DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
1745 ++ DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
1746 + DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
1747 + DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
1748 + BLANK();
1749 +diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
1750 +index 568b2c61ea02..3cad8aadc69e 100644
1751 +--- a/arch/parisc/kernel/parisc_ksyms.c
1752 ++++ b/arch/parisc/kernel/parisc_ksyms.c
1753 +@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
1754 + EXPORT_SYMBOL(lclear_user);
1755 + EXPORT_SYMBOL(lstrnlen_user);
1756 +
1757 +-/* Global fixups */
1758 +-extern void fixup_get_user_skip_1(void);
1759 +-extern void fixup_get_user_skip_2(void);
1760 +-extern void fixup_put_user_skip_1(void);
1761 +-extern void fixup_put_user_skip_2(void);
1762 ++/* Global fixups - defined as int to avoid creation of function pointers */
1763 ++extern int fixup_get_user_skip_1;
1764 ++extern int fixup_get_user_skip_2;
1765 ++extern int fixup_put_user_skip_1;
1766 ++extern int fixup_put_user_skip_2;
1767 + EXPORT_SYMBOL(fixup_get_user_skip_1);
1768 + EXPORT_SYMBOL(fixup_get_user_skip_2);
1769 + EXPORT_SYMBOL(fixup_put_user_skip_1);
1770 +diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
1771 +index ce0b2b4075c7..8fb81a391599 100644
1772 +--- a/arch/parisc/kernel/ptrace.c
1773 ++++ b/arch/parisc/kernel/ptrace.c
1774 +@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1775 + long do_syscall_trace_enter(struct pt_regs *regs)
1776 + {
1777 + /* Do the secure computing check first. */
1778 +- secure_computing_strict(regs->gr[20]);
1779 ++ if (secure_computing() == -1)
1780 ++ return -1;
1781 +
1782 + if (test_thread_flag(TIF_SYSCALL_TRACE) &&
1783 + tracehook_report_syscall_entry(regs)) {
1784 +@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
1785 + regs->gr[23] & 0xffffffff);
1786 +
1787 + out:
1788 +- return regs->gr[20];
1789 ++ /*
1790 ++ * Sign extend the syscall number to 64bit since it may have been
1791 ++ * modified by a compat ptrace call
1792 ++ */
1793 ++ return (int) ((u32) regs->gr[20]);
1794 + }
1795 +
1796 + void do_syscall_trace_exit(struct pt_regs *regs)
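
The new return statement in do_syscall_trace_enter() truncates the syscall
number to 32 bits and sign-extends it back to the full register width, so a
-1 stored by a 32-bit ptracer reads back as -1 rather than 0xffffffff. The
same arithmetic as a runnable sketch (hypothetical function name):

#include <assert.h>
#include <stdint.h>

/* Same conversion as "return (int) ((u32) regs->gr[20]);" above. */
static int64_t sign_extend_syscall_nr(uint64_t gr20)
{
        return (int32_t)(uint32_t)gr20;
}

int main(void)
{
        /* A 32-bit -1 written by a compat ptracer must stay -1... */
        assert(sign_extend_syscall_nr(0xffffffffULL) == -1);
        /* ...while ordinary syscall numbers pass through unchanged. */
        assert(sign_extend_syscall_nr(12) == 12);
        return 0;
}
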
1797 +diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
1798 +index 984abbee71ca..c342b2e17492 100644
1799 +--- a/arch/parisc/kernel/signal32.c
1800 ++++ b/arch/parisc/kernel/signal32.c
1801 +@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
1802 + val = (compat_int_t)from->si_int;
1803 + err |= __put_user(val, &to->si_int);
1804 + break;
1805 ++ case __SI_SYS >> 16:
1806 ++ err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
1807 ++ err |= __put_user(from->si_syscall, &to->si_syscall);
1808 ++ err |= __put_user(from->si_arch, &to->si_arch);
1809 ++ break;
1810 + }
1811 + }
1812 + return err;
1813 +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
1814 +index fbafa0d0e2bf..57b4836b7ecd 100644
1815 +--- a/arch/parisc/kernel/syscall.S
1816 ++++ b/arch/parisc/kernel/syscall.S
1817 +@@ -329,6 +329,7 @@ tracesys_next:
1818 +
1819 + ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
1820 + LDREG TI_TASK(%r1), %r1
1821 ++ LDREG TASK_PT_GR28(%r1), %r28 /* Restore return value */
1822 + LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
1823 + LDREG TASK_PT_GR25(%r1), %r25
1824 + LDREG TASK_PT_GR24(%r1), %r24
1825 +@@ -342,7 +343,8 @@ tracesys_next:
1826 + stw %r21, -56(%r30) /* 6th argument */
1827 + #endif
1828 +
1829 +- comiclr,>>= __NR_Linux_syscalls, %r20, %r0
1830 ++ cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
1831 ++ comiclr,>> __NR_Linux_syscalls, %r20, %r0
1832 + b,n .Ltracesys_nosys
1833 +
1834 + LDREGX %r20(%r19), %r19
1835 +diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1836 +index 553b09855cfd..77e2262c97f6 100644
1837 +--- a/arch/parisc/kernel/traps.c
1838 ++++ b/arch/parisc/kernel/traps.c
1839 +@@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1840 +
1841 + if (fault_space == 0 && !faulthandler_disabled())
1842 + {
1843 ++ /* Clean up and return if in exception table. */
1844 ++ if (fixup_exception(regs))
1845 ++ return;
1846 + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
1847 + parisc_terminate("Kernel Fault", regs, code, fault_address);
1848 + }
1849 +diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
1850 +index 536ef66bb94b..1052b747e011 100644
1851 +--- a/arch/parisc/lib/fixup.S
1852 ++++ b/arch/parisc/lib/fixup.S
1853 +@@ -26,6 +26,7 @@
1854 +
1855 + #ifdef CONFIG_SMP
1856 + .macro get_fault_ip t1 t2
1857 ++ loadgp
1858 + addil LT%__per_cpu_offset,%r27
1859 + LDREG RT%__per_cpu_offset(%r1),\t1
1860 + /* t2 = smp_processor_id() */
1861 +@@ -40,14 +41,19 @@
1862 + LDREG RT%exception_data(%r1),\t1
1863 + /* t1 = this_cpu_ptr(&exception_data) */
1864 + add,l \t1,\t2,\t1
1865 ++ /* %r27 = t1->fault_gp - restore gp */
1866 ++ LDREG EXCDATA_GP(\t1), %r27
1867 + /* t1 = t1->fault_ip */
1868 + LDREG EXCDATA_IP(\t1), \t1
1869 + .endm
1870 + #else
1871 + .macro get_fault_ip t1 t2
1872 ++ loadgp
1873 + /* t1 = this_cpu_ptr(&exception_data) */
1874 + addil LT%exception_data,%r27
1875 + LDREG RT%exception_data(%r1),\t2
1876 ++ /* %r27 = t2->fault_gp - restore gp */
1877 ++ LDREG EXCDATA_GP(\t2), %r27
1878 + /* t1 = t2->fault_ip */
1879 + LDREG EXCDATA_IP(\t2), \t1
1880 + .endm
1881 +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1882 +index a762864ec92e..f9064449908a 100644
1883 +--- a/arch/parisc/mm/fault.c
1884 ++++ b/arch/parisc/mm/fault.c
1885 +@@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
1886 + struct exception_data *d;
1887 + d = this_cpu_ptr(&exception_data);
1888 + d->fault_ip = regs->iaoq[0];
1889 ++ d->fault_gp = regs->gr[27];
1890 + d->fault_space = regs->isr;
1891 + d->fault_addr = regs->ior;
1892 +
1893 +diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
1894 +index e4396a7d0f7c..4afe66aa1400 100644
1895 +--- a/arch/powerpc/include/asm/word-at-a-time.h
1896 ++++ b/arch/powerpc/include/asm/word-at-a-time.h
1897 +@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
1898 + "andc %1,%1,%2\n\t"
1899 + "popcntd %0,%1"
1900 + : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
1901 +- : "r" (bits));
1902 ++ : "b" (bits));
1903 +
1904 + return leading_zero_bits;
1905 + }
1906 +diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
1907 +index 8dde19962a5b..f63c96cd3608 100644
1908 +--- a/arch/powerpc/include/uapi/asm/cputable.h
1909 ++++ b/arch/powerpc/include/uapi/asm/cputable.h
1910 +@@ -31,6 +31,7 @@
1911 + #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
1912 + 0x00000040
1913 +
1914 ++/* Reserved - do not use 0x00000004 */
1915 + #define PPC_FEATURE_TRUE_LE 0x00000002
1916 + #define PPC_FEATURE_PPC_LE 0x00000001
1917 +
1918 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
1919 +index 3c5736e52a14..54ed9c7663e6 100644
1920 +--- a/arch/powerpc/kernel/process.c
1921 ++++ b/arch/powerpc/kernel/process.c
1922 +@@ -854,7 +854,7 @@ void restore_tm_state(struct pt_regs *regs)
1923 + static inline void save_sprs(struct thread_struct *t)
1924 + {
1925 + #ifdef CONFIG_ALTIVEC
1926 +- if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
1927 ++ if (cpu_has_feature(CPU_FTR_ALTIVEC))
1928 + t->vrsave = mfspr(SPRN_VRSAVE);
1929 + #endif
1930 + #ifdef CONFIG_PPC_BOOK3S_64
1931 +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
1932 +index 7030b035905d..a15fe1d4e84a 100644
1933 +--- a/arch/powerpc/kernel/prom.c
1934 ++++ b/arch/powerpc/kernel/prom.c
1935 +@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
1936 + unsigned long cpu_features; /* CPU_FTR_xxx bit */
1937 + unsigned long mmu_features; /* MMU_FTR_xxx bit */
1938 + unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
1939 ++ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
1940 + unsigned char pabyte; /* byte number in ibm,pa-features */
1941 + unsigned char pabit; /* bit number (big-endian) */
1942 + unsigned char invert; /* if 1, pa bit set => clear feature */
1943 + } ibm_pa_features[] __initdata = {
1944 +- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
1945 +- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
1946 +- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
1947 +- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
1948 +- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
1949 +- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
1950 +- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
1951 ++ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
1952 ++ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
1953 ++ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
1954 ++ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
1955 ++ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
1956 ++ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
1957 ++ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
1958 + /*
1959 +- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
1960 +- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
1961 +- * which is 0 if the kernel doesn't support TM.
1962 ++ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
1963 ++ * we don't want to turn on TM here, so we use the *_COMP versions
1964 ++ * which are 0 if the kernel doesn't support TM.
1965 + */
1966 +- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
1967 ++ {CPU_FTR_TM_COMP, 0, 0,
1968 ++ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
1969 + };
1970 +
1971 + static void __init scan_features(unsigned long node, const unsigned char *ftrs,
1972 +@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
1973 + if (bit ^ fp->invert) {
1974 + cur_cpu_spec->cpu_features |= fp->cpu_features;
1975 + cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
1976 ++ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
1977 + cur_cpu_spec->mmu_features |= fp->mmu_features;
1978 + } else {
1979 + cur_cpu_spec->cpu_features &= ~fp->cpu_features;
1980 + cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
1981 ++ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
1982 + cur_cpu_spec->mmu_features &= ~fp->mmu_features;
1983 + }
1984 + }
1985 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
1986 +index 744e24bcb85c..4a811ca7ac9d 100644
1987 +--- a/arch/powerpc/mm/hugetlbpage.c
1988 ++++ b/arch/powerpc/mm/hugetlbpage.c
1989 +@@ -414,13 +414,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
1990 + {
1991 + struct hugepd_freelist **batchp;
1992 +
1993 +- batchp = this_cpu_ptr(&hugepd_freelist_cur);
1994 ++ batchp = &get_cpu_var(hugepd_freelist_cur);
1995 +
1996 + if (atomic_read(&tlb->mm->mm_users) < 2 ||
1997 + cpumask_equal(mm_cpumask(tlb->mm),
1998 + cpumask_of(smp_processor_id()))) {
1999 + kmem_cache_free(hugepte_cache, hugepte);
2000 +- put_cpu_var(hugepd_freelist_cur);
2001 ++ put_cpu_var(hugepd_freelist_cur);
2002 + return;
2003 + }
2004 +
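
The hugepd_free() fix replaces this_cpu_ptr() with the get_cpu_var()/
put_cpu_var() bracket because the function runs in a preemptible context:
without pinning, the task could migrate and keep writing another CPU's
freelist. A toy single-CPU model of the accessor pair -- the names are
stand-ins for illustration only:

#include <stdio.h>

static int freelist_slot;  /* plays the per-CPU hugepd_freelist_cur */
static int preempt_count;  /* while > 0, the task cannot migrate */

static int *get_cpu_var_slot(void)
{
        preempt_count++;        /* get_cpu_var() disables preemption... */
        return &freelist_slot;  /* ...so this stays the current CPU's slot */
}

static void put_cpu_var_slot(void)
{
        preempt_count--;        /* put_cpu_var() re-enables preemption */
}

int main(void)
{
        int *slot = get_cpu_var_slot();

        *slot += 1;             /* safe: no migration between get and put */
        put_cpu_var_slot();

        printf("slot=%d preempt_count=%d\n", freelist_slot, preempt_count);
        return 0;
}
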
2005 +diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
2006 +index c873e682b67f..6dafabb6ae1a 100644
2007 +--- a/arch/s390/include/asm/pci.h
2008 ++++ b/arch/s390/include/asm/pci.h
2009 +@@ -45,7 +45,8 @@ struct zpci_fmb {
2010 + u64 rpcit_ops;
2011 + u64 dma_rbytes;
2012 + u64 dma_wbytes;
2013 +-} __packed __aligned(16);
2014 ++ u64 pad[2];
2015 ++} __packed __aligned(128);
2016 +
2017 + enum zpci_state {
2018 + ZPCI_FN_STATE_RESERVED,
2019 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
2020 +index cd5a191381b9..c920b81be5bb 100644
2021 +--- a/arch/s390/kernel/entry.S
2022 ++++ b/arch/s390/kernel/entry.S
2023 +@@ -1199,114 +1199,12 @@ cleanup_critical:
2024 + .quad .Lpsw_idle_lpsw
2025 +
2026 + .Lcleanup_save_fpu_regs:
2027 +- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
2028 +- bor %r14
2029 +- clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
2030 +- jhe 5f
2031 +- clg %r9,BASED(.Lcleanup_save_fpu_regs_fp)
2032 +- jhe 4f
2033 +- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
2034 +- jhe 3f
2035 +- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
2036 +- jhe 2f
2037 +- clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
2038 +- jhe 1f
2039 +- lg %r2,__LC_CURRENT
2040 +- aghi %r2,__TASK_thread
2041 +-0: # Store floating-point controls
2042 +- stfpc __THREAD_FPU_fpc(%r2)
2043 +-1: # Load register save area and check if VX is active
2044 +- lg %r3,__THREAD_FPU_regs(%r2)
2045 +- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
2046 +- jz 4f # no VX -> store FP regs
2047 +-2: # Store vector registers (V0-V15)
2048 +- VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
2049 +-3: # Store vector registers (V16-V31)
2050 +- VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
2051 +- j 5f # -> done, set CIF_FPU flag
2052 +-4: # Store floating-point registers
2053 +- std 0,0(%r3)
2054 +- std 1,8(%r3)
2055 +- std 2,16(%r3)
2056 +- std 3,24(%r3)
2057 +- std 4,32(%r3)
2058 +- std 5,40(%r3)
2059 +- std 6,48(%r3)
2060 +- std 7,56(%r3)
2061 +- std 8,64(%r3)
2062 +- std 9,72(%r3)
2063 +- std 10,80(%r3)
2064 +- std 11,88(%r3)
2065 +- std 12,96(%r3)
2066 +- std 13,104(%r3)
2067 +- std 14,112(%r3)
2068 +- std 15,120(%r3)
2069 +-5: # Set CIF_FPU flag
2070 +- oi __LC_CPU_FLAGS+7,_CIF_FPU
2071 +- lg %r9,48(%r11) # return from save_fpu_regs
2072 ++ larl %r9,save_fpu_regs
2073 + br %r14
2074 +-.Lcleanup_save_fpu_fpc_end:
2075 +- .quad .Lsave_fpu_regs_fpc_end
2076 +-.Lcleanup_save_fpu_regs_vx_low:
2077 +- .quad .Lsave_fpu_regs_vx_low
2078 +-.Lcleanup_save_fpu_regs_vx_high:
2079 +- .quad .Lsave_fpu_regs_vx_high
2080 +-.Lcleanup_save_fpu_regs_fp:
2081 +- .quad .Lsave_fpu_regs_fp
2082 +-.Lcleanup_save_fpu_regs_done:
2083 +- .quad .Lsave_fpu_regs_done
2084 +
2085 + .Lcleanup_load_fpu_regs:
2086 +- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
2087 +- bnor %r14
2088 +- clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
2089 +- jhe 1f
2090 +- clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
2091 +- jhe 2f
2092 +- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
2093 +- jhe 3f
2094 +- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
2095 +- jhe 4f
2096 +- lg %r4,__LC_CURRENT
2097 +- aghi %r4,__TASK_thread
2098 +- lfpc __THREAD_FPU_fpc(%r4)
2099 +- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
2100 +- lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
2101 +- jz 2f # -> no VX, load FP regs
2102 +-4: # Load V0 ..V15 registers
2103 +- VLM %v0,%v15,0,%r4
2104 +-3: # Load V16..V31 registers
2105 +- VLM %v16,%v31,256,%r4
2106 +- j 1f
2107 +-2: # Load floating-point registers
2108 +- ld 0,0(%r4)
2109 +- ld 1,8(%r4)
2110 +- ld 2,16(%r4)
2111 +- ld 3,24(%r4)
2112 +- ld 4,32(%r4)
2113 +- ld 5,40(%r4)
2114 +- ld 6,48(%r4)
2115 +- ld 7,56(%r4)
2116 +- ld 8,64(%r4)
2117 +- ld 9,72(%r4)
2118 +- ld 10,80(%r4)
2119 +- ld 11,88(%r4)
2120 +- ld 12,96(%r4)
2121 +- ld 13,104(%r4)
2122 +- ld 14,112(%r4)
2123 +- ld 15,120(%r4)
2124 +-1: # Clear CIF_FPU bit
2125 +- ni __LC_CPU_FLAGS+7,255-_CIF_FPU
2126 +- lg %r9,48(%r11) # return from load_fpu_regs
2127 ++ larl %r9,load_fpu_regs
2128 + br %r14
2129 +-.Lcleanup_load_fpu_regs_vx:
2130 +- .quad .Lload_fpu_regs_vx
2131 +-.Lcleanup_load_fpu_regs_vx_high:
2132 +- .quad .Lload_fpu_regs_vx_high
2133 +-.Lcleanup_load_fpu_regs_fp:
2134 +- .quad .Lload_fpu_regs_fp
2135 +-.Lcleanup_load_fpu_regs_done:
2136 +- .quad .Lload_fpu_regs_done
2137 +
2138 + /*
2139 + * Integer constants
2140 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
2141 +index 9220db5c996a..93fc63ef6e95 100644
2142 +--- a/arch/s390/kernel/setup.c
2143 ++++ b/arch/s390/kernel/setup.c
2144 +@@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
2145 + + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
2146 + lc->current_task = (unsigned long) init_thread_union.thread_info.task;
2147 + lc->thread_info = (unsigned long) &init_thread_union;
2148 ++ lc->lpp = LPP_MAGIC;
2149 + lc->machine_flags = S390_lowcore.machine_flags;
2150 + lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
2151 + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
2152 +diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
2153 +index 13dab0c1645c..3776aca22082 100644
2154 +--- a/arch/s390/mm/gup.c
2155 ++++ b/arch/s390/mm/gup.c
2156 +@@ -20,9 +20,9 @@
2157 + static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
2158 + unsigned long end, int write, struct page **pages, int *nr)
2159 + {
2160 ++ struct page *head, *page;
2161 + unsigned long mask;
2162 + pte_t *ptep, pte;
2163 +- struct page *page;
2164 +
2165 + mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
2166 +
2167 +@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
2168 + return 0;
2169 + VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2170 + page = pte_page(pte);
2171 +- if (!page_cache_get_speculative(page))
2172 ++ head = compound_head(page);
2173 ++ if (!page_cache_get_speculative(head))
2174 + return 0;
2175 + if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2176 +- put_page(page);
2177 ++ put_page(head);
2178 + return 0;
2179 + }
2180 ++ VM_BUG_ON_PAGE(compound_head(page) != head, page);
2181 + pages[*nr] = page;
2182 + (*nr)++;
2183 +
2184 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
2185 +index 8f19c8f9d660..8f75edc998ff 100644
2186 +--- a/arch/s390/pci/pci.c
2187 ++++ b/arch/s390/pci/pci.c
2188 +@@ -864,8 +864,11 @@ static inline int barsize(u8 size)
2189 +
2190 + static int zpci_mem_init(void)
2191 + {
2192 ++ BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
2193 ++ __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
2194 ++
2195 + zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
2196 +- 16, 0, NULL);
2197 ++ __alignof__(struct zpci_fmb), 0, NULL);
2198 + if (!zdev_fmb_cache)
2199 + goto error_fmb;
2200 +
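
The zpci_fmb change pads the measurement block out to 128 bytes and derives
the slab alignment from the type itself, while the new BUILD_BUG_ON enforces
that the alignment is a power of two no smaller than the object, i.e. that a
block can never straddle its own alignment boundary. The invariant can be
checked at compile time in plain C11; the struct below is a hypothetical
stand-in for the real layout:

#include <assert.h>
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: 14 counters plus the new pad = 128 bytes. */
struct fmb_like {
        uint64_t counters[14];
        uint64_t pad[2];
} __attribute__((aligned(128)));        /* GCC-style, like __aligned(128) */

static_assert((alignof(struct fmb_like) & (alignof(struct fmb_like) - 1)) == 0,
              "alignment must be a power of two");
static_assert(alignof(struct fmb_like) >= sizeof(struct fmb_like),
              "a block must not straddle its alignment boundary");

int main(void)
{
        printf("size=%zu align=%zu\n",
               sizeof(struct fmb_like), alignof(struct fmb_like));
        return 0;
}
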
2201 +diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
2202 +index ec29e14ec5a8..bf25d7c79a2d 100644
2203 +--- a/arch/sh/mm/kmap.c
2204 ++++ b/arch/sh/mm/kmap.c
2205 +@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
2206 +
2207 + BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
2208 +
2209 ++ preempt_disable();
2210 + pagefault_disable();
2211 +
2212 + idx = FIX_CMAP_END -
2213 +@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
2214 + }
2215 +
2216 + pagefault_enable();
2217 ++ preempt_enable();
2218 + }
2219 +diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
2220 +index b821b13d343a..8a6b57108ac2 100644
2221 +--- a/arch/um/drivers/mconsole_kern.c
2222 ++++ b/arch/um/drivers/mconsole_kern.c
2223 +@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
2224 + ptr += strlen("proc");
2225 + ptr = skip_spaces(ptr);
2226 +
2227 +- file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
2228 ++ file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
2229 + if (IS_ERR(file)) {
2230 + mconsole_reply(req, "Failed to open file", 1, 0);
2231 + printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
2232 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
2233 +index c46662f64c39..3bf45a0cd69e 100644
2234 +--- a/arch/x86/Kconfig
2235 ++++ b/arch/x86/Kconfig
2236 +@@ -1160,22 +1160,23 @@ config MICROCODE
2237 + bool "CPU microcode loading support"
2238 + default y
2239 + depends on CPU_SUP_AMD || CPU_SUP_INTEL
2240 +- depends on BLK_DEV_INITRD
2241 + select FW_LOADER
2242 + ---help---
2243 +-
2244 + If you say Y here, you will be able to update the microcode on
2245 +- certain Intel and AMD processors. The Intel support is for the
2246 +- IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
2247 +- Xeon etc. The AMD support is for families 0x10 and later. You will
2248 +- obviously need the actual microcode binary data itself which is not
2249 +- shipped with the Linux kernel.
2250 +-
2251 +- This option selects the general module only, you need to select
2252 +- at least one vendor specific module as well.
2253 +-
2254 +- To compile this driver as a module, choose M here: the module
2255 +- will be called microcode.
2256 ++ Intel and AMD processors. The Intel support is for the IA32 family,
2257 ++ e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
2258 ++ AMD support is for families 0x10 and later. You will obviously need
2259 ++ the actual microcode binary data itself which is not shipped with
2260 ++ the Linux kernel.
2261 ++
2262 ++ The preferred method to load microcode from a detached initrd is described
2263 ++ in Documentation/x86/early-microcode.txt. For that you need to enable
2264 ++ CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
2265 ++ initrd for microcode blobs.
2266 ++
2267 ++ In addition, you can build the microcode into the kernel. For that you
2268 ++ need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
2269 ++ to the CONFIG_EXTRA_FIRMWARE config option.
2270 +
2271 + config MICROCODE_INTEL
2272 + bool "Intel microcode loading support"
2273 +diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
2274 +index a841e9765bd6..8381c09d2870 100644
2275 +--- a/arch/x86/crypto/sha-mb/sha1_mb.c
2276 ++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
2277 +@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
2278 +
2279 + req = cast_mcryptd_ctx_to_req(req_ctx);
2280 + if (irqs_disabled())
2281 +- rctx->complete(&req->base, ret);
2282 ++ req_ctx->complete(&req->base, ret);
2283 + else {
2284 + local_bh_disable();
2285 +- rctx->complete(&req->base, ret);
2286 ++ req_ctx->complete(&req->base, ret);
2287 + local_bh_enable();
2288 + }
2289 + }
2290 +diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
2291 +index 03663740c866..1a4477cedc49 100644
2292 +--- a/arch/x86/entry/common.c
2293 ++++ b/arch/x86/entry/common.c
2294 +@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
2295 + /* Called with IRQs disabled. */
2296 + __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
2297 + {
2298 ++ struct thread_info *ti = pt_regs_to_thread_info(regs);
2299 + u32 cached_flags;
2300 +
2301 + if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
2302 +@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
2303 +
2304 + lockdep_sys_exit();
2305 +
2306 +- cached_flags =
2307 +- READ_ONCE(pt_regs_to_thread_info(regs)->flags);
2308 ++ cached_flags = READ_ONCE(ti->flags);
2309 +
2310 + if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
2311 + exit_to_usermode_loop(regs, cached_flags);
2312 +
2313 ++#ifdef CONFIG_COMPAT
2314 ++ /*
2315 ++ * Compat syscalls set TS_COMPAT. Make sure we clear it before
2316 ++ * returning to user mode. We need to clear it *after* signal
2317 ++ * handling, because syscall restart has a fixup for compat
2318 ++ * syscalls. The fixup is exercised by the ptrace_syscall_32
2319 ++ * selftest.
2320 ++ */
2321 ++ ti->status &= ~TS_COMPAT;
2322 ++#endif
2323 ++
2324 + user_enter();
2325 + }
2326 +
2327 +@@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
2328 + if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
2329 + syscall_slow_exit_work(regs, cached_flags);
2330 +
2331 +-#ifdef CONFIG_COMPAT
2332 +- /*
2333 +- * Compat syscalls set TS_COMPAT. Make sure we clear it before
2334 +- * returning to user mode.
2335 +- */
2336 +- ti->status &= ~TS_COMPAT;
2337 +-#endif
2338 +-
2339 + local_irq_disable();
2340 + prepare_exit_to_usermode(regs);
2341 + }
2342 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
2343 +index c80f6b6f3da2..e8c4fba52d3d 100644
2344 +--- a/arch/x86/include/asm/apic.h
2345 ++++ b/arch/x86/include/asm/apic.h
2346 +@@ -644,8 +644,8 @@ static inline void entering_irq(void)
2347 +
2348 + static inline void entering_ack_irq(void)
2349 + {
2350 +- ack_APIC_irq();
2351 + entering_irq();
2352 ++ ack_APIC_irq();
2353 + }
2354 +
2355 + static inline void ipi_entering_ack_irq(void)
2356 +diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
2357 +index f8a29d2c97b0..e6a8613fbfb0 100644
2358 +--- a/arch/x86/include/asm/hugetlb.h
2359 ++++ b/arch/x86/include/asm/hugetlb.h
2360 +@@ -4,6 +4,7 @@
2361 + #include <asm/page.h>
2362 + #include <asm-generic/hugetlb.h>
2363 +
2364 ++#define hugepages_supported() cpu_has_pse
2365 +
2366 + static inline int is_hugepage_only_range(struct mm_struct *mm,
2367 + unsigned long addr,
2368 +diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
2369 +index 1815b736269d..84b3d194a958 100644
2370 +--- a/arch/x86/include/asm/hw_irq.h
2371 ++++ b/arch/x86/include/asm/hw_irq.h
2372 +@@ -141,6 +141,7 @@ struct irq_alloc_info {
2373 + struct irq_cfg {
2374 + unsigned int dest_apicid;
2375 + u8 vector;
2376 ++ u8 old_vector;
2377 + };
2378 +
2379 + extern struct irq_cfg *irq_cfg(unsigned int irq);
2380 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
2381 +index 44adbb819041..f8dba2097c40 100644
2382 +--- a/arch/x86/include/asm/kvm_host.h
2383 ++++ b/arch/x86/include/asm/kvm_host.h
2384 +@@ -42,7 +42,7 @@
2385 +
2386 + #define KVM_PIO_PAGE_OFFSET 1
2387 + #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
2388 +-#define KVM_HALT_POLL_NS_DEFAULT 500000
2389 ++#define KVM_HALT_POLL_NS_DEFAULT 400000
2390 +
2391 + #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
2392 +
2393 +diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
2394 +index 1e1b07a5a738..9d3a96c4da78 100644
2395 +--- a/arch/x86/include/asm/microcode.h
2396 ++++ b/arch/x86/include/asm/microcode.h
2397 +@@ -3,6 +3,7 @@
2398 +
2399 + #include <asm/cpu.h>
2400 + #include <linux/earlycpio.h>
2401 ++#include <linux/initrd.h>
2402 +
2403 + #define native_rdmsr(msr, val1, val2) \
2404 + do { \
2405 +@@ -143,4 +144,29 @@ static inline void reload_early_microcode(void) { }
2406 + static inline bool
2407 + get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
2408 + #endif
2409 ++
2410 ++static inline unsigned long get_initrd_start(void)
2411 ++{
2412 ++#ifdef CONFIG_BLK_DEV_INITRD
2413 ++ return initrd_start;
2414 ++#else
2415 ++ return 0;
2416 ++#endif
2417 ++}
2418 ++
2419 ++static inline unsigned long get_initrd_start_addr(void)
2420 ++{
2421 ++#ifdef CONFIG_BLK_DEV_INITRD
2422 ++#ifdef CONFIG_X86_32
2423 ++ unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
2424 ++
2425 ++ return (unsigned long)__pa_nodebug(*initrd_start_p);
2426 ++#else
2427 ++ return get_initrd_start();
2428 ++#endif
2429 ++#else /* CONFIG_BLK_DEV_INITRD */
2430 ++ return 0;
2431 ++#endif
2432 ++}
2433 ++
2434 + #endif /* _ASM_X86_MICROCODE_H */
2435 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
2436 +index 7bcb861a04e5..5a2ed3ed2f26 100644
2437 +--- a/arch/x86/include/asm/perf_event.h
2438 ++++ b/arch/x86/include/asm/perf_event.h
2439 +@@ -165,6 +165,7 @@ struct x86_pmu_capability {
2440 + #define GLOBAL_STATUS_ASIF BIT_ULL(60)
2441 + #define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
2442 + #define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
2443 ++#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
2444 +
2445 + /*
2446 + * IBS cpuid feature detection
2447 +diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
2448 +index 8b2d4bea9962..39171b3646bb 100644
2449 +--- a/arch/x86/include/asm/xen/hypervisor.h
2450 ++++ b/arch/x86/include/asm/xen/hypervisor.h
2451 +@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
2452 + void xen_arch_unregister_cpu(int num);
2453 + #endif
2454 +
2455 ++extern void xen_set_iopl_mask(unsigned mask);
2456 ++
2457 + #endif /* _ASM_X86_XEN_HYPERVISOR_H */
2458 +diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
2459 +index 3b670df4ba7b..ef495511f019 100644
2460 +--- a/arch/x86/kernel/apic/vector.c
2461 ++++ b/arch/x86/kernel/apic/vector.c
2462 +@@ -213,6 +213,7 @@ update:
2463 + */
2464 + cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
2465 + d->move_in_progress = !cpumask_empty(d->old_domain);
2466 ++ d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
2467 + d->cfg.vector = vector;
2468 + cpumask_copy(d->domain, vector_cpumask);
2469 + success:
2470 +@@ -255,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
2471 + struct irq_desc *desc;
2472 + int cpu, vector;
2473 +
2474 +- BUG_ON(!data->cfg.vector);
2475 ++ if (!data->cfg.vector)
2476 ++ return;
2477 +
2478 + vector = data->cfg.vector;
2479 + for_each_cpu_and(cpu, data->domain, cpu_online_mask)
2480 +@@ -655,46 +657,97 @@ void irq_complete_move(struct irq_cfg *cfg)
2481 + }
2482 +
2483 + /*
2484 +- * Called with @desc->lock held and interrupts disabled.
2485 ++ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
2486 + */
2487 + void irq_force_complete_move(struct irq_desc *desc)
2488 + {
2489 + struct irq_data *irqdata = irq_desc_get_irq_data(desc);
2490 + struct apic_chip_data *data = apic_chip_data(irqdata);
2491 + struct irq_cfg *cfg = data ? &data->cfg : NULL;
2492 ++ unsigned int cpu;
2493 +
2494 + if (!cfg)
2495 + return;
2496 +
2497 +- __irq_complete_move(cfg, cfg->vector);
2498 +-
2499 + /*
2500 + * This is tricky. If the cleanup of @data->old_domain has not been
2501 + * done yet, then the following setaffinity call will fail with
2502 + * -EBUSY. This can leave the interrupt in a stale state.
2503 + *
2504 +- * The cleanup cannot make progress because we hold @desc->lock. So in
2505 +- * case @data->old_domain is not yet cleaned up, we need to drop the
2506 +- * lock and acquire it again. @desc cannot go away, because the
2507 +- * hotplug code holds the sparse irq lock.
2508 ++ * All CPUs are stuck in stop machine with interrupts disabled so
2509 ++ * calling __irq_complete_move() would be completely pointless.
2510 + */
2511 + raw_spin_lock(&vector_lock);
2512 +- /* Clean out all offline cpus (including ourself) first. */
2513 ++ /*
2514 ++ * Clean out all offline cpus (including the outgoing one) from the
2515 ++ * old_domain mask.
2516 ++ */
2517 + cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
2518 +- while (!cpumask_empty(data->old_domain)) {
2519 ++
2520 ++ /*
2521 ++ * If move_in_progress is cleared and the old_domain mask is empty,
2522 ++ * then there is nothing to clean up. fixup_irqs() will take care of
2523 ++ * the stale vectors on the outgoing cpu.
2524 ++ */
2525 ++ if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
2526 + raw_spin_unlock(&vector_lock);
2527 +- raw_spin_unlock(&desc->lock);
2528 +- cpu_relax();
2529 +- raw_spin_lock(&desc->lock);
2530 ++ return;
2531 ++ }
2532 ++
2533 ++ /*
2534 ++ * 1) The interrupt is in move_in_progress state. That means that we
2535 ++ * have not seen an interrupt since the io_apic was reprogrammed to
2536 ++ * the new vector.
2537 ++ *
2538 ++ * 2) The interrupt has fired on the new vector, but the cleanup IPIs
2539 ++ * have not been processed yet.
2540 ++ */
2541 ++ if (data->move_in_progress) {
2542 + /*
2543 +- * Reevaluate apic_chip_data. It might have been cleared after
2544 +- * we dropped @desc->lock.
2545 ++ * In theory there is a race:
2546 ++ *
2547 ++ * set_ioapic(new_vector) <-- Interrupt is raised before update
2548 ++ * is effective, i.e. it's raised on
2549 ++ * the old vector.
2550 ++ *
2551 ++ * So if the target cpu cannot handle that interrupt before
2552 ++ * the old vector is cleaned up, we get a spurious interrupt
2553 ++ * and in the worst case the ioapic irq line becomes stale.
2554 ++ *
2555 ++ * But in case of cpu hotplug this should be a non-issue
2556 ++ * because if the affinity update happens right before all
2557 ++ * cpus rendezvous in stop machine, there is no way that the
2558 ++ * interrupt can be blocked on the target cpu because all cpus
2559 ++ * loop first with interrupts enabled in stop machine, so the
2560 ++ * old vector is not yet cleaned up when the interrupt fires.
2561 ++ *
2562 ++ * So the only way to run into this issue is if the delivery
2563 ++ * of the interrupt on the apic/system bus would be delayed
2564 ++ * beyond the point where the target cpu disables interrupts
2565 ++ * in stop machine. I doubt that it can happen, but at least
2566 ++ * there is a theroretical chance. Virtualization might be
2567 ++ * able to expose this, but AFAICT the IOAPIC emulation is not
2568 ++ * as stupid as the real hardware.
2569 ++ *
2570 ++ * Anyway, there is nothing we can do about that at this point
2571 ++ * w/o refactoring the whole fixup_irq() business completely.
2572 ++ * We print at least the irq number and the old vector number,
2573 ++ * so we have the necessary information when a problem in that
2574 ++ * area arises.
2575 + */
2576 +- data = apic_chip_data(irqdata);
2577 +- if (!data)
2578 +- return;
2579 +- raw_spin_lock(&vector_lock);
2580 ++ pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
2581 ++ irqdata->irq, cfg->old_vector);
2582 + }
2583 ++ /*
2584 ++ * If old_domain is not empty, then other cpus still have the irq
2585 ++ * descriptor set in their vector array. Clean it up.
2586 ++ */
2587 ++ for_each_cpu(cpu, data->old_domain)
2588 ++ per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
2589 ++
2590 ++ /* Clean up the leftovers of the (half-finished) move */
2591 ++ cpumask_clear(data->old_domain);
2592 ++ data->move_in_progress = 0;
2593 + raw_spin_unlock(&vector_lock);
2594 + }
2595 + #endif
2596 +diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
2597 +index 0a850100c594..2658e2af74ec 100644
2598 +--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
2599 ++++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
2600 +@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
2601 + void mce_gen_pool_process(void)
2602 + {
2603 + struct llist_node *head;
2604 +- struct mce_evt_llist *node;
2605 ++ struct mce_evt_llist *node, *tmp;
2606 + struct mce *mce;
2607 +
2608 + head = llist_del_all(&mce_event_llist);
2609 +@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
2610 + return;
2611 +
2612 + head = llist_reverse_order(head);
2613 +- llist_for_each_entry(node, head, llnode) {
2614 ++ llist_for_each_entry_safe(node, tmp, head, llnode) {
2615 + mce = &node->mce;
2616 + atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
2617 + gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
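
The switch to llist_for_each_entry_safe() matters because the loop body frees
the node it is visiting: the plain iterator would read node->llnode.next
after gen_pool_free() has released the memory, while the _safe variant caches
the successor in @tmp first. The same bug and fix in plain userspace C:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int payload;
};

static void process_and_free(struct node *head)
{
        struct node *n, *tmp;

        for (n = head; n; n = tmp) {
                tmp = n->next;  /* cache the successor first... */
                printf("%d\n", n->payload);
                free(n);        /* ...so freeing n here is safe */
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->payload = i;
                n->next = head;
                head = n;
        }
        process_and_free(head);
        return 0;
}
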
2618 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
2619 +index 2c5aaf8c2e2f..05538582a809 100644
2620 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
2621 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
2622 +@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
2623 + {
2624 + __u64 msr_val;
2625 +
2626 ++ if (static_cpu_has(X86_FEATURE_HWP))
2627 ++ wrmsrl_safe(MSR_HWP_STATUS, 0);
2628 ++
2629 + rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
2630 +
2631 + /* Check for violation of core thermal thresholds*/
2632 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
2633 +index ee81c544ee0d..4f4735bd8698 100644
2634 +--- a/arch/x86/kernel/cpu/microcode/intel.c
2635 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
2636 +@@ -551,10 +551,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
2637 + cd.data = NULL;
2638 + cd.size = 0;
2639 +
2640 +- cd = find_cpio_data(p, (void *)start, size, &offset);
2641 +- if (!cd.data) {
2642 ++ /* try built-in microcode if no initrd */
2643 ++ if (!size) {
2644 + if (!load_builtin_intel_microcode(&cd))
2645 + return UCODE_ERROR;
2646 ++ } else {
2647 ++ cd = find_cpio_data(p, (void *)start, size, &offset);
2648 ++ if (!cd.data)
2649 ++ return UCODE_ERROR;
2650 + }
2651 +
2652 + return get_matching_model_microcode(0, start, cd.data, cd.size,
2653 +@@ -690,7 +694,7 @@ int __init save_microcode_in_initrd_intel(void)
2654 + if (count == 0)
2655 + return ret;
2656 +
2657 +- copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
2658 ++ copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
2659 + ret = save_microcode(&mc_saved_data, mc_saved, count);
2660 + if (ret)
2661 + pr_err("Cannot save microcode patches from initrd.\n");
2662 +@@ -728,16 +732,20 @@ void __init load_ucode_intel_bsp(void)
2663 + struct boot_params *p;
2664 +
2665 + p = (struct boot_params *)__pa_nodebug(&boot_params);
2666 +- start = p->hdr.ramdisk_image;
2667 + size = p->hdr.ramdisk_size;
2668 +
2669 +- _load_ucode_intel_bsp(
2670 +- (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
2671 +- (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
2672 +- start, size);
2673 ++ /*
2674 ++ * Set start only if we have an initrd image. We cannot use initrd_start
2675 ++ * because it is not set that early yet.
2676 ++ */
2677 ++ start = (size ? p->hdr.ramdisk_image : 0);
2678 ++
2679 ++ _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
2680 ++ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
2681 ++ start, size);
2682 + #else
2683 +- start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
2684 + size = boot_params.hdr.ramdisk_size;
2685 ++ start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
2686 +
2687 + _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
2688 + #endif
2689 +@@ -748,20 +756,14 @@ void load_ucode_intel_ap(void)
2690 + struct mc_saved_data *mc_saved_data_p;
2691 + struct ucode_cpu_info uci;
2692 + unsigned long *mc_saved_in_initrd_p;
2693 +- unsigned long initrd_start_addr;
2694 + enum ucode_state ret;
2695 + #ifdef CONFIG_X86_32
2696 +- unsigned long *initrd_start_p;
2697 +
2698 +- mc_saved_in_initrd_p =
2699 +- (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
2700 ++ mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
2701 + mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
2702 +- initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
2703 +- initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
2704 + #else
2705 +- mc_saved_data_p = &mc_saved_data;
2706 + mc_saved_in_initrd_p = mc_saved_in_initrd;
2707 +- initrd_start_addr = initrd_start;
2708 ++ mc_saved_data_p = &mc_saved_data;
2709 + #endif
2710 +
2711 + /*
2712 +@@ -773,7 +775,7 @@ void load_ucode_intel_ap(void)
2713 +
2714 + collect_cpu_info_early(&uci);
2715 + ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
2716 +- initrd_start_addr, &uci);
2717 ++ get_initrd_start_addr(), &uci);
2718 +
2719 + if (ret != UCODE_OK)
2720 + return;
2721 +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
2722 +index 1b443db2db50..6532f5b40646 100644
2723 +--- a/arch/x86/kernel/cpu/perf_event.c
2724 ++++ b/arch/x86/kernel/cpu/perf_event.c
2725 +@@ -596,6 +596,19 @@ void x86_pmu_disable_all(void)
2726 + }
2727 + }
2728 +
2729 ++/*
2730 ++ * A PMI may land after enabled=0 is set. It could hit either before or
2731 ++ * after disable_all.
2732 ++ *
2733 ++ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
2734 ++ * It will not be re-enabled in the NMI handler again, because enabled=0. After
2735 ++ * handling the NMI, disable_all will be called, which will not change the
2736 ++ * state either. If PMI hits after disable_all, the PMU is already disabled
2737 ++ * before entering NMI handler. The NMI handler will not change the state
2738 ++ * either.
2739 ++ *
2740 ++ * So either situation is harmless.
2741 ++ */
2742 + static void x86_pmu_disable(struct pmu *pmu)
2743 + {
2744 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2745 +diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
2746 +index 7bb61e32fb29..98be6d6d32fa 100644
2747 +--- a/arch/x86/kernel/cpu/perf_event.h
2748 ++++ b/arch/x86/kernel/cpu/perf_event.h
2749 +@@ -586,6 +586,7 @@ struct x86_pmu {
2750 + pebs_broken :1,
2751 + pebs_prec_dist :1;
2752 + int pebs_record_size;
2753 ++ int pebs_buffer_size;
2754 + void (*drain_pebs)(struct pt_regs *regs);
2755 + struct event_constraint *pebs_constraints;
2756 + void (*pebs_aliases)(struct perf_event *event);
2757 +@@ -904,6 +905,8 @@ void intel_pmu_lbr_init_skl(void);
2758 +
2759 + void intel_pmu_lbr_init_knl(void);
2760 +
2761 ++void intel_pmu_pebs_data_source_nhm(void);
2762 ++
2763 + int intel_pmu_setup_lbr_filter(struct perf_event *event);
2764 +
2765 + void intel_pt_interrupt(void);
2766 +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
2767 +index fed2ab1f1065..760aec1e8f82 100644
2768 +--- a/arch/x86/kernel/cpu/perf_event_intel.c
2769 ++++ b/arch/x86/kernel/cpu/perf_event_intel.c
2770 +@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
2771 + };
2772 +
2773 + /*
2774 +- * Use from PMIs where the LBRs are already disabled.
2775 ++ * Used from PMIs where the LBRs are already disabled.
2776 ++ *
2777 ++ * This function could be called consecutively. It must keep the PMU in the
2778 ++ * disabled state across such calls.
2779 ++ *
2780 ++ * During consecutive calls, the same disable value will be written to related
2781 ++ * registers, so the PMU state remains unchanged. hw.state in
2782 ++ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
2783 ++ * calls.
2784 + */
2785 + static void __intel_pmu_disable_all(void)
2786 + {
2787 +@@ -1884,6 +1892,16 @@ again:
2788 + if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2789 + handled++;
2790 + x86_pmu.drain_pebs(regs);
2791 ++ /*
2792 ++ * There are cases where, even though the PEBS ovfl bit is set
2793 ++ * in GLOBAL_OVF_STATUS, the PEBS events may also have their
2794 ++ * overflow bits set for their counters. We must clear them
2795 ++ * here because they have been processed as exact samples in
2796 ++ * the drain_pebs() routine. They must not be processed again
2797 ++ * in the for_each_bit_set() loop for regular samples below.
2798 ++ */
2799 ++ status &= ~cpuc->pebs_enabled;
2800 ++ status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2801 + }
2802 +
2803 + /*
2804 +@@ -1929,7 +1947,10 @@ again:
2805 + goto again;
2806 +
2807 + done:
2808 +- __intel_pmu_enable_all(0, true);
2809 ++ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2810 ++ if (cpuc->enabled)
2811 ++ __intel_pmu_enable_all(0, true);
2812 ++
2813 + /*
2814 + * Only unmask the NMI after the overflow counters
2815 + * have been reset. This avoids spurious NMIs on
2816 +@@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
2817 + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2818 + X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2819 +
2820 ++ intel_pmu_pebs_data_source_nhm();
2821 + x86_add_quirk(intel_nehalem_quirk);
2822 +
2823 + pr_cont("Nehalem events, ");
2824 +@@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
2825 + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2826 + X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2827 +
2828 ++ intel_pmu_pebs_data_source_nhm();
2829 + pr_cont("Westmere events, ");
2830 + break;
2831 +
2832 +diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
2833 +index 10602f0a438f..955140140fd4 100644
2834 +--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
2835 ++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
2836 +@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
2837 + #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
2838 + #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
2839 +
2840 +-static const u64 pebs_data_source[] = {
2841 ++/* Version for Sandy Bridge and later */
2842 ++static u64 pebs_data_source[] = {
2843 + P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
2844 + OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
2845 + OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
2846 +@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
2847 + OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
2848 + };
2849 +
2850 ++/* Patch up minor differences in the bits */
2851 ++void __init intel_pmu_pebs_data_source_nhm(void)
2852 ++{
2853 ++ pebs_data_source[0x05] = OP_LH | P(LVL, L3) | P(SNOOP, HIT);
2854 ++ pebs_data_source[0x06] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
2855 ++ pebs_data_source[0x07] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
2856 ++}
2857 ++
2858 + static u64 precise_store_data(u64 status)
2859 + {
2860 + union intel_x86_pebs_dse dse;
2861 +@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
2862 + if (!x86_pmu.pebs)
2863 + return 0;
2864 +
2865 +- buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
2866 ++ buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
2867 + if (unlikely(!buffer))
2868 + return -ENOMEM;
2869 +
2870 +@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
2871 + per_cpu(insn_buffer, cpu) = ibuffer;
2872 + }
2873 +
2874 +- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
2875 ++ max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
2876 +
2877 + ds->pebs_buffer_base = (u64)(unsigned long)buffer;
2878 + ds->pebs_index = ds->pebs_buffer_base;
2879 +@@ -1319,6 +1328,7 @@ void __init intel_ds_init(void)
2880 +
2881 + x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
2882 + x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
2883 ++ x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
2884 + if (x86_pmu.pebs) {
2885 + char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
2886 + int format = x86_pmu.intel_cap.pebs_format;
2887 +@@ -1327,6 +1337,14 @@ void __init intel_ds_init(void)
2888 + case 0:
2889 + printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
2890 + x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
2891 ++ /*
2892 ++ * Using >PAGE_SIZE buffers makes the WRMSR to
2893 ++ * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
2894 ++ * mysteriously hang on Core2.
2895 ++ *
2896 ++ * As a workaround, we don't do this.
2897 ++ */
2898 ++ x86_pmu.pebs_buffer_size = PAGE_SIZE;
2899 + x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
2900 + break;
2901 +
2902 +diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
2903 +index c0bbd1033b7c..a5286d0bbb43 100644
2904 +--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
2905 ++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
2906 +@@ -695,6 +695,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
2907 +
2908 + /* clear STOP and INT from current entry */
2909 + buf->topa_index[buf->stop_pos]->stop = 0;
2910 ++ buf->topa_index[buf->stop_pos]->intr = 0;
2911 + buf->topa_index[buf->intr_pos]->intr = 0;
2912 +
2913 + /* how many pages till the STOP marker */
2914 +@@ -719,6 +720,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
2915 + buf->intr_pos = idx;
2916 +
2917 + buf->topa_index[buf->stop_pos]->stop = 1;
2918 ++ buf->topa_index[buf->stop_pos]->intr = 1;
2919 + buf->topa_index[buf->intr_pos]->intr = 1;
2920 +
2921 + return 0;
2922 +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
2923 +index 33acb884ccf1..4547b2cca71b 100644
2924 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
2925 ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
2926 +@@ -2875,11 +2875,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
2927 + .format_group = &hswep_uncore_sbox_format_group,
2928 + };
2929 +
2930 ++#define BDX_MSR_UNCORE_SBOX 3
2931 ++
2932 + static struct intel_uncore_type *bdx_msr_uncores[] = {
2933 + &bdx_uncore_ubox,
2934 + &bdx_uncore_cbox,
2935 +- &bdx_uncore_sbox,
2936 + &hswep_uncore_pcu,
2937 ++ &bdx_uncore_sbox,
2938 + NULL,
2939 + };
2940 +
2941 +@@ -2888,6 +2890,10 @@ void bdx_uncore_cpu_init(void)
2942 + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2943 + bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2944 + uncore_msr_uncores = bdx_msr_uncores;
2945 ++
2946 ++ /* BDX-DE doesn't have SBOX */
2947 ++ if (boot_cpu_data.x86_model == 86)
2948 ++ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
2949 + }
2950 +
2951 + static struct intel_uncore_type bdx_uncore_ha = {
2952 +diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
2953 +index 5b0c232d1ee6..b931095e86d4 100644
2954 +--- a/arch/x86/kernel/cpu/perf_event_knc.c
2955 ++++ b/arch/x86/kernel/cpu/perf_event_knc.c
2956 +@@ -263,7 +263,9 @@ again:
2957 + goto again;
2958 +
2959 + done:
2960 +- knc_pmu_enable_all(0);
2961 ++ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2962 ++ if (cpuc->enabled)
2963 ++ knc_pmu_enable_all(0);
2964 +
2965 + return handled;
2966 + }
2967 +diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
2968 +index 37dae792dbbe..589b3193f102 100644
2969 +--- a/arch/x86/kernel/ioport.c
2970 ++++ b/arch/x86/kernel/ioport.c
2971 +@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
2972 + SYSCALL_DEFINE1(iopl, unsigned int, level)
2973 + {
2974 + struct pt_regs *regs = current_pt_regs();
2975 +- unsigned int old = (regs->flags >> 12) & 3;
2976 + struct thread_struct *t = &current->thread;
2977 +
2978 ++ /*
2979 ++ * Careful: the IOPL bits in regs->flags are undefined under Xen PV
2980 ++ * and changing them has no effect.
2981 ++ */
2982 ++ unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
2983 ++
2984 + if (level > 3)
2985 + return -EINVAL;
2986 + /* Trying to gain more privileges? */
2987 +@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
2988 + if (!capable(CAP_SYS_RAWIO))
2989 + return -EPERM;
2990 + }
2991 +- regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
2992 +- t->iopl = level << 12;
2993 ++ regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
2994 ++ (level << X86_EFLAGS_IOPL_BIT);
2995 ++ t->iopl = level << X86_EFLAGS_IOPL_BIT;
2996 + set_iopl_mask(t->iopl);
2997 +
2998 + return 0;
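Both hunks in sys_iopl() revolve around the 2-bit IOPL field at bits 12-13 of EFLAGS; the fix reads the old level from the cached t->iopl because regs->flags is not trustworthy under Xen PV. A runnable sketch of the bit arithmetic (plain C; only X86_EFLAGS_IOPL_BIT = 12 is taken from the kernel headers, the rest is illustrative):

#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_IOPL_BIT 12
#define X86_EFLAGS_IOPL     (3UL << X86_EFLAGS_IOPL_BIT)

/* Encode a privilege level 0-3 into the EFLAGS IOPL field. */
static unsigned long set_iopl(unsigned long flags, unsigned int level)
{
    return (flags & ~X86_EFLAGS_IOPL) |
           ((unsigned long)level << X86_EFLAGS_IOPL_BIT);
}

/* Decode the level back out of a cached iopl value, as the fixed
 * syscall now does with t->iopl instead of regs->flags. */
static unsigned int get_iopl(unsigned long iopl)
{
    return (iopl >> X86_EFLAGS_IOPL_BIT) & 3;
}

int main(void)
{
    unsigned long flags = set_iopl(0x202, 3);
    assert(get_iopl(flags) == 3);
    printf("flags=%#lx iopl=%u\n", flags, get_iopl(flags));
    return 0;
}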
2999 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
3000 +index b9d99e0f82c4..9f751876066f 100644
3001 +--- a/arch/x86/kernel/process_64.c
3002 ++++ b/arch/x86/kernel/process_64.c
3003 +@@ -48,6 +48,7 @@
3004 + #include <asm/syscalls.h>
3005 + #include <asm/debugreg.h>
3006 + #include <asm/switch_to.h>
3007 ++#include <asm/xen/hypervisor.h>
3008 +
3009 + asmlinkage extern void ret_from_fork(void);
3010 +
3011 +@@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
3012 + task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
3013 + __switch_to_xtra(prev_p, next_p, tss);
3014 +
3015 ++#ifdef CONFIG_XEN
3016 ++ /*
3017 ++ * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
3018 ++ * current_pt_regs()->flags may not match the current task's
3019 ++ * intended IOPL. We need to switch it manually.
3020 ++ */
3021 ++ if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
3022 ++ prev->iopl != next->iopl))
3023 ++ xen_set_iopl_mask(next->iopl);
3024 ++#endif
3025 ++
3026 + if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
3027 + /*
3028 + * AMD CPUs have a misfeature: SYSRET sets the SS selector but
3029 +diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
3030 +index b285d4e8c68e..5da924bbf0a0 100644
3031 +--- a/arch/x86/kernel/sysfb_efi.c
3032 ++++ b/arch/x86/kernel/sysfb_efi.c
3033 +@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
3034 + continue;
3035 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3036 + resource_size_t start, end;
3037 ++ unsigned long flags;
3038 ++
3039 ++ flags = pci_resource_flags(dev, i);
3040 ++ if (!(flags & IORESOURCE_MEM))
3041 ++ continue;
3042 ++
3043 ++ if (flags & IORESOURCE_UNSET)
3044 ++ continue;
3045 ++
3046 ++ if (pci_resource_len(dev, i) == 0)
3047 ++ continue;
3048 +
3049 + start = pci_resource_start(dev, i);
3050 +- if (start == 0)
3051 +- break;
3052 + end = pci_resource_end(dev, i);
3053 + if (screen_info.lfb_base >= start &&
3054 + screen_info.lfb_base < end) {
3055 + found_bar = 1;
3056 ++ break;
3057 + }
3058 + }
3059 + }
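The fixed loop no longer bails out at the first BAR with start == 0; it filters each resource on type and validity before doing the range compare. A hedged userspace model of that filter (the IORESOURCE_* values here are stand-ins, not the real <linux/ioport.h> bits):

#include <stdbool.h>
#include <stdio.h>

#define IORESOURCE_MEM   (1u << 0)  /* illustrative values only */
#define IORESOURCE_UNSET (1u << 1)

struct bar { unsigned int flags; unsigned long start, len; };

/* Same shape as the fixed loop: ignore I/O-port, unassigned and
 * zero-length BARs instead of aborting the scan early. */
static bool bar_contains(const struct bar *b, unsigned long addr)
{
    if (!(b->flags & IORESOURCE_MEM) || (b->flags & IORESOURCE_UNSET))
        return false;
    if (b->len == 0)
        return false;
    return addr >= b->start && addr < b->start + b->len;
}

int main(void)
{
    struct bar b = { IORESOURCE_MEM, 0xe0000000UL, 0x1000000UL };
    printf("%d\n", bar_contains(&b, 0xe0100000UL)); /* prints 1 */
    return 0;
}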
3060 +diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
3061 +index 92ae6acac8a7..6aa0f4d9eea6 100644
3062 +--- a/arch/x86/kernel/tsc_msr.c
3063 ++++ b/arch/x86/kernel/tsc_msr.c
3064 +@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
3065 +
3066 + if (freq_desc_tables[cpu_index].msr_plat) {
3067 + rdmsr(MSR_PLATFORM_INFO, lo, hi);
3068 +- ratio = (lo >> 8) & 0x1f;
3069 ++ ratio = (lo >> 8) & 0xff;
3070 + } else {
3071 + rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
3072 + ratio = (hi >> 8) & 0x1f;
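The one-character mask change matters because MSR_PLATFORM_INFO keeps the maximum non-turbo ratio in a full byte (bits 15:8); masking with 0x1f truncated any ratio above 31 and miscalibrated the TSC. A quick demonstration of the difference:

#include <stdio.h>

/* Extract the non-turbo ratio field from the low MSR word. */
static unsigned int plat_info_ratio(unsigned int lo)
{
    return (lo >> 8) & 0xff;
}

int main(void)
{
    unsigned int lo = 0x2400;           /* ratio 0x24 == 36 */
    printf("old mask: %u, fixed mask: %u\n",
           (lo >> 8) & 0x1f, plat_info_ratio(lo));
    /* prints "old mask: 4, fixed mask: 36" */
    return 0;
}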
3073 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
3074 +index 6525e926f566..2e1fd586b895 100644
3075 +--- a/arch/x86/kvm/cpuid.c
3076 ++++ b/arch/x86/kvm/cpuid.c
3077 +@@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
3078 + do_cpuid_1_ent(&entry[i], function, idx);
3079 + if (idx == 1) {
3080 + entry[i].eax &= kvm_supported_word10_x86_features;
3081 ++ cpuid_mask(&entry[i].eax, 10);
3082 + entry[i].ebx = 0;
3083 + if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
3084 + entry[i].ebx =
3085 +diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
3086 +index b0ea42b78ccd..ab5318727579 100644
3087 +--- a/arch/x86/kvm/i8254.c
3088 ++++ b/arch/x86/kvm/i8254.c
3089 +@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
3090 + * PIC is being reset. Handle it gracefully here
3091 + */
3092 + atomic_inc(&ps->pending);
3093 +- else if (value > 0)
3094 ++ else if (value > 0 && ps->reinject)
3095 + /* in this case, we had multiple outstanding pit interrupts
3096 + * that we needed to inject. Reinject
3097 + */
3098 +@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
3099 + * last one has been acked.
3100 + */
3101 + spin_lock(&ps->inject_lock);
3102 +- if (ps->irq_ack) {
3103 ++ if (!ps->reinject)
3104 ++ inject = 1;
3105 ++ else if (ps->irq_ack) {
3106 + ps->irq_ack = 0;
3107 + inject = 1;
3108 + }
3109 +@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
3110 + struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
3111 + struct kvm_pit *pt = ps->kvm->arch.vpit;
3112 +
3113 +- if (ps->reinject || !atomic_read(&ps->pending)) {
3114 ++ if (ps->reinject)
3115 + atomic_inc(&ps->pending);
3116 +- queue_kthread_work(&pt->worker, &pt->expired);
3117 +- }
3118 ++
3119 ++ queue_kthread_work(&pt->worker, &pt->expired);
3120 +
3121 + if (ps->is_periodic) {
3122 + hrtimer_add_expires_ns(&ps->timer, ps->period);
3123 +diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
3124 +index 3f8c732117ec..c146f3c262c3 100644
3125 +--- a/arch/x86/kvm/mtrr.c
3126 ++++ b/arch/x86/kvm/mtrr.c
3127 +@@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr)
3128 + case MSR_MTRRdefType:
3129 + case MSR_IA32_CR_PAT:
3130 + return true;
3131 +- case 0x2f8:
3132 +- return true;
3133 + }
3134 + return false;
3135 + }
3136 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
3137 +index 9bd8f44baded..60946a5d3812 100644
3138 +--- a/arch/x86/kvm/vmx.c
3139 ++++ b/arch/x86/kvm/vmx.c
3140 +@@ -2702,8 +2702,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
3141 + } else
3142 + vmx->nested.nested_vmx_ept_caps = 0;
3143 +
3144 ++ /*
3145 ++ * Old versions of KVM use the single-context version without
3146 ++ * checking for support, so declare that it is supported even
3147 ++ * though it is treated as global context. The alternative is
3148 ++ * not failing the single-context invvpid, and it is worse.
3149 ++ */
3150 + if (enable_vpid)
3151 + vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
3152 ++ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
3153 + VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
3154 + else
3155 + vmx->nested.nested_vmx_vpid_caps = 0;
3156 +@@ -5014,8 +5021,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
3157 + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
3158 +
3159 + cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
3160 +- vmx_set_cr0(vcpu, cr0); /* enter rmode */
3161 + vmx->vcpu.arch.cr0 = cr0;
3162 ++ vmx_set_cr0(vcpu, cr0); /* enter rmode */
3163 + vmx_set_cr4(vcpu, 0);
3164 + vmx_set_efer(vcpu, 0);
3165 + vmx_fpu_activate(vcpu);
3166 +@@ -7398,6 +7405,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
3167 + if (!(types & (1UL << type))) {
3168 + nested_vmx_failValid(vcpu,
3169 + VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
3170 ++ skip_emulated_instruction(vcpu);
3171 + return 1;
3172 + }
3173 +
3174 +@@ -7456,6 +7464,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
3175 + if (!(types & (1UL << type))) {
3176 + nested_vmx_failValid(vcpu,
3177 + VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
3178 ++ skip_emulated_instruction(vcpu);
3179 + return 1;
3180 + }
3181 +
3182 +@@ -7472,12 +7481,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
3183 + }
3184 +
3185 + switch (type) {
3186 ++ case VMX_VPID_EXTENT_SINGLE_CONTEXT:
3187 ++ /*
3188 ++ * Old versions of KVM use the single-context version so we
3189 ++ * have to support it; just treat it the same as all-context.
3190 ++ */
3191 + case VMX_VPID_EXTENT_ALL_CONTEXT:
3192 + __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
3193 + nested_vmx_succeed(vcpu);
3194 + break;
3195 + default:
3196 +- /* Trap single context invalidation invvpid calls */
3197 ++ /* Trap individual address invalidation invvpid calls */
3198 + BUG_ON(1);
3199 + break;
3200 + }
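Both handle_invept() and handle_invvpid() validate the guest-supplied extent type against an advertised bitmap, and the fix makes the failure path also skip the instruction so the guest does not spin on it. The validation itself is a plain bit test; a sketch with extent numbers following the VMX convention (0 = individual address, 1 = single context, 2 = all context; everything else here is invented):

#include <stdio.h>

static int type_supported(unsigned long types, unsigned int type)
{
    return (types & (1UL << type)) != 0;
}

int main(void)
{
    /* advertise single-context and all-context, as the patched
     * nested_vmx_setup_ctls_msrs() now does */
    unsigned long types = (1UL << 1) | (1UL << 2);

    printf("individual: %d, all-context: %d\n",
           type_supported(types, 0), type_supported(types, 2));
    /* prints "individual: 0, all-context: 1" */
    return 0;
}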
3201 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
3202 +index eaf6ee8c28b8..ac4963c38aa3 100644
3203 +--- a/arch/x86/kvm/x86.c
3204 ++++ b/arch/x86/kvm/x86.c
3205 +@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
3206 + if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
3207 + return 1;
3208 + }
3209 +- kvm_put_guest_xcr0(vcpu);
3210 + vcpu->arch.xcr0 = xcr0;
3211 +
3212 + if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
3213 +@@ -2752,6 +2751,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3214 + }
3215 +
3216 + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3217 ++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
3218 + }
3219 +
3220 + void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3221 +@@ -6073,12 +6073,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
3222 + }
3223 +
3224 + /* try to inject new event if pending */
3225 +- if (vcpu->arch.nmi_pending) {
3226 +- if (kvm_x86_ops->nmi_allowed(vcpu)) {
3227 +- --vcpu->arch.nmi_pending;
3228 +- vcpu->arch.nmi_injected = true;
3229 +- kvm_x86_ops->set_nmi(vcpu);
3230 +- }
3231 ++ if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
3232 ++ --vcpu->arch.nmi_pending;
3233 ++ vcpu->arch.nmi_injected = true;
3234 ++ kvm_x86_ops->set_nmi(vcpu);
3235 + } else if (kvm_cpu_has_injectable_intr(vcpu)) {
3236 + /*
3237 + * Because interrupts can be injected asynchronously, we are
3238 +@@ -6547,10 +6545,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3239 + if (inject_pending_event(vcpu, req_int_win) != 0)
3240 + req_immediate_exit = true;
3241 + /* enable NMI/IRQ window open exits if needed */
3242 +- else if (vcpu->arch.nmi_pending)
3243 +- kvm_x86_ops->enable_nmi_window(vcpu);
3244 +- else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
3245 +- kvm_x86_ops->enable_irq_window(vcpu);
3246 ++ else {
3247 ++ if (vcpu->arch.nmi_pending)
3248 ++ kvm_x86_ops->enable_nmi_window(vcpu);
3249 ++ if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
3250 ++ kvm_x86_ops->enable_irq_window(vcpu);
3251 ++ }
3252 +
3253 + if (kvm_lapic_enabled(vcpu)) {
3254 + update_cr8_intercept(vcpu);
3255 +@@ -6568,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3256 + kvm_x86_ops->prepare_guest_switch(vcpu);
3257 + if (vcpu->fpu_active)
3258 + kvm_load_guest_fpu(vcpu);
3259 +- kvm_load_guest_xcr0(vcpu);
3260 +-
3261 + vcpu->mode = IN_GUEST_MODE;
3262 +
3263 + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3264 +@@ -6592,6 +6590,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3265 + goto cancel_injection;
3266 + }
3267 +
3268 ++ kvm_load_guest_xcr0(vcpu);
3269 ++
3270 + if (req_immediate_exit)
3271 + smp_send_reschedule(vcpu->cpu);
3272 +
3273 +@@ -6641,6 +6641,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
3274 + vcpu->mode = OUTSIDE_GUEST_MODE;
3275 + smp_wmb();
3276 +
3277 ++ kvm_put_guest_xcr0(vcpu);
3278 ++
3279 + /* Interrupt is enabled by handle_external_intr() */
3280 + kvm_x86_ops->handle_external_intr(vcpu);
3281 +
3282 +@@ -7288,7 +7290,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
3283 + * and assume host would use all available bits.
3284 + * Guest xcr0 would be loaded later.
3285 + */
3286 +- kvm_put_guest_xcr0(vcpu);
3287 + vcpu->guest_fpu_loaded = 1;
3288 + __kernel_fpu_begin();
3289 + __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
3290 +@@ -7297,8 +7298,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
3291 +
3292 + void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
3293 + {
3294 +- kvm_put_guest_xcr0(vcpu);
3295 +-
3296 + if (!vcpu->guest_fpu_loaded) {
3297 + vcpu->fpu_counter = 0;
3298 + return;
3299 +diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
3300 +index 637ab34ed632..ddb2244b06a1 100644
3301 +--- a/arch/x86/mm/kmmio.c
3302 ++++ b/arch/x86/mm/kmmio.c
3303 +@@ -33,7 +33,7 @@
3304 + struct kmmio_fault_page {
3305 + struct list_head list;
3306 + struct kmmio_fault_page *release_next;
3307 +- unsigned long page; /* location of the fault page */
3308 ++ unsigned long addr; /* the requested address */
3309 + pteval_t old_presence; /* page presence prior to arming */
3310 + bool armed;
3311 +
3312 +@@ -70,9 +70,16 @@ unsigned int kmmio_count;
3313 + static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
3314 + static LIST_HEAD(kmmio_probes);
3315 +
3316 +-static struct list_head *kmmio_page_list(unsigned long page)
3317 ++static struct list_head *kmmio_page_list(unsigned long addr)
3318 + {
3319 +- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
3320 ++ unsigned int l;
3321 ++ pte_t *pte = lookup_address(addr, &l);
3322 ++
3323 ++ if (!pte)
3324 ++ return NULL;
3325 ++ addr &= page_level_mask(l);
3326 ++
3327 ++ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
3328 + }
3329 +
3330 + /* Accessed per-cpu */
3331 +@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
3332 + }
3333 +
3334 + /* You must be holding RCU read lock. */
3335 +-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
3336 ++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
3337 + {
3338 + struct list_head *head;
3339 + struct kmmio_fault_page *f;
3340 ++ unsigned int l;
3341 ++ pte_t *pte = lookup_address(addr, &l);
3342 +
3343 +- page &= PAGE_MASK;
3344 +- head = kmmio_page_list(page);
3345 ++ if (!pte)
3346 ++ return NULL;
3347 ++ addr &= page_level_mask(l);
3348 ++ head = kmmio_page_list(addr);
3349 + list_for_each_entry_rcu(f, head, list) {
3350 +- if (f->page == page)
3351 ++ if (f->addr == addr)
3352 + return f;
3353 + }
3354 + return NULL;
3355 +@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
3356 + static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
3357 + {
3358 + unsigned int level;
3359 +- pte_t *pte = lookup_address(f->page, &level);
3360 ++ pte_t *pte = lookup_address(f->addr, &level);
3361 +
3362 + if (!pte) {
3363 +- pr_err("no pte for page 0x%08lx\n", f->page);
3364 ++ pr_err("no pte for addr 0x%08lx\n", f->addr);
3365 + return -1;
3366 + }
3367 +
3368 +@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
3369 + return -1;
3370 + }
3371 +
3372 +- __flush_tlb_one(f->page);
3373 ++ __flush_tlb_one(f->addr);
3374 + return 0;
3375 + }
3376 +
3377 +@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
3378 + int ret;
3379 + WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
3380 + if (f->armed) {
3381 +- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
3382 +- f->page, f->count, !!f->old_presence);
3383 ++ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
3384 ++ f->addr, f->count, !!f->old_presence);
3385 + }
3386 + ret = clear_page_presence(f, true);
3387 +- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
3388 +- f->page);
3389 ++ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
3390 ++ f->addr);
3391 + f->armed = true;
3392 + return ret;
3393 + }
3394 +@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
3395 + {
3396 + int ret = clear_page_presence(f, false);
3397 + WARN_ONCE(ret < 0,
3398 +- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
3399 ++ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
3400 + f->armed = false;
3401 + }
3402 +
3403 +@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
3404 + struct kmmio_context *ctx;
3405 + struct kmmio_fault_page *faultpage;
3406 + int ret = 0; /* default to fault not handled */
3407 ++ unsigned long page_base = addr;
3408 ++ unsigned int l;
3409 ++ pte_t *pte = lookup_address(addr, &l);
3410 ++ if (!pte)
3411 ++ return -EINVAL;
3412 ++ page_base &= page_level_mask(l);
3413 +
3414 + /*
3415 + * Preemption is now disabled to prevent process switch during
3416 +@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
3417 + preempt_disable();
3418 + rcu_read_lock();
3419 +
3420 +- faultpage = get_kmmio_fault_page(addr);
3421 ++ faultpage = get_kmmio_fault_page(page_base);
3422 + if (!faultpage) {
3423 + /*
3424 + * Either this page fault is not caused by kmmio, or
3425 +@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
3426 +
3427 + ctx = &get_cpu_var(kmmio_ctx);
3428 + if (ctx->active) {
3429 +- if (addr == ctx->addr) {
3430 ++ if (page_base == ctx->addr) {
3431 + /*
3432 + * A second fault on the same page means some other
3433 + * condition needs handling by do_page_fault(), the
3434 +@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
3435 + ctx->active++;
3436 +
3437 + ctx->fpage = faultpage;
3438 +- ctx->probe = get_kmmio_probe(addr);
3439 ++ ctx->probe = get_kmmio_probe(page_base);
3440 + ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
3441 +- ctx->addr = addr;
3442 ++ ctx->addr = page_base;
3443 +
3444 + if (ctx->probe && ctx->probe->pre_handler)
3445 + ctx->probe->pre_handler(ctx->probe, regs, addr);
3446 +@@ -354,12 +371,11 @@ out:
3447 + }
3448 +
3449 + /* You must be holding kmmio_lock. */
3450 +-static int add_kmmio_fault_page(unsigned long page)
3451 ++static int add_kmmio_fault_page(unsigned long addr)
3452 + {
3453 + struct kmmio_fault_page *f;
3454 +
3455 +- page &= PAGE_MASK;
3456 +- f = get_kmmio_fault_page(page);
3457 ++ f = get_kmmio_fault_page(addr);
3458 + if (f) {
3459 + if (!f->count)
3460 + arm_kmmio_fault_page(f);
3461 +@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
3462 + return -1;
3463 +
3464 + f->count = 1;
3465 +- f->page = page;
3466 ++ f->addr = addr;
3467 +
3468 + if (arm_kmmio_fault_page(f)) {
3469 + kfree(f);
3470 + return -1;
3471 + }
3472 +
3473 +- list_add_rcu(&f->list, kmmio_page_list(f->page));
3474 ++ list_add_rcu(&f->list, kmmio_page_list(f->addr));
3475 +
3476 + return 0;
3477 + }
3478 +
3479 + /* You must be holding kmmio_lock. */
3480 +-static void release_kmmio_fault_page(unsigned long page,
3481 ++static void release_kmmio_fault_page(unsigned long addr,
3482 + struct kmmio_fault_page **release_list)
3483 + {
3484 + struct kmmio_fault_page *f;
3485 +
3486 +- page &= PAGE_MASK;
3487 +- f = get_kmmio_fault_page(page);
3488 ++ f = get_kmmio_fault_page(addr);
3489 + if (!f)
3490 + return;
3491 +
3492 +@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
3493 + int ret = 0;
3494 + unsigned long size = 0;
3495 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
3496 ++ unsigned int l;
3497 ++ pte_t *pte;
3498 +
3499 + spin_lock_irqsave(&kmmio_lock, flags);
3500 + if (get_kmmio_probe(p->addr)) {
3501 + ret = -EEXIST;
3502 + goto out;
3503 + }
3504 ++
3505 ++ pte = lookup_address(p->addr, &l);
3506 ++ if (!pte) {
3507 ++ ret = -EINVAL;
3508 ++ goto out;
3509 ++ }
3510 ++
3511 + kmmio_count++;
3512 + list_add_rcu(&p->list, &kmmio_probes);
3513 + while (size < size_lim) {
3514 + if (add_kmmio_fault_page(p->addr + size))
3515 + pr_err("Unable to set page fault.\n");
3516 +- size += PAGE_SIZE;
3517 ++ size += page_level_size(l);
3518 + }
3519 + out:
3520 + spin_unlock_irqrestore(&kmmio_lock, flags);
3521 +@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
3522 + const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
3523 + struct kmmio_fault_page *release_list = NULL;
3524 + struct kmmio_delayed_release *drelease;
3525 ++ unsigned int l;
3526 ++ pte_t *pte;
3527 ++
3528 ++ pte = lookup_address(p->addr, &l);
3529 ++ if (!pte)
3530 ++ return;
3531 +
3532 + spin_lock_irqsave(&kmmio_lock, flags);
3533 + while (size < size_lim) {
3534 + release_kmmio_fault_page(p->addr + size, &release_list);
3535 +- size += PAGE_SIZE;
3536 ++ size += page_level_size(l);
3537 + }
3538 + list_del_rcu(&p->list);
3539 + kmmio_count--;
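The recurring pattern in this kmmio rework is "look up the PTE, then align the address to the size of the mapping that actually backs it", since a fixed PAGE_MASK mis-handles 2M/1G mappings. A self-contained model of what page_level_mask() buys (the level constants and sizes are hard-coded here for illustration; the kernel derives them from lookup_address()):

#include <stdio.h>

enum pg_level { PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

static unsigned long level_mask(enum pg_level l)
{
    switch (l) {
    case PG_LEVEL_2M: return ~((1UL << 21) - 1);
    case PG_LEVEL_1G: return ~((1UL << 30) - 1);
    default:          return ~((1UL << 12) - 1);
    }
}

int main(void)
{
    unsigned long addr = 0x40212345UL;

    /* 4K masking keeps 0x40212000, but if the address lives in a
     * 2M mapping the fault page really starts at 0x40200000. */
    printf("4K: %#lx  2M: %#lx\n",
           addr & level_mask(PG_LEVEL_4K),
           addr & level_mask(PG_LEVEL_2M));
    return 0;
}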
3540 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
3541 +index 8f4cc3dfac32..5fb6adaaa796 100644
3542 +--- a/arch/x86/mm/tlb.c
3543 ++++ b/arch/x86/mm/tlb.c
3544 +@@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
3545 +
3546 + if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
3547 + return;
3548 +- if (!f->flush_end)
3549 +- f->flush_end = f->flush_start + PAGE_SIZE;
3550 +
3551 + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
3552 + if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
3553 +@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
3554 + unsigned long end)
3555 + {
3556 + struct flush_tlb_info info;
3557 ++
3558 ++ if (end == 0)
3559 ++ end = start + PAGE_SIZE;
3560 + info.flush_mm = mm;
3561 + info.flush_start = start;
3562 + info.flush_end = end;
3563 +
3564 + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
3565 +- trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
3566 ++ if (end == TLB_FLUSH_ALL)
3567 ++ trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
3568 ++ else
3569 ++ trace_tlb_flush(TLB_REMOTE_SEND_IPI,
3570 ++ (end - start) >> PAGE_SHIFT);
3571 ++
3572 + if (is_uv_system()) {
3573 + unsigned int cpu;
3574 +
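The trace fix reports a page count instead of a byte delta, with TLB_FLUSH_ALL passed through as a sentinel and the "end == 0 means one page" convention resolved up front. The arithmetic, in a standalone sketch (PAGE_SHIFT and the sentinel value are illustrative):

#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define TLB_FLUSH_ALL (~0UL)

static unsigned long flushed_pages(unsigned long start, unsigned long end)
{
    if (end == 0)                 /* single-page flush convention */
        end = start + PAGE_SIZE;
    if (end == TLB_FLUSH_ALL)     /* full flush: report the sentinel */
        return TLB_FLUSH_ALL;
    return (end - start) >> PAGE_SHIFT;
}

int main(void)
{
    printf("%lu\n", flushed_pages(0x1000, 0x5000)); /* prints 4 */
    return 0;
}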
3575 +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
3576 +index e58565556703..0ae7e9fa348d 100644
3577 +--- a/arch/x86/pci/fixup.c
3578 ++++ b/arch/x86/pci/fixup.c
3579 +@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
3580 + }
3581 + }
3582 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
3583 ++
3584 ++static void pci_bdwep_bar(struct pci_dev *dev)
3585 ++{
3586 ++ dev->non_compliant_bars = 1;
3587 ++}
3588 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
3589 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
3590 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
3591 +index beac4dfdade6..349b8ce92bf2 100644
3592 +--- a/arch/x86/pci/xen.c
3593 ++++ b/arch/x86/pci/xen.c
3594 +@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
3595 + #endif
3596 + __acpi_register_gsi = acpi_register_gsi_xen;
3597 + __acpi_unregister_gsi = NULL;
3598 +- /* Pre-allocate legacy irqs */
3599 +- for (irq = 0; irq < nr_legacy_irqs(); irq++) {
3600 ++ /*
3601 ++	 * Pre-allocate the legacy IRQs. Use NR_IRQS_LEGACY here
3602 ++ * because we don't have a PIC and thus nr_legacy_irqs() is zero.
3603 ++ */
3604 ++ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
3605 + int trigger, polarity;
3606 +
3607 + if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
3608 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
3609 +index d09e4c9d7cc5..e3679db17545 100644
3610 +--- a/arch/x86/xen/enlighten.c
3611 ++++ b/arch/x86/xen/enlighten.c
3612 +@@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
3613 + tss->x86_tss.sp0 = thread->sp0;
3614 + }
3615 +
3616 +-static void xen_set_iopl_mask(unsigned mask)
3617 ++void xen_set_iopl_mask(unsigned mask)
3618 + {
3619 + struct physdev_set_iopl set_iopl;
3620 +
3621 +diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
3622 +index 9ed55649ac8e..05e1df943856 100644
3623 +--- a/arch/xtensa/kernel/head.S
3624 ++++ b/arch/xtensa/kernel/head.S
3625 +@@ -128,7 +128,7 @@ ENTRY(_startup)
3626 + wsr a0, icountlevel
3627 +
3628 + .set _index, 0
3629 +- .rept XCHAL_NUM_DBREAK - 1
3630 ++ .rept XCHAL_NUM_DBREAK
3631 + wsr a0, SREG_DBREAKC + _index
3632 + .set _index, _index + 1
3633 + .endr
3634 +diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
3635 +index d75aa1476da7..1a804a2f9a5b 100644
3636 +--- a/arch/xtensa/mm/cache.c
3637 ++++ b/arch/xtensa/mm/cache.c
3638 +@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
3639 + unsigned long paddr;
3640 + void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
3641 +
3642 +- pagefault_disable();
3643 ++ preempt_disable();
3644 + kmap_invalidate_coherent(page, vaddr);
3645 + set_bit(PG_arch_1, &page->flags);
3646 + clear_page_alias(kvaddr, paddr);
3647 +- pagefault_enable();
3648 ++ preempt_enable();
3649 + }
3650 +
3651 + void copy_user_highpage(struct page *dst, struct page *src,
3652 +@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
3653 + void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
3654 + &src_paddr);
3655 +
3656 +- pagefault_disable();
3657 ++ preempt_disable();
3658 + kmap_invalidate_coherent(dst, vaddr);
3659 + set_bit(PG_arch_1, &dst->flags);
3660 + copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
3661 +- pagefault_enable();
3662 ++ preempt_enable();
3663 + }
3664 +
3665 + #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
3666 +diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
3667 +index 70cb408bc20d..92d785fefb6d 100644
3668 +--- a/arch/xtensa/platforms/iss/console.c
3669 ++++ b/arch/xtensa/platforms/iss/console.c
3670 +@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
3671 + {
3672 + struct tty_port *port = (struct tty_port *)priv;
3673 + int i = 0;
3674 ++ int rd = 1;
3675 + unsigned char c;
3676 +
3677 + spin_lock(&timer_lock);
3678 +
3679 + while (simc_poll(0)) {
3680 +- simc_read(0, &c, 1);
3681 ++ rd = simc_read(0, &c, 1);
3682 ++ if (rd <= 0)
3683 ++ break;
3684 + tty_insert_flip_char(port, c, TTY_NORMAL);
3685 + i++;
3686 + }
3687 +
3688 + if (i)
3689 + tty_flip_buffer_push(port);
3690 +-
3691 +-
3692 +- mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
3693 ++ if (rd)
3694 ++ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
3695 + spin_unlock(&timer_lock);
3696 + }
3697 +
3698 +diff --git a/block/blk-core.c b/block/blk-core.c
3699 +index b83d29755b5a..45f4d7efbf34 100644
3700 +--- a/block/blk-core.c
3701 ++++ b/block/blk-core.c
3702 +@@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
3703 + if (q->mq_ops) {
3704 + if (blk_queue_io_stat(q))
3705 + blk_account_io_start(rq, true);
3706 +- blk_mq_insert_request(rq, false, true, true);
3707 ++ blk_mq_insert_request(rq, false, true, false);
3708 + return 0;
3709 + }
3710 +
3711 +diff --git a/block/partition-generic.c b/block/partition-generic.c
3712 +index fefd01b496a0..cfcfe1b0ecbc 100644
3713 +--- a/block/partition-generic.c
3714 ++++ b/block/partition-generic.c
3715 +@@ -350,15 +350,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
3716 + goto out_del;
3717 + }
3718 +
3719 ++ err = hd_ref_init(p);
3720 ++ if (err) {
3721 ++ if (flags & ADDPART_FLAG_WHOLEDISK)
3722 ++ goto out_remove_file;
3723 ++ goto out_del;
3724 ++ }
3725 ++
3726 + /* everything is up and running, commence */
3727 + rcu_assign_pointer(ptbl->part[partno], p);
3728 +
3729 + /* suppress uevent if the disk suppresses it */
3730 + if (!dev_get_uevent_suppress(ddev))
3731 + kobject_uevent(&pdev->kobj, KOBJ_ADD);
3732 +-
3733 +- if (!hd_ref_init(p))
3734 +- return p;
3735 ++ return p;
3736 +
3737 + out_free_info:
3738 + free_part_info(p);
3739 +@@ -367,6 +372,8 @@ out_free_stats:
3740 + out_free:
3741 + kfree(p);
3742 + return ERR_PTR(err);
3743 ++out_remove_file:
3744 ++ device_remove_file(pdev, &dev_attr_whole_disk);
3745 + out_del:
3746 + kobject_put(p->holder_dir);
3747 + device_del(pdev);
3748 +diff --git a/crypto/ahash.c b/crypto/ahash.c
3749 +index d19b52324cf5..dac1c24e9c3e 100644
3750 +--- a/crypto/ahash.c
3751 ++++ b/crypto/ahash.c
3752 +@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
3753 + struct scatterlist *sg;
3754 +
3755 + sg = walk->sg;
3756 +- walk->pg = sg_page(sg);
3757 + walk->offset = sg->offset;
3758 ++ walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
3759 ++ walk->offset = offset_in_page(walk->offset);
3760 + walk->entrylen = sg->length;
3761 +
3762 + if (walk->entrylen > walk->total)
3763 +diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
3764 +index 90d6d47965b0..ecdb5a2ce085 100644
3765 +--- a/crypto/asymmetric_keys/pkcs7_trust.c
3766 ++++ b/crypto/asymmetric_keys/pkcs7_trust.c
3767 +@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
3768 + int cached_ret = -ENOKEY;
3769 + int ret;
3770 +
3771 ++ *_trusted = false;
3772 ++
3773 + for (p = pkcs7->certs; p; p = p->next)
3774 + p->seen = false;
3775 +
3776 +diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
3777 +index 021d39c0ba75..13c4e5a5fe8c 100644
3778 +--- a/crypto/asymmetric_keys/x509_cert_parser.c
3779 ++++ b/crypto/asymmetric_keys/x509_cert_parser.c
3780 +@@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
3781 + unsigned char tag,
3782 + const unsigned char *value, size_t vlen)
3783 + {
3784 +- static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
3785 ++ static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
3786 + 31, 31, 30, 31, 30, 31 };
3787 + const unsigned char *p = value;
3788 + unsigned year, mon, day, hour, min, sec, mon_len;
3789 +@@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
3790 + if (year % 4 == 0) {
3791 + mon_len = 29;
3792 + if (year % 100 == 0) {
3793 +- year /= 100;
3794 +- if (year % 4 != 0)
3795 +- mon_len = 28;
3796 ++ mon_len = 28;
3797 ++ if (year % 400 == 0)
3798 ++ mon_len = 29;
3799 + }
3800 + }
3801 + }
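The x509_decode_time() fix is two-fold: the month table now states February's common length (28), and the leap test no longer clobbers 'year' with an in-place division, which corrupted the value used by the later time conversion. The rule it implements is the standard Gregorian one; a compact, testable version:

#include <assert.h>
#include <stdbool.h>

/* Leap if divisible by 4, except centuries, except every 400th year. */
static bool is_leap(unsigned int year)
{
    return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
}

int main(void)
{
    assert(is_leap(2016));
    assert(!is_leap(1900));   /* century not divisible by 400 */
    assert(is_leap(2000));    /* divisible by 400 */
    assert(!is_leap(2015));
    return 0;
}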
3802 +diff --git a/crypto/keywrap.c b/crypto/keywrap.c
3803 +index b1d106ce55f3..72014f963ba7 100644
3804 +--- a/crypto/keywrap.c
3805 ++++ b/crypto/keywrap.c
3806 +@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
3807 + SEMIBSIZE))
3808 + ret = -EBADMSG;
3809 +
3810 +- memzero_explicit(&block, sizeof(struct crypto_kw_block));
3811 ++ memzero_explicit(block, sizeof(struct crypto_kw_block));
3812 +
3813 + return ret;
3814 + }
3815 +@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
3816 + /* establish the IV for the caller to pick up */
3817 + memcpy(desc->info, block->A, SEMIBSIZE);
3818 +
3819 +- memzero_explicit(&block, sizeof(struct crypto_kw_block));
3820 ++ memzero_explicit(block, sizeof(struct crypto_kw_block));
3821 +
3822 + return 0;
3823 + }
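The keywrap change fixes a classic pointer slip: since 'block' is already a pointer, memzero_explicit(&block, sizeof(...)) wiped sizeof(struct) bytes starting at the pointer variable itself and left the sensitive block untouched. A userspace reproduction of the shape of the bug, using memset as a stand-in for memzero_explicit:

#include <stdio.h>
#include <string.h>

struct kw_block { unsigned char a[8], r[8]; };

int main(void)
{
    struct kw_block blk;
    struct kw_block *block = &blk;

    memset(&blk, 0xaa, sizeof(blk));

    /* Correct form, as in the fix: clear the pointed-to memory.
     * The buggy form was the moral equivalent of
     * memset(&block, 0, sizeof(struct kw_block)), which zeroes the
     * pointer (and adjacent stack), not the block it points at. */
    memset(block, 0, sizeof(struct kw_block));

    printf("first byte after wipe: %d\n", blk.a[0]); /* prints 0 */
    return 0;
}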
3824 +diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
3825 +index 50f5c97e1087..0cbc5a5025c2 100644
3826 +--- a/crypto/rsa-pkcs1pad.c
3827 ++++ b/crypto/rsa-pkcs1pad.c
3828 +@@ -310,16 +310,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
3829 + req_ctx->child_req.src = req->src;
3830 + req_ctx->child_req.src_len = req->src_len;
3831 + req_ctx->child_req.dst = req_ctx->out_sg;
3832 +- req_ctx->child_req.dst_len = ctx->key_size - 1;
3833 ++ req_ctx->child_req.dst_len = ctx->key_size ;
3834 +
3835 +- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
3836 ++ req_ctx->out_buf = kmalloc(ctx->key_size,
3837 + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3838 + GFP_KERNEL : GFP_ATOMIC);
3839 + if (!req_ctx->out_buf)
3840 + return -ENOMEM;
3841 +
3842 + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
3843 +- ctx->key_size - 1, NULL);
3844 ++ ctx->key_size, NULL);
3845 +
3846 + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
3847 + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
3848 +@@ -491,16 +491,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
3849 + req_ctx->child_req.src = req->src;
3850 + req_ctx->child_req.src_len = req->src_len;
3851 + req_ctx->child_req.dst = req_ctx->out_sg;
3852 +- req_ctx->child_req.dst_len = ctx->key_size - 1;
3853 ++ req_ctx->child_req.dst_len = ctx->key_size;
3854 +
3855 +- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
3856 ++ req_ctx->out_buf = kmalloc(ctx->key_size,
3857 + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3858 + GFP_KERNEL : GFP_ATOMIC);
3859 + if (!req_ctx->out_buf)
3860 + return -ENOMEM;
3861 +
3862 + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
3863 +- ctx->key_size - 1, NULL);
3864 ++ ctx->key_size, NULL);
3865 +
3866 + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
3867 + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
3868 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
3869 +index ae8c57fd8bc7..d4944318ca1f 100644
3870 +--- a/crypto/testmgr.c
3871 ++++ b/crypto/testmgr.c
3872 +@@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
3873 + static int do_test_rsa(struct crypto_akcipher *tfm,
3874 + struct akcipher_testvec *vecs)
3875 + {
3876 ++ char *xbuf[XBUFSIZE];
3877 + struct akcipher_request *req;
3878 + void *outbuf_enc = NULL;
3879 + void *outbuf_dec = NULL;
3880 +@@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
3881 + int err = -ENOMEM;
3882 + struct scatterlist src, dst, src_tab[2];
3883 +
3884 ++ if (testmgr_alloc_buf(xbuf))
3885 ++ return err;
3886 ++
3887 + req = akcipher_request_alloc(tfm, GFP_KERNEL);
3888 + if (!req)
3889 +- return err;
3890 ++ goto free_xbuf;
3891 +
3892 + init_completion(&result.completion);
3893 +
3894 +@@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
3895 + if (!outbuf_enc)
3896 + goto free_req;
3897 +
3898 ++ if (WARN_ON(vecs->m_size > PAGE_SIZE))
3899 ++ goto free_all;
3900 ++
3901 ++ memcpy(xbuf[0], vecs->m, vecs->m_size);
3902 ++
3903 + sg_init_table(src_tab, 2);
3904 +- sg_set_buf(&src_tab[0], vecs->m, 8);
3905 +- sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
3906 ++ sg_set_buf(&src_tab[0], xbuf[0], 8);
3907 ++ sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
3908 + sg_init_one(&dst, outbuf_enc, out_len_max);
3909 + akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
3910 + out_len_max);
3911 +@@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
3912 + goto free_all;
3913 + }
3914 + /* verify that encrypted message is equal to expected */
3915 +- if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
3916 ++ if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
3917 + pr_err("alg: rsa: encrypt test failed. Invalid output\n");
3918 + err = -EINVAL;
3919 + goto free_all;
3920 +@@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
3921 + err = -ENOMEM;
3922 + goto free_all;
3923 + }
3924 +- sg_init_one(&src, vecs->c, vecs->c_size);
3925 ++
3926 ++ if (WARN_ON(vecs->c_size > PAGE_SIZE))
3927 ++ goto free_all;
3928 ++
3929 ++ memcpy(xbuf[0], vecs->c, vecs->c_size);
3930 ++
3931 ++ sg_init_one(&src, xbuf[0], vecs->c_size);
3932 + sg_init_one(&dst, outbuf_dec, out_len_max);
3933 + init_completion(&result.completion);
3934 + akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
3935 +@@ -1940,6 +1955,8 @@ free_all:
3936 + kfree(outbuf_enc);
3937 + free_req:
3938 + akcipher_request_free(req);
3939 ++free_xbuf:
3940 ++ testmgr_free_buf(xbuf);
3941 + return err;
3942 + }
3943 +
3944 +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
3945 +index 6979186dbd4b..9f77943653fb 100644
3946 +--- a/drivers/acpi/acpi_processor.c
3947 ++++ b/drivers/acpi/acpi_processor.c
3948 +@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
3949 + }
3950 + #endif /* CONFIG_ACPI_HOTPLUG_CPU */
3951 +
3952 ++#ifdef CONFIG_X86
3953 ++static bool acpi_hwp_native_thermal_lvt_set;
3954 ++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
3955 ++ u32 lvl,
3956 ++ void *context,
3957 ++ void **rv)
3958 ++{
3959 ++ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
3960 ++ u32 capbuf[2];
3961 ++ struct acpi_osc_context osc_context = {
3962 ++ .uuid_str = sb_uuid_str,
3963 ++ .rev = 1,
3964 ++ .cap.length = 8,
3965 ++ .cap.pointer = capbuf,
3966 ++ };
3967 ++
3968 ++ if (acpi_hwp_native_thermal_lvt_set)
3969 ++ return AE_CTRL_TERMINATE;
3970 ++
3971 ++ capbuf[0] = 0x0000;
3972 ++ capbuf[1] = 0x1000; /* set bit 12 */
3973 ++
3974 ++ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
3975 ++ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
3976 ++ u32 *capbuf_ret = osc_context.ret.pointer;
3977 ++
3978 ++ if (capbuf_ret[1] & 0x1000) {
3979 ++ acpi_handle_info(handle,
3980 ++ "_OSC native thermal LVT Acked\n");
3981 ++ acpi_hwp_native_thermal_lvt_set = true;
3982 ++ }
3983 ++ }
3984 ++ kfree(osc_context.ret.pointer);
3985 ++ }
3986 ++
3987 ++ return AE_OK;
3988 ++}
3989 ++
3990 ++void __init acpi_early_processor_osc(void)
3991 ++{
3992 ++ if (boot_cpu_has(X86_FEATURE_HWP)) {
3993 ++ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
3994 ++ ACPI_UINT32_MAX,
3995 ++ acpi_hwp_native_thermal_lvt_osc,
3996 ++ NULL, NULL, NULL);
3997 ++ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
3998 ++ acpi_hwp_native_thermal_lvt_osc,
3999 ++ NULL, NULL);
4000 ++ }
4001 ++}
4002 ++#endif
4003 ++
4004 + /*
4005 + * The following ACPI IDs are known to be suitable for representing as
4006 + * processor devices.
4007 +diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
4008 +index 6a72047aae1c..c3a052d43317 100644
4009 +--- a/drivers/acpi/acpica/dsmethod.c
4010 ++++ b/drivers/acpi/acpica/dsmethod.c
4011 +@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
4012 + obj_desc->method.mutex->mutex.
4013 + original_sync_level =
4014 + obj_desc->method.mutex->mutex.sync_level;
4015 ++
4016 ++ obj_desc->method.mutex->mutex.thread_id =
4017 ++ acpi_os_get_thread_id();
4018 + }
4019 + }
4020 +
4021 +diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
4022 +index bd75d46234a4..ddb436f86415 100644
4023 +--- a/drivers/acpi/acpica/nsinit.c
4024 ++++ b/drivers/acpi/acpica/nsinit.c
4025 +@@ -83,6 +83,8 @@ acpi_status acpi_ns_initialize_objects(void)
4026 +
4027 + ACPI_FUNCTION_TRACE(ns_initialize_objects);
4028 +
4029 ++ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
4030 ++ "[Init] Completing Initialization of ACPI Objects\n"));
4031 + ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
4032 + "**** Starting initialization of namespace objects ****\n"));
4033 + ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
4034 +diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
4035 +index 278666e39563..c37d47982fbe 100644
4036 +--- a/drivers/acpi/acpica/tbxfload.c
4037 ++++ b/drivers/acpi/acpica/tbxfload.c
4038 +@@ -83,6 +83,20 @@ acpi_status __init acpi_load_tables(void)
4039 + "While loading namespace from ACPI tables"));
4040 + }
4041 +
4042 ++ if (!acpi_gbl_group_module_level_code) {
4043 ++ /*
4044 ++ * Initialize the objects that remain uninitialized. This
4045 ++ * runs the executable AML that may be part of the
4046 ++ * declaration of these objects:
4047 ++ * operation_regions, buffer_fields, Buffers, and Packages.
4048 ++ */
4049 ++ status = acpi_ns_initialize_objects();
4050 ++ if (ACPI_FAILURE(status)) {
4051 ++ return_ACPI_STATUS(status);
4052 ++ }
4053 ++ }
4054 ++
4055 ++ acpi_gbl_reg_methods_enabled = TRUE;
4056 + return_ACPI_STATUS(status);
4057 + }
4058 +
4059 +diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
4060 +index 721b87cce908..638fbd4ad72b 100644
4061 +--- a/drivers/acpi/acpica/utxfinit.c
4062 ++++ b/drivers/acpi/acpica/utxfinit.c
4063 +@@ -267,7 +267,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
4064 + * initialized, even if they contain executable AML (see the call to
4065 + * acpi_ns_initialize_objects below).
4066 + */
4067 +- acpi_gbl_reg_methods_enabled = TRUE;
4068 + if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
4069 + ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
4070 + "[Init] Executing _REG OpRegion methods\n"));
4071 +@@ -299,20 +298,18 @@ acpi_status __init acpi_initialize_objects(u32 flags)
4072 + */
4073 + if (acpi_gbl_group_module_level_code) {
4074 + acpi_ns_exec_module_code_list();
4075 +- }
4076 +
4077 +- /*
4078 +- * Initialize the objects that remain uninitialized. This runs the
4079 +- * executable AML that may be part of the declaration of these objects:
4080 +- * operation_regions, buffer_fields, Buffers, and Packages.
4081 +- */
4082 +- if (!(flags & ACPI_NO_OBJECT_INIT)) {
4083 +- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
4084 +- "[Init] Completing Initialization of ACPI Objects\n"));
4085 +-
4086 +- status = acpi_ns_initialize_objects();
4087 +- if (ACPI_FAILURE(status)) {
4088 +- return_ACPI_STATUS(status);
4089 ++ /*
4090 ++ * Initialize the objects that remain uninitialized. This
4091 ++ * runs the executable AML that may be part of the
4092 ++ * declaration of these objects:
4093 ++ * operation_regions, buffer_fields, Buffers, and Packages.
4094 ++ */
4095 ++ if (!(flags & ACPI_NO_OBJECT_INIT)) {
4096 ++ status = acpi_ns_initialize_objects();
4097 ++ if (ACPI_FAILURE(status)) {
4098 ++ return_ACPI_STATUS(status);
4099 ++ }
4100 + }
4101 + }
4102 +
4103 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
4104 +index 891c42d1cd65..f9081b791b81 100644
4105 +--- a/drivers/acpi/bus.c
4106 ++++ b/drivers/acpi/bus.c
4107 +@@ -1005,6 +1005,9 @@ static int __init acpi_bus_init(void)
4108 + goto error1;
4109 + }
4110 +
4111 ++ /* Set capability bits for _OSC under processor scope */
4112 ++ acpi_early_processor_osc();
4113 ++
4114 + /*
4115 + * _OSC method may exist in module level code,
4116 + * so it must be run after ACPI_FULL_INITIALIZATION
4117 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
4118 +index cd2c3d6d40e0..993fd31394c8 100644
4119 +--- a/drivers/acpi/device_pm.c
4120 ++++ b/drivers/acpi/device_pm.c
4121 +@@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device)
4122 +
4123 + return ret;
4124 + }
4125 ++EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
4126 +
4127 + int acpi_device_update_power(struct acpi_device *device, int *state_p)
4128 + {
4129 +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
4130 +index 1e6833a5cd44..6f41c73f82bb 100644
4131 +--- a/drivers/acpi/internal.h
4132 ++++ b/drivers/acpi/internal.h
4133 +@@ -138,6 +138,12 @@ void acpi_early_processor_set_pdc(void);
4134 + static inline void acpi_early_processor_set_pdc(void) {}
4135 + #endif
4136 +
4137 ++#ifdef CONFIG_X86
4138 ++void acpi_early_processor_osc(void);
4139 ++#else
4140 ++static inline void acpi_early_processor_osc(void) {}
4141 ++#endif
4142 ++
4143 + /* --------------------------------------------------------------------------
4144 + Embedded Controller
4145 + -------------------------------------------------------------------------- */
4146 +diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
4147 +index 67da6fb72274..c45fdc49ff28 100644
4148 +--- a/drivers/acpi/osl.c
4149 ++++ b/drivers/acpi/osl.c
4150 +@@ -135,7 +135,7 @@ static struct osi_linux {
4151 + unsigned int enable:1;
4152 + unsigned int dmi:1;
4153 + unsigned int cmdline:1;
4154 +- unsigned int default_disabling:1;
4155 ++ u8 default_disabling;
4156 + } osi_linux = {0, 0, 0, 0};
4157 +
4158 + static u32 acpi_osi_handler(acpi_string interface, u32 supported)
4159 +@@ -1713,10 +1713,13 @@ void __init acpi_osi_setup(char *str)
4160 + if (*str == '!') {
4161 + str++;
4162 + if (*str == '\0') {
4163 +- osi_linux.default_disabling = 1;
4164 ++ /* Do not override acpi_osi=!* */
4165 ++ if (!osi_linux.default_disabling)
4166 ++ osi_linux.default_disabling =
4167 ++ ACPI_DISABLE_ALL_VENDOR_STRINGS;
4168 + return;
4169 + } else if (*str == '*') {
4170 +- acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
4171 ++ osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
4172 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
4173 + osi = &osi_setup_entries[i];
4174 + osi->enable = false;
4175 +@@ -1789,10 +1792,13 @@ static void __init acpi_osi_setup_late(void)
4176 + acpi_status status;
4177 +
4178 + if (osi_linux.default_disabling) {
4179 +- status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
4180 ++ status = acpi_update_interfaces(osi_linux.default_disabling);
4181 +
4182 + if (ACPI_SUCCESS(status))
4183 +- printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
4184 ++ printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
4185 ++ osi_linux.default_disabling ==
4186 ++ ACPI_DISABLE_ALL_STRINGS ?
4187 ++ " and feature groups" : "");
4188 + }
4189 +
4190 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
4191 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
4192 +index d02fd53042a5..56241eb341f4 100644
4193 +--- a/drivers/acpi/resource.c
4194 ++++ b/drivers/acpi/resource.c
4195 +@@ -27,8 +27,20 @@
4196 +
4197 + #ifdef CONFIG_X86
4198 + #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
4199 ++static inline bool acpi_iospace_resource_valid(struct resource *res)
4200 ++{
4201 ++ /* On X86 IO space is limited to the [0 - 64K] IO port range */
4202 ++ return res->end < 0x10003;
4203 ++}
4204 + #else
4205 + #define valid_IRQ(i) (true)
4206 ++/*
4207 ++ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
4208 ++ * addresses mapping IO space in CPU physical address space, IO space
4209 ++ * resources can be placed anywhere in the 64-bit physical address space.
4210 ++ */
4211 ++static inline bool
4212 ++acpi_iospace_resource_valid(struct resource *res) { return true; }
4213 + #endif
4214 +
4215 + static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
4216 +@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
4217 + if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
4218 + res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
4219 +
4220 +- if (res->end >= 0x10003)
4221 ++ if (!acpi_iospace_resource_valid(res))
4222 + res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
4223 +
4224 + if (io_decode == ACPI_DECODE_16)
4225 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
4226 +index 9cb975200cac..f054cadf30d8 100644
4227 +--- a/drivers/acpi/sleep.c
4228 ++++ b/drivers/acpi/sleep.c
4229 +@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
4230 +
4231 + static void acpi_hibernation_leave(void)
4232 + {
4233 ++ pm_set_resume_via_firmware();
4234 + /*
4235 + * If ACPI is not enabled by the BIOS and the boot kernel, we need to
4236 + * enable it here.
4237 +diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
4238 +index 04975b851c23..639adb1f8abd 100644
4239 +--- a/drivers/ata/ahci_platform.c
4240 ++++ b/drivers/ata/ahci_platform.c
4241 +@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
4242 + if (rc)
4243 + return rc;
4244 +
4245 ++ of_property_read_u32(dev->of_node,
4246 ++ "ports-implemented", &hpriv->force_port_map);
4247 ++
4248 + if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
4249 + hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
4250 +
4251 +diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
4252 +index 8e3f7faf00d3..73b19b277138 100644
4253 +--- a/drivers/ata/ahci_xgene.c
4254 ++++ b/drivers/ata/ahci_xgene.c
4255 +@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
4256 + dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
4257 + __func__);
4258 + version = XGENE_AHCI_V1;
4259 +- }
4260 +- if (info->valid & ACPI_VALID_CID)
4261 ++ } else if (info->valid & ACPI_VALID_CID) {
4262 + version = XGENE_AHCI_V2;
4263 ++ }
4264 + }
4265 + }
4266 + #endif
4267 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
4268 +index 85ea5142a095..bb050ea26101 100644
4269 +--- a/drivers/ata/libahci.c
4270 ++++ b/drivers/ata/libahci.c
4271 +@@ -469,6 +469,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
4272 + dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
4273 + port_map, hpriv->force_port_map);
4274 + port_map = hpriv->force_port_map;
4275 ++ hpriv->saved_port_map = port_map;
4276 + }
4277 +
4278 + if (hpriv->mask_port_map) {
4279 +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
4280 +index 301b785f9f56..0caf92ae25f3 100644
4281 +--- a/drivers/base/power/domain.c
4282 ++++ b/drivers/base/power/domain.c
4283 +@@ -1378,7 +1378,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
4284 + mutex_lock(&subdomain->lock);
4285 + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
4286 +
4287 +- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
4288 ++ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
4289 + pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
4290 + subdomain->name);
4291 + ret = -EBUSY;
4292 +diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
4293 +index cf351d3dab1c..0708f301ad97 100644
4294 +--- a/drivers/base/power/opp/core.c
4295 ++++ b/drivers/base/power/opp/core.c
4296 +@@ -844,8 +844,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
4297 + }
4298 +
4299 + opp->u_volt = microvolt[0];
4300 +- opp->u_volt_min = microvolt[1];
4301 +- opp->u_volt_max = microvolt[2];
4302 ++
4303 ++ if (count == 1) {
4304 ++ opp->u_volt_min = opp->u_volt;
4305 ++ opp->u_volt_max = opp->u_volt;
4306 ++ } else {
4307 ++ opp->u_volt_min = microvolt[1];
4308 ++ opp->u_volt_max = microvolt[2];
4309 ++ }
4310 +
4311 + /* Search for "opp-microamp-<name>" */
4312 + prop = NULL;
4313 +diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
4314 +index 7e58f6560399..4a36e415e938 100644
4315 +--- a/drivers/base/regmap/regmap-spmi.c
4316 ++++ b/drivers/base/regmap/regmap-spmi.c
4317 +@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
4318 + while (val_size) {
4319 + len = min_t(size_t, val_size, 8);
4320 +
4321 +- err = spmi_ext_register_readl(context, addr, val, val_size);
4322 ++ err = spmi_ext_register_readl(context, addr, val, len);
4323 + if (err)
4324 + goto err_out;
4325 +
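The regmap-spmi fix is a textbook chunked-transfer bug: the loop computed a per-iteration length but then handed the transport the full remaining size. A minimal model of the corrected loop (the 8-byte limit mimics the SPMI extended-register ceiling; everything else is invented):

#include <stdio.h>
#include <string.h>

#define CHUNK 8

/* Toy transport that, like the SPMI read, moves at most CHUNK bytes. */
static int xfer(unsigned char *dst, const unsigned char *src, size_t n)
{
    if (n > CHUNK)
        return -1;   /* what passing val_size instead of len triggers */
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    unsigned char src[20], dst[20];
    size_t off = 0, left = sizeof(src);

    memset(src, 0xab, sizeof(src));
    while (left) {
        size_t len = left < CHUNK ? left : CHUNK;
        if (xfer(dst + off, src + off, len))   /* pass len, not left */
            return 1;
        off += len;
        left -= len;
    }
    printf("copied %zu bytes\n", off);   /* prints "copied 20 bytes" */
    return 0;
}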
4326 +diff --git a/drivers/block/brd.c b/drivers/block/brd.c
4327 +index cb27190e9f39..f7ecc287d733 100644
4328 +--- a/drivers/block/brd.c
4329 ++++ b/drivers/block/brd.c
4330 +@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
4331 +
4332 + if (unlikely(bio->bi_rw & REQ_DISCARD)) {
4333 + if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
4334 +- bio->bi_iter.bi_size & PAGE_MASK)
4335 ++ bio->bi_iter.bi_size & ~PAGE_MASK)
4336 + goto io_error;
4337 + discard_from_brd(brd, sector, bio->bi_iter.bi_size);
4338 + goto out;
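The brd discard check had its mask inverted: size & PAGE_MASK keeps the high bits, so any request of a page or more looked "misaligned", while size & ~PAGE_MASK keeps only the low bits and is non-zero exactly when the size is not a whole number of pages. Numerically:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long aligned = 0x4000, unaligned = 0x3200;

    /* buggy test flags the aligned 16K request as an error */
    printf("aligned:   buggy=%#lx fixed=%#lx\n",
           aligned & PAGE_MASK, aligned & ~PAGE_MASK);
    /* fixed test catches the genuinely unaligned 12.5K request */
    printf("unaligned: buggy=%#lx fixed=%#lx\n",
           unaligned & PAGE_MASK, unaligned & ~PAGE_MASK);
    return 0;
}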
4339 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
4340 +index 423f4ca7d712..80cf8add46ff 100644
4341 +--- a/drivers/block/loop.c
4342 ++++ b/drivers/block/loop.c
4343 +@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
4344 + bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
4345 + iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
4346 + bio_segments(bio), blk_rq_bytes(cmd->rq));
4347 ++ /*
4348 ++	 * This bio may start from the middle of the 'bvec'
4349 ++	 * because of bio splitting, so the offset within the bvec
4350 ++	 * must be passed to the iov iterator
4351 ++ */
4352 ++ iter.iov_offset = bio->bi_iter.bi_bvec_done;
4353 +
4354 + cmd->iocb.ki_pos = pos;
4355 + cmd->iocb.ki_filp = file;
4356 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
4357 +index 9b180dbbd03c..1c330b61f05d 100644
4358 +--- a/drivers/block/mtip32xx/mtip32xx.c
4359 ++++ b/drivers/block/mtip32xx/mtip32xx.c
4360 +@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
4361 + {
4362 + struct request *rq;
4363 +
4364 ++ if (mtip_check_surprise_removal(dd->pdev))
4365 ++ return NULL;
4366 ++
4367 + rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
4368 ++ if (IS_ERR(rq))
4369 ++ return NULL;
4370 ++
4371 + return blk_mq_rq_to_pdu(rq);
4372 + }
4373 +
4374 +@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
4375 + "Command tag %d failed due to TFE\n", tag);
4376 + }
4377 +
4378 +- /* Unmap the DMA scatter list entries */
4379 +- dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
4380 +-
4381 + rq = mtip_rq_from_tag(dd, tag);
4382 +
4383 +- if (unlikely(cmd->unaligned))
4384 +- up(&port->cmd_slot_unal);
4385 +-
4386 +- blk_mq_end_request(rq, status ? -EIO : 0);
4387 ++ blk_mq_complete_request(rq, status);
4388 + }
4389 +
4390 + /*
4391 +@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
4392 + dev_warn(&port->dd->pdev->dev,
4393 + "Internal command %d completed with TFE\n", tag);
4394 +
4395 ++ command->comp_func = NULL;
4396 ++ command->comp_data = NULL;
4397 + complete(waiting);
4398 + }
4399 +
4400 +@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
4401 +
4402 + port = dd->port;
4403 +
4404 +- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
4405 +-
4406 + if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
4407 + cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
4408 + dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
4409 +@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
4410 + cmd->comp_func(port, MTIP_TAG_INTERNAL,
4411 + cmd, PORT_IRQ_TF_ERR);
4412 + }
4413 +- goto handle_tfe_exit;
4414 ++ return;
4415 + }
4416 +
4417 + /* clear the tag accumulator */
4418 +@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
4419 + fail_reason = "thermal shutdown";
4420 + }
4421 + if (buf[288] == 0xBF) {
4422 +- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
4423 ++ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
4424 + dev_info(&dd->pdev->dev,
4425 + "Drive indicates rebuild has failed. Secure erase required.\n");
4426 + fail_all_ncq_cmds = 1;
4427 +@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
4428 + }
4429 + }
4430 + print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
4431 +-
4432 +-handle_tfe_exit:
4433 +- /* clear eh_active */
4434 +- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
4435 +- wake_up_interruptible(&port->svc_wait);
4436 + }
4437 +
4438 + /*
4439 +@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
4440 + (fis->features == 0x27 || fis->features == 0x72 ||
4441 + fis->features == 0x62 || fis->features == 0x26))) {
4442 + clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
4443 ++ clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
4444 + /* Com reset after secure erase or lowlevel format */
4445 + mtip_restart_port(port);
4446 + clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
4447 +@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
4448 + *
4449 + * @port Pointer to port data structure
4450 + * @timeout Max duration to wait (ms)
4451 ++ * @atomic gfp_t flag to indicate blockable context or not
4452 + *
4453 + * return value
4454 + * 0 Success
4455 + * -EBUSY Commands still active
4456 + */
4457 +-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
4458 ++static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
4459 ++ gfp_t atomic)
4460 + {
4461 + unsigned long to;
4462 + unsigned int n;
4463 +@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
4464 + to = jiffies + msecs_to_jiffies(timeout);
4465 + do {
4466 + if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
4467 +- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
4468 ++ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
4469 ++ atomic == GFP_KERNEL) {
4470 + msleep(20);
4471 + continue; /* svc thd is actively issuing commands */
4472 + }
4473 +
4474 +- msleep(100);
4475 ++ if (atomic == GFP_KERNEL)
4476 ++ msleep(100);
4477 ++ else {
4478 ++ cpu_relax();
4479 ++ udelay(100);
4480 ++ }
4481 ++
4482 + if (mtip_check_surprise_removal(port->dd->pdev))
4483 + goto err_fault;
4484 +- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
4485 +- goto err_fault;
4486 +
4487 + /*
4488 + * Ignore s_active bit 0 of array element 0.
4489 +@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
4490 + struct mtip_cmd *int_cmd;
4491 + struct driver_data *dd = port->dd;
4492 + int rv = 0;
4493 ++ unsigned long start;
4494 +
4495 + /* Make sure the buffer is 8 byte aligned. This is asic specific. */
4496 + if (buffer & 0x00000007) {
4497 +@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
4498 + }
4499 +
4500 + int_cmd = mtip_get_int_command(dd);
4501 ++ if (!int_cmd) {
4502 ++		dbg_printk(MTIP_DRV_NAME " Unable to allocate tag for PIO cmd\n");
4503 ++ return -EFAULT;
4504 ++ }
4505 +
4506 + set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
4507 +
4508 +@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
4509 + if (fis->command != ATA_CMD_STANDBYNOW1) {
4510 + /* wait for io to complete if non atomic */
4511 + if (mtip_quiesce_io(port,
4512 +- MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
4513 ++ MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
4514 + dev_warn(&dd->pdev->dev,
4515 + "Failed to quiesce IO\n");
4516 + mtip_put_int_command(dd, int_cmd);
4517 +@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
4518 + /* Populate the command header */
4519 + int_cmd->command_header->byte_count = 0;
4520 +
4521 ++ start = jiffies;
4522 ++
4523 + /* Issue the command to the hardware */
4524 + mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
4525 +
4526 +@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
4527 + if ((rv = wait_for_completion_interruptible_timeout(
4528 + &wait,
4529 + msecs_to_jiffies(timeout))) <= 0) {
4530 ++
4531 + if (rv == -ERESTARTSYS) { /* interrupted */
4532 + dev_err(&dd->pdev->dev,
4533 +- "Internal command [%02X] was interrupted after %lu ms\n",
4534 +- fis->command, timeout);
4535 ++ "Internal command [%02X] was interrupted after %u ms\n",
4536 ++ fis->command,
4537 ++ jiffies_to_msecs(jiffies - start));
4538 + rv = -EINTR;
4539 + goto exec_ic_exit;
4540 + } else if (rv == 0) /* timeout */
4541 +@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
4542 + return -EFAULT;
4543 + }
4544 +
4545 ++static void mtip_softirq_done_fn(struct request *rq)
4546 ++{
4547 ++ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
4548 ++ struct driver_data *dd = rq->q->queuedata;
4549 ++
4550 ++ /* Unmap the DMA scatter list entries */
4551 ++ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
4552 ++ cmd->direction);
4553 ++
4554 ++ if (unlikely(cmd->unaligned))
4555 ++ up(&dd->port->cmd_slot_unal);
4556 ++
4557 ++ blk_mq_end_request(rq, rq->errors);
4558 ++}
4559 ++
4560 ++static void mtip_abort_cmd(struct request *req, void *data,
4561 ++ bool reserved)
4562 ++{
4563 ++ struct driver_data *dd = data;
4564 ++
4565 ++ dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
4566 ++
4567 ++ clear_bit(req->tag, dd->port->cmds_to_issue);
4568 ++ req->errors = -EIO;
4569 ++ mtip_softirq_done_fn(req);
4570 ++}
4571 ++
4572 ++static void mtip_queue_cmd(struct request *req, void *data,
4573 ++ bool reserved)
4574 ++{
4575 ++ struct driver_data *dd = data;
4576 ++
4577 ++ set_bit(req->tag, dd->port->cmds_to_issue);
4578 ++ blk_abort_request(req);
4579 ++}
4580 ++
4581 + /*
4582 + * service thread to issue queued commands
4583 + *
4584 +@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
4585 + static int mtip_service_thread(void *data)
4586 + {
4587 + struct driver_data *dd = (struct driver_data *)data;
4588 +- unsigned long slot, slot_start, slot_wrap;
4589 ++ unsigned long slot, slot_start, slot_wrap, to;
4590 + unsigned int num_cmd_slots = dd->slot_groups * 32;
4591 + struct mtip_port *port = dd->port;
4592 +
4593 +@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
4594 + * is in progress nor error handling is active
4595 + */
4596 + wait_event_interruptible(port->svc_wait, (port->flags) &&
4597 +- !(port->flags & MTIP_PF_PAUSE_IO));
4598 +-
4599 +- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
4600 ++ (port->flags & MTIP_PF_SVC_THD_WORK));
4601 +
4602 + if (kthread_should_stop() ||
4603 + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
4604 +@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
4605 + &dd->dd_flag)))
4606 + goto st_out;
4607 +
4608 ++ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
4609 ++
4610 + restart_eh:
4611 + /* Demux bits: start with error handling */
4612 + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
4613 +@@ -2939,6 +2987,32 @@ restart_eh:
4614 + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
4615 + goto restart_eh;
4616 +
4617 ++ if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
4618 ++ to = jiffies + msecs_to_jiffies(5000);
4619 ++
4620 ++ do {
4621 ++ mdelay(100);
4622 ++ } while (atomic_read(&dd->irq_workers_active) != 0 &&
4623 ++ time_before(jiffies, to));
4624 ++
4625 ++ if (atomic_read(&dd->irq_workers_active) != 0)
4626 ++ dev_warn(&dd->pdev->dev,
4627 ++ "Completion workers still active!");
4628 ++
4629 ++ spin_lock(dd->queue->queue_lock);
4630 ++ blk_mq_all_tag_busy_iter(*dd->tags.tags,
4631 ++ mtip_queue_cmd, dd);
4632 ++ spin_unlock(dd->queue->queue_lock);
4633 ++
4634 ++ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
4635 ++
4636 ++ if (mtip_device_reset(dd))
4637 ++ blk_mq_all_tag_busy_iter(*dd->tags.tags,
4638 ++ mtip_abort_cmd, dd);
4639 ++
4640 ++ clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
4641 ++ }
4642 ++
4643 + if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
4644 + slot = 1;
4645 + /* used to restrict the loop to one iteration */
4646 +@@ -2971,10 +3045,8 @@ restart_eh:
4647 + }
4648 +
4649 + if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
4650 +- if (mtip_ftl_rebuild_poll(dd) < 0)
4651 +- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
4652 +- &dd->dd_flag);
4653 +- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
4654 ++ if (mtip_ftl_rebuild_poll(dd) == 0)
4655 ++ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
4656 + }
4657 + }
4658 +
4659 +@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
4660 + if (buf[288] == 0xBF) {
4661 + dev_info(&dd->pdev->dev,
4662 + "Drive indicates rebuild has failed.\n");
4663 +- /* TODO */
4664 ++ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
4665 + }
4666 + }
4667 +
4668 +@@ -3263,20 +3335,25 @@ out1:
4669 + return rv;
4670 + }
4671 +
4672 +-static void mtip_standby_drive(struct driver_data *dd)
4673 ++static int mtip_standby_drive(struct driver_data *dd)
4674 + {
4675 +- if (dd->sr)
4676 +- return;
4677 ++ int rv = 0;
4678 +
4679 ++ if (dd->sr || !dd->port)
4680 ++ return -ENODEV;
4681 + /*
4682 + * Send standby immediate (E0h) to the drive so that it
4683 + * saves its state.
4684 + */
4685 + if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
4686 +- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
4687 +- if (mtip_standby_immediate(dd->port))
4688 ++ !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
4689 ++ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
4690 ++ rv = mtip_standby_immediate(dd->port);
4691 ++ if (rv)
4692 + dev_warn(&dd->pdev->dev,
4693 + "STANDBY IMMEDIATE failed\n");
4694 ++ }
4695 ++ return rv;
4696 + }
4697 +
4698 + /*
4699 +@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
4700 + */
4701 + static int mtip_hw_exit(struct driver_data *dd)
4702 + {
4703 +- /*
4704 +- * Send standby immediate (E0h) to the drive so that it
4705 +- * saves its state.
4706 +- */
4707 + if (!dd->sr) {
4708 + /* de-initialize the port. */
4709 + mtip_deinit_port(dd->port);
4710 +@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
4711 + * Send standby immediate (E0h) to the drive so that it
4712 + * saves its state.
4713 + */
4714 +- if (!dd->sr && dd->port)
4715 +- mtip_standby_immediate(dd->port);
4716 ++ mtip_standby_drive(dd);
4717 +
4718 + return 0;
4719 + }
4720 +@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
4721 + * Send standby immediate (E0h) to the drive
4722 + * so that it saves its state.
4723 + */
4724 +- if (mtip_standby_immediate(dd->port) != 0) {
4725 ++ if (mtip_standby_drive(dd) != 0) {
4726 + dev_err(&dd->pdev->dev,
4727 + "Failed standby-immediate command\n");
4728 + return -EFAULT;
4729 +@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
4730 + return 0;
4731 + }
4732 +
4733 ++static int mtip_block_open(struct block_device *dev, fmode_t mode)
4734 ++{
4735 ++ struct driver_data *dd;
4736 ++
4737 ++ if (dev && dev->bd_disk) {
4738 ++ dd = (struct driver_data *) dev->bd_disk->private_data;
4739 ++
4740 ++ if (dd) {
4741 ++ if (test_bit(MTIP_DDF_REMOVAL_BIT,
4742 ++ &dd->dd_flag)) {
4743 ++ return -ENODEV;
4744 ++ }
4745 ++ return 0;
4746 ++ }
4747 ++ }
4748 ++ return -ENODEV;
4749 ++}
4750 ++
4751 ++void mtip_block_release(struct gendisk *disk, fmode_t mode)
4752 ++{
4753 ++}
4754 ++
4755 + /*
4756 + * Block device operation function.
4757 + *
4758 +@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
4759 + * layer.
4760 + */
4761 + static const struct block_device_operations mtip_block_ops = {
4762 ++ .open = mtip_block_open,
4763 ++ .release = mtip_block_release,
4764 + .ioctl = mtip_block_ioctl,
4765 + #ifdef CONFIG_COMPAT
4766 + .compat_ioctl = mtip_block_compat_ioctl,
4767 +@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
4768 + rq_data_dir(rq))) {
4769 + return -ENODATA;
4770 + }
4771 +- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
4772 ++ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
4773 ++ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
4774 + return -ENODATA;
4775 +- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
4776 +- return -ENXIO;
4777 + }
4778 +
4779 + if (rq->cmd_flags & REQ_DISCARD) {
4780 +@@ -3779,11 +3874,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
4781 + return 0;
4782 + }
4783 +
4784 ++static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
4785 ++ bool reserved)
4786 ++{
4787 ++ struct driver_data *dd = req->q->queuedata;
4788 ++ int ret = BLK_EH_RESET_TIMER;
4789 ++
4790 ++ if (reserved)
4791 ++ goto exit_handler;
4792 ++
4793 ++ if (test_bit(req->tag, dd->port->cmds_to_issue))
4794 ++ goto exit_handler;
4795 ++
4796 ++ if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
4797 ++ goto exit_handler;
4798 ++
4799 ++ wake_up_interruptible(&dd->port->svc_wait);
4800 ++exit_handler:
4801 ++ return ret;
4802 ++}
4803 ++
4804 + static struct blk_mq_ops mtip_mq_ops = {
4805 + .queue_rq = mtip_queue_rq,
4806 + .map_queue = blk_mq_map_queue,
4807 + .init_request = mtip_init_cmd,
4808 + .exit_request = mtip_free_cmd,
4809 ++ .complete = mtip_softirq_done_fn,
4810 ++ .timeout = mtip_cmd_timeout,
4811 + };
4812 +
4813 + /*
4814 +@@ -3850,7 +3967,6 @@ static int mtip_block_initialize(struct driver_data *dd)
4815 +
4816 + mtip_hw_debugfs_init(dd);
4817 +
4818 +-skip_create_disk:
4819 + memset(&dd->tags, 0, sizeof(dd->tags));
4820 + dd->tags.ops = &mtip_mq_ops;
4821 + dd->tags.nr_hw_queues = 1;
4822 +@@ -3860,12 +3976,13 @@ skip_create_disk:
4823 + dd->tags.numa_node = dd->numa_node;
4824 + dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
4825 + dd->tags.driver_data = dd;
4826 ++ dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
4827 +
4828 + rv = blk_mq_alloc_tag_set(&dd->tags);
4829 + if (rv) {
4830 + dev_err(&dd->pdev->dev,
4831 + "Unable to allocate request queue\n");
4832 +- goto block_queue_alloc_init_error;
4833 ++ goto block_queue_alloc_tag_error;
4834 + }
4835 +
4836 + /* Allocate the request queue. */
4837 +@@ -3880,6 +3997,7 @@ skip_create_disk:
4838 + dd->disk->queue = dd->queue;
4839 + dd->queue->queuedata = dd;
4840 +
4841 ++skip_create_disk:
4842 + /* Initialize the protocol layer. */
4843 + wait_for_rebuild = mtip_hw_get_identify(dd);
4844 + if (wait_for_rebuild < 0) {
4845 +@@ -3976,8 +4094,9 @@ kthread_run_error:
4846 + read_capacity_error:
4847 + init_hw_cmds_error:
4848 + blk_cleanup_queue(dd->queue);
4849 +- blk_mq_free_tag_set(&dd->tags);
4850 + block_queue_alloc_init_error:
4851 ++ blk_mq_free_tag_set(&dd->tags);
4852 ++block_queue_alloc_tag_error:
4853 + mtip_hw_debugfs_exit(dd);
4854 + disk_index_error:
4855 + spin_lock(&rssd_index_lock);
4856 +@@ -3994,6 +4113,22 @@ protocol_init_error:
4857 + return rv;
4858 + }
4859 +
4860 ++static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
4861 ++{
4862 ++ struct driver_data *dd = (struct driver_data *)data;
4863 ++ struct mtip_cmd *cmd;
4864 ++
4865 ++ if (likely(!reserv))
4866 ++ blk_mq_complete_request(rq, -ENODEV);
4867 ++ else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
4868 ++
4869 ++ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
4870 ++ if (cmd->comp_func)
4871 ++ cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
4872 ++ cmd, -ENODEV);
4873 ++ }
4874 ++}
4875 ++
4876 + /*
4877 + * Block layer deinitialization function.
4878 + *
4879 +@@ -4025,12 +4160,23 @@ static int mtip_block_remove(struct driver_data *dd)
4880 + }
4881 + }
4882 +
4883 +- if (!dd->sr)
4884 +- mtip_standby_drive(dd);
4885 ++ if (!dd->sr) {
4886 ++ /*
4887 ++ * Explicitly wait here for IOs to quiesce,
4888 ++ * as mtip_standby_drive usually won't wait for IOs.
4889 ++ */
4890 ++ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
4891 ++ GFP_KERNEL))
4892 ++ mtip_standby_drive(dd);
4893 ++ }
4894 + else
4895 + dev_info(&dd->pdev->dev, "device %s surprise removal\n",
4896 + dd->disk->disk_name);
4897 +
4898 ++ blk_mq_freeze_queue_start(dd->queue);
4899 ++ blk_mq_stop_hw_queues(dd->queue);
4900 ++ blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
4901 ++
4902 + /*
4903 + * Delete our gendisk structure. This also removes the device
4904 + * from /dev
4905 +@@ -4040,7 +4186,8 @@ static int mtip_block_remove(struct driver_data *dd)
4906 + dd->bdev = NULL;
4907 + }
4908 + if (dd->disk) {
4909 +- del_gendisk(dd->disk);
4910 ++ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
4911 ++ del_gendisk(dd->disk);
4912 + if (dd->disk->queue) {
4913 + blk_cleanup_queue(dd->queue);
4914 + blk_mq_free_tag_set(&dd->tags);
4915 +@@ -4081,7 +4228,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
4916 + dev_info(&dd->pdev->dev,
4917 + "Shutting down %s ...\n", dd->disk->disk_name);
4918 +
4919 +- del_gendisk(dd->disk);
4920 ++ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
4921 ++ del_gendisk(dd->disk);
4922 + if (dd->disk->queue) {
4923 + blk_cleanup_queue(dd->queue);
4924 + blk_mq_free_tag_set(&dd->tags);
4925 +@@ -4426,7 +4574,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
4926 + struct driver_data *dd = pci_get_drvdata(pdev);
4927 + unsigned long flags, to;
4928 +
4929 +- set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
4930 ++ set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
4931 +
4932 + spin_lock_irqsave(&dev_lock, flags);
4933 + list_del_init(&dd->online_list);
4934 +@@ -4443,12 +4591,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
4935 + } while (atomic_read(&dd->irq_workers_active) != 0 &&
4936 + time_before(jiffies, to));
4937 +
4938 ++ if (!dd->sr)
4939 ++ fsync_bdev(dd->bdev);
4940 ++
4941 + if (atomic_read(&dd->irq_workers_active) != 0) {
4942 + dev_warn(&dd->pdev->dev,
4943 + "Completion workers still active!\n");
4944 + }
4945 +
4946 +- blk_mq_stop_hw_queues(dd->queue);
4947 ++ blk_set_queue_dying(dd->queue);
4948 ++ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
4949 ++
4950 + /* Clean up the block layer. */
4951 + mtip_block_remove(dd);
4952 +
4953 +diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
4954 +index 3274784008eb..7617888f7944 100644
4955 +--- a/drivers/block/mtip32xx/mtip32xx.h
4956 ++++ b/drivers/block/mtip32xx/mtip32xx.h
4957 +@@ -134,16 +134,24 @@ enum {
4958 + MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
4959 + MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
4960 + MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
4961 ++ MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
4962 + MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
4963 + (1 << MTIP_PF_EH_ACTIVE_BIT) |
4964 + (1 << MTIP_PF_SE_ACTIVE_BIT) |
4965 +- (1 << MTIP_PF_DM_ACTIVE_BIT)),
4966 ++ (1 << MTIP_PF_DM_ACTIVE_BIT) |
4967 ++ (1 << MTIP_PF_TO_ACTIVE_BIT)),
4968 +
4969 + MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
4970 + MTIP_PF_ISSUE_CMDS_BIT = 5,
4971 + MTIP_PF_REBUILD_BIT = 6,
4972 + MTIP_PF_SVC_THD_STOP_BIT = 8,
4973 +
4974 ++ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
4975 ++ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
4976 ++ (1 << MTIP_PF_REBUILD_BIT) |
4977 ++ (1 << MTIP_PF_SVC_THD_STOP_BIT) |
4978 ++ (1 << MTIP_PF_TO_ACTIVE_BIT)),
4979 ++
4980 + /* below are bit numbers in 'dd_flag' defined in driver_data */
4981 + MTIP_DDF_SEC_LOCK_BIT = 0,
4982 + MTIP_DDF_REMOVE_PENDING_BIT = 1,
4983 +@@ -153,6 +161,7 @@ enum {
4984 + MTIP_DDF_RESUME_BIT = 6,
4985 + MTIP_DDF_INIT_DONE_BIT = 7,
4986 + MTIP_DDF_REBUILD_FAILED_BIT = 8,
4987 ++ MTIP_DDF_REMOVAL_BIT = 9,
4988 +
4989 + MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
4990 + (1 << MTIP_DDF_SEC_LOCK_BIT) |
4991 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
4992 +index e4c5cc107934..c65d41f4007a 100644
4993 +--- a/drivers/block/nbd.c
4994 ++++ b/drivers/block/nbd.c
4995 +@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
4996 + req, req->cmd_type);
4997 +
4998 + if (unlikely(!nbd->sock)) {
4999 +- dev_err(disk_to_dev(nbd->disk),
5000 +- "Attempted send on closed socket\n");
5001 ++ dev_err_ratelimited(disk_to_dev(nbd->disk),
5002 ++ "Attempted send on closed socket\n");
5003 + req->errors++;
5004 + nbd_end_request(nbd, req);
5005 + spin_lock_irq(q->queue_lock);
5006 +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
5007 +index 64a7b5971b57..cab97593ba54 100644
5008 +--- a/drivers/block/null_blk.c
5009 ++++ b/drivers/block/null_blk.c
5010 +@@ -742,10 +742,11 @@ static int null_add_dev(void)
5011 +
5012 + add_disk(disk);
5013 +
5014 ++done:
5015 + mutex_lock(&lock);
5016 + list_add_tail(&nullb->list, &nullb_list);
5017 + mutex_unlock(&lock);
5018 +-done:
5019 ++
5020 + return 0;
5021 +
5022 + out_cleanup_lightnvm:
5023 +diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
5024 +index 562b5a4ca7b7..78a39f736c64 100644
5025 +--- a/drivers/block/paride/pd.c
5026 ++++ b/drivers/block/paride/pd.c
5027 +@@ -126,7 +126,7 @@
5028 + */
5029 + #include <linux/types.h>
5030 +
5031 +-static bool verbose = 0;
5032 ++static int verbose = 0;
5033 + static int major = PD_MAJOR;
5034 + static char *name = PD_NAME;
5035 + static int cluster = 64;
5036 +@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
5037 + static DEFINE_MUTEX(pd_mutex);
5038 + static DEFINE_SPINLOCK(pd_lock);
5039 +
5040 +-module_param(verbose, bool, 0);
5041 ++module_param(verbose, int, 0);
5042 + module_param(major, int, 0);
5043 + module_param(name, charp, 0);
5044 + module_param(cluster, int, 0);
5045 +diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
5046 +index 1740d75e8a32..216a94fed5b4 100644
5047 +--- a/drivers/block/paride/pt.c
5048 ++++ b/drivers/block/paride/pt.c
5049 +@@ -117,7 +117,7 @@
5050 +
5051 + */
5052 +
5053 +-static bool verbose = 0;
5054 ++static int verbose = 0;
5055 + static int major = PT_MAJOR;
5056 + static char *name = PT_NAME;
5057 + static int disable = 0;
5058 +@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
5059 +
5060 + #include <asm/uaccess.h>
5061 +
5062 +-module_param(verbose, bool, 0);
5063 ++module_param(verbose, int, 0);
5064 + module_param(major, int, 0);
5065 + module_param(name, charp, 0);
5066 + module_param_array(drive0, int, NULL, 0);
5067 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
5068 +index 4a876785b68c..9745cf9fcd3c 100644
5069 +--- a/drivers/block/rbd.c
5070 ++++ b/drivers/block/rbd.c
5071 +@@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
5072 +
5073 + osdc = &rbd_dev->rbd_client->client->osdc;
5074 + osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
5075 +- GFP_ATOMIC);
5076 ++ GFP_NOIO);
5077 + if (!osd_req)
5078 + return NULL; /* ENOMEM */
5079 +
5080 +@@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
5081 + rbd_dev = img_request->rbd_dev;
5082 + osdc = &rbd_dev->rbd_client->client->osdc;
5083 + osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
5084 +- false, GFP_ATOMIC);
5085 ++ false, GFP_NOIO);
5086 + if (!osd_req)
5087 + return NULL; /* ENOMEM */
5088 +
5089 +@@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
5090 + bio_chain_clone_range(&bio_list,
5091 + &bio_offset,
5092 + clone_size,
5093 +- GFP_ATOMIC);
5094 ++ GFP_NOIO);
5095 + if (!obj_request->bio_list)
5096 + goto out_unwind;
5097 + } else if (type == OBJ_REQUEST_PAGES) {
5098 +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
5099 +index fa893c3ec408..0beaa52df66b 100644
5100 +--- a/drivers/bluetooth/ath3k.c
5101 ++++ b/drivers/bluetooth/ath3k.c
5102 +@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
5103 + { USB_DEVICE(0x0489, 0xe05f) },
5104 + { USB_DEVICE(0x0489, 0xe076) },
5105 + { USB_DEVICE(0x0489, 0xe078) },
5106 ++ { USB_DEVICE(0x0489, 0xe095) },
5107 + { USB_DEVICE(0x04c5, 0x1330) },
5108 + { USB_DEVICE(0x04CA, 0x3004) },
5109 + { USB_DEVICE(0x04CA, 0x3005) },
5110 +@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
5111 + { USB_DEVICE(0x04CA, 0x300d) },
5112 + { USB_DEVICE(0x04CA, 0x300f) },
5113 + { USB_DEVICE(0x04CA, 0x3010) },
5114 ++ { USB_DEVICE(0x04CA, 0x3014) },
5115 + { USB_DEVICE(0x0930, 0x0219) },
5116 + { USB_DEVICE(0x0930, 0x021c) },
5117 + { USB_DEVICE(0x0930, 0x0220) },
5118 +@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
5119 + { USB_DEVICE(0x13d3, 0x3362) },
5120 + { USB_DEVICE(0x13d3, 0x3375) },
5121 + { USB_DEVICE(0x13d3, 0x3393) },
5122 ++ { USB_DEVICE(0x13d3, 0x3395) },
5123 + { USB_DEVICE(0x13d3, 0x3402) },
5124 + { USB_DEVICE(0x13d3, 0x3408) },
5125 + { USB_DEVICE(0x13d3, 0x3423) },
5126 + { USB_DEVICE(0x13d3, 0x3432) },
5127 ++ { USB_DEVICE(0x13d3, 0x3472) },
5128 + { USB_DEVICE(0x13d3, 0x3474) },
5129 +
5130 + /* Atheros AR5BBU12 with sflash firmware */
5131 +@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
5132 + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
5133 + { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
5134 + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
5135 ++ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
5136 + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
5137 + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
5138 + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
5139 +@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
5140 + { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
5141 + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
5142 + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
5143 ++ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
5144 + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
5145 + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
5146 + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
5147 +@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
5148 + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
5149 + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
5150 + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
5151 ++ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
5152 + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
5153 + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
5154 + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
5155 + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
5156 ++ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
5157 + { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
5158 +
5159 + /* Atheros AR5BBU22 with sflash firmware */
5160 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
5161 +index a191e318fab8..0d4e372e426d 100644
5162 +--- a/drivers/bluetooth/btusb.c
5163 ++++ b/drivers/bluetooth/btusb.c
5164 +@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
5165 + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
5166 + { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
5167 + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
5168 ++ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
5169 + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
5170 + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
5171 + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
5172 +@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
5173 + { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
5174 + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
5175 + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
5176 ++ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
5177 + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
5178 + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
5179 + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
5180 +@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
5181 + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
5182 + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
5183 + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
5184 ++ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
5185 + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
5186 + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
5187 + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
5188 + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
5189 ++ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
5190 + { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
5191 +
5192 + /* Atheros AR5BBU12 with sflash firmware */
5193 +diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
5194 +index 80783dcb7f57..aba31210c802 100644
5195 +--- a/drivers/bluetooth/hci_vhci.c
5196 ++++ b/drivers/bluetooth/hci_vhci.c
5197 +@@ -50,6 +50,7 @@ struct vhci_data {
5198 + wait_queue_head_t read_wait;
5199 + struct sk_buff_head readq;
5200 +
5201 ++ struct mutex open_mutex;
5202 + struct delayed_work open_timeout;
5203 + };
5204 +
5205 +@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
5206 + return 0;
5207 + }
5208 +
5209 +-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
5210 ++static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
5211 + {
5212 + struct hci_dev *hdev;
5213 + struct sk_buff *skb;
5214 + __u8 dev_type;
5215 +
5216 ++ if (data->hdev)
5217 ++ return -EBADFD;
5218 ++
5219 + /* bits 0-1 are dev_type (BR/EDR or AMP) */
5220 + dev_type = opcode & 0x03;
5221 +
5222 +@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
5223 + return 0;
5224 + }
5225 +
5226 ++static int vhci_create_device(struct vhci_data *data, __u8 opcode)
5227 ++{
5228 ++ int err;
5229 ++
5230 ++ mutex_lock(&data->open_mutex);
5231 ++ err = __vhci_create_device(data, opcode);
5232 ++ mutex_unlock(&data->open_mutex);
5233 ++
5234 ++ return err;
5235 ++}
5236 ++
5237 + static inline ssize_t vhci_get_user(struct vhci_data *data,
5238 + struct iov_iter *from)
5239 + {
5240 +@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
5241 + break;
5242 +
5243 + case HCI_VENDOR_PKT:
5244 +- if (data->hdev) {
5245 +- kfree_skb(skb);
5246 +- return -EBADFD;
5247 +- }
5248 +-
5249 + cancel_delayed_work_sync(&data->open_timeout);
5250 +
5251 + opcode = *((__u8 *) skb->data);
5252 +@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
5253 + skb_queue_head_init(&data->readq);
5254 + init_waitqueue_head(&data->read_wait);
5255 +
5256 ++ mutex_init(&data->open_mutex);
5257 + INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
5258 +
5259 + file->private_data = data;
5260 +@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
5261 + static int vhci_release(struct inode *inode, struct file *file)
5262 + {
5263 + struct vhci_data *data = file->private_data;
5264 +- struct hci_dev *hdev = data->hdev;
5265 ++ struct hci_dev *hdev;
5266 +
5267 + cancel_delayed_work_sync(&data->open_timeout);
5268 +
5269 ++ hdev = data->hdev;
5270 ++
5271 + if (hdev) {
5272 + hci_unregister_dev(hdev);
5273 + hci_free_dev(hdev);
5274 + }
5275 +
5276 ++ skb_queue_purge(&data->readq);
5277 + file->private_data = NULL;
5278 + kfree(data);
5279 +
5280 +diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
5281 +index e98d15eaa799..1827fc4d15c1 100644
5282 +--- a/drivers/bus/imx-weim.c
5283 ++++ b/drivers/bus/imx-weim.c
5284 +@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
5285 + return ret;
5286 + }
5287 +
5288 +- for_each_child_of_node(pdev->dev.of_node, child) {
5289 ++ for_each_available_child_of_node(pdev->dev.of_node, child) {
5290 + if (!child->name)
5291 + continue;
5292 +
5293 +diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
5294 +index 834a2aeaf27a..350b7309c26d 100644
5295 +--- a/drivers/bus/uniphier-system-bus.c
5296 ++++ b/drivers/bus/uniphier-system-bus.c
5297 +@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
5298 +
5299 + for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
5300 + for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
5301 +- if (priv->bank[i].end > priv->bank[j].base ||
5302 ++ if (priv->bank[i].end > priv->bank[j].base &&
5303 + priv->bank[i].base < priv->bank[j].end) {
5304 + dev_err(priv->dev,
5305 + "region overlap between bank%d and bank%d\n",
5306 +diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
5307 +index 45cc39aabeee..252142524ff2 100644
5308 +--- a/drivers/char/tpm/tpm-chip.c
5309 ++++ b/drivers/char/tpm/tpm-chip.c
5310 +@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
5311 + chip->cdev.owner = chip->pdev->driver->owner;
5312 + chip->cdev.kobj.parent = &chip->dev.kobj;
5313 +
5314 ++ devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
5315 ++
5316 + return chip;
5317 + }
5318 + EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
5319 +
5320 +-static int tpm_dev_add_device(struct tpm_chip *chip)
5321 ++static int tpm_add_char_device(struct tpm_chip *chip)
5322 + {
5323 + int rc;
5324 +
5325 +@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
5326 + chip->devname, MAJOR(chip->dev.devt),
5327 + MINOR(chip->dev.devt), rc);
5328 +
5329 +- device_unregister(&chip->dev);
5330 + return rc;
5331 + }
5332 +
5333 +@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
5334 + chip->devname, MAJOR(chip->dev.devt),
5335 + MINOR(chip->dev.devt), rc);
5336 +
5337 ++ cdev_del(&chip->cdev);
5338 + return rc;
5339 + }
5340 +
5341 + return rc;
5342 + }
5343 +
5344 +-static void tpm_dev_del_device(struct tpm_chip *chip)
5345 ++static void tpm_del_char_device(struct tpm_chip *chip)
5346 + {
5347 + cdev_del(&chip->cdev);
5348 +- device_unregister(&chip->dev);
5349 ++ device_del(&chip->dev);
5350 + }
5351 +
5352 + static int tpm1_chip_register(struct tpm_chip *chip)
5353 +@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
5354 +
5355 + tpm_add_ppi(chip);
5356 +
5357 +- rc = tpm_dev_add_device(chip);
5358 ++ rc = tpm_add_char_device(chip);
5359 + if (rc)
5360 + goto out_err;
5361 +
5362 +@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
5363 + sysfs_remove_link(&chip->pdev->kobj, "ppi");
5364 +
5365 + tpm1_chip_unregister(chip);
5366 +- tpm_dev_del_device(chip);
5367 ++ tpm_del_char_device(chip);
5368 + }
5369 + EXPORT_SYMBOL_GPL(tpm_chip_unregister);
5370 +diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
5371 +index 45a634016f95..b28e4da3d2cf 100644
5372 +--- a/drivers/char/tpm/tpm2-cmd.c
5373 ++++ b/drivers/char/tpm/tpm2-cmd.c
5374 +@@ -20,7 +20,11 @@
5375 + #include <keys/trusted-type.h>
5376 +
5377 + enum tpm2_object_attributes {
5378 +- TPM2_ATTR_USER_WITH_AUTH = BIT(6),
5379 ++ TPM2_OA_USER_WITH_AUTH = BIT(6),
5380 ++};
5381 ++
5382 ++enum tpm2_session_attributes {
5383 ++ TPM2_SA_CONTINUE_SESSION = BIT(0),
5384 + };
5385 +
5386 + struct tpm2_startup_in {
5387 +@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
5388 + tpm_buf_append_u8(&buf, payload->migratable);
5389 +
5390 + /* public */
5391 +- if (options->policydigest)
5392 +- tpm_buf_append_u16(&buf, 14 + options->digest_len);
5393 +- else
5394 +- tpm_buf_append_u16(&buf, 14);
5395 +-
5396 ++ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
5397 + tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
5398 + tpm_buf_append_u16(&buf, hash);
5399 +
5400 + /* policy */
5401 +- if (options->policydigest) {
5402 ++ if (options->policydigest_len) {
5403 + tpm_buf_append_u32(&buf, 0);
5404 +- tpm_buf_append_u16(&buf, options->digest_len);
5405 ++ tpm_buf_append_u16(&buf, options->policydigest_len);
5406 + tpm_buf_append(&buf, options->policydigest,
5407 +- options->digest_len);
5408 ++ options->policydigest_len);
5409 + } else {
5410 +- tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
5411 ++ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
5412 + tpm_buf_append_u16(&buf, 0);
5413 + }
5414 +
5415 +@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
5416 + options->policyhandle ?
5417 + options->policyhandle : TPM2_RS_PW,
5418 + NULL /* nonce */, 0,
5419 +- 0 /* session_attributes */,
5420 ++ TPM2_SA_CONTINUE_SESSION,
5421 + options->blobauth /* hmac */,
5422 + TPM_DIGEST_SIZE);
5423 +
5424 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
5425 +index 8342cf51ffdc..26bab5a2959f 100644
5426 +--- a/drivers/char/tpm/tpm_crb.c
5427 ++++ b/drivers/char/tpm/tpm_crb.c
5428 +@@ -302,11 +302,11 @@ static int crb_acpi_remove(struct acpi_device *device)
5429 + struct device *dev = &device->dev;
5430 + struct tpm_chip *chip = dev_get_drvdata(dev);
5431 +
5432 +- tpm_chip_unregister(chip);
5433 +-
5434 + if (chip->flags & TPM_CHIP_FLAG_TPM2)
5435 + tpm2_shutdown(chip, TPM2_SU_CLEAR);
5436 +
5437 ++ tpm_chip_unregister(chip);
5438 ++
5439 + return 0;
5440 + }
5441 +
5442 +diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
5443 +index bd72fb04225e..4e6940acf639 100644
5444 +--- a/drivers/char/tpm/tpm_eventlog.c
5445 ++++ b/drivers/char/tpm/tpm_eventlog.c
5446 +@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
5447 + {
5448 + struct tcpa_event *event = v;
5449 + struct tcpa_event temp_event;
5450 +- char *tempPtr;
5451 ++ char *temp_ptr;
5452 + int i;
5453 +
5454 + memcpy(&temp_event, event, sizeof(struct tcpa_event));
5455 +@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
5456 + temp_event.event_type = do_endian_conversion(event->event_type);
5457 + temp_event.event_size = do_endian_conversion(event->event_size);
5458 +
5459 +- tempPtr = (char *)&temp_event;
5460 ++ temp_ptr = (char *) &temp_event;
5461 +
5462 +- for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
5463 +- seq_putc(m, tempPtr[i]);
5464 ++ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
5465 ++ seq_putc(m, temp_ptr[i]);
5466 ++
5467 ++ temp_ptr = (char *) v;
5468 ++
5469 ++ for (i = (sizeof(struct tcpa_event) - 1);
5470 ++ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
5471 ++ seq_putc(m, temp_ptr[i]);
5472 +
5473 + return 0;
5474 +
5475 +diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
5476 +index e4f89e28b5ec..3a177ade6e6c 100644
5477 +--- a/drivers/clk/bcm/clk-bcm2835-aux.c
5478 ++++ b/drivers/clk/bcm/clk-bcm2835-aux.c
5479 +@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
5480 +
5481 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5482 + reg = devm_ioremap_resource(dev, res);
5483 +- if (!reg)
5484 +- return -ENODEV;
5485 ++ if (IS_ERR(reg))
5486 ++ return PTR_ERR(reg);
5487 +
5488 + onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
5489 + if (!onecell)
5490 +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
5491 +index 015e687ffabe..dd2856b5633c 100644
5492 +--- a/drivers/clk/bcm/clk-bcm2835.c
5493 ++++ b/drivers/clk/bcm/clk-bcm2835.c
5494 +@@ -1078,10 +1078,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
5495 + struct bcm2835_cprman *cprman = divider->cprman;
5496 + const struct bcm2835_pll_divider_data *data = divider->data;
5497 +
5498 ++ spin_lock(&cprman->regs_lock);
5499 + cprman_write(cprman, data->cm_reg,
5500 + (cprman_read(cprman, data->cm_reg) &
5501 + ~data->load_mask) | data->hold_mask);
5502 + cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
5503 ++ spin_unlock(&cprman->regs_lock);
5504 + }
5505 +
5506 + static int bcm2835_pll_divider_on(struct clk_hw *hw)
5507 +@@ -1090,12 +1092,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw)
5508 + struct bcm2835_cprman *cprman = divider->cprman;
5509 + const struct bcm2835_pll_divider_data *data = divider->data;
5510 +
5511 ++ spin_lock(&cprman->regs_lock);
5512 + cprman_write(cprman, data->a2w_reg,
5513 + cprman_read(cprman, data->a2w_reg) &
5514 + ~A2W_PLL_CHANNEL_DISABLE);
5515 +
5516 + cprman_write(cprman, data->cm_reg,
5517 + cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
5518 ++ spin_unlock(&cprman->regs_lock);
5519 +
5520 + return 0;
5521 + }
5522 +@@ -1107,13 +1111,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
5523 + struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
5524 + struct bcm2835_cprman *cprman = divider->cprman;
5525 + const struct bcm2835_pll_divider_data *data = divider->data;
5526 +- u32 cm;
5527 +- int ret;
5528 ++ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
5529 +
5530 +- ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
5531 +- if (ret)
5532 +- return ret;
5533 ++ div = DIV_ROUND_UP_ULL(parent_rate, rate);
5534 ++
5535 ++ div = min(div, max_div);
5536 ++ if (div == max_div)
5537 ++ div = 0;
5538 +
5539 ++ cprman_write(cprman, data->a2w_reg, div);
5540 + cm = cprman_read(cprman, data->cm_reg);
5541 + cprman_write(cprman, data->cm_reg, cm | data->load_mask);
5542 + cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
5543 +diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
5544 +index ded3ff4b91b9..aa1dacdaa39d 100644
5545 +--- a/drivers/clk/clk-divider.c
5546 ++++ b/drivers/clk/clk-divider.c
5547 +@@ -423,6 +423,12 @@ const struct clk_ops clk_divider_ops = {
5548 + };
5549 + EXPORT_SYMBOL_GPL(clk_divider_ops);
5550 +
5551 ++const struct clk_ops clk_divider_ro_ops = {
5552 ++ .recalc_rate = clk_divider_recalc_rate,
5553 ++ .round_rate = clk_divider_round_rate,
5554 ++};
5555 ++EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
5556 ++
5557 + static struct clk *_register_divider(struct device *dev, const char *name,
5558 + const char *parent_name, unsigned long flags,
5559 + void __iomem *reg, u8 shift, u8 width,
5560 +@@ -446,7 +452,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
5561 + return ERR_PTR(-ENOMEM);
5562 +
5563 + init.name = name;
5564 +- init.ops = &clk_divider_ops;
5565 ++ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
5566 ++ init.ops = &clk_divider_ro_ops;
5567 ++ else
5568 ++ init.ops = &clk_divider_ops;
5569 + init.flags = flags | CLK_IS_BASIC;
5570 + init.parent_names = (parent_name ? &parent_name: NULL);
5571 + init.num_parents = (parent_name ? 1 : 0);
5572 +diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
5573 +index 10224b01b97c..b134a8b15e2c 100644
5574 +--- a/drivers/clk/clk-xgene.c
5575 ++++ b/drivers/clk/clk-xgene.c
5576 +@@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
5577 + /* Set new divider */
5578 + data = xgene_clk_read(pclk->param.divider_reg +
5579 + pclk->param.reg_divider_offset);
5580 +- data &= ~((1 << pclk->param.reg_divider_width) - 1)
5581 +- << pclk->param.reg_divider_shift;
5582 ++ data &= ~(((1 << pclk->param.reg_divider_width) - 1)
5583 ++ << pclk->param.reg_divider_shift);
5584 + data |= divider;
5585 + xgene_clk_write(data, pclk->param.divider_reg +
5586 + pclk->param.reg_divider_offset);
5587 +diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
5588 +index c83ae1367abc..d920d410b51d 100644
5589 +--- a/drivers/clk/meson/clkc.c
5590 ++++ b/drivers/clk/meson/clkc.c
5591 +@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
5592 + }
5593 +
5594 + void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
5595 +- size_t nr_confs,
5596 ++ unsigned int nr_confs,
5597 + void __iomem *clk_base)
5598 + {
5599 + unsigned int i;
5600 +diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
5601 +index 13aabbb3acbe..558da89555af 100644
5602 +--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
5603 ++++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
5604 +@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
5605 + div->width = 1;
5606 +
5607 + div_hw = &div->hw;
5608 +- div_ops = &clk_divider_ops;
5609 ++ div_ops = &clk_divider_ro_ops;
5610 + }
5611 +
5612 + branch->gate.reg = branch->offset + reg_base;
5613 +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
5614 +index 8cc9b2868b41..5f56d6aae31d 100644
5615 +--- a/drivers/clk/qcom/gcc-msm8916.c
5616 ++++ b/drivers/clk/qcom/gcc-msm8916.c
5617 +@@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
5618 + "pcnoc_bfdcd_clk_src",
5619 + },
5620 + .num_parents = 1,
5621 ++ .flags = CLK_SET_RATE_PARENT,
5622 + .ops = &clk_branch2_ops,
5623 + },
5624 + },
5625 +@@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = {
5626 + "crypto_clk_src",
5627 + },
5628 + .num_parents = 1,
5629 ++ .flags = CLK_SET_RATE_PARENT,
5630 + .ops = &clk_branch2_ops,
5631 + },
5632 + },
5633 +diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
5634 +index 983dd7dc89a7..0a0c1f533249 100644
5635 +--- a/drivers/clk/qcom/gcc-msm8960.c
5636 ++++ b/drivers/clk/qcom/gcc-msm8960.c
5637 +@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
5638 + },
5639 + .freq_tbl = clk_tbl_ce3,
5640 + .clkr = {
5641 +- .enable_reg = 0x2c08,
5642 ++ .enable_reg = 0x36c0,
5643 + .enable_mask = BIT(7),
5644 + .hw.init = &(struct clk_init_data){
5645 + .name = "ce3_src",
5646 +@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
5647 + .halt_reg = 0x2fdc,
5648 + .halt_bit = 5,
5649 + .clkr = {
5650 +- .enable_reg = 0x36c4,
5651 ++ .enable_reg = 0x36cc,
5652 + .enable_mask = BIT(4),
5653 + .hw.init = &(struct clk_init_data){
5654 + .name = "ce3_core_clk",
5655 +diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
5656 +index 7f7444cbf6fc..05263571c223 100644
5657 +--- a/drivers/clk/rockchip/clk-rk3188.c
5658 ++++ b/drivers/clk/rockchip/clk-rk3188.c
5659 +@@ -748,6 +748,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
5660 + "hclk_peri",
5661 + "pclk_cpu",
5662 + "pclk_peri",
5663 ++ "hclk_cpubus"
5664 + };
5665 +
5666 + static void __init rk3188_common_clk_init(struct device_node *np)
5667 +diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
5668 +index 981a50205339..97f49aab8d42 100644
5669 +--- a/drivers/clk/rockchip/clk-rk3228.c
5670 ++++ b/drivers/clk/rockchip/clk-rk3228.c
5671 +@@ -605,13 +605,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
5672 +
5673 + /* PD_MMC */
5674 + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
5675 +- MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
5676 ++ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
5677 +
5678 + MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK3228_SDIO_CON0, 1),
5679 +- MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 1),
5680 ++ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 0),
5681 +
5682 + MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3228_EMMC_CON0, 1),
5683 +- MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 1),
5684 ++ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0),
5685 + };
5686 +
5687 + static const char *const rk3228_critical_clocks[] __initconst = {
5688 +diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
5689 +index 21f3ea909fab..57acb625c8ff 100644
5690 +--- a/drivers/clk/rockchip/clk-rk3368.c
5691 ++++ b/drivers/clk/rockchip/clk-rk3368.c
5692 +@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
5693 + .core_reg = RK3368_CLKSEL_CON(0),
5694 + .div_core_shift = 0,
5695 + .div_core_mask = 0x1f,
5696 +- .mux_core_shift = 15,
5697 ++ .mux_core_shift = 7,
5698 + };
5699 +
5700 + static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
5701 +@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
5702 + }
5703 +
5704 + static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
5705 +- RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
5706 +- RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
5707 +- RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
5708 +- RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
5709 +- RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
5710 +- RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
5711 +- RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
5712 +- RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
5713 +- RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
5714 +- RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
5715 ++ RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
5716 ++ RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
5717 ++ RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
5718 ++ RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
5719 ++ RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
5720 ++ RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
5721 ++ RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
5722 ++ RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
5723 ++ RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
5724 ++ RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
5725 + };
5726 +
5727 + static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
5728 +- RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
5729 +- RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
5730 +- RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
5731 +- RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
5732 +- RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
5733 +- RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
5734 +- RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
5735 +- RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
5736 +- RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
5737 +- RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
5738 ++ RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
5739 ++ RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
5740 ++ RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
5741 ++ RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
5742 ++ RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
5743 ++ RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
5744 ++ RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
5745 ++ RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
5746 ++ RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
5747 ++ RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
5748 + };
5749 +
5750 + static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
5751 +@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
5752 + * Clock-Architecture Diagram 3
5753 + */
5754 +
5755 +- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
5756 ++ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
5757 + RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
5758 + RK3368_CLKGATE_CON(4), 6, GFLAGS),
5759 +- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
5760 ++ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
5761 + RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
5762 + RK3368_CLKGATE_CON(4), 7, GFLAGS),
5763 +
5764 +@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
5765 + GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
5766 + RK3368_CLKGATE_CON(4), 13, GFLAGS),
5767 + GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
5768 +- RK3368_CLKGATE_CON(5), 12, GFLAGS),
5769 ++ RK3368_CLKGATE_CON(4), 12, GFLAGS),
5770 +
5771 + COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
5772 + RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
5773 +diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
5774 +index d9a0b5d4d47f..226af5720c9e 100644
5775 +--- a/drivers/clk/rockchip/clk.c
5776 ++++ b/drivers/clk/rockchip/clk.c
5777 +@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
5778 + if (gate_offset >= 0) {
5779 + gate = kzalloc(sizeof(*gate), GFP_KERNEL);
5780 + if (!gate)
5781 +- return ERR_PTR(-ENOMEM);
5782 ++ goto err_gate;
5783 +
5784 + gate->flags = gate_flags;
5785 + gate->reg = base + gate_offset;
5786 +@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
5787 + if (div_width > 0) {
5788 + div = kzalloc(sizeof(*div), GFP_KERNEL);
5789 + if (!div)
5790 +- return ERR_PTR(-ENOMEM);
5791 ++ goto err_div;
5792 +
5793 + div->flags = div_flags;
5794 + div->reg = base + muxdiv_offset;
5795 +@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
5796 + div->width = div_width;
5797 + div->lock = lock;
5798 + div->table = div_table;
5799 +- div_ops = &clk_divider_ops;
5800 ++ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
5801 ++ ? &clk_divider_ro_ops
5802 ++ : &clk_divider_ops;
5803 + }
5804 +
5805 + clk = clk_register_composite(NULL, name, parent_names, num_parents,
5806 +@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
5807 + flags);
5808 +
5809 + return clk;
5810 ++err_div:
5811 ++ kfree(gate);
5812 ++err_gate:
5813 ++ kfree(mux);
5814 ++ return ERR_PTR(-ENOMEM);
5815 + }
5816 +
5817 + struct rockchip_clk_frac {
5818 +diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
5819 +index 7ba61103a6f5..2ea61debffc1 100644
5820 +--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
5821 ++++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
5822 +@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
5823 +
5824 + /* The A23 APB0 clock is a standard 2 bit wide divider clock */
5825 + clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
5826 +- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
5827 ++ 0, 2, 0, NULL);
5828 + if (IS_ERR(clk))
5829 + return clk;
5830 +
5831 +diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
5832 +index e78755e0ef78..1fe1e8d970cf 100644
5833 +--- a/drivers/clk/versatile/clk-sp810.c
5834 ++++ b/drivers/clk/versatile/clk-sp810.c
5835 +@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
5836 + int num = ARRAY_SIZE(parent_names);
5837 + char name[12];
5838 + struct clk_init_data init;
5839 ++ static int instance;
5840 + int i;
5841 + bool deprecated;
5842 +
5843 +@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
5844 + deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
5845 +
5846 + for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
5847 +- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
5848 ++ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
5849 +
5850 + sp810->timerclken[i].sp810 = sp810;
5851 + sp810->timerclken[i].channel = i;
5852 +@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
5853 + }
5854 +
5855 + of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
5856 ++ instance++;
5857 + }
5858 + CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
5859 +diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
5860 +index 2bcecafdeaea..c407c47a3232 100644
5861 +--- a/drivers/clocksource/tango_xtal.c
5862 ++++ b/drivers/clocksource/tango_xtal.c
5863 +@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
5864 +
5865 + ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
5866 + 32, clocksource_mmio_readl_up);
5867 +- if (!ret) {
5868 ++ if (ret) {
5869 + pr_err("%s: registration failed\n", np->full_name);
5870 + return;
5871 + }
5872 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
5873 +index cd83d477e32d..e89512383c3c 100644
5874 +--- a/drivers/cpufreq/intel_pstate.c
5875 ++++ b/drivers/cpufreq/intel_pstate.c
5876 +@@ -673,6 +673,11 @@ static int core_get_max_pstate(void)
5877 + if (err)
5878 + goto skip_tar;
5879 +
5880 ++ /* For level 1 and 2, bits[23:16] contain the ratio */
5881 ++ if (tdp_ctrl)
5882 ++ tdp_ratio >>= 16;
5883 ++
5884 ++ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
5885 + if (tdp_ratio - 1 == tar) {
5886 + max_pstate = tar;
5887 + pr_debug("max_pstate=TAC %x\n", max_pstate);
5888 +diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
5889 +index a9c659f58974..04042038ec4b 100644
5890 +--- a/drivers/cpufreq/sti-cpufreq.c
5891 ++++ b/drivers/cpufreq/sti-cpufreq.c
5892 +@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
5893 + {
5894 + int ret;
5895 +
5896 ++ if ((!of_machine_is_compatible("st,stih407")) &&
5897 ++ (!of_machine_is_compatible("st,stih410")))
5898 ++ return -ENODEV;
5899 ++
5900 + ddata.cpu = get_cpu_device(0);
5901 + if (!ddata.cpu) {
5902 + dev_err(ddata.cpu, "Failed to get device for CPU0\n");
5903 +diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
5904 +index 545069d5fdfb..e342565e8715 100644
5905 +--- a/drivers/cpuidle/cpuidle-arm.c
5906 ++++ b/drivers/cpuidle/cpuidle-arm.c
5907 +@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
5908 + * call the CPU ops suspend protocol with idle index as a
5909 + * parameter.
5910 + */
5911 +- arm_cpuidle_suspend(idx);
5912 ++ ret = arm_cpuidle_suspend(idx);
5913 +
5914 + cpu_pm_exit();
5915 + }
5916 +diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
5917 +index 3eb3f1279fb7..7de007abe46e 100644
5918 +--- a/drivers/crypto/atmel-aes.c
5919 ++++ b/drivers/crypto/atmel-aes.c
5920 +@@ -2085,9 +2085,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
5921 + }
5922 +
5923 + aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
5924 +- if (!aes_dd->io_base) {
5925 ++ if (IS_ERR(aes_dd->io_base)) {
5926 + dev_err(dev, "can't ioremap\n");
5927 +- err = -ENOMEM;
5928 ++ err = PTR_ERR(aes_dd->io_base);
5929 + goto res_err;
5930 + }
5931 +
5932 +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
5933 +index 8bf9914d4d15..68d47a2da4a1 100644
5934 +--- a/drivers/crypto/atmel-sha.c
5935 ++++ b/drivers/crypto/atmel-sha.c
5936 +@@ -1404,9 +1404,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
5937 + }
5938 +
5939 + sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
5940 +- if (!sha_dd->io_base) {
5941 ++ if (IS_ERR(sha_dd->io_base)) {
5942 + dev_err(dev, "can't ioremap\n");
5943 +- err = -ENOMEM;
5944 ++ err = PTR_ERR(sha_dd->io_base);
5945 + goto res_err;
5946 + }
5947 +
5948 +diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
5949 +index 2c7a628d0375..bf467d7be35c 100644
5950 +--- a/drivers/crypto/atmel-tdes.c
5951 ++++ b/drivers/crypto/atmel-tdes.c
5952 +@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
5953 + }
5954 +
5955 + tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
5956 +- if (!tdes_dd->io_base) {
5957 ++ if (IS_ERR(tdes_dd->io_base)) {
5958 + dev_err(dev, "can't ioremap\n");
5959 +- err = -ENOMEM;
5960 ++ err = PTR_ERR(tdes_dd->io_base);
5961 + goto res_err;
5962 + }
5963 +
5964 +diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
5965 +index f7e0d8d4c3da..8f50a02ff68d 100644
5966 +--- a/drivers/crypto/caam/jr.c
5967 ++++ b/drivers/crypto/caam/jr.c
5968 +@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
5969 + struct device *caam_jr_alloc(void)
5970 + {
5971 + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
5972 +- struct device *dev = NULL;
5973 ++ struct device *dev = ERR_PTR(-ENODEV);
5974 + int min_tfm_cnt = INT_MAX;
5975 + int tfm_cnt;
5976 +
5977 +diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
5978 +index d89f20c04266..60fc0fa26fd3 100644
5979 +--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
5980 ++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
5981 +@@ -220,6 +220,42 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
5982 + return ccp_aes_cmac_finup(req);
5983 + }
5984 +
5985 ++static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
5986 ++{
5987 ++ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
5988 ++ struct ccp_aes_cmac_exp_ctx state;
5989 ++
5990 ++ /* Don't let anything leak to 'out' */
5991 ++ memset(&state, 0, sizeof(state));
5992 ++
5993 ++ state.null_msg = rctx->null_msg;
5994 ++ memcpy(state.iv, rctx->iv, sizeof(state.iv));
5995 ++ state.buf_count = rctx->buf_count;
5996 ++ memcpy(state.buf, rctx->buf, sizeof(state.buf));
5997 ++
5998 ++ /* 'out' may not be aligned so memcpy from local variable */
5999 ++ memcpy(out, &state, sizeof(state));
6000 ++
6001 ++ return 0;
6002 ++}
6003 ++
6004 ++static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
6005 ++{
6006 ++ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
6007 ++ struct ccp_aes_cmac_exp_ctx state;
6008 ++
6009 ++ /* 'in' may not be aligned so memcpy to local variable */
6010 ++ memcpy(&state, in, sizeof(state));
6011 ++
6012 ++ memset(rctx, 0, sizeof(*rctx));
6013 ++ rctx->null_msg = state.null_msg;
6014 ++ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
6015 ++ rctx->buf_count = state.buf_count;
6016 ++ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
6017 ++
6018 ++ return 0;
6019 ++}
6020 ++
6021 + static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
6022 + unsigned int key_len)
6023 + {
6024 +@@ -352,10 +388,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
6025 + alg->final = ccp_aes_cmac_final;
6026 + alg->finup = ccp_aes_cmac_finup;
6027 + alg->digest = ccp_aes_cmac_digest;
6028 ++ alg->export = ccp_aes_cmac_export;
6029 ++ alg->import = ccp_aes_cmac_import;
6030 + alg->setkey = ccp_aes_cmac_setkey;
6031 +
6032 + halg = &alg->halg;
6033 + halg->digestsize = AES_BLOCK_SIZE;
6034 ++ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
6035 +
6036 + base = &halg->base;
6037 + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
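
The export/import hooks added here let the crypto layer snapshot an in-progress hash and
resume it later, for example when an algif_hash socket is handed to another process;
halg->statesize advertises how large that snapshot is. A hedged sketch of how a caller
exercises the contract through the generic ahash API (req, req2 and tfm are assumed to
exist):

    u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

    if (!state)
        return -ENOMEM;

    crypto_ahash_export(req, state);    /* checkpoint partial state */
    /* ... later, possibly on a different request ... */
    crypto_ahash_import(req2, state);   /* resume where we left off */
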
6038 +diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
6039 +index d14b3f28e010..ab9945f2cb7a 100644
6040 +--- a/drivers/crypto/ccp/ccp-crypto-sha.c
6041 ++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
6042 +@@ -207,6 +207,46 @@ static int ccp_sha_digest(struct ahash_request *req)
6043 + return ccp_sha_finup(req);
6044 + }
6045 +
6046 ++static int ccp_sha_export(struct ahash_request *req, void *out)
6047 ++{
6048 ++ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
6049 ++ struct ccp_sha_exp_ctx state;
6050 ++
6051 ++ /* Don't let anything leak to 'out' */
6052 ++ memset(&state, 0, sizeof(state));
6053 ++
6054 ++ state.type = rctx->type;
6055 ++ state.msg_bits = rctx->msg_bits;
6056 ++ state.first = rctx->first;
6057 ++ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
6058 ++ state.buf_count = rctx->buf_count;
6059 ++ memcpy(state.buf, rctx->buf, sizeof(state.buf));
6060 ++
6061 ++ /* 'out' may not be aligned so memcpy from local variable */
6062 ++ memcpy(out, &state, sizeof(state));
6063 ++
6064 ++ return 0;
6065 ++}
6066 ++
6067 ++static int ccp_sha_import(struct ahash_request *req, const void *in)
6068 ++{
6069 ++ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
6070 ++ struct ccp_sha_exp_ctx state;
6071 ++
6072 ++ /* 'in' may not be aligned so memcpy to local variable */
6073 ++ memcpy(&state, in, sizeof(state));
6074 ++
6075 ++ memset(rctx, 0, sizeof(*rctx));
6076 ++ rctx->type = state.type;
6077 ++ rctx->msg_bits = state.msg_bits;
6078 ++ rctx->first = state.first;
6079 ++ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
6080 ++ rctx->buf_count = state.buf_count;
6081 ++ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
6082 ++
6083 ++ return 0;
6084 ++}
6085 ++
6086 + static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
6087 + unsigned int key_len)
6088 + {
6089 +@@ -403,9 +443,12 @@ static int ccp_register_sha_alg(struct list_head *head,
6090 + alg->final = ccp_sha_final;
6091 + alg->finup = ccp_sha_finup;
6092 + alg->digest = ccp_sha_digest;
6093 ++ alg->export = ccp_sha_export;
6094 ++ alg->import = ccp_sha_import;
6095 +
6096 + halg = &alg->halg;
6097 + halg->digestsize = def->digest_size;
6098 ++ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
6099 +
6100 + base = &halg->base;
6101 + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
6102 +diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
6103 +index 76a96f0f44c6..a326ec20bfa8 100644
6104 +--- a/drivers/crypto/ccp/ccp-crypto.h
6105 ++++ b/drivers/crypto/ccp/ccp-crypto.h
6106 +@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
6107 + struct ccp_cmd cmd;
6108 + };
6109 +
6110 ++struct ccp_aes_cmac_exp_ctx {
6111 ++ unsigned int null_msg;
6112 ++
6113 ++ u8 iv[AES_BLOCK_SIZE];
6114 ++
6115 ++ unsigned int buf_count;
6116 ++ u8 buf[AES_BLOCK_SIZE];
6117 ++};
6118 ++
6119 + /***** SHA related defines *****/
6120 + #define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
6121 + #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
6122 +@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
6123 + struct ccp_cmd cmd;
6124 + };
6125 +
6126 ++struct ccp_sha_exp_ctx {
6127 ++ enum ccp_sha_type type;
6128 ++
6129 ++ u64 msg_bits;
6130 ++
6131 ++ unsigned int first;
6132 ++
6133 ++ u8 ctx[MAX_SHA_CONTEXT_SIZE];
6134 ++
6135 ++ unsigned int buf_count;
6136 ++ u8 buf[MAX_SHA_BLOCK_SIZE];
6137 ++};
6138 ++
6139 + /***** Common Context Structure *****/
6140 + struct ccp_ctx {
6141 + int (*complete)(struct crypto_async_request *req, int ret);
6142 +diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
6143 +index c0656e7f37b5..80239ae69527 100644
6144 +--- a/drivers/crypto/marvell/cesa.c
6145 ++++ b/drivers/crypto/marvell/cesa.c
6146 +@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
6147 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
6148 + cesa->regs = devm_ioremap_resource(dev, res);
6149 + if (IS_ERR(cesa->regs))
6150 +- return -ENOMEM;
6151 ++ return PTR_ERR(cesa->regs);
6152 +
6153 + ret = mv_cesa_dev_dma_init(cesa);
6154 + if (ret)
6155 +diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
6156 +index 0e82ce3c383e..976b01e58afb 100644
6157 +--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
6158 ++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
6159 +@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
6160 + uint32_t vf_mask);
6161 + void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
6162 + void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
6163 ++int adf_init_pf_wq(void);
6164 ++void adf_exit_pf_wq(void);
6165 + #else
6166 + static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
6167 + {
6168 +@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
6169 + static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
6170 + {
6171 + }
6172 ++
6173 ++static inline int adf_init_pf_wq(void)
6174 ++{
6175 ++ return 0;
6176 ++}
6177 ++
6178 ++static inline void adf_exit_pf_wq(void)
6179 ++{
6180 ++}
6181 + #endif
6182 + #endif
6183 +diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
6184 +index 5c897e6e7994..3c3f948290ca 100644
6185 +--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
6186 ++++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
6187 +@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
6188 + if (adf_init_aer())
6189 + goto err_aer;
6190 +
6191 ++ if (adf_init_pf_wq())
6192 ++ goto err_pf_wq;
6193 ++
6194 + if (qat_crypto_register())
6195 + goto err_crypto_register;
6196 +
6197 + return 0;
6198 +
6199 + err_crypto_register:
6200 ++ adf_exit_pf_wq();
6201 ++err_pf_wq:
6202 + adf_exit_aer();
6203 + err_aer:
6204 + adf_chr_drv_destroy();
6205 +@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
6206 + {
6207 + adf_chr_drv_destroy();
6208 + adf_exit_aer();
6209 ++ adf_exit_pf_wq();
6210 + qat_crypto_unregister();
6211 + adf_clean_vf_map(false);
6212 + mutex_destroy(&adf_ctl_lock);
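
The hunk threads the new PF workqueue setup into the module's goto-based unwind chain:
each successful init step gains a matching label so a later failure tears everything
down in reverse order. The generic shape of that idiom, as a sketch with placeholder
init_*/exit_* helpers:

    static int __init mod_init(void)
    {
        int ret;

        ret = init_a();
        if (ret)
            return ret;
        ret = init_b();             /* here: adf_init_pf_wq() */
        if (ret)
            goto err_b;
        ret = init_c();
        if (ret)
            goto err_c;
        return 0;

    err_c:
        exit_b();                   /* undo in reverse order */
    err_b:
        exit_a();
        return ret;
    }
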
6213 +diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
6214 +index 1117a8b58280..38a0415e767d 100644
6215 +--- a/drivers/crypto/qat/qat_common/adf_sriov.c
6216 ++++ b/drivers/crypto/qat/qat_common/adf_sriov.c
6217 +@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
6218 + int i;
6219 + u32 reg;
6220 +
6221 +- /* Workqueue for PF2VF responses */
6222 +- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
6223 +- if (!pf2vf_resp_wq)
6224 +- return -ENOMEM;
6225 +-
6226 + for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
6227 + i++, vf_info++) {
6228 + /* This ptr will be populated when VFs will be created */
6229 +@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
6230 +
6231 + kfree(accel_dev->pf.vf_info);
6232 + accel_dev->pf.vf_info = NULL;
6233 +-
6234 +- if (pf2vf_resp_wq) {
6235 +- destroy_workqueue(pf2vf_resp_wq);
6236 +- pf2vf_resp_wq = NULL;
6237 +- }
6238 + }
6239 + EXPORT_SYMBOL_GPL(adf_disable_sriov);
6240 +
6241 +@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
6242 + return numvfs;
6243 + }
6244 + EXPORT_SYMBOL_GPL(adf_sriov_configure);
6245 ++
6246 ++int __init adf_init_pf_wq(void)
6247 ++{
6248 ++ /* Workqueue for PF2VF responses */
6249 ++ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
6250 ++
6251 ++ return !pf2vf_resp_wq ? -ENOMEM : 0;
6252 ++}
6253 ++
6254 ++void adf_exit_pf_wq(void)
6255 ++{
6256 ++ if (pf2vf_resp_wq) {
6257 ++ destroy_workqueue(pf2vf_resp_wq);
6258 ++ pf2vf_resp_wq = NULL;
6259 ++ }
6260 ++}
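
Moving the workqueue into dedicated init/exit helpers means it is created once at module
load instead of on every SR-IOV enable, and removes an allocation from a path with no
clean way to report failure. A condensed sketch of the single-instance lifecycle adopted
here, names hypothetical:

    static struct workqueue_struct *wq;

    int init_wq(void)
    {
        wq = create_workqueue("wq");
        return wq ? 0 : -ENOMEM;    /* fails the module load instead */
    }

    void exit_wq(void)
    {
        if (wq) {
            destroy_workqueue(wq);
            wq = NULL;              /* safe if called more than once */
        }
    }
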
6261 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
6262 +index a19ee127edca..e72fea737a0d 100644
6263 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
6264 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
6265 +@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
6266 + unsigned int todo;
6267 + struct sg_mapping_iter mi, mo;
6268 + unsigned int oi, oo; /* offset for in and out */
6269 ++ unsigned long flags;
6270 +
6271 + if (areq->nbytes == 0)
6272 + return 0;
6273 +@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
6274 + return -EINVAL;
6275 + }
6276 +
6277 +- spin_lock_bh(&ss->slock);
6278 ++ spin_lock_irqsave(&ss->slock, flags);
6279 +
6280 + for (i = 0; i < op->keylen; i += 4)
6281 + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
6282 +@@ -117,7 +118,7 @@ release_ss:
6283 + sg_miter_stop(&mi);
6284 + sg_miter_stop(&mo);
6285 + writel(0, ss->base + SS_CTL);
6286 +- spin_unlock_bh(&ss->slock);
6287 ++ spin_unlock_irqrestore(&ss->slock, flags);
6288 + return err;
6289 + }
6290 +
6291 +@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
6292 + unsigned int ob = 0; /* offset in buf */
6293 + unsigned int obo = 0; /* offset in bufo*/
6294 + unsigned int obl = 0; /* length of data in bufo */
6295 ++ unsigned long flags;
6296 +
6297 + if (areq->nbytes == 0)
6298 + return 0;
6299 +@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
6300 + if (no_chunk == 1)
6301 + return sun4i_ss_opti_poll(areq);
6302 +
6303 +- spin_lock_bh(&ss->slock);
6304 ++ spin_lock_irqsave(&ss->slock, flags);
6305 +
6306 + for (i = 0; i < op->keylen; i += 4)
6307 + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
6308 +@@ -308,7 +310,7 @@ release_ss:
6309 + sg_miter_stop(&mi);
6310 + sg_miter_stop(&mo);
6311 + writel(0, ss->base + SS_CTL);
6312 +- spin_unlock_bh(&ss->slock);
6313 ++ spin_unlock_irqrestore(&ss->slock, flags);
6314 +
6315 + return err;
6316 + }
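
spin_lock_bh() only masks softirqs, so it can deadlock when the same lock is reachable
with hardirqs in play, or when a caller already runs with interrupts disabled. The
irqsave variant used above nests safely in any context. The pattern in isolation:

    unsigned long flags;

    spin_lock_irqsave(&ss->slock, flags);   /* also disables hardirqs */
    /* ... program SS_KEY0 / SS_CTL and poll the engine ... */
    spin_unlock_irqrestore(&ss->slock, flags);
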
6317 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
6318 +index a0d4a08313ae..b7ee8d30147d 100644
6319 +--- a/drivers/crypto/talitos.c
6320 ++++ b/drivers/crypto/talitos.c
6321 +@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
6322 + ptr->eptr = upper_32_bits(dma_addr);
6323 + }
6324 +
6325 ++static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
6326 ++ struct talitos_ptr *src_ptr, bool is_sec1)
6327 ++{
6328 ++ dst_ptr->ptr = src_ptr->ptr;
6329 ++ if (!is_sec1)
6330 ++ dst_ptr->eptr = src_ptr->eptr;
6331 ++}
6332 ++
6333 + static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
6334 + bool is_sec1)
6335 + {
6336 +@@ -827,6 +835,16 @@ struct talitos_ahash_req_ctx {
6337 + struct scatterlist *psrc;
6338 + };
6339 +
6340 ++struct talitos_export_state {
6341 ++ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
6342 ++ u8 buf[HASH_MAX_BLOCK_SIZE];
6343 ++ unsigned int swinit;
6344 ++ unsigned int first;
6345 ++ unsigned int last;
6346 ++ unsigned int to_hash_later;
6347 ++ unsigned int nbuf;
6348 ++};
6349 ++
6350 + static int aead_setkey(struct crypto_aead *authenc,
6351 + const u8 *key, unsigned int keylen)
6352 + {
6353 +@@ -1083,21 +1101,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
6354 + sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
6355 + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
6356 + : DMA_TO_DEVICE);
6357 +-
6358 + /* hmac data */
6359 + desc->ptr[1].len = cpu_to_be16(areq->assoclen);
6360 + if (sg_count > 1 &&
6361 + (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
6362 + areq->assoclen,
6363 + &edesc->link_tbl[tbl_off])) > 1) {
6364 +- tbl_off += ret;
6365 +-
6366 + to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
6367 + sizeof(struct talitos_ptr), 0);
6368 + desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
6369 +
6370 + dma_sync_single_for_device(dev, edesc->dma_link_tbl,
6371 + edesc->dma_len, DMA_BIDIRECTIONAL);
6372 ++
6373 ++ tbl_off += ret;
6374 + } else {
6375 + to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
6376 + desc->ptr[1].j_extent = 0;
6377 +@@ -1126,11 +1143,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
6378 + if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
6379 + sg_link_tbl_len += authsize;
6380 +
6381 +- if (sg_count > 1 &&
6382 +- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
6383 +- sg_link_tbl_len,
6384 +- &edesc->link_tbl[tbl_off])) > 1) {
6385 +- tbl_off += ret;
6386 ++ if (sg_count == 1) {
6387 ++ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
6388 ++ areq->assoclen, 0);
6389 ++ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
6390 ++ areq->assoclen, sg_link_tbl_len,
6391 ++ &edesc->link_tbl[tbl_off])) >
6392 ++ 1) {
6393 + desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
6394 + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
6395 + tbl_off *
6396 +@@ -1138,8 +1157,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
6397 + dma_sync_single_for_device(dev, edesc->dma_link_tbl,
6398 + edesc->dma_len,
6399 + DMA_BIDIRECTIONAL);
6400 +- } else
6401 +- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
6402 ++ tbl_off += ret;
6403 ++ } else {
6404 ++ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
6405 ++ }
6406 +
6407 + /* cipher out */
6408 + desc->ptr[5].len = cpu_to_be16(cryptlen);
6409 +@@ -1151,11 +1172,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
6410 +
6411 + edesc->icv_ool = false;
6412 +
6413 +- if (sg_count > 1 &&
6414 +- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
6415 ++ if (sg_count == 1) {
6416 ++ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
6417 ++ areq->assoclen, 0);
6418 ++ } else if ((sg_count =
6419 ++ sg_to_link_tbl_offset(areq->dst, sg_count,
6420 + areq->assoclen, cryptlen,
6421 +- &edesc->link_tbl[tbl_off])) >
6422 +- 1) {
6423 ++ &edesc->link_tbl[tbl_off])) > 1) {
6424 + struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
6425 +
6426 + to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
6427 +@@ -1178,8 +1201,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
6428 + edesc->dma_len, DMA_BIDIRECTIONAL);
6429 +
6430 + edesc->icv_ool = true;
6431 +- } else
6432 +- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
6433 ++ } else {
6434 ++ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
6435 ++ }
6436 +
6437 + /* iv out */
6438 + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
6439 +@@ -1967,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
6440 + return ahash_process_req(areq, areq->nbytes);
6441 + }
6442 +
6443 ++static int ahash_export(struct ahash_request *areq, void *out)
6444 ++{
6445 ++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
6446 ++ struct talitos_export_state *export = out;
6447 ++
6448 ++ memcpy(export->hw_context, req_ctx->hw_context,
6449 ++ req_ctx->hw_context_size);
6450 ++ memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
6451 ++ export->swinit = req_ctx->swinit;
6452 ++ export->first = req_ctx->first;
6453 ++ export->last = req_ctx->last;
6454 ++ export->to_hash_later = req_ctx->to_hash_later;
6455 ++ export->nbuf = req_ctx->nbuf;
6456 ++
6457 ++ return 0;
6458 ++}
6459 ++
6460 ++static int ahash_import(struct ahash_request *areq, const void *in)
6461 ++{
6462 ++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
6463 ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
6464 ++ const struct talitos_export_state *export = in;
6465 ++
6466 ++ memset(req_ctx, 0, sizeof(*req_ctx));
6467 ++ req_ctx->hw_context_size =
6468 ++ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
6469 ++ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
6470 ++ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
6471 ++ memcpy(req_ctx->hw_context, export->hw_context,
6472 ++ req_ctx->hw_context_size);
6473 ++ memcpy(req_ctx->buf, export->buf, export->nbuf);
6474 ++ req_ctx->swinit = export->swinit;
6475 ++ req_ctx->first = export->first;
6476 ++ req_ctx->last = export->last;
6477 ++ req_ctx->to_hash_later = export->to_hash_later;
6478 ++ req_ctx->nbuf = export->nbuf;
6479 ++
6480 ++ return 0;
6481 ++}
6482 ++
6483 + struct keyhash_result {
6484 + struct completion completion;
6485 + int err;
6486 +@@ -2444,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
6487 + { .type = CRYPTO_ALG_TYPE_AHASH,
6488 + .alg.hash = {
6489 + .halg.digestsize = MD5_DIGEST_SIZE,
6490 ++ .halg.statesize = sizeof(struct talitos_export_state),
6491 + .halg.base = {
6492 + .cra_name = "md5",
6493 + .cra_driver_name = "md5-talitos",
6494 +@@ -2459,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
6495 + { .type = CRYPTO_ALG_TYPE_AHASH,
6496 + .alg.hash = {
6497 + .halg.digestsize = SHA1_DIGEST_SIZE,
6498 ++ .halg.statesize = sizeof(struct talitos_export_state),
6499 + .halg.base = {
6500 + .cra_name = "sha1",
6501 + .cra_driver_name = "sha1-talitos",
6502 +@@ -2474,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
6503 + { .type = CRYPTO_ALG_TYPE_AHASH,
6504 + .alg.hash = {
6505 + .halg.digestsize = SHA224_DIGEST_SIZE,
6506 ++ .halg.statesize = sizeof(struct talitos_export_state),
6507 + .halg.base = {
6508 + .cra_name = "sha224",
6509 + .cra_driver_name = "sha224-talitos",
6510 +@@ -2489,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
6511 + { .type = CRYPTO_ALG_TYPE_AHASH,
6512 + .alg.hash = {
6513 + .halg.digestsize = SHA256_DIGEST_SIZE,
6514 ++ .halg.statesize = sizeof(struct talitos_export_state),
6515 + .halg.base = {
6516 + .cra_name = "sha256",
6517 + .cra_driver_name = "sha256-talitos",
6518 +@@ -2504,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
6519 + { .type = CRYPTO_ALG_TYPE_AHASH,
6520 + .alg.hash = {
6521 + .halg.digestsize = SHA384_DIGEST_SIZE,
6522 ++ .halg.statesize = sizeof(struct talitos_export_state),
6523 + .halg.base = {
6524 + .cra_name = "sha384",
6525 + .cra_driver_name = "sha384-talitos",
6526 +@@ -2519,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
6527 + { .type = CRYPTO_ALG_TYPE_AHASH,
6528 + .alg.hash = {
6529 + .halg.digestsize = SHA512_DIGEST_SIZE,
6530 ++ .halg.statesize = sizeof(struct talitos_export_state),
6531 + .halg.base = {
6532 + .cra_name = "sha512",
6533 + .cra_driver_name = "sha512-talitos",
6534 +@@ -2534,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
6535 + { .type = CRYPTO_ALG_TYPE_AHASH,
6536 + .alg.hash = {
6537 + .halg.digestsize = MD5_DIGEST_SIZE,
6538 ++ .halg.statesize = sizeof(struct talitos_export_state),
6539 + .halg.base = {
6540 + .cra_name = "hmac(md5)",
6541 + .cra_driver_name = "hmac-md5-talitos",
6542 +@@ -2549,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
6543 + { .type = CRYPTO_ALG_TYPE_AHASH,
6544 + .alg.hash = {
6545 + .halg.digestsize = SHA1_DIGEST_SIZE,
6546 ++ .halg.statesize = sizeof(struct talitos_export_state),
6547 + .halg.base = {
6548 + .cra_name = "hmac(sha1)",
6549 + .cra_driver_name = "hmac-sha1-talitos",
6550 +@@ -2564,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
6551 + { .type = CRYPTO_ALG_TYPE_AHASH,
6552 + .alg.hash = {
6553 + .halg.digestsize = SHA224_DIGEST_SIZE,
6554 ++ .halg.statesize = sizeof(struct talitos_export_state),
6555 + .halg.base = {
6556 + .cra_name = "hmac(sha224)",
6557 + .cra_driver_name = "hmac-sha224-talitos",
6558 +@@ -2579,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
6559 + { .type = CRYPTO_ALG_TYPE_AHASH,
6560 + .alg.hash = {
6561 + .halg.digestsize = SHA256_DIGEST_SIZE,
6562 ++ .halg.statesize = sizeof(struct talitos_export_state),
6563 + .halg.base = {
6564 + .cra_name = "hmac(sha256)",
6565 + .cra_driver_name = "hmac-sha256-talitos",
6566 +@@ -2594,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
6567 + { .type = CRYPTO_ALG_TYPE_AHASH,
6568 + .alg.hash = {
6569 + .halg.digestsize = SHA384_DIGEST_SIZE,
6570 ++ .halg.statesize = sizeof(struct talitos_export_state),
6571 + .halg.base = {
6572 + .cra_name = "hmac(sha384)",
6573 + .cra_driver_name = "hmac-sha384-talitos",
6574 +@@ -2609,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
6575 + { .type = CRYPTO_ALG_TYPE_AHASH,
6576 + .alg.hash = {
6577 + .halg.digestsize = SHA512_DIGEST_SIZE,
6578 ++ .halg.statesize = sizeof(struct talitos_export_state),
6579 + .halg.base = {
6580 + .cra_name = "hmac(sha512)",
6581 + .cra_driver_name = "hmac-sha512-talitos",
6582 +@@ -2629,21 +2705,11 @@ struct talitos_crypto_alg {
6583 + struct talitos_alg_template algt;
6584 + };
6585 +
6586 +-static int talitos_cra_init(struct crypto_tfm *tfm)
6587 ++static int talitos_init_common(struct talitos_ctx *ctx,
6588 ++ struct talitos_crypto_alg *talitos_alg)
6589 + {
6590 +- struct crypto_alg *alg = tfm->__crt_alg;
6591 +- struct talitos_crypto_alg *talitos_alg;
6592 +- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
6593 + struct talitos_private *priv;
6594 +
6595 +- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
6596 +- talitos_alg = container_of(__crypto_ahash_alg(alg),
6597 +- struct talitos_crypto_alg,
6598 +- algt.alg.hash);
6599 +- else
6600 +- talitos_alg = container_of(alg, struct talitos_crypto_alg,
6601 +- algt.alg.crypto);
6602 +-
6603 + /* update context with ptr to dev */
6604 + ctx->dev = talitos_alg->dev;
6605 +
6606 +@@ -2661,10 +2727,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
6607 + return 0;
6608 + }
6609 +
6610 ++static int talitos_cra_init(struct crypto_tfm *tfm)
6611 ++{
6612 ++ struct crypto_alg *alg = tfm->__crt_alg;
6613 ++ struct talitos_crypto_alg *talitos_alg;
6614 ++ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
6615 ++
6616 ++ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
6617 ++ talitos_alg = container_of(__crypto_ahash_alg(alg),
6618 ++ struct talitos_crypto_alg,
6619 ++ algt.alg.hash);
6620 ++ else
6621 ++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
6622 ++ algt.alg.crypto);
6623 ++
6624 ++ return talitos_init_common(ctx, talitos_alg);
6625 ++}
6626 ++
6627 + static int talitos_cra_init_aead(struct crypto_aead *tfm)
6628 + {
6629 +- talitos_cra_init(crypto_aead_tfm(tfm));
6630 +- return 0;
6631 ++ struct aead_alg *alg = crypto_aead_alg(tfm);
6632 ++ struct talitos_crypto_alg *talitos_alg;
6633 ++ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
6634 ++
6635 ++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
6636 ++ algt.alg.aead);
6637 ++
6638 ++ return talitos_init_common(ctx, talitos_alg);
6639 + }
6640 +
6641 + static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
6642 +@@ -2787,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
6643 + t_alg->algt.alg.hash.finup = ahash_finup;
6644 + t_alg->algt.alg.hash.digest = ahash_digest;
6645 + t_alg->algt.alg.hash.setkey = ahash_setkey;
6646 ++ t_alg->algt.alg.hash.import = ahash_import;
6647 ++ t_alg->algt.alg.hash.export = ahash_export;
6648 +
6649 + if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
6650 + !strncmp(alg->cra_name, "hmac", 4)) {
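
The refactor above splits the shared context setup into talitos_init_common() because a
new-style aead tfm must locate its talitos_crypto_alg through a different embedded
member than ablkcipher/ahash tfms do. The container_of() recovery in isolation, fields
per the hunk, everything else elided:

    struct talitos_crypto_alg *t_alg =
            container_of(crypto_aead_alg(tfm),      /* embedded member  */
                         struct talitos_crypto_alg, /* enclosing struct */
                         algt.alg.aead);            /* member's name    */
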
6651 +diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
6652 +index 4c243c1ffc7f..790f7cadc1ed 100644
6653 +--- a/drivers/crypto/ux500/cryp/cryp_core.c
6654 ++++ b/drivers/crypto/ux500/cryp/cryp_core.c
6655 +@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
6656 +
6657 + device_data->phybase = res->start;
6658 + device_data->base = devm_ioremap_resource(dev, res);
6659 +- if (!device_data->base) {
6660 ++ if (IS_ERR(device_data->base)) {
6661 + dev_err(dev, "[%s]: ioremap failed!", __func__);
6662 +- ret = -ENOMEM;
6663 ++ ret = PTR_ERR(device_data->base);
6664 + goto out;
6665 + }
6666 +
6667 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
6668 +index d6fdc583ce5d..574e87c7f2b8 100644
6669 +--- a/drivers/crypto/ux500/hash/hash_core.c
6670 ++++ b/drivers/crypto/ux500/hash/hash_core.c
6671 +@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
6672 +
6673 + device_data->phybase = res->start;
6674 + device_data->base = devm_ioremap_resource(dev, res);
6675 +- if (!device_data->base) {
6676 ++ if (IS_ERR(device_data->base)) {
6677 + dev_err(dev, "%s: ioremap() failed!\n", __func__);
6678 +- ret = -ENOMEM;
6679 ++ ret = PTR_ERR(device_data->base);
6680 + goto out;
6681 + }
6682 + spin_lock_init(&device_data->ctx_lock);
6683 +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
6684 +index 5ad0ec1f0e29..97199b3c25a2 100644
6685 +--- a/drivers/dma/dw/core.c
6686 ++++ b/drivers/dma/dw/core.c
6687 +@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
6688 + static void dwc_initialize(struct dw_dma_chan *dwc)
6689 + {
6690 + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
6691 +- struct dw_dma_slave *dws = dwc->chan.private;
6692 + u32 cfghi = DWC_CFGH_FIFO_MODE;
6693 + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
6694 +
6695 + if (dwc->initialized == true)
6696 + return;
6697 +
6698 +- if (dws) {
6699 +- /*
6700 +- * We need controller-specific data to set up slave
6701 +- * transfers.
6702 +- */
6703 +- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
6704 +-
6705 +- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
6706 +- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
6707 +- } else {
6708 +- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
6709 +- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
6710 +- }
6711 ++ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
6712 ++ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
6713 +
6714 + channel_writel(dwc, CFG_LO, cfglo);
6715 + channel_writel(dwc, CFG_HI, cfghi);
6716 +@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
6717 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
6718 + struct dw_dma_slave *dws = param;
6719 +
6720 +- if (!dws || dws->dma_dev != chan->device->dev)
6721 ++ if (dws->dma_dev != chan->device->dev)
6722 + return false;
6723 +
6724 + /* We have to copy data since dws can be temporary storage */
6725 +@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
6726 + * doesn't mean what you think it means), and status writeback.
6727 + */
6728 +
6729 ++ /*
6730 ++ * We need controller-specific data to set up slave transfers.
6731 ++ */
6732 ++ if (chan->private && !dw_dma_filter(chan, chan->private)) {
6733 ++ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
6734 ++ return -EINVAL;
6735 ++ }
6736 ++
6737 + /* Enable controller here if needed */
6738 + if (!dw->in_use)
6739 + dw_dma_on(dw);
6740 +@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
6741 + spin_lock_irqsave(&dwc->lock, flags);
6742 + list_splice_init(&dwc->free_list, &list);
6743 + dwc->descs_allocated = 0;
6744 ++
6745 ++ /* Clear custom channel configuration */
6746 ++ dwc->src_id = 0;
6747 ++ dwc->dst_id = 0;
6748 ++
6749 ++ dwc->src_master = 0;
6750 ++ dwc->dst_master = 0;
6751 ++
6752 + dwc->initialized = false;
6753 +
6754 + /* Disable interrupts */
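
With the slave parameters now validated once in ->device_alloc_chan_resources(), and
cleared again on channel free, a client still hands its dw_dma_slave to the controller
through the filter function. A hedged request-side sketch; dma_dev and the request IDs
are placeholders:

    dma_cap_mask_t mask;
    struct dw_dma_slave slave = {
        .dma_dev = dma_dev,     /* must match the controller device */
        .src_id  = 0,
        .dst_id  = 1,
    };
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, dw_dma_filter, &slave);
    if (!chan)
        return -ENODEV;
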
6755 +diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
6756 +index e3d7fcb69b4c..2dac314a2d7a 100644
6757 +--- a/drivers/dma/edma.c
6758 ++++ b/drivers/dma/edma.c
6759 +@@ -1563,32 +1563,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
6760 + return IRQ_HANDLED;
6761 + }
6762 +
6763 +-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
6764 +-{
6765 +- struct platform_device *tc_pdev;
6766 +- int ret;
6767 +-
6768 +- if (!IS_ENABLED(CONFIG_OF) || !tc)
6769 +- return;
6770 +-
6771 +- tc_pdev = of_find_device_by_node(tc->node);
6772 +- if (!tc_pdev) {
6773 +- pr_err("%s: TPTC device is not found\n", __func__);
6774 +- return;
6775 +- }
6776 +- if (!pm_runtime_enabled(&tc_pdev->dev))
6777 +- pm_runtime_enable(&tc_pdev->dev);
6778 +-
6779 +- if (enable)
6780 +- ret = pm_runtime_get_sync(&tc_pdev->dev);
6781 +- else
6782 +- ret = pm_runtime_put_sync(&tc_pdev->dev);
6783 +-
6784 +- if (ret < 0)
6785 +- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
6786 +- enable ? "get" : "put", dev_name(&tc_pdev->dev));
6787 +-}
6788 +-
6789 + /* Alloc channel resources */
6790 + static int edma_alloc_chan_resources(struct dma_chan *chan)
6791 + {
6792 +@@ -1625,8 +1599,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
6793 + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
6794 + echan->hw_triggered ? "HW" : "SW");
6795 +
6796 +- edma_tc_set_pm_state(echan->tc, true);
6797 +-
6798 + return 0;
6799 +
6800 + err_slot:
6801 +@@ -1663,7 +1635,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
6802 + echan->alloced = false;
6803 + }
6804 +
6805 +- edma_tc_set_pm_state(echan->tc, false);
6806 + echan->tc = NULL;
6807 + echan->hw_triggered = false;
6808 +
6809 +@@ -2408,10 +2379,8 @@ static int edma_pm_suspend(struct device *dev)
6810 + int i;
6811 +
6812 + for (i = 0; i < ecc->num_channels; i++) {
6813 +- if (echan[i].alloced) {
6814 ++ if (echan[i].alloced)
6815 + edma_setup_interrupt(&echan[i], false);
6816 +- edma_tc_set_pm_state(echan[i].tc, false);
6817 +- }
6818 + }
6819 +
6820 + return 0;
6821 +@@ -2441,8 +2410,6 @@ static int edma_pm_resume(struct device *dev)
6822 +
6823 + /* Set up channel -> slot mapping for the entry slot */
6824 + edma_set_chmap(&echan[i], echan[i].slot[0]);
6825 +-
6826 +- edma_tc_set_pm_state(echan[i].tc, true);
6827 + }
6828 + }
6829 +
6830 +@@ -2466,7 +2433,8 @@ static struct platform_driver edma_driver = {
6831 +
6832 + static int edma_tptc_probe(struct platform_device *pdev)
6833 + {
6834 +- return 0;
6835 ++ pm_runtime_enable(&pdev->dev);
6836 ++ return pm_runtime_get_sync(&pdev->dev);
6837 + }
6838 +
6839 + static struct platform_driver edma_tptc_driver = {
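
Rather than toggling each TPTC's runtime-PM state around every channel allocation, the
TPTC is now pinned active from its own probe for the driver's lifetime, which is why
edma_tc_set_pm_state() could be deleted wholesale. The two-call idiom in isolation:

    pm_runtime_enable(&pdev->dev);          /* allow runtime PM here  */
    return pm_runtime_get_sync(&pdev->dev); /* and hold the device on */

Note that pm_runtime_get_sync() can return 1 when the device was already active; the
hunk returns it directly, as shown.
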
6840 +diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
6841 +index eef145edb936..025d375fc3d7 100644
6842 +--- a/drivers/dma/hsu/hsu.c
6843 ++++ b/drivers/dma/hsu/hsu.c
6844 +@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
6845 + sr = hsu_chan_readl(hsuc, HSU_CH_SR);
6846 + spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
6847 +
6848 +- return sr;
6849 ++ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
6850 + }
6851 +
6852 + irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
6853 +@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
6854 + static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
6855 + {
6856 + struct hsu_dma_desc *desc = hsuc->desc;
6857 +- size_t bytes = desc->length;
6858 ++ size_t bytes = 0;
6859 + int i;
6860 +
6861 +- i = desc->active % HSU_DMA_CHAN_NR_DESC;
6862 ++ for (i = desc->active; i < desc->nents; i++)
6863 ++ bytes += desc->sg[i].len;
6864 ++
6865 ++ i = HSU_DMA_CHAN_NR_DESC - 1;
6866 + do {
6867 + bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
6868 + } while (--i >= 0);
6869 +diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
6870 +index 578a8ee8cd05..6b070c22b1df 100644
6871 +--- a/drivers/dma/hsu/hsu.h
6872 ++++ b/drivers/dma/hsu/hsu.h
6873 +@@ -41,6 +41,9 @@
6874 + #define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
6875 + #define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
6876 + #define HSU_CH_SR_CHE BIT(15)
6877 ++#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
6878 ++#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
6879 ++#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
6880 +
6881 + /* Bits in HSU_CH_CR */
6882 + #define HSU_CH_CR_CHA BIT(0)
6883 +diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
6884 +index 9794b073d7d7..a5ed9407c51b 100644
6885 +--- a/drivers/dma/omap-dma.c
6886 ++++ b/drivers/dma/omap-dma.c
6887 +@@ -48,6 +48,7 @@ struct omap_chan {
6888 + unsigned dma_sig;
6889 + bool cyclic;
6890 + bool paused;
6891 ++ bool running;
6892 +
6893 + int dma_ch;
6894 + struct omap_desc *desc;
6895 +@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
6896 +
6897 + /* Enable channel */
6898 + omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
6899 ++
6900 ++ c->running = true;
6901 + }
6902 +
6903 + static void omap_dma_stop(struct omap_chan *c)
6904 +@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
6905 +
6906 + omap_dma_chan_write(c, CLNK_CTRL, val);
6907 + }
6908 ++
6909 ++ c->running = false;
6910 + }
6911 +
6912 + static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
6913 +@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
6914 + struct omap_chan *c = to_omap_dma_chan(chan);
6915 + struct virt_dma_desc *vd;
6916 + enum dma_status ret;
6917 +- uint32_t ccr;
6918 + unsigned long flags;
6919 +
6920 +- ccr = omap_dma_chan_read(c, CCR);
6921 +- /* The channel is no longer active, handle the completion right away */
6922 +- if (!(ccr & CCR_ENABLE))
6923 +- omap_dma_callback(c->dma_ch, 0, c);
6924 +-
6925 + ret = dma_cookie_status(chan, cookie, txstate);
6926 ++
6927 ++ if (!c->paused && c->running) {
6928 ++ uint32_t ccr = omap_dma_chan_read(c, CCR);
6929 ++ /*
6930 ++ * The channel is no longer active, set the return value
6931 ++ * accordingly
6932 ++ */
6933 ++ if (!(ccr & CCR_ENABLE))
6934 ++ ret = DMA_COMPLETE;
6935 ++ }
6936 ++
6937 + if (ret == DMA_COMPLETE || !txstate)
6938 + return ret;
6939 +
6940 +diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
6941 +index debca824bed6..77c1c44009d8 100644
6942 +--- a/drivers/dma/pxa_dma.c
6943 ++++ b/drivers/dma/pxa_dma.c
6944 +@@ -122,6 +122,7 @@ struct pxad_chan {
6945 + struct pxad_device {
6946 + struct dma_device slave;
6947 + int nr_chans;
6948 ++ int nr_requestors;
6949 + void __iomem *base;
6950 + struct pxad_phy *phys;
6951 + spinlock_t phy_lock; /* Phy association */
6952 +@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
6953 + return;
6954 +
6955 + /* clear the channel mapping in DRCMR */
6956 +- if (chan->drcmr <= DRCMR_CHLNUM) {
6957 ++ if (chan->drcmr <= pdev->nr_requestors) {
6958 + reg = pxad_drcmr(chan->drcmr);
6959 + writel_relaxed(0, chan->phy->base + reg);
6960 + }
6961 +@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
6962 +
6963 + static void phy_enable(struct pxad_phy *phy, bool misaligned)
6964 + {
6965 ++ struct pxad_device *pdev;
6966 + u32 reg, dalgn;
6967 +
6968 + if (!phy->vchan)
6969 +@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
6970 + "%s(); phy=%p(%d) misaligned=%d\n", __func__,
6971 + phy, phy->idx, misaligned);
6972 +
6973 +- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
6974 ++ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
6975 ++ if (phy->vchan->drcmr <= pdev->nr_requestors) {
6976 + reg = pxad_drcmr(phy->vchan->drcmr);
6977 + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
6978 + }
6979 +@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
6980 + {
6981 + u32 maxburst = 0, dev_addr = 0;
6982 + enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
6983 ++ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
6984 +
6985 + *dcmd = 0;
6986 + if (dir == DMA_DEV_TO_MEM) {
6987 +@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
6988 + dev_addr = chan->cfg.src_addr;
6989 + *dev_src = dev_addr;
6990 + *dcmd |= PXA_DCMD_INCTRGADDR;
6991 +- if (chan->drcmr <= DRCMR_CHLNUM)
6992 ++ if (chan->drcmr <= pdev->nr_requestors)
6993 + *dcmd |= PXA_DCMD_FLOWSRC;
6994 + }
6995 + if (dir == DMA_MEM_TO_DEV) {
6996 +@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
6997 + dev_addr = chan->cfg.dst_addr;
6998 + *dev_dst = dev_addr;
6999 + *dcmd |= PXA_DCMD_INCSRCADDR;
7000 +- if (chan->drcmr <= DRCMR_CHLNUM)
7001 ++ if (chan->drcmr <= pdev->nr_requestors)
7002 + *dcmd |= PXA_DCMD_FLOWTRG;
7003 + }
7004 + if (dir == DMA_MEM_TO_MEM)
7005 +@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
7006 +
7007 + static int pxad_init_dmadev(struct platform_device *op,
7008 + struct pxad_device *pdev,
7009 +- unsigned int nr_phy_chans)
7010 ++ unsigned int nr_phy_chans,
7011 ++ unsigned int nr_requestors)
7012 + {
7013 + int ret;
7014 + unsigned int i;
7015 + struct pxad_chan *c;
7016 +
7017 + pdev->nr_chans = nr_phy_chans;
7018 ++ pdev->nr_requestors = nr_requestors;
7019 + INIT_LIST_HEAD(&pdev->slave.channels);
7020 + pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
7021 + pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
7022 +@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
7023 + const struct of_device_id *of_id;
7024 + struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
7025 + struct resource *iores;
7026 +- int ret, dma_channels = 0;
7027 ++ int ret, dma_channels = 0, nb_requestors = 0;
7028 + const enum dma_slave_buswidth widths =
7029 + DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
7030 + DMA_SLAVE_BUSWIDTH_4_BYTES;
7031 +@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
7032 + return PTR_ERR(pdev->base);
7033 +
7034 + of_id = of_match_device(pxad_dt_ids, &op->dev);
7035 +- if (of_id)
7036 ++ if (of_id) {
7037 + of_property_read_u32(op->dev.of_node, "#dma-channels",
7038 + &dma_channels);
7039 +- else if (pdata && pdata->dma_channels)
7040 ++ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
7041 ++ &nb_requestors);
7042 ++ if (ret) {
7043 ++ dev_warn(pdev->slave.dev,
7044 ++ "#dma-requests set to default 32 as missing in OF: %d",
7045 ++ ret);
7046 ++ nb_requestors = 32;
7047 ++ };
7048 ++ } else if (pdata && pdata->dma_channels) {
7049 + dma_channels = pdata->dma_channels;
7050 +- else
7051 ++ nb_requestors = pdata->nb_requestors;
7052 ++ } else {
7053 + dma_channels = 32; /* default 32 channel */
7054 ++ }
7055 +
7056 + dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
7057 + dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
7058 +@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
7059 + pdev->slave.descriptor_reuse = true;
7060 +
7061 + pdev->slave.dev = &op->dev;
7062 +- ret = pxad_init_dmadev(op, pdev, dma_channels);
7063 ++ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
7064 + if (ret) {
7065 + dev_err(pdev->slave.dev, "unable to register\n");
7066 + return ret;
7067 +@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
7068 +
7069 + platform_set_drvdata(op, pdev);
7070 + pxad_init_debugfs(pdev);
7071 +- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
7072 ++ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
7073 ++ dma_channels, nb_requestors);
7074 + return 0;
7075 + }
7076 +
7077 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
7078 +index 9eee13ef83a5..d87a47547ba5 100644
7079 +--- a/drivers/edac/amd64_edac.c
7080 ++++ b/drivers/edac/amd64_edac.c
7081 +@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
7082 + u64 chan_off;
7083 + u64 dram_base = get_dram_base(pvt, range);
7084 + u64 hole_off = f10_dhar_offset(pvt);
7085 +- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
7086 ++ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
7087 +
7088 + if (hi_rng) {
7089 + /*
7090 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
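
The one-character fix above is the classic C promotion pitfall: "dct_sel_hi & 0xFFFFFC00"
is a 32-bit value, so shifting it left by 16 discards the upper 16 bits before the
assignment widens it to u64; the cast must happen before the shift. A standalone
illustration that compiles as plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hi = 0xFFFFFC00u;
        uint64_t wrong = hi << 16;           /* shifted as 32-bit: 0xFC000000 */
        uint64_t right = (uint64_t)hi << 16; /* widened first: 0xFFFFFC000000 */

        printf("%llx vs %llx\n", (unsigned long long)wrong,
               (unsigned long long)right);
        return 0;
    }
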
7091 +index 01087a38da22..792bdae2b91d 100644
7092 +--- a/drivers/edac/i7core_edac.c
7093 ++++ b/drivers/edac/i7core_edac.c
7094 +@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
7095 +
7096 + i7_dev = get_i7core_dev(mce->socketid);
7097 + if (!i7_dev)
7098 +- return NOTIFY_BAD;
7099 ++ return NOTIFY_DONE;
7100 +
7101 + mci = i7_dev->mci;
7102 + pvt = mci->pvt_info;
7103 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
7104 +index f5c6b97c8958..8bf745d2da7e 100644
7105 +--- a/drivers/edac/sb_edac.c
7106 ++++ b/drivers/edac/sb_edac.c
7107 +@@ -362,6 +362,7 @@ struct sbridge_pvt {
7108 +
7109 + /* Memory type detection */
7110 + bool is_mirrored, is_lockstep, is_close_pg;
7111 ++ bool is_chan_hash;
7112 +
7113 + /* Fifo double buffers */
7114 + struct mce mce_entry[MCE_LOG_LEN];
7115 +@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
7116 + return (pkg >> 2) & 0x1;
7117 + }
7118 +
7119 ++static int haswell_chan_hash(int idx, u64 addr)
7120 ++{
7121 ++ int i;
7122 ++
7123 ++ /*
7124 ++ * XOR even bits from 12:26 to bit0 of idx,
7125 ++ * odd bits from 13:27 to bit1
7126 ++ */
7127 ++ for (i = 12; i < 28; i += 2)
7128 ++ idx ^= (addr >> i) & 3;
7129 ++
7130 ++ return idx;
7131 ++}
7132 ++
7133 + /****************************************************************************
7134 + Memory check routines
7135 + ****************************************************************************/
7136 +@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
7137 + KNL_MAX_CHANNELS : NUM_CHANNELS;
7138 + u64 knl_mc_sizes[KNL_MAX_CHANNELS];
7139 +
7140 ++ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
7141 ++ pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
7142 ++ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
7143 ++ }
7144 + if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
7145 + pvt->info.type == KNIGHTS_LANDING)
7146 + pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
7147 +@@ -1839,8 +1858,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
7148 + edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
7149 + n_tads, gb, (mb*1000)/1024,
7150 + ((u64)tmp_mb) << 20L,
7151 +- (u32)TAD_SOCK(reg),
7152 +- (u32)TAD_CH(reg),
7153 ++ (u32)(1 << TAD_SOCK(reg)),
7154 ++ (u32)TAD_CH(reg) + 1,
7155 + (u32)TAD_TGT0(reg),
7156 + (u32)TAD_TGT1(reg),
7157 + (u32)TAD_TGT2(reg),
7158 +@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
7159 + }
7160 +
7161 + ch_way = TAD_CH(reg) + 1;
7162 +- sck_way = TAD_SOCK(reg) + 1;
7163 ++ sck_way = TAD_SOCK(reg);
7164 +
7165 + if (ch_way == 3)
7166 + idx = addr >> 6;
7167 +- else
7168 ++ else {
7169 + idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
7170 ++ if (pvt->is_chan_hash)
7171 ++ idx = haswell_chan_hash(idx, addr);
7172 ++ }
7173 + idx = idx % ch_way;
7174 +
7175 + /*
7176 +@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
7177 + switch(ch_way) {
7178 + case 2:
7179 + case 4:
7180 +- sck_xch = 1 << sck_way * (ch_way >> 1);
7181 ++ sck_xch = (1 << sck_way) * (ch_way >> 1);
7182 + break;
7183 + default:
7184 + sprintf(msg, "Invalid mirror set. Can't decode addr");
7185 +@@ -2175,7 +2197,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
7186 + n_tads,
7187 + addr,
7188 + limit,
7189 +- (u32)TAD_SOCK(reg),
7190 ++ sck_way,
7191 + ch_way,
7192 + offset,
7193 + idx,
7194 +@@ -2190,18 +2212,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
7195 + offset, addr);
7196 + return -EINVAL;
7197 + }
7198 +- addr -= offset;
7199 +- /* Store the low bits [0:6] of the addr */
7200 +- ch_addr = addr & 0x7f;
7201 +- /* Remove socket wayness and remove 6 bits */
7202 +- addr >>= 6;
7203 +- addr = div_u64(addr, sck_xch);
7204 +-#if 0
7205 +- /* Divide by channel way */
7206 +- addr = addr / ch_way;
7207 +-#endif
7208 +- /* Recover the last 6 bits */
7209 +- ch_addr |= addr << 6;
7210 ++
7211 ++ ch_addr = addr - offset;
7212 ++ ch_addr >>= (6 + shiftup);
7213 ++ ch_addr /= sck_xch;
7214 ++ ch_addr <<= (6 + shiftup);
7215 ++ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
7216 +
7217 + /*
7218 + * Step 3) Decode rank
7219 +@@ -3152,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
7220 +
7221 + mci = get_mci_for_node_id(mce->socketid);
7222 + if (!mci)
7223 +- return NOTIFY_BAD;
7224 ++ return NOTIFY_DONE;
7225 + pvt = mci->pvt_info;
7226 +
7227 + /*
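
haswell_chan_hash() folds the physical address into the channel index: each step XORs a
2-bit slice "(addr >> i) & 3" into idx, so even address bits 12..26 land in idx bit 0
and odd bits 13..27 in idx bit 1. A standalone copy with two worked cases:

    #include <stdint.h>

    static int chan_hash(int idx, uint64_t addr)
    {
        int i;

        for (i = 12; i < 28; i += 2)
            idx ^= (int)((addr >> i) & 3);  /* fold one bit pair per step */
        return idx;
    }

    /* chan_hash(0, 1ULL << 12) == 1: address bit 12 flips idx bit 0.
     * chan_hash(0, 1ULL << 13) == 2: address bit 13 flips idx bit 1. */
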
7228 +diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
7229 +index 74dfb7f4f277..d8cac4661cfe 100644
7230 +--- a/drivers/extcon/extcon-max77843.c
7231 ++++ b/drivers/extcon/extcon-max77843.c
7232 +@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
7233 + /* Clear IRQ bits before request IRQs */
7234 + ret = regmap_bulk_read(max77843->regmap_muic,
7235 + MAX77843_MUIC_REG_INT1, info->status,
7236 +- MAX77843_MUIC_IRQ_NUM);
7237 ++ MAX77843_MUIC_STATUS_NUM);
7238 + if (ret) {
7239 + dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
7240 + goto err_muic_irq;
7241 +diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
7242 +index 0c2f0a61b0ea..0b631e5b5b84 100644
7243 +--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
7244 ++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
7245 +@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
7246 +
7247 + found:
7248 + __ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
7249 +- header = (struct nvram_header *)nvram_buf;
7250 +- nvram_len = header->len;
7251 ++ nvram_len = ((struct nvram_header *)(nvram_buf))->len;
7252 + if (nvram_len > size) {
7253 + pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
7254 + nvram_len = size;
7255 + }
7256 + if (nvram_len >= NVRAM_SPACE) {
7257 + pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
7258 +- header->len, NVRAM_SPACE - 1);
7259 ++ nvram_len, NVRAM_SPACE - 1);
7260 + nvram_len = NVRAM_SPACE - 1;
7261 + }
7262 + /* proceed reading data after header */
7263 +diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
7264 +index 9e15d571b53c..a76c35fc0b92 100644
7265 +--- a/drivers/firmware/efi/arm-init.c
7266 ++++ b/drivers/firmware/efi/arm-init.c
7267 +@@ -203,7 +203,19 @@ void __init efi_init(void)
7268 +
7269 + reserve_regions();
7270 + early_memunmap(memmap.map, params.mmap_size);
7271 +- memblock_mark_nomap(params.mmap & PAGE_MASK,
7272 +- PAGE_ALIGN(params.mmap_size +
7273 +- (params.mmap & ~PAGE_MASK)));
7274 ++
7275 ++ if (IS_ENABLED(CONFIG_ARM)) {
7276 ++ /*
7277 ++ * ARM currently does not allow ioremap_cache() to be called on
7278 ++ * memory regions that are covered by struct page. So remove the
7279 ++ * UEFI memory map from the linear mapping.
7280 ++ */
7281 ++ memblock_mark_nomap(params.mmap & PAGE_MASK,
7282 ++ PAGE_ALIGN(params.mmap_size +
7283 ++ (params.mmap & ~PAGE_MASK)));
7284 ++ } else {
7285 ++ memblock_reserve(params.mmap & PAGE_MASK,
7286 ++ PAGE_ALIGN(params.mmap_size +
7287 ++ (params.mmap & ~PAGE_MASK)));
7288 ++ }
7289 + }
7290 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
7291 +index 2cd37dad67a6..c51f3b2fe3c0 100644
7292 +--- a/drivers/firmware/efi/efi.c
7293 ++++ b/drivers/firmware/efi/efi.c
7294 +@@ -182,6 +182,7 @@ static int generic_ops_register(void)
7295 + {
7296 + generic_ops.get_variable = efi.get_variable;
7297 + generic_ops.set_variable = efi.set_variable;
7298 ++ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
7299 + generic_ops.get_next_variable = efi.get_next_variable;
7300 + generic_ops.query_variable_store = efi_query_variable_store;
7301 +
7302 +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
7303 +index 7f2ea21c730d..6f182fd91a6d 100644
7304 +--- a/drivers/firmware/efi/vars.c
7305 ++++ b/drivers/firmware/efi/vars.c
7306 +@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
7307 + { NULL_GUID, "", NULL },
7308 + };
7309 +
7310 ++/*
7311 ++ * Check if @var_name matches the pattern given in @match_name.
7312 ++ *
7313 ++ * @var_name: an array of @len non-NUL characters.
7314 ++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
7315 ++ * final "*" character matches any trailing characters @var_name,
7316 ++ * including the case when there are none left in @var_name.
7317 ++ * @match: on output, the number of non-wildcard characters in @match_name
7318 ++ * that @var_name matches, regardless of the return value.
7319 ++ * @return: whether @var_name fully matches @match_name.
7320 ++ */
7321 + static bool
7322 + variable_matches(const char *var_name, size_t len, const char *match_name,
7323 + int *match)
7324 + {
7325 + for (*match = 0; ; (*match)++) {
7326 + char c = match_name[*match];
7327 +- char u = var_name[*match];
7328 +
7329 +- /* Wildcard in the matching name means we've matched */
7330 +- if (c == '*')
7331 ++ switch (c) {
7332 ++ case '*':
7333 ++ /* Wildcard in @match_name means we've matched. */
7334 + return true;
7335 +
7336 +- /* Case sensitive match */
7337 +- if (!c && *match == len)
7338 +- return true;
7339 ++ case '\0':
7340 ++ /* @match_name has ended. Has @var_name too? */
7341 ++ return (*match == len);
7342 +
7343 +- if (c != u)
7344 ++ default:
7345 ++ /*
7346 ++ * We've reached a non-wildcard char in @match_name.
7347 ++ * Continue only if there's an identical character in
7348 ++ * @var_name.
7349 ++ */
7350 ++ if (*match < len && c == var_name[*match])
7351 ++ continue;
7352 + return false;
7353 +-
7354 +- if (!c)
7355 +- return true;
7356 ++ }
7357 + }
7358 +- return true;
7359 + }
7360 +
7361 + bool
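
The rewritten variable_matches() compares var_name, which is len bytes long and not
NUL-terminated, against a pattern: '*' matches any tail, NUL matches only if var_name
ended at the same point, and anything else must match character for character. A
standalone rendering for illustration, names shortened:

    #include <stdbool.h>
    #include <stddef.h>

    static bool matches(const char *var, size_t len, const char *pat, int *m)
    {
        for (*m = 0; ; (*m)++) {
            char c = pat[*m];

            if (c == '*')
                return true;                    /* wildcard: matched    */
            if (c == '\0')
                return (size_t)*m == len;       /* both ended together? */
            if ((size_t)*m < len && c == var[*m])
                continue;                       /* exact char, advance  */
            return false;
        }
    }

    /* matches("Boot0000", 8, "Boot*", &m) is true with m == 4. */
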
7362 +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
7363 +index 23196c5fc17c..90c1511d731f 100644
7364 +--- a/drivers/gpio/gpio-pca953x.c
7365 ++++ b/drivers/gpio/gpio-pca953x.c
7366 +@@ -18,6 +18,7 @@
7367 + #include <linux/i2c.h>
7368 + #include <linux/platform_data/pca953x.h>
7369 + #include <linux/slab.h>
7370 ++#include <asm/unaligned.h>
7371 + #include <linux/of_platform.h>
7372 + #include <linux/acpi.h>
7373 +
7374 +@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
7375 + switch (chip->chip_type) {
7376 + case PCA953X_TYPE:
7377 + ret = i2c_smbus_write_word_data(chip->client,
7378 +- reg << 1, (u16) *val);
7379 ++ reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
7380 + break;
7381 + case PCA957X_TYPE:
7382 + ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
7383 +@@ -367,9 +368,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
7384 + memcpy(reg_val, chip->reg_output, NBANK(chip));
7385 + mutex_lock(&chip->i2c_lock);
7386 + for(bank=0; bank<NBANK(chip); bank++) {
7387 +- unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
7388 ++ unsigned bankmask = mask[bank / sizeof(*mask)] >>
7389 ++ ((bank % sizeof(*mask)) * 8);
7390 + if(bankmask) {
7391 +- unsigned bankval = bits[bank/4] >> ((bank % 4) * 8);
7392 ++ unsigned bankval = bits[bank / sizeof(*bits)] >>
7393 ++ ((bank % sizeof(*bits)) * 8);
7394 + reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
7395 + }
7396 + }
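
Casting the byte buffer straight to u16 assumed both natural alignment and host byte
order; get_unaligned() makes the 16-bit load safe anywhere, and cpu_to_le16() fixes the
order the chip sees on the wire. The two-step read in isolation, buffer contents
hypothetical:

    #include <asm/unaligned.h>

    u8 val[2] = { 0x34, 0x12 };     /* bank 0, bank 1 register bytes */
    __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
    /* val[0] reaches the chip first on either host endianness, since
     * SMBus transmits the low byte of the word first. */
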
7397 +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
7398 +index b2b7b78664b8..76ac906b4d78 100644
7399 +--- a/drivers/gpio/gpio-pxa.c
7400 ++++ b/drivers/gpio/gpio-pxa.c
7401 +@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
7402 + writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
7403 +
7404 + ret = pinctrl_gpio_direction_output(chip->base + offset);
7405 +- if (!ret)
7406 +- return 0;
7407 ++ if (ret)
7408 ++ return ret;
7409 +
7410 + spin_lock_irqsave(&gpio_lock, flags);
7411 +
7412 +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
7413 +index 540cbc88c7a2..cc4d9bd0839e 100644
7414 +--- a/drivers/gpio/gpiolib-acpi.c
7415 ++++ b/drivers/gpio/gpiolib-acpi.c
7416 +@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
7417 + lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
7418 + if (lookup) {
7419 + lookup->adev = adev;
7420 +- lookup->con_id = con_id;
7421 ++ lookup->con_id = kstrdup(con_id, GFP_KERNEL);
7422 + list_add_tail(&lookup->node, &acpi_crs_lookup_list);
7423 + }
7424 + }
7425 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
7426 +index 5e7770f9a415..ff299752d5e6 100644
7427 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
7428 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
7429 +@@ -1619,6 +1619,7 @@ struct amdgpu_uvd {
7430 + struct amdgpu_bo *vcpu_bo;
7431 + void *cpu_addr;
7432 + uint64_t gpu_addr;
7433 ++ unsigned fw_version;
7434 + atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
7435 + struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
7436 + struct delayed_work idle_work;
7437 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
7438 +index 7a4b101e10c6..75cb5b9b88f8 100644
7439 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
7440 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
7441 +@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
7442 + struct drm_device *ddev = adev->ddev;
7443 + struct drm_crtc *crtc;
7444 + uint32_t line_time_us, vblank_lines;
7445 ++ struct cgs_mode_info *mode_info;
7446 +
7447 + if (info == NULL)
7448 + return -EINVAL;
7449 +
7450 ++ mode_info = info->mode_info;
7451 ++
7452 + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
7453 + list_for_each_entry(crtc,
7454 + &ddev->mode_config.crtc_list, head) {
7455 +@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
7456 + info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
7457 + info->display_count++;
7458 + }
7459 +- if (info->mode_info != NULL &&
7460 ++ if (mode_info != NULL &&
7461 + crtc->enabled && amdgpu_crtc->enabled &&
7462 + amdgpu_crtc->hw_mode.clock) {
7463 + line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
7464 +@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
7465 + vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
7466 + amdgpu_crtc->hw_mode.crtc_vdisplay +
7467 + (amdgpu_crtc->v_border * 2);
7468 +- info->mode_info->vblank_time_us = vblank_lines * line_time_us;
7469 +- info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
7470 +- info->mode_info->ref_clock = adev->clock.spll.reference_freq;
7471 +- info->mode_info++;
7472 ++ mode_info->vblank_time_us = vblank_lines * line_time_us;
7473 ++ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
7474 ++ mode_info->ref_clock = adev->clock.spll.reference_freq;
7475 ++ mode_info = NULL;
7476 + }
7477 + }
7478 + }
7479 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
7480 +index e23843f4d877..4488e82f87b0 100644
7481 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
7482 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
7483 +@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
7484 + fw_info.feature = adev->vce.fb_version;
7485 + break;
7486 + case AMDGPU_INFO_FW_UVD:
7487 +- fw_info.ver = 0;
7488 ++ fw_info.ver = adev->uvd.fw_version;
7489 + fw_info.feature = 0;
7490 + break;
7491 + case AMDGPU_INFO_FW_GMC:
7492 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
7493 +index fdc1be8550da..3b2d75d96ea0 100644
7494 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
7495 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
7496 +@@ -53,7 +53,7 @@ struct amdgpu_hpd;
7497 +
7498 + #define AMDGPU_MAX_HPD_PINS 6
7499 + #define AMDGPU_MAX_CRTCS 6
7500 +-#define AMDGPU_MAX_AFMT_BLOCKS 7
7501 ++#define AMDGPU_MAX_AFMT_BLOCKS 9
7502 +
7503 + enum amdgpu_rmx_type {
7504 + RMX_OFF,
7505 +@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
7506 + struct atom_context *atom_context;
7507 + struct card_info *atom_card_info;
7508 + bool mode_config_initialized;
7509 +- struct amdgpu_crtc *crtcs[6];
7510 +- struct amdgpu_afmt *afmt[7];
7511 ++ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
7512 ++ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
7513 + /* DVI-I properties */
7514 + struct drm_property *coherent_mode_property;
7515 + /* DAC enable load detect */
7516 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
7517 +index b8fbbd7699e4..73628c7599e7 100644
7518 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
7519 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
7520 +@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
7521 + if (!metadata_size) {
7522 + if (bo->metadata_size) {
7523 + kfree(bo->metadata);
7524 ++ bo->metadata = NULL;
7525 + bo->metadata_size = 0;
7526 + }
7527 + return 0;
7528 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
7529 +index 53f987aeeacf..3b35ad83867c 100644
7530 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
7531 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
7532 +@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
7533 + DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
7534 + version_major, version_minor, family_id);
7535 +
7536 ++ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
7537 ++ (family_id << 8));
7538 ++
7539 + bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
7540 + + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
7541 + r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
7542 +@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
7543 + memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
7544 + (adev->uvd.fw->size) - offset);
7545 +
7546 ++ cancel_delayed_work_sync(&adev->uvd.idle_work);
7547 ++
7548 + size = amdgpu_bo_size(adev->uvd.vcpu_bo);
7549 + size -= le32_to_cpu(hdr->ucode_size_bytes);
7550 + ptr = adev->uvd.cpu_addr;
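
The sw_init hunk above packs the UVD firmware version as (major << 24) | (minor << 16) | (family_id << 8), which the amdgpu_kms.c hunk earlier in this patch then reports for AMDGPU_INFO_FW_UVD instead of a hard-coded 0. A small standalone sketch of the packing and how a consumer could unpack the reported value; the sample numbers are invented:

    #include <stdio.h>

    int main(void)
    {
        /* invented firmware header fields */
        unsigned version_major = 1, version_minor = 66, family_id = 8;
        unsigned fw_version = (version_major << 24) | (version_minor << 16) |
                              (family_id << 8);

        printf("UVD fw %u.%u, family %u\n",
               (fw_version >> 24) & 0xff,   /* major */
               (fw_version >> 16) & 0xff,   /* minor */
               (fw_version >> 8) & 0xff);   /* family id */
        return 0;
    }
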
7551 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
7552 +index a745eeeb5d82..bb0da76051a1 100644
7553 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
7554 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
7555 +@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
7556 + if (i == AMDGPU_MAX_VCE_HANDLES)
7557 + return 0;
7558 +
7559 ++ cancel_delayed_work_sync(&adev->vce.idle_work);
7560 + /* TODO: suspending running encoding sessions isn't supported */
7561 + return -EINVAL;
7562 + }
7563 +diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
7564 +index bf731e9f643e..7f85c2c1d681 100644
7565 +--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
7566 ++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
7567 +@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
7568 + }
7569 + }
7570 + } else {
7571 +- for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
7572 +- for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
7573 ++ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
7574 ++ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
7575 + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
7576 + if (max_pix_clock >= pix_clock) {
7577 + *dp_lanes = lane_num;
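
The hunk above (and its radeon twin later in this patch) swaps the loop nesting so the link rate is stepped in the outer loop: the lowest rate that can carry the requested pixel clock now wins before a higher rate is tried. A self-contained sketch of the selection order, with illustrative rates and clocks rather than values from the patch:

    #include <stdio.h>

    int main(void)
    {
        static const int link_rates[] = { 162000, 270000, 540000 }; /* kHz */
        const int max_lane_num = 4, bpp = 24, pix_clock = 148500;   /* kHz */
        unsigned i;
        int lane_num;

        /* rate-outer, lanes-inner: prefer the lowest link rate */
        for (i = 0; i < sizeof(link_rates) / sizeof(link_rates[0]); i++) {
            for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
                int max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
                if (max_pix_clock >= pix_clock) {
                    printf("picked %d lanes @ %d kHz\n",
                           lane_num, link_rates[i]);
                    return 0;
                }
            }
        }
        return 1; /* no configuration found */
    }
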
7578 +diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
7579 +index 1e0bba29e167..1cd6de575305 100644
7580 +--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
7581 ++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
7582 +@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
7583 + && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
7584 + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
7585 +
7586 ++ /* vertical FP must be at least 1 */
7587 ++ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
7588 ++ adjusted_mode->crtc_vsync_start++;
7589 ++
7590 + /* get the native mode for scaling */
7591 + if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
7592 + amdgpu_panel_mode_fixup(encoder, adjusted_mode);
7593 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
7594 +index 06602df707f8..9b1c43005c80 100644
7595 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
7596 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
7597 +@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
7598 + unsigned vm_id, uint64_t pd_addr)
7599 + {
7600 + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
7601 +- uint32_t seq = ring->fence_drv.sync_seq;
7602 ++ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
7603 + uint64_t addr = ring->fence_drv.gpu_addr;
7604 +
7605 + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
7606 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
7607 +index b8060795b27b..53964b14ff48 100644
7608 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
7609 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
7610 +@@ -902,14 +902,6 @@ static int gmc_v7_0_early_init(void *handle)
7611 + gmc_v7_0_set_gart_funcs(adev);
7612 + gmc_v7_0_set_irq_funcs(adev);
7613 +
7614 +- if (adev->flags & AMD_IS_APU) {
7615 +- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
7616 +- } else {
7617 +- u32 tmp = RREG32(mmMC_SEQ_MISC0);
7618 +- tmp &= MC_SEQ_MISC0__MT__MASK;
7619 +- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
7620 +- }
7621 +-
7622 + return 0;
7623 + }
7624 +
7625 +@@ -930,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
7626 + if (r)
7627 + return r;
7628 +
7629 ++ if (adev->flags & AMD_IS_APU) {
7630 ++ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
7631 ++ } else {
7632 ++ u32 tmp = RREG32(mmMC_SEQ_MISC0);
7633 ++ tmp &= MC_SEQ_MISC0__MT__MASK;
7634 ++ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
7635 ++ }
7636 ++
7637 + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
7638 + if (r)
7639 + return r;
7640 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
7641 +index 3efd45546241..e59251f4a85d 100644
7642 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
7643 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
7644 +@@ -856,14 +856,6 @@ static int gmc_v8_0_early_init(void *handle)
7645 + gmc_v8_0_set_gart_funcs(adev);
7646 + gmc_v8_0_set_irq_funcs(adev);
7647 +
7648 +- if (adev->flags & AMD_IS_APU) {
7649 +- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
7650 +- } else {
7651 +- u32 tmp = RREG32(mmMC_SEQ_MISC0);
7652 +- tmp &= MC_SEQ_MISC0__MT__MASK;
7653 +- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
7654 +- }
7655 +-
7656 + return 0;
7657 + }
7658 +
7659 +@@ -874,6 +866,8 @@ static int gmc_v8_0_late_init(void *handle)
7660 + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
7661 + }
7662 +
7663 ++#define mmMC_SEQ_MISC0_FIJI 0xA71
7664 ++
7665 + static int gmc_v8_0_sw_init(void *handle)
7666 + {
7667 + int r;
7668 +@@ -884,6 +878,19 @@ static int gmc_v8_0_sw_init(void *handle)
7669 + if (r)
7670 + return r;
7671 +
7672 ++ if (adev->flags & AMD_IS_APU) {
7673 ++ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
7674 ++ } else {
7675 ++ u32 tmp;
7676 ++
7677 ++ if (adev->asic_type == CHIP_FIJI)
7678 ++ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
7679 ++ else
7680 ++ tmp = RREG32(mmMC_SEQ_MISC0);
7681 ++ tmp &= MC_SEQ_MISC0__MT__MASK;
7682 ++ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
7683 ++ }
7684 ++
7685 + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
7686 + if (r)
7687 + return r;
7688 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
7689 +index 2cf50180cc51..b1c7a9b3631b 100644
7690 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
7691 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
7692 +@@ -32,8 +32,8 @@
7693 + #include "oss/oss_2_4_d.h"
7694 + #include "oss/oss_2_4_sh_mask.h"
7695 +
7696 +-#include "gmc/gmc_8_1_d.h"
7697 +-#include "gmc/gmc_8_1_sh_mask.h"
7698 ++#include "gmc/gmc_7_1_d.h"
7699 ++#include "gmc/gmc_7_1_sh_mask.h"
7700 +
7701 + #include "gca/gfx_8_0_d.h"
7702 + #include "gca/gfx_8_0_enum.h"
7703 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
7704 +index c34c393e9aea..d5e19b5fbbfb 100644
7705 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
7706 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
7707 +@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
7708 + union SQ_CMD_BITS *in_reg_sq_cmd,
7709 + union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
7710 + {
7711 +- int status;
7712 ++ int status = 0;
7713 + union SQ_CMD_BITS reg_sq_cmd;
7714 + union GRBM_GFX_INDEX_BITS reg_gfx_index;
7715 + struct HsaDbgWaveMsgAMDGen2 *pMsg;
7716 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
7717 +index cf01177ca3b5..2ea012e88991 100644
7718 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
7719 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
7720 +@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
7721 + phm_cap_set(hwmgr->platform_descriptor.platformCaps,
7722 + PHM_PlatformCaps_DynamicUVDState);
7723 +
7724 ++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
7725 ++ PHM_PlatformCaps_UVDDPM);
7726 ++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
7727 ++ PHM_PlatformCaps_VCEDPM);
7728 ++
7729 + cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
7730 + cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
7731 + cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
7732 +diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
7733 +index 9535c5b60387..7e5a97204051 100644
7734 +--- a/drivers/gpu/drm/drm_dp_helper.c
7735 ++++ b/drivers/gpu/drm/drm_dp_helper.c
7736 +@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
7737 + {
7738 + struct drm_dp_aux_msg msg;
7739 + unsigned int retry;
7740 +- int err;
7741 ++ int err = 0;
7742 +
7743 + memset(&msg, 0, sizeof(msg));
7744 + msg.address = offset;
7745 +@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
7746 + msg.buffer = buffer;
7747 + msg.size = size;
7748 +
7749 ++ mutex_lock(&aux->hw_mutex);
7750 ++
7751 + /*
7752 + * The specification doesn't give any recommendation on how often to
7753 + * retry native transactions. We used to retry 7 times like for
7754 +@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
7755 + */
7756 + for (retry = 0; retry < 32; retry++) {
7757 +
7758 +- mutex_lock(&aux->hw_mutex);
7759 + err = aux->transfer(aux, &msg);
7760 +- mutex_unlock(&aux->hw_mutex);
7761 + if (err < 0) {
7762 + if (err == -EBUSY)
7763 + continue;
7764 +
7765 +- return err;
7766 ++ goto unlock;
7767 + }
7768 +
7769 +
7770 + switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
7771 + case DP_AUX_NATIVE_REPLY_ACK:
7772 + if (err < size)
7773 +- return -EPROTO;
7774 +- return err;
7775 ++ err = -EPROTO;
7776 ++ goto unlock;
7777 +
7778 + case DP_AUX_NATIVE_REPLY_NACK:
7779 +- return -EIO;
7780 ++ err = -EIO;
7781 ++ goto unlock;
7782 +
7783 + case DP_AUX_NATIVE_REPLY_DEFER:
7784 + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
7785 +@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
7786 + }
7787 +
7788 + DRM_DEBUG_KMS("too many retries, giving up\n");
7789 +- return -EIO;
7790 ++ err = -EIO;
7791 ++
7792 ++unlock:
7793 ++ mutex_unlock(&aux->hw_mutex);
7794 ++ return err;
7795 + }
7796 +
7797 + /**
7798 +@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
7799 + int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
7800 +
7801 + for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
7802 +- mutex_lock(&aux->hw_mutex);
7803 + ret = aux->transfer(aux, msg);
7804 +- mutex_unlock(&aux->hw_mutex);
7805 + if (ret < 0) {
7806 + if (ret == -EBUSY)
7807 + continue;
7808 +@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
7809 +
7810 + memset(&msg, 0, sizeof(msg));
7811 +
7812 ++ mutex_lock(&aux->hw_mutex);
7813 ++
7814 + for (i = 0; i < num; i++) {
7815 + msg.address = msgs[i].addr;
7816 + drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
7817 +@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
7818 + msg.size = 0;
7819 + (void)drm_dp_i2c_do_msg(aux, &msg);
7820 +
7821 ++ mutex_unlock(&aux->hw_mutex);
7822 ++
7823 + return err;
7824 + }
7825 +
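
The drm_dp_helper.c hunks above hoist aux->hw_mutex out of the per-attempt path so one logical transaction holds the lock across all of its retries, with every exit funneled through a single unlock label. A minimal pthreads sketch of that shape; the function names and retry policy here are illustrative, not the DRM code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int flaky_transfer(void *msg)
    {
        static int calls;
        (void)msg;
        return ++calls < 3 ? -1 : 0; /* succeed on the third attempt */
    }

    static int do_transfer_locked(int (*transfer)(void *), void *msg,
                                  int retries)
    {
        int err = -1;
        int retry;

        pthread_mutex_lock(&hw_mutex);
        for (retry = 0; retry < retries; retry++) {
            err = transfer(msg);
            if (err >= 0)
                goto unlock; /* success: leave through the single unlock */
            /* transient failure: retry while still holding the lock */
        }
    unlock:
        pthread_mutex_unlock(&hw_mutex);
        return err;
    }

    int main(void)
    {
        printf("err = %d\n", do_transfer_locked(flaky_transfer, NULL, 32));
        return 0;
    }
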
7826 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
7827 +index 27fbd79d0daf..71ea0521ea96 100644
7828 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
7829 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
7830 +@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
7831 + u8 sinks[DRM_DP_MAX_SDP_STREAMS];
7832 + int i;
7833 +
7834 ++ port = drm_dp_get_validated_port_ref(mgr, port);
7835 ++ if (!port)
7836 ++ return -EINVAL;
7837 ++
7838 + port_num = port->port_num;
7839 + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
7840 + if (!mstb) {
7841 + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
7842 +
7843 +- if (!mstb)
7844 ++ if (!mstb) {
7845 ++ drm_dp_put_port(port);
7846 + return -EINVAL;
7847 ++ }
7848 + }
7849 +
7850 + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
7851 +@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
7852 + kfree(txmsg);
7853 + fail_put:
7854 + drm_dp_put_mst_branch_device(mstb);
7855 ++ drm_dp_put_port(port);
7856 + return ret;
7857 + }
7858 +
7859 +@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
7860 + req_payload.start_slot = cur_slots;
7861 + if (mgr->proposed_vcpis[i]) {
7862 + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
7863 ++ port = drm_dp_get_validated_port_ref(mgr, port);
7864 ++ if (!port) {
7865 ++ mutex_unlock(&mgr->payload_lock);
7866 ++ return -EINVAL;
7867 ++ }
7868 + req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
7869 + req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
7870 + } else {
7871 +@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
7872 + mgr->payloads[i].payload_state = req_payload.payload_state;
7873 + }
7874 + cur_slots += req_payload.num_slots;
7875 ++
7876 ++ if (port)
7877 ++ drm_dp_put_port(port);
7878 + }
7879 +
7880 + for (i = 0; i < mgr->max_payloads; i++) {
7881 +@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
7882 +
7883 + if (mgr->mst_primary) {
7884 + int sret;
7885 ++ u8 guid[16];
7886 ++
7887 + sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
7888 + if (sret != DP_RECEIVER_CAP_SIZE) {
7889 + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
7890 +@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
7891 + ret = -1;
7892 + goto out_unlock;
7893 + }
7894 ++
7895 ++ /* Some hubs forget their guids after they resume */
7896 ++ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
7897 ++ if (sret != 16) {
7898 ++ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
7899 ++ ret = -1;
7900 ++ goto out_unlock;
7901 ++ }
7902 ++ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
7903 ++
7904 + ret = 0;
7905 + } else
7906 + ret = -1;
7907 +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
7908 +index cf39ed3133d6..4d0bc2a8843c 100644
7909 +--- a/drivers/gpu/drm/i915/i915_debugfs.c
7910 ++++ b/drivers/gpu/drm/i915/i915_debugfs.c
7911 +@@ -2860,20 +2860,6 @@ static void intel_dp_info(struct seq_file *m,
7912 + intel_panel_info(m, &intel_connector->panel);
7913 + }
7914 +
7915 +-static void intel_dp_mst_info(struct seq_file *m,
7916 +- struct intel_connector *intel_connector)
7917 +-{
7918 +- struct intel_encoder *intel_encoder = intel_connector->encoder;
7919 +- struct intel_dp_mst_encoder *intel_mst =
7920 +- enc_to_mst(&intel_encoder->base);
7921 +- struct intel_digital_port *intel_dig_port = intel_mst->primary;
7922 +- struct intel_dp *intel_dp = &intel_dig_port->dp;
7923 +- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
7924 +- intel_connector->port);
7925 +-
7926 +- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
7927 +-}
7928 +-
7929 + static void intel_hdmi_info(struct seq_file *m,
7930 + struct intel_connector *intel_connector)
7931 + {
7932 +@@ -2917,8 +2903,6 @@ static void intel_connector_info(struct seq_file *m,
7933 + intel_hdmi_info(m, intel_connector);
7934 + else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
7935 + intel_lvds_info(m, intel_connector);
7936 +- else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
7937 +- intel_dp_mst_info(m, intel_connector);
7938 + }
7939 +
7940 + seq_printf(m, "\tmodes:\n");
7941 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
7942 +index f357058c74d9..2e832fa07e09 100644
7943 +--- a/drivers/gpu/drm/i915/i915_drv.c
7944 ++++ b/drivers/gpu/drm/i915/i915_drv.c
7945 +@@ -797,7 +797,7 @@ static int i915_drm_resume(struct drm_device *dev)
7946 + static int i915_drm_resume_early(struct drm_device *dev)
7947 + {
7948 + struct drm_i915_private *dev_priv = dev->dev_private;
7949 +- int ret = 0;
7950 ++ int ret;
7951 +
7952 + /*
7953 + * We have a resume ordering issue with the snd-hda driver also
7954 +@@ -808,6 +808,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
7955 + * FIXME: This should be solved with a special hdmi sink device or
7956 + * similar so that power domains can be employed.
7957 + */
7958 ++
7959 ++ /*
7960 ++ * Note that we need to set the power state explicitly, since we
7961 ++ * powered off the device during freeze and the PCI core won't power
7962 ++ * it back up for us during thaw. Powering off the device during
7963 ++ * freeze is not a hard requirement though, and during the
7964 ++ * suspend/resume phases the PCI core makes sure we get here with the
7965 ++ * device powered on. So in case we change our freeze logic and keep
7966 ++ * the device powered we can also remove the following set power state
7967 ++ * call.
7968 ++ */
7969 ++ ret = pci_set_power_state(dev->pdev, PCI_D0);
7970 ++ if (ret) {
7971 ++ DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
7972 ++ goto out;
7973 ++ }
7974 ++
7975 ++ /*
7976 ++ * Note that pci_enable_device() first enables any parent bridge
7977 ++ * device and only then sets the power state for this device. The
7978 ++ * bridge enabling is a nop though, since bridge devices are resumed
7979 ++ * first. The order of enabling power and enabling the device is
7980 ++ * imposed by the PCI core as described above, so here we preserve the
7981 ++ * same order for the freeze/thaw phases.
7982 ++ *
7983 ++ * TODO: eventually we should remove pci_disable_device() /
7984 ++ * pci_enable_device() from suspend/resume. Due to how they
7985 ++ * depend on the device enable refcount we can't anyway depend on them
7986 ++ * disabling/enabling the device.
7987 ++ */
7988 + if (pci_enable_device(dev->pdev)) {
7989 + ret = -EIO;
7990 + goto out;
7991 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
7992 +index 4897728713f6..5672b57e65d5 100644
7993 +--- a/drivers/gpu/drm/i915/i915_reg.h
7994 ++++ b/drivers/gpu/drm/i915/i915_reg.h
7995 +@@ -2898,7 +2898,14 @@ enum skl_disp_power_wells {
7996 + #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
7997 + #define BXT_RP_STATE_CAP _MMIO(0x138170)
7998 +
7999 +-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
8000 ++/*
8001 ++ * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
8002 ++ * 8300) freezing up around GPU hangs. Looks as if even
8003 ++ * scheduling/timer interrupts start misbehaving if the RPS
8004 ++ * EI/thresholds are "bad", leading to a very sluggish or even
8005 ++ * frozen machine.
8006 ++ */
8007 ++#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
8008 + #define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
8009 + #define INTERVAL_0_833_US(us) (((us) * 6) / 5)
8010 + #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
8011 +@@ -7405,6 +7412,8 @@ enum skl_disp_power_wells {
8012 + #define TRANS_CLK_SEL_DISABLED (0x0<<29)
8013 + #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
8014 +
8015 ++#define CDCLK_FREQ _MMIO(0x46200)
8016 ++
8017 + #define _TRANSA_MSA_MISC 0x60410
8018 + #define _TRANSB_MSA_MISC 0x61410
8019 + #define _TRANSC_MSA_MISC 0x62410
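
For the INTERVAL_1_28_US change above, a quick worked example: the macro scales microseconds into 1.28 us units ((us * 100) >> 7) and now rounds the result up to a multiple of 25. The sketch below reimplements roundup() for positive integers just to show the arithmetic; it is not the kernel helper itself:

    #include <stdio.h>

    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))
    #define INTERVAL_1_28_US(us) ROUNDUP(((us) * 100) >> 7, 25)

    int main(void)
    {
        /* 1000 us: (100000 >> 7) = 781 units of 1.28 us, rounded up to 800 */
        printf("%d\n", INTERVAL_1_28_US(1000));
        return 0;
    }
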
8020 +diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
8021 +index 30f921421b0c..7d281b40064a 100644
8022 +--- a/drivers/gpu/drm/i915/intel_audio.c
8023 ++++ b/drivers/gpu/drm/i915/intel_audio.c
8024 +@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
8025 + tmp |= AUD_CONFIG_N_PROG_ENABLE;
8026 + tmp &= ~AUD_CONFIG_UPPER_N_MASK;
8027 + tmp &= ~AUD_CONFIG_LOWER_N_MASK;
8028 +- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
8029 +- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
8030 ++ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
8031 + tmp |= AUD_CONFIG_N_VALUE_INDEX;
8032 + I915_WRITE(HSW_AUD_CFG(pipe), tmp);
8033 +
8034 +@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
8035 + tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
8036 + tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
8037 + tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
8038 +- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
8039 +- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
8040 ++ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
8041 + tmp |= AUD_CONFIG_N_VALUE_INDEX;
8042 + else
8043 + tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
8044 +@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
8045 +
8046 + /* ELD Conn_Type */
8047 + connector->eld[5] &= ~(3 << 2);
8048 +- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
8049 +- intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
8050 ++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
8051 + connector->eld[5] |= (1 << 2);
8052 +
8053 + connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
8054 +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
8055 +index a7b4a524fadd..dbacc2901d47 100644
8056 +--- a/drivers/gpu/drm/i915/intel_crt.c
8057 ++++ b/drivers/gpu/drm/i915/intel_crt.c
8058 +@@ -255,8 +255,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
8059 + pipe_config->has_pch_encoder = true;
8060 +
8061 + /* LPT FDI RX only supports 8bpc. */
8062 +- if (HAS_PCH_LPT(dev))
8063 ++ if (HAS_PCH_LPT(dev)) {
8064 ++ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
8065 ++ DRM_DEBUG_KMS("LPT only supports 24bpp\n");
8066 ++ return false;
8067 ++ }
8068 ++
8069 + pipe_config->pipe_bpp = 24;
8070 ++ }
8071 +
8072 + /* FDI must always be 2.7 GHz */
8073 + if (HAS_DDI(dev)) {
8074 +diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
8075 +index 647d85e77c2f..597cfb5ca847 100644
8076 +--- a/drivers/gpu/drm/i915/intel_csr.c
8077 ++++ b/drivers/gpu/drm/i915/intel_csr.c
8078 +@@ -177,7 +177,8 @@ static const struct stepping_info kbl_stepping_info[] = {
8079 + static const struct stepping_info skl_stepping_info[] = {
8080 + {'A', '0'}, {'B', '0'}, {'C', '0'},
8081 + {'D', '0'}, {'E', '0'}, {'F', '0'},
8082 +- {'G', '0'}, {'H', '0'}, {'I', '0'}
8083 ++ {'G', '0'}, {'H', '0'}, {'I', '0'},
8084 ++ {'J', '0'}, {'K', '0'}
8085 + };
8086 +
8087 + static const struct stepping_info bxt_stepping_info[] = {
8088 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
8089 +index 084d5586585d..40511493914c 100644
8090 +--- a/drivers/gpu/drm/i915/intel_ddi.c
8091 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
8092 +@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
8093 + } else if (IS_BROADWELL(dev)) {
8094 + ddi_translations_fdi = bdw_ddi_translations_fdi;
8095 + ddi_translations_dp = bdw_ddi_translations_dp;
8096 +- ddi_translations_edp = bdw_ddi_translations_edp;
8097 ++
8098 ++ if (dev_priv->edp_low_vswing) {
8099 ++ ddi_translations_edp = bdw_ddi_translations_edp;
8100 ++ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
8101 ++ } else {
8102 ++ ddi_translations_edp = bdw_ddi_translations_dp;
8103 ++ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
8104 ++ }
8105 ++
8106 + ddi_translations_hdmi = bdw_ddi_translations_hdmi;
8107 +- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
8108 ++
8109 + n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
8110 + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
8111 + hdmi_default_entry = 7;
8112 +@@ -3157,23 +3165,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
8113 + I915_WRITE(FDI_RX_CTL(PIPE_A), val);
8114 + }
8115 +
8116 +-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
8117 +- struct intel_crtc *intel_crtc)
8118 +-{
8119 +- u32 temp;
8120 +-
8121 +- if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
8122 +- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
8123 +-
8124 +- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
8125 +-
8126 +- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
8127 +- return true;
8128 +- }
8129 +-
8130 +- return false;
8131 +-}
8132 +-
8133 + void intel_ddi_get_config(struct intel_encoder *encoder,
8134 + struct intel_crtc_state *pipe_config)
8135 + {
8136 +@@ -3234,8 +3225,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
8137 + break;
8138 + }
8139 +
8140 +- pipe_config->has_audio =
8141 +- intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
8142 ++ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
8143 ++ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
8144 ++ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
8145 ++ pipe_config->has_audio = true;
8146 ++ }
8147 +
8148 + if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
8149 + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
8150 +@@ -3260,12 +3254,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
8151 + intel_ddi_clock_get(encoder, pipe_config);
8152 + }
8153 +
8154 +-static void intel_ddi_destroy(struct drm_encoder *encoder)
8155 +-{
8156 +- /* HDMI has nothing special to destroy, so we can go with this. */
8157 +- intel_dp_encoder_destroy(encoder);
8158 +-}
8159 +-
8160 + static bool intel_ddi_compute_config(struct intel_encoder *encoder,
8161 + struct intel_crtc_state *pipe_config)
8162 + {
8163 +@@ -3284,7 +3272,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
8164 + }
8165 +
8166 + static const struct drm_encoder_funcs intel_ddi_funcs = {
8167 +- .destroy = intel_ddi_destroy,
8168 ++ .reset = intel_dp_encoder_reset,
8169 ++ .destroy = intel_dp_encoder_destroy,
8170 + };
8171 +
8172 + static struct intel_connector *
8173 +@@ -3356,6 +3345,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
8174 + intel_encoder->post_disable = intel_ddi_post_disable;
8175 + intel_encoder->get_hw_state = intel_ddi_get_hw_state;
8176 + intel_encoder->get_config = intel_ddi_get_config;
8177 ++ intel_encoder->suspend = intel_dp_encoder_suspend;
8178 +
8179 + intel_dig_port->port = port;
8180 + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
8181 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
8182 +index 46947fffd599..39b00b9daf2d 100644
8183 +--- a/drivers/gpu/drm/i915/intel_display.c
8184 ++++ b/drivers/gpu/drm/i915/intel_display.c
8185 +@@ -4455,7 +4455,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
8186 + intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
8187 +
8188 + return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
8189 +- &state->scaler_state.scaler_id, DRM_ROTATE_0,
8190 ++ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
8191 + state->pipe_src_w, state->pipe_src_h,
8192 + adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
8193 + }
8194 +@@ -9793,6 +9793,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
8195 + sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
8196 + mutex_unlock(&dev_priv->rps.hw_lock);
8197 +
8198 ++ I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
8199 ++
8200 + intel_update_cdclk(dev);
8201 +
8202 + WARN(cdclk != dev_priv->cdclk_freq,
8203 +@@ -13429,6 +13431,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
8204 + }
8205 +
8206 + for_each_crtc_in_state(state, crtc, crtc_state, i) {
8207 ++ if (state->legacy_cursor_update)
8208 ++ continue;
8209 ++
8210 + ret = intel_crtc_wait_for_pending_flips(crtc);
8211 + if (ret)
8212 + return ret;
8213 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
8214 +index cdc2c15873dc..3cd4996c791c 100644
8215 +--- a/drivers/gpu/drm/i915/intel_dp.c
8216 ++++ b/drivers/gpu/drm/i915/intel_dp.c
8217 +@@ -4905,7 +4905,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
8218 + kfree(intel_dig_port);
8219 + }
8220 +
8221 +-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
8222 ++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
8223 + {
8224 + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
8225 +
8226 +@@ -4947,7 +4947,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
8227 + edp_panel_vdd_schedule_off(intel_dp);
8228 + }
8229 +
8230 +-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
8231 ++void intel_dp_encoder_reset(struct drm_encoder *encoder)
8232 + {
8233 + struct intel_dp *intel_dp;
8234 +
8235 +diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
8236 +index fa0dabf578dc..e8e6984b8053 100644
8237 +--- a/drivers/gpu/drm/i915/intel_dp_mst.c
8238 ++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
8239 +@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
8240 + return false;
8241 + }
8242 +
8243 +- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
8244 +- pipe_config->has_audio = true;
8245 + mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
8246 +
8247 + pipe_config->pbn = mst_pbn;
8248 +@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
8249 + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
8250 + struct intel_digital_port *intel_dig_port = intel_mst->primary;
8251 + struct intel_dp *intel_dp = &intel_dig_port->dp;
8252 +- struct drm_device *dev = encoder->base.dev;
8253 +- struct drm_i915_private *dev_priv = dev->dev_private;
8254 +- struct drm_crtc *crtc = encoder->base.crtc;
8255 +- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8256 +-
8257 + int ret;
8258 +
8259 + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
8260 +@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
8261 + if (ret) {
8262 + DRM_ERROR("failed to update payload %d\n", ret);
8263 + }
8264 +- if (intel_crtc->config->has_audio) {
8265 +- intel_audio_codec_disable(encoder);
8266 +- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
8267 +- }
8268 + }
8269 +
8270 + static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
8271 +@@ -184,7 +173,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
8272 + intel_mst->port = found->port;
8273 +
8274 + if (intel_dp->active_mst_links == 0) {
8275 +- intel_ddi_clk_select(encoder, intel_crtc->config);
8276 ++ intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
8277 +
8278 + intel_dp_set_link_params(intel_dp, intel_crtc->config);
8279 +
8280 +@@ -219,7 +208,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
8281 + struct intel_dp *intel_dp = &intel_dig_port->dp;
8282 + struct drm_device *dev = intel_dig_port->base.base.dev;
8283 + struct drm_i915_private *dev_priv = dev->dev_private;
8284 +- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
8285 + enum port port = intel_dig_port->port;
8286 + int ret;
8287 +
8288 +@@ -232,13 +220,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
8289 + ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
8290 +
8291 + ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
8292 +-
8293 +- if (crtc->config->has_audio) {
8294 +- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
8295 +- pipe_name(crtc->pipe));
8296 +- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
8297 +- intel_audio_codec_enable(encoder);
8298 +- }
8299 + }
8300 +
8301 + static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
8302 +@@ -264,9 +245,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
8303 +
8304 + pipe_config->has_dp_encoder = true;
8305 +
8306 +- pipe_config->has_audio =
8307 +- intel_ddi_is_audio_enabled(dev_priv, crtc);
8308 +-
8309 + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
8310 + if (temp & TRANS_DDI_PHSYNC)
8311 + flags |= DRM_MODE_FLAG_PHSYNC;
8312 +@@ -499,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
8313 + struct intel_connector *intel_connector = to_intel_connector(connector);
8314 + struct drm_device *dev = connector->dev;
8315 +
8316 ++ intel_connector->unregister(intel_connector);
8317 ++
8318 + /* need to nuke the connector */
8319 + drm_modeset_lock_all(dev);
8320 + if (connector->state->crtc) {
8321 +@@ -512,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
8322 +
8323 + WARN(ret, "Disabling mst crtc failed with %i\n", ret);
8324 + }
8325 +- drm_modeset_unlock_all(dev);
8326 +-
8327 +- intel_connector->unregister(intel_connector);
8328 +
8329 +- drm_modeset_lock_all(dev);
8330 + intel_connector_remove_from_fbdev(intel_connector);
8331 + drm_connector_cleanup(connector);
8332 + drm_modeset_unlock_all(dev);
8333 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
8334 +index df7f3cb66056..1ae61f488987 100644
8335 +--- a/drivers/gpu/drm/i915/intel_drv.h
8336 ++++ b/drivers/gpu/drm/i915/intel_drv.h
8337 +@@ -1013,8 +1013,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
8338 + void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
8339 + bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
8340 + void intel_ddi_fdi_disable(struct drm_crtc *crtc);
8341 +-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
8342 +- struct intel_crtc *intel_crtc);
8343 + void intel_ddi_get_config(struct intel_encoder *encoder,
8344 + struct intel_crtc_state *pipe_config);
8345 + struct intel_encoder *
8346 +@@ -1234,6 +1232,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
8347 + void intel_dp_start_link_train(struct intel_dp *intel_dp);
8348 + void intel_dp_stop_link_train(struct intel_dp *intel_dp);
8349 + void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
8350 ++void intel_dp_encoder_reset(struct drm_encoder *encoder);
8351 ++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
8352 + void intel_dp_encoder_destroy(struct drm_encoder *encoder);
8353 + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
8354 + bool intel_dp_compute_config(struct intel_encoder *encoder,
8355 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
8356 +index 616108c4bc3e..43fdae8ff3c3 100644
8357 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
8358 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
8359 +@@ -1407,8 +1407,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
8360 + hdmi_to_dig_port(intel_hdmi));
8361 + }
8362 +
8363 +- if (!live_status)
8364 +- DRM_DEBUG_KMS("Live status not up!");
8365 ++ if (!live_status) {
8366 ++ DRM_DEBUG_KMS("HDMI live status down\n");
8367 ++ /*
8368 ++ * Live status register is not reliable on all intel platforms.
8369 ++ * So consider live_status only on certain platforms; for
8370 ++ * the others, read the EDID to determine the presence of a sink.
8371 ++ */
8372 ++ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
8373 ++ live_status = true;
8374 ++ }
8375 +
8376 + intel_hdmi_unset_edid(connector);
8377 +
8378 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
8379 +index f1fa756c5d5d..cfd5f9fff2f4 100644
8380 +--- a/drivers/gpu/drm/i915/intel_lrc.c
8381 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
8382 +@@ -781,11 +781,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
8383 + if (unlikely(total_bytes > remain_usable)) {
8384 + /*
8385 + * The base request will fit but the reserved space
8386 +- * falls off the end. So only need to to wait for the
8387 +- * reserved size after flushing out the remainder.
8388 ++ * falls off the end. So no immediate wrap is needed;
8389 ++ * we only need to wait for the reserved amount of space
8390 ++ * from the start of the ringbuffer.
8391 + */
8392 + wait_bytes = remain_actual + ringbuf->reserved_size;
8393 +- need_wrap = true;
8394 + } else if (total_bytes > ringbuf->space) {
8395 + /* No wrapping required, just waiting. */
8396 + wait_bytes = total_bytes;
8397 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
8398 +index b28c29f20e75..0eae3994e5e3 100644
8399 +--- a/drivers/gpu/drm/i915/intel_pm.c
8400 ++++ b/drivers/gpu/drm/i915/intel_pm.c
8401 +@@ -2281,6 +2281,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
8402 + return PTR_ERR(cstate);
8403 +
8404 + pipe_wm = &cstate->wm.optimal.ilk;
8405 ++ memset(pipe_wm, 0, sizeof(*pipe_wm));
8406 +
8407 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
8408 + ps = drm_atomic_get_plane_state(state,
8409 +@@ -3606,23 +3607,43 @@ static void skl_update_wm(struct drm_crtc *crtc)
8410 + dev_priv->wm.skl_hw = *results;
8411 + }
8412 +
8413 ++static void ilk_compute_wm_config(struct drm_device *dev,
8414 ++ struct intel_wm_config *config)
8415 ++{
8416 ++ struct intel_crtc *crtc;
8417 ++
8418 ++ /* Compute the currently _active_ config */
8419 ++ for_each_intel_crtc(dev, crtc) {
8420 ++ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
8421 ++
8422 ++ if (!wm->pipe_enabled)
8423 ++ continue;
8424 ++
8425 ++ config->sprites_enabled |= wm->sprites_enabled;
8426 ++ config->sprites_scaled |= wm->sprites_scaled;
8427 ++ config->num_pipes_active++;
8428 ++ }
8429 ++}
8430 ++
8431 + static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
8432 + {
8433 + struct drm_device *dev = dev_priv->dev;
8434 + struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
8435 + struct ilk_wm_maximums max;
8436 +- struct intel_wm_config *config = &dev_priv->wm.config;
8437 ++ struct intel_wm_config config = {};
8438 + struct ilk_wm_values results = {};
8439 + enum intel_ddb_partitioning partitioning;
8440 +
8441 +- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
8442 +- ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
8443 ++ ilk_compute_wm_config(dev, &config);
8444 ++
8445 ++ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
8446 ++ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
8447 +
8448 + /* 5/6 split only in single pipe config on IVB+ */
8449 + if (INTEL_INFO(dev)->gen >= 7 &&
8450 +- config->num_pipes_active == 1 && config->sprites_enabled) {
8451 +- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
8452 +- ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
8453 ++ config.num_pipes_active == 1 && config.sprites_enabled) {
8454 ++ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
8455 ++ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
8456 +
8457 + best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
8458 + } else {
8459 +@@ -6544,6 +6565,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
8460 + misccpctl = I915_READ(GEN7_MISCCPCTL);
8461 + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
8462 + I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
8463 ++ /*
8464 ++ * Wait at least 100 clocks before re-enabling clock gating. See
8465 ++ * the definition of L3SQCREG1 in BSpec.
8466 ++ */
8467 ++ POSTING_READ(GEN8_L3SQCREG1);
8468 ++ udelay(1);
8469 + I915_WRITE(GEN7_MISCCPCTL, misccpctl);
8470 +
8471 + /*
8472 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
8473 +index 40c6aff57256..549afa7bc75f 100644
8474 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
8475 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
8476 +@@ -951,7 +951,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
8477 +
8478 + /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
8479 + tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
8480 +- if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
8481 ++ if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
8482 + IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
8483 + tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
8484 + WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
8485 +@@ -1044,7 +1044,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
8486 + WA_SET_BIT_MASKED(HIZ_CHICKEN,
8487 + BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
8488 +
8489 +- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
8490 ++ /* This is tied to WaForceContextSaveRestoreNonCoherent */
8491 ++ if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
8492 + /*
8493 + * Use Force Non-Coherent whenever executing a 3D context. This
8494 + * is a workaround for a possible hang in the unlikely event
8495 +@@ -1901,6 +1902,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
8496 + return 0;
8497 + }
8498 +
8499 ++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
8500 ++{
8501 ++ struct drm_i915_private *dev_priv = to_i915(ring->dev);
8502 ++
8503 ++ if (!dev_priv->status_page_dmah)
8504 ++ return;
8505 ++
8506 ++ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
8507 ++ ring->status_page.page_addr = NULL;
8508 ++}
8509 ++
8510 + static void cleanup_status_page(struct intel_engine_cs *ring)
8511 + {
8512 + struct drm_i915_gem_object *obj;
8513 +@@ -1917,9 +1929,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
8514 +
8515 + static int init_status_page(struct intel_engine_cs *ring)
8516 + {
8517 +- struct drm_i915_gem_object *obj;
8518 ++ struct drm_i915_gem_object *obj = ring->status_page.obj;
8519 +
8520 +- if ((obj = ring->status_page.obj) == NULL) {
8521 ++ if (obj == NULL) {
8522 + unsigned flags;
8523 + int ret;
8524 +
8525 +@@ -2019,10 +2031,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
8526 + {
8527 + struct drm_i915_private *dev_priv = to_i915(dev);
8528 + struct drm_i915_gem_object *obj = ringbuf->obj;
8529 ++ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
8530 ++ unsigned flags = PIN_OFFSET_BIAS | 4096;
8531 + int ret;
8532 +
8533 + if (HAS_LLC(dev_priv) && !obj->stolen) {
8534 +- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
8535 ++ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
8536 + if (ret)
8537 + return ret;
8538 +
8539 +@@ -2038,7 +2052,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
8540 + return -ENOMEM;
8541 + }
8542 + } else {
8543 +- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
8544 ++ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
8545 ++ flags | PIN_MAPPABLE);
8546 + if (ret)
8547 + return ret;
8548 +
8549 +@@ -2164,7 +2179,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
8550 + if (ret)
8551 + goto error;
8552 + } else {
8553 +- BUG_ON(ring->id != RCS);
8554 ++ WARN_ON(ring->id != RCS);
8555 + ret = init_phys_status_page(ring);
8556 + if (ret)
8557 + goto error;
8558 +@@ -2210,7 +2225,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
8559 + if (ring->cleanup)
8560 + ring->cleanup(ring);
8561 +
8562 +- cleanup_status_page(ring);
8563 ++ if (I915_NEED_GFX_HWS(ring->dev)) {
8564 ++ cleanup_status_page(ring);
8565 ++ } else {
8566 ++ WARN_ON(ring->id != RCS);
8567 ++ cleanup_phys_status_page(ring);
8568 ++ }
8569 +
8570 + i915_cmd_parser_fini_ring(ring);
8571 + i915_gem_batch_pool_fini(&ring->batch_pool);
8572 +@@ -2373,11 +2393,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
8573 + if (unlikely(total_bytes > remain_usable)) {
8574 + /*
8575 + * The base request will fit but the reserved space
8576 +- * falls off the end. So only need to to wait for the
8577 +- * reserved size after flushing out the remainder.
8578 ++ * falls off the end. So no immediate wrap is needed;
8579 ++ * we only need to wait for the reserved amount of space
8580 ++ * from the start of the ringbuffer.
8581 + */
8582 + wait_bytes = remain_actual + ringbuf->reserved_size;
8583 +- need_wrap = true;
8584 + } else if (total_bytes > ringbuf->space) {
8585 + /* No wrapping required, just waiting. */
8586 + wait_bytes = total_bytes;
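
Both ring-preparation hunks above (intel_lrc.c and intel_ringbuffer.c) drop the forced wrap in the case where the base request fits but the reserved tail falls off the end. A simplified sketch of just that decision, treating the precomputed sizes as plain inputs; this illustrates only the touched branch, not the full kernel function:

    #include <stdio.h>

    /* When the reserved tail falls off the end, wait for the remainder
     * plus the reserved size instead of forcing an immediate wrap. */
    static int wait_bytes_for(int total_bytes, int remain_usable,
                              int remain_actual, int reserved_size, int space)
    {
        if (total_bytes > remain_usable)
            return remain_actual + reserved_size; /* no immediate wrap */
        if (total_bytes > space)
            return total_bytes; /* no wrapping required, just waiting */
        return 0; /* enough space already */
    }

    int main(void)
    {
        /* illustrative sizes: request fits, reserved tail does not */
        printf("%d\n", wait_bytes_for(400, 300, 596, 256, 1000)); /* 852 */
        return 0;
    }
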
8587 +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
8588 +index 277e60ae0e47..08961f7d151c 100644
8589 +--- a/drivers/gpu/drm/i915/intel_uncore.c
8590 ++++ b/drivers/gpu/drm/i915/intel_uncore.c
8591 +@@ -1155,7 +1155,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
8592 + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
8593 + dev_priv->uncore.funcs.force_wake_get =
8594 + fw_domains_get_with_thread_status;
8595 +- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
8596 ++ if (IS_HASWELL(dev))
8597 ++ dev_priv->uncore.funcs.force_wake_put =
8598 ++ fw_domains_put_with_fifo;
8599 ++ else
8600 ++ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
8601 + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
8602 + FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
8603 + } else if (IS_IVYBRIDGE(dev)) {
8604 +diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
8605 +index 3216e157a8a0..89da47234016 100644
8606 +--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
8607 ++++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
8608 +@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
8609 + struct nvkm_ramht *ramht = *pramht;
8610 + if (ramht) {
8611 + nvkm_gpuobj_del(&ramht->gpuobj);
8612 +- kfree(*pramht);
8613 ++ vfree(*pramht);
8614 + *pramht = NULL;
8615 + }
8616 + }
8617 +@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
8618 + struct nvkm_ramht *ramht;
8619 + int ret, i;
8620 +
8621 +- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
8622 +- sizeof(*ramht->data), GFP_KERNEL)))
8623 ++ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
8624 ++ (size >> 3) * sizeof(*ramht->data))))
8625 + return -ENOMEM;
8626 +
8627 + ramht->device = device;
8628 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
8629 +index 1f81069edc58..332b5fe687fe 100644
8630 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
8631 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
8632 +@@ -1807,6 +1807,8 @@ gf100_gr_init(struct gf100_gr *gr)
8633 +
8634 + gf100_gr_mmio(gr, gr->func->mmio);
8635 +
8636 ++ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
8637 ++
8638 + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
8639 + for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
8640 + do {
8641 +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
8642 +index 86276519b2ef..47e52647c9e5 100644
8643 +--- a/drivers/gpu/drm/qxl/qxl_display.c
8644 ++++ b/drivers/gpu/drm/qxl/qxl_display.c
8645 +@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
8646 +
8647 + qxl_bo_kunmap(user_bo);
8648 +
8649 ++ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
8650 ++ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
8651 ++ qcrtc->hot_spot_x = hot_x;
8652 ++ qcrtc->hot_spot_y = hot_y;
8653 ++
8654 + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
8655 + cmd->type = QXL_CURSOR_SET;
8656 +- cmd->u.set.position.x = qcrtc->cur_x;
8657 +- cmd->u.set.position.y = qcrtc->cur_y;
8658 ++ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
8659 ++ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
8660 +
8661 + cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
8662 +
8663 +@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
8664 +
8665 + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
8666 + cmd->type = QXL_CURSOR_MOVE;
8667 +- cmd->u.position.x = qcrtc->cur_x;
8668 +- cmd->u.position.y = qcrtc->cur_y;
8669 ++ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
8670 ++ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
8671 + qxl_release_unmap(qdev, release, &cmd->release_info);
8672 +
8673 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
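
The qxl hunks above make cur_x/cur_y track the cursor image origin while the device is always programmed with origin + hotspot, compensating the origin whenever the hotspot moves so the visible tip stays put. A tiny worked example with invented coordinates:

    #include <stdio.h>

    int main(void)
    {
        int cur_x = 100, hot_spot_x = 0; /* old state (invented) */
        int hot_x = 4;                   /* new hotspot from userspace */

        /* keep the on-screen tip fixed when the hotspot changes */
        cur_x += hot_spot_x - hot_x;
        hot_spot_x = hot_x;

        /* the device is always given origin + hotspot */
        printf("device x = %d\n", cur_x + hot_spot_x); /* still 100 */
        return 0;
    }
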
8674 +diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
8675 +index 6e6b9b1519b8..3f3897eb458c 100644
8676 +--- a/drivers/gpu/drm/qxl/qxl_drv.h
8677 ++++ b/drivers/gpu/drm/qxl/qxl_drv.h
8678 +@@ -135,6 +135,8 @@ struct qxl_crtc {
8679 + int index;
8680 + int cur_x;
8681 + int cur_y;
8682 ++ int hot_spot_x;
8683 ++ int hot_spot_y;
8684 + };
8685 +
8686 + struct qxl_output {
8687 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
8688 +index 801dd60ac192..7f52142d37d5 100644
8689 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
8690 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
8691 +@@ -1740,6 +1740,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
8692 + static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
8693 + {
8694 + struct drm_device *dev = crtc->dev;
8695 ++ struct radeon_device *rdev = dev->dev_private;
8696 + struct drm_crtc *test_crtc;
8697 + struct radeon_crtc *test_radeon_crtc;
8698 +
8699 +@@ -1749,6 +1750,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
8700 + test_radeon_crtc = to_radeon_crtc(test_crtc);
8701 + if (test_radeon_crtc->encoder &&
8702 + ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
8703 ++ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
8704 ++ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
8705 ++ test_radeon_crtc->pll_id == ATOM_PPLL2)
8706 ++ continue;
8707 + /* for DP use the same PLL for all */
8708 + if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
8709 + return test_radeon_crtc->pll_id;
8710 +@@ -1770,6 +1775,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
8711 + {
8712 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
8713 + struct drm_device *dev = crtc->dev;
8714 ++ struct radeon_device *rdev = dev->dev_private;
8715 + struct drm_crtc *test_crtc;
8716 + struct radeon_crtc *test_radeon_crtc;
8717 + u32 adjusted_clock, test_adjusted_clock;
8718 +@@ -1785,6 +1791,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
8719 + test_radeon_crtc = to_radeon_crtc(test_crtc);
8720 + if (test_radeon_crtc->encoder &&
8721 + !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
8722 ++ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
8723 ++ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
8724 ++ test_radeon_crtc->pll_id == ATOM_PPLL2)
8725 ++ continue;
8726 + /* check if we are already driving this connector with another crtc */
8727 + if (test_radeon_crtc->connector == radeon_crtc->connector) {
8728 + /* if we are, return that pll */
8729 +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
8730 +index 6af832545bc5..b5760851195c 100644
8731 +--- a/drivers/gpu/drm/radeon/atombios_dp.c
8732 ++++ b/drivers/gpu/drm/radeon/atombios_dp.c
8733 +@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
8734 + }
8735 + }
8736 + } else {
8737 +- for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
8738 +- for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
8739 ++ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
8740 ++ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
8741 + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
8742 + if (max_pix_clock >= pix_clock) {
8743 + *dp_lanes = lane_num;
8744 +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
8745 +index 01b20e14a247..9de6503b10d8 100644
8746 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c
8747 ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
8748 +@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
8749 + && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
8750 + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
8751 +
8752 ++ /* vertical FP must be at least 1 */
8753 ++ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
8754 ++ adjusted_mode->crtc_vsync_start++;
8755 ++
8756 + /* get the native mode for scaling */
8757 + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
8758 + radeon_panel_mode_fixup(encoder, adjusted_mode);
8759 +@@ -892,8 +896,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
8760 + else
8761 + args.v1.ucLaneNum = 4;
8762 +
8763 +- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
8764 +- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
8765 + switch (radeon_encoder->encoder_id) {
8766 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
8767 + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
8768 +@@ -910,6 +912,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
8769 + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
8770 + else
8771 + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
8772 ++
8773 ++ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
8774 ++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
8775 ++
8776 + break;
8777 + case 2:
8778 + case 3:
8779 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
8780 +index 2ad462896896..32491355a1d4 100644
8781 +--- a/drivers/gpu/drm/radeon/evergreen.c
8782 ++++ b/drivers/gpu/drm/radeon/evergreen.c
8783 +@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
8784 + WREG32(VM_CONTEXT1_CNTL, 0);
8785 + }
8786 +
8787 ++static const unsigned ni_dig_offsets[] =
8788 ++{
8789 ++ NI_DIG0_REGISTER_OFFSET,
8790 ++ NI_DIG1_REGISTER_OFFSET,
8791 ++ NI_DIG2_REGISTER_OFFSET,
8792 ++ NI_DIG3_REGISTER_OFFSET,
8793 ++ NI_DIG4_REGISTER_OFFSET,
8794 ++ NI_DIG5_REGISTER_OFFSET
8795 ++};
8796 ++
8797 ++static const unsigned ni_tx_offsets[] =
8798 ++{
8799 ++ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
8800 ++ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
8801 ++ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
8802 ++ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
8803 ++ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
8804 ++ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
8805 ++};
8806 ++
8807 ++static const unsigned evergreen_dp_offsets[] =
8808 ++{
8809 ++ EVERGREEN_DP0_REGISTER_OFFSET,
8810 ++ EVERGREEN_DP1_REGISTER_OFFSET,
8811 ++ EVERGREEN_DP2_REGISTER_OFFSET,
8812 ++ EVERGREEN_DP3_REGISTER_OFFSET,
8813 ++ EVERGREEN_DP4_REGISTER_OFFSET,
8814 ++ EVERGREEN_DP5_REGISTER_OFFSET
8815 ++};
8816 ++
8817 ++
8818 ++/*
8819 ++ * The assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
8820 ++ * We go from crtc to connector, which is not reliable since it
8821 ++ * should be the opposite direction. If the crtc is enabled, then
8822 ++ * find the dig_fe which selects this crtc and ensure that it is enabled.
8823 ++ * If such a dig_fe is found, then find the dig_be which selects that dig_fe and
8824 ++ * ensure that it is enabled and in DP_SST mode.
8825 ++ * If UNIPHY_PLL_CONTROL1 is enabled, then we should disconnect the timing
8826 ++ * from the dp symbol clocks.
8827 ++ */
8828 ++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
8829 ++ unsigned crtc_id, unsigned *ret_dig_fe)
8830 ++{
8831 ++ unsigned i;
8832 ++ unsigned dig_fe;
8833 ++ unsigned dig_be;
8834 ++ unsigned dig_en_be;
8835 ++ unsigned uniphy_pll;
8836 ++ unsigned digs_fe_selected;
8837 ++ unsigned dig_be_mode;
8838 ++ unsigned dig_fe_mask;
8839 ++ bool is_enabled = false;
8840 ++ bool found_crtc = false;
8841 ++
8842 ++ /* loop through all running dig_fe to find selected crtc */
8843 ++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
8844 ++ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
8845 ++ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
8846 ++ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
8847 ++ /* found running pipe */
8848 ++ found_crtc = true;
8849 ++ dig_fe_mask = 1 << i;
8850 ++ dig_fe = i;
8851 ++ break;
8852 ++ }
8853 ++ }
8854 ++
8855 ++ if (found_crtc) {
8856 ++ /* loop through all running dig_be to find selected dig_fe */
8857 ++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
8858 ++ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
8859 ++ /* is our dig_fe selected by this dig_be? */
8860 ++ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
8861 ++ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
8862 ++ if (dig_fe_mask & digs_fe_selected &&
8863 ++ /* is the dig_be in SST mode? */
8864 ++ dig_be_mode == NI_DIG_BE_DPSST) {
8865 ++ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
8866 ++ ni_dig_offsets[i]);
8867 ++ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
8868 ++ ni_tx_offsets[i]);
8869 ++ /* dig_be enabled and tx running? */
8870 ++ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
8871 ++ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
8872 ++ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
8873 ++ is_enabled = true;
8874 ++ *ret_dig_fe = dig_fe;
8875 ++ break;
8876 ++ }
8877 ++ }
8878 ++ }
8879 ++ }
8880 ++
8881 ++ return is_enabled;
8882 ++}
8883 ++
8884 ++/*
8885 ++ * Blank the dig when in DP SST mode;
8886 ++ * the dig ignores the crtc timing.
8887 ++ */
8888 ++static void evergreen_blank_dp_output(struct radeon_device *rdev,
8889 ++ unsigned dig_fe)
8890 ++{
8891 ++ unsigned stream_ctrl;
8892 ++ unsigned fifo_ctrl;
8893 ++ unsigned counter = 0;
8894 ++
8895 ++ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
8896 ++ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
8897 ++ return;
8898 ++ }
8899 ++
8900 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
8901 ++ evergreen_dp_offsets[dig_fe]);
8902 ++ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
8903 ++ DRM_ERROR("dig %d , should be enable\n", dig_fe);
8904 ++ return;
8905 ++ }
8906 ++
8907 ++ stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
8908 ++ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
8909 ++ evergreen_dp_offsets[dig_fe], stream_ctrl);
8910 ++
8911 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
8912 ++ evergreen_dp_offsets[dig_fe]);
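++ /* poll until the hardware reports the video stream stopped (up to ~32 ms) */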
8913 ++ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
8914 ++ msleep(1);
8915 ++ counter++;
8916 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
8917 ++ evergreen_dp_offsets[dig_fe]);
8918 ++ }
8919 ++ if (counter >= 32)
8920 ++ DRM_ERROR("counter exceeds %d\n", counter);
8921 ++
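++ /* with the stream stopped, reset the DP steer FIFO */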
8922 ++ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
8923 ++ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
8924 ++ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
8925 ++
8926 ++}
8927 ++
8928 + void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
8929 + {
8930 + u32 crtc_enabled, tmp, frame_count, blackout;
8931 + int i, j;
8932 ++ unsigned dig_fe;
8933 +
8934 + if (!ASIC_IS_NODCE(rdev)) {
8935 + save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
8936 +@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
8937 + break;
8938 + udelay(1);
8939 + }
8940 +-
8941 ++ /*
8942 ++ * We should disable the dig if it drives DP SST, but we are in
8943 ++ * radeon_device_init and the topology is unknown; it only becomes
8944 ++ * available after radeon_modeset_init. radeon_atom_encoder_dpms_dig
8945 ++ * would do the job if we initialized it properly, so for now we
8946 ++ * do it manually here.
8947 ++ */
8948 ++ if (ASIC_IS_DCE5(rdev) &&
8949 ++ evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
8950 ++ evergreen_blank_dp_output(rdev, dig_fe);
8951 ++ /* we could remove the 6 lines below */
8952 + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
8953 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
8954 + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
8955 +diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
8956 +index aa939dfed3a3..b436badf9efa 100644
8957 +--- a/drivers/gpu/drm/radeon/evergreen_reg.h
8958 ++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
8959 +@@ -250,8 +250,43 @@
8960 +
8961 + /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
8962 + #define EVERGREEN_HDMI_BASE 0x7030
8963 ++/* DIG block */
8964 ++#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
8965 ++#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
8966 ++#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
8967 ++#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
8968 ++#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
8969 ++#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
8970 ++
8971 ++
8972 ++#define NI_DIG_FE_CNTL 0x7000
8973 ++# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
8974 ++# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
8975 ++
8976 ++
8977 ++#define NI_DIG_BE_CNTL 0x7140
8978 ++# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
8979 ++# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
8980 ++
8981 ++#define NI_DIG_BE_EN_CNTL 0x7144
8982 ++# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
8983 ++# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
8984 ++# define NI_DIG_BE_DPSST 0
8985 +
8986 + /* Display Port block */
8987 ++#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
8988 ++#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
8989 ++#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
8990 ++#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
8991 ++#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
8992 ++#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
8993 ++
8994 ++
8995 ++#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
8996 ++# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
8997 ++# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
8998 ++#define EVERGREEN_DP_STEER_FIFO 0x7310
8999 ++# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
9000 + #define EVERGREEN_DP_SEC_CNTL 0x7280
9001 + # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
9002 + # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
9003 +@@ -266,4 +301,15 @@
9004 + # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
9005 + # define EVERGREEN_DP_SEC_SS_EN (1 << 28)
9006 +
9007 ++/* DCIO_UNIPHY block */
9008 ++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
9009 ++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
9010 ++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
9011 ++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
9012 ++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
9013 ++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
9014 ++
9015 ++#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
9016 ++# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
9017 ++
9018 + #endif
9019 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
9020 +index 340f3f549f29..9cfc1c3e1965 100644
9021 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
9022 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
9023 +@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
9024 + rdev->mode_info.dither_property,
9025 + RADEON_FMT_DITHER_DISABLE);
9026 +
9027 +- if (radeon_audio != 0)
9028 ++ if (radeon_audio != 0) {
9029 + drm_object_attach_property(&radeon_connector->base.base,
9030 + rdev->mode_info.audio_property,
9031 + RADEON_AUDIO_AUTO);
9032 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
9033 ++ }
9034 + if (ASIC_IS_DCE5(rdev))
9035 + drm_object_attach_property(&radeon_connector->base.base,
9036 + rdev->mode_info.output_csc_property,
9037 +@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
9038 + drm_object_attach_property(&radeon_connector->base.base,
9039 + rdev->mode_info.audio_property,
9040 + RADEON_AUDIO_AUTO);
9041 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
9042 + }
9043 + if (connector_type == DRM_MODE_CONNECTOR_DVII) {
9044 + radeon_connector->dac_load_detect = true;
9045 +@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
9046 + drm_object_attach_property(&radeon_connector->base.base,
9047 + rdev->mode_info.audio_property,
9048 + RADEON_AUDIO_AUTO);
9049 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
9050 + }
9051 + if (ASIC_IS_DCE5(rdev))
9052 + drm_object_attach_property(&radeon_connector->base.base,
9053 +@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
9054 + drm_object_attach_property(&radeon_connector->base.base,
9055 + rdev->mode_info.audio_property,
9056 + RADEON_AUDIO_AUTO);
9057 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
9058 + }
9059 + if (ASIC_IS_DCE5(rdev))
9060 + drm_object_attach_property(&radeon_connector->base.base,
9061 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
9062 +index 2d9196a447fd..bfcef4db8138 100644
9063 +--- a/drivers/gpu/drm/radeon/radeon_display.c
9064 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
9065 +@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
9066 + /* setup afmt */
9067 + radeon_afmt_init(rdev);
9068 +
9069 +- if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
9070 +- radeon_fbdev_init(rdev);
9071 +- drm_kms_helper_poll_init(rdev->ddev);
9072 +- }
9073 ++ radeon_fbdev_init(rdev);
9074 ++ drm_kms_helper_poll_init(rdev->ddev);
9075 +
9076 + /* do pm late init */
9077 + ret = radeon_pm_late_init(rdev);
9078 +diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
9079 +index 3b0c229d7dcd..db64e0062689 100644
9080 +--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
9081 ++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
9082 +@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
9083 +
9084 + tmp &= AUX_HPD_SEL(0x7);
9085 + tmp |= AUX_HPD_SEL(chan->rec.hpd);
9086 +- tmp |= AUX_EN | AUX_LS_READ_EN;
9087 ++ tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
9088 +
9089 + WREG32(AUX_CONTROL + aux_offset[instance], tmp);
9090 +
9091 +diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
9092 +index df7a1719c841..9d210bbcab50 100644
9093 +--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
9094 ++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
9095 +@@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
9096 + drm_mode_set_crtcinfo(adjusted_mode, 0);
9097 + {
9098 + struct radeon_connector_atom_dig *dig_connector;
9099 +- int ret;
9100 +-
9101 + dig_connector = mst_enc->connector->con_priv;
9102 +- ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
9103 +- dig_connector->dpcd, adjusted_mode->clock,
9104 +- &dig_connector->dp_lane_count,
9105 +- &dig_connector->dp_clock);
9106 +- if (ret) {
9107 +- dig_connector->dp_lane_count = 0;
9108 +- dig_connector->dp_clock = 0;
9109 +- }
9110 ++ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
9111 ++ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
9112 + DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
9113 + dig_connector->dp_lane_count, dig_connector->dp_clock);
9114 + }
9115 +diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
9116 +index d2e628eea53d..d179596334a7 100644
9117 +--- a/drivers/gpu/drm/radeon/radeon_fb.c
9118 ++++ b/drivers/gpu/drm/radeon/radeon_fb.c
9119 +@@ -292,7 +292,8 @@ out_unref:
9120 +
9121 + void radeon_fb_output_poll_changed(struct radeon_device *rdev)
9122 + {
9123 +- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
9124 ++ if (rdev->mode_info.rfbdev)
9125 ++ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
9126 + }
9127 +
9128 + static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
9129 +@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
9130 + int bpp_sel = 32;
9131 + int ret;
9132 +
9133 ++ /* don't enable fbdev if no connectors */
9134 ++ if (list_empty(&rdev->ddev->mode_config.connector_list))
9135 ++ return 0;
9136 ++
9137 + /* select 8 bpp console on RN50 or 16MB cards */
9138 + if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
9139 + bpp_sel = 8;
9140 +@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
9141 +
9142 + void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
9143 + {
9144 +- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
9145 ++ if (rdev->mode_info.rfbdev)
9146 ++ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
9147 + }
9148 +
9149 + bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
9150 + {
9151 ++ if (!rdev->mode_info.rfbdev)
9152 ++ return false;
9153 ++
9154 + if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
9155 + return true;
9156 + return false;
9157 +@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
9158 +
9159 + void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
9160 + {
9161 +- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
9162 ++ if (rdev->mode_info.rfbdev)
9163 ++ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
9164 + }
9165 +
9166 + void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
9167 + {
9168 +- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
9169 ++ if (rdev->mode_info.rfbdev)
9170 ++ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
9171 + }
9172 +
9173 + void radeon_fbdev_restore_mode(struct radeon_device *rdev)
9174 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
9175 +index e06ac546a90f..f342aad79cc6 100644
9176 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
9177 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
9178 +@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
9179 + {
9180 + struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
9181 +
9182 ++ if (radeon_ttm_tt_has_userptr(bo->ttm))
9183 ++ return -EPERM;
9184 + return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
9185 + }
9186 +
9187 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
9188 +index a82b891ae1fe..caa73de584a5 100644
9189 +--- a/drivers/gpu/drm/radeon/si_dpm.c
9190 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
9191 +@@ -2926,9 +2926,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
9192 + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
9193 + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
9194 + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
9195 ++ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
9196 + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
9197 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
9198 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
9199 ++ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
9200 ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
9201 + { 0, 0, 0, 0 },
9202 + };
9203 +
9204 +@@ -3008,6 +3011,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
9205 + }
9206 + ++p;
9207 + }
9208 ++ /* limit mclk on all R7 370 parts for stability */
9209 ++ if (rdev->pdev->device == 0x6811 &&
9210 ++ rdev->pdev->revision == 0x81)
9211 ++ max_mclk = 120000;
9212 +
9213 + if (rps->vce_active) {
9214 + rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
9215 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
9216 +index 4cbf26555093..e3daafa1be13 100644
9217 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
9218 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
9219 +@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
9220 +
9221 + void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
9222 + {
9223 +- struct ttm_bo_device *bdev = bo->bdev;
9224 +- struct ttm_mem_type_manager *man;
9225 ++ int put_count = 0;
9226 +
9227 + lockdep_assert_held(&bo->resv->lock.base);
9228 +
9229 +- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
9230 +- list_del_init(&bo->swap);
9231 +- list_del_init(&bo->lru);
9232 +-
9233 +- } else {
9234 +- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
9235 +- list_move_tail(&bo->swap, &bo->glob->swap_lru);
9236 +-
9237 +- man = &bdev->man[bo->mem.mem_type];
9238 +- list_move_tail(&bo->lru, &man->lru);
9239 +- }
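++ /* drop from the LRU lists (releasing list refs), then re-add at the tail */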
9240 ++ put_count = ttm_bo_del_from_lru(bo);
9241 ++ ttm_bo_list_ref_sub(bo, put_count, true);
9242 ++ ttm_bo_add_to_lru(bo);
9243 + }
9244 + EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
9245 +
9246 +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
9247 +index 200419d4d43c..18a2acbccb7d 100644
9248 +--- a/drivers/gpu/drm/udl/udl_fb.c
9249 ++++ b/drivers/gpu/drm/udl/udl_fb.c
9250 +@@ -538,7 +538,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
9251 + out_destroy_fbi:
9252 + drm_fb_helper_release_fbi(helper);
9253 + out_gfree:
9254 +- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
9255 ++ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
9256 + out:
9257 + return ret;
9258 + }
9259 +diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
9260 +index 2a0a784ab6ee..d7528e0d8442 100644
9261 +--- a/drivers/gpu/drm/udl/udl_gem.c
9262 ++++ b/drivers/gpu/drm/udl/udl_gem.c
9263 +@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
9264 + return ret;
9265 + }
9266 +
9267 +- drm_gem_object_unreference(&obj->base);
9268 ++ drm_gem_object_unreference_unlocked(&obj->base);
9269 + *handle_p = handle;
9270 + return 0;
9271 + }
9272 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
9273 +index 22278bcfc60e..ac8eafea6361 100644
9274 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
9275 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
9276 +@@ -499,11 +499,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
9277 + if (IS_ERR(bo))
9278 + return PTR_ERR(bo);
9279 +
9280 +- ret = copy_from_user(bo->base.vaddr,
9281 ++ if (copy_from_user(bo->base.vaddr,
9282 + (void __user *)(uintptr_t)args->data,
9283 +- args->size);
9284 +- if (ret != 0)
9285 ++ args->size)) {
9286 ++ ret = -EFAULT;
9287 + goto fail;
9288 ++ }
9289 + /* Clear the rest of the memory from allocating from the BO
9290 + * cache.
9291 + */
9292 +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
9293 +index e00db3f510dd..abb98c77bad2 100644
9294 +--- a/drivers/gpu/ipu-v3/ipu-common.c
9295 ++++ b/drivers/gpu/ipu-v3/ipu-common.c
9296 +@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
9297 + goto err_register;
9298 + }
9299 +
9300 +- pdev->dev.of_node = of_node;
9301 + pdev->dev.parent = dev;
9302 +
9303 + ret = platform_device_add_data(pdev, &reg->pdata,
9304 +@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
9305 + platform_device_put(pdev);
9306 + goto err_register;
9307 + }
9308 ++
9309 ++ /*
9310 ++ * Set of_node only after calling platform_device_add. Otherwise
9311 ++ * the platform:imx-ipuv3-crtc modalias won't be used.
9312 ++ */
9313 ++ pdev->dev.of_node = of_node;
9314 + }
9315 +
9316 + return 0;
9317 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
9318 +index 7e89288b1537..99446ffd71fb 100644
9319 +--- a/drivers/hid/hid-core.c
9320 ++++ b/drivers/hid/hid-core.c
9321 +@@ -1891,6 +1891,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
9322 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
9323 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
9324 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
9325 ++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
9326 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
9327 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
9328 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
9329 +@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
9330 + /*
9331 + * Scan generic devices for group information
9332 + */
9333 +- if (hid_ignore_special_drivers ||
9334 +- (!hdev->group &&
9335 +- !hid_match_id(hdev, hid_have_special_driver))) {
9336 ++ if (hid_ignore_special_drivers) {
9337 ++ hdev->group = HID_GROUP_GENERIC;
9338 ++ } else if (!hdev->group &&
9339 ++ !hid_match_id(hdev, hid_have_special_driver)) {
9340 + ret = hid_scan_report(hdev);
9341 + if (ret)
9342 + hid_warn(hdev, "bad device descriptor (%d)\n", ret);
9343 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
9344 +index b6ff6e78ac54..14c14c82795c 100644
9345 +--- a/drivers/hid/hid-ids.h
9346 ++++ b/drivers/hid/hid-ids.h
9347 +@@ -255,6 +255,7 @@
9348 + #define USB_DEVICE_ID_CORSAIR_K90 0x1b02
9349 +
9350 + #define USB_VENDOR_ID_CREATIVELABS 0x041e
9351 ++#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
9352 + #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
9353 +
9354 + #define USB_VENDOR_ID_CVTOUCH 0x1ff7
9355 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
9356 +index 296d4991560e..a20fc604ffd8 100644
9357 +--- a/drivers/hid/hid-multitouch.c
9358 ++++ b/drivers/hid/hid-multitouch.c
9359 +@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
9360 + td->is_buttonpad = true;
9361 +
9362 + break;
9363 ++ case 0xff0000c5:
9364 ++ /* Retrieve the Win8 blob once to enable some devices */
9365 ++ if (usage->usage_index == 0)
9366 ++ mt_get_feature(hdev, field->report);
9367 ++ break;
9368 + }
9369 + }
9370 +
9371 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
9372 +index b9216938a718..bb897497f008 100644
9373 +--- a/drivers/hid/i2c-hid/i2c-hid.c
9374 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
9375 +@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
9376 + u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
9377 + u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
9378 + u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
9379 ++ u16 size;
9380 ++ int args_len;
9381 ++ int index = 0;
9382 ++
9383 ++ i2c_hid_dbg(ihid, "%s\n", __func__);
9384 ++
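++ /* reject reports that would overflow the preallocated buffer */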
9385 ++ if (data_len > ihid->bufsize)
9386 ++ return -EINVAL;
9387 +
9388 +- /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
9389 +- u16 size = 2 /* size */ +
9390 ++ size = 2 /* size */ +
9391 + (reportID ? 1 : 0) /* reportID */ +
9392 + data_len /* buf */;
9393 +- int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
9394 ++ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
9395 + 2 /* dataRegister */ +
9396 + size /* args */;
9397 +- int index = 0;
9398 +-
9399 +- i2c_hid_dbg(ihid, "%s\n", __func__);
9400 +
9401 + if (!use_data && maxOutputLength == 0)
9402 + return -ENOSYS;
9403 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
9404 +index ad71160b9ea4..ae83af649a60 100644
9405 +--- a/drivers/hid/usbhid/hid-core.c
9406 ++++ b/drivers/hid/usbhid/hid-core.c
9407 +@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
9408 + return ret;
9409 + }
9410 +
9411 +-static void usbhid_restart_queues(struct usbhid_device *usbhid)
9412 +-{
9413 +- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
9414 +- usbhid_restart_out_queue(usbhid);
9415 +- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
9416 +- usbhid_restart_ctrl_queue(usbhid);
9417 +-}
9418 +-
9419 + static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
9420 + {
9421 + struct usbhid_device *usbhid = hid->driver_data;
9422 +@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
9423 + usb_kill_urb(usbhid->urbout);
9424 + }
9425 +
9426 ++static void hid_restart_io(struct hid_device *hid)
9427 ++{
9428 ++ struct usbhid_device *usbhid = hid->driver_data;
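++ /* sample the pending clear-halt/reset state before restarting I/O */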
9429 ++ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
9430 ++ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
9431 ++
9432 ++ spin_lock_irq(&usbhid->lock);
9433 ++ clear_bit(HID_SUSPENDED, &usbhid->iofl);
9434 ++ usbhid_mark_busy(usbhid);
9435 ++
9436 ++ if (clear_halt || reset_pending)
9437 ++ schedule_work(&usbhid->reset_work);
9438 ++ usbhid->retry_delay = 0;
9439 ++ spin_unlock_irq(&usbhid->lock);
9440 ++
9441 ++ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
9442 ++ return;
9443 ++
9444 ++ if (!clear_halt) {
9445 ++ if (hid_start_in(hid) < 0)
9446 ++ hid_io_error(hid);
9447 ++ }
9448 ++
9449 ++ spin_lock_irq(&usbhid->lock);
9450 ++ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
9451 ++ usbhid_restart_out_queue(usbhid);
9452 ++ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
9453 ++ usbhid_restart_ctrl_queue(usbhid);
9454 ++ spin_unlock_irq(&usbhid->lock);
9455 ++}
9456 ++
9457 + /* Treat USB reset pretty much the same as suspend/resume */
9458 + static int hid_pre_reset(struct usb_interface *intf)
9459 + {
9460 +@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
9461 + return 1;
9462 + }
9463 +
9464 ++ /* No need to do another reset or clear a halted endpoint */
9465 + spin_lock_irq(&usbhid->lock);
9466 + clear_bit(HID_RESET_PENDING, &usbhid->iofl);
9467 ++ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
9468 + spin_unlock_irq(&usbhid->lock);
9469 + hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
9470 +- status = hid_start_in(hid);
9471 +- if (status < 0)
9472 +- hid_io_error(hid);
9473 +- usbhid_restart_queues(usbhid);
9474 ++
9475 ++ hid_restart_io(hid);
9476 +
9477 + return 0;
9478 + }
9479 +@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
9480 + #ifdef CONFIG_PM
9481 + static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
9482 + {
9483 +- struct usbhid_device *usbhid = hid->driver_data;
9484 +- int status;
9485 +-
9486 +- spin_lock_irq(&usbhid->lock);
9487 +- clear_bit(HID_SUSPENDED, &usbhid->iofl);
9488 +- usbhid_mark_busy(usbhid);
9489 +-
9490 +- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
9491 +- test_bit(HID_RESET_PENDING, &usbhid->iofl))
9492 +- schedule_work(&usbhid->reset_work);
9493 +- usbhid->retry_delay = 0;
9494 +-
9495 +- usbhid_restart_queues(usbhid);
9496 +- spin_unlock_irq(&usbhid->lock);
9497 +-
9498 +- status = hid_start_in(hid);
9499 +- if (status < 0)
9500 +- hid_io_error(hid);
9501 ++ int status = 0;
9502 +
9503 ++ hid_restart_io(hid);
9504 + if (driver_suspended && hid->driver && hid->driver->resume)
9505 + status = hid->driver->resume(hid);
9506 + return status;
9507 +@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
9508 + static int hid_resume(struct usb_interface *intf)
9509 + {
9510 + struct hid_device *hid = usb_get_intfdata (intf);
9511 +- struct usbhid_device *usbhid = hid->driver_data;
9512 + int status;
9513 +
9514 +- if (!test_bit(HID_STARTED, &usbhid->iofl))
9515 +- return 0;
9516 +-
9517 + status = hid_resume_common(hid, true);
9518 + dev_dbg(&intf->dev, "resume status %d\n", status);
9519 + return 0;
9520 +@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
9521 + static int hid_reset_resume(struct usb_interface *intf)
9522 + {
9523 + struct hid_device *hid = usb_get_intfdata(intf);
9524 +- struct usbhid_device *usbhid = hid->driver_data;
9525 + int status;
9526 +
9527 +- clear_bit(HID_SUSPENDED, &usbhid->iofl);
9528 + status = hid_post_reset(intf);
9529 + if (status >= 0 && hid->driver && hid->driver->reset_resume) {
9530 + int ret = hid->driver->reset_resume(hid);
9531 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
9532 +index 7dd0953cd70f..dc8e6adf95a4 100644
9533 +--- a/drivers/hid/usbhid/hid-quirks.c
9534 ++++ b/drivers/hid/usbhid/hid-quirks.c
9535 +@@ -70,6 +70,7 @@ static const struct hid_blacklist {
9536 + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
9537 + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
9538 + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
9539 ++ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
9540 + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
9541 + { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
9542 + { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
9543 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
9544 +index 99ef77fcfb80..f71187aad0d0 100644
9545 +--- a/drivers/hid/wacom_wac.c
9546 ++++ b/drivers/hid/wacom_wac.c
9547 +@@ -2409,6 +2409,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
9548 + }
9549 +
9550 + /*
9551 ++ * Hack for the Bamboo One:
9552 ++ * the device presents a PAD/Touch interface like most Bamboos and even
9553 ++ * sends ghost PAD data on it. However, later, we must disable this
9554 ++ * ghost interface, and we cannot detect it unless we set it here
9555 ++ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
9556 ++ */
9557 ++ if (features->type == BAMBOO_PEN &&
9558 ++ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
9559 ++ features->device_type |= WACOM_DEVICETYPE_PAD;
9560 ++
9561 ++ /*
9562 + * Raw Wacom-mode pen and touch events both come from interface
9563 + * 0, whose HID descriptor has an application usage of 0xFF0D
9564 + * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
9565 +@@ -3367,6 +3378,10 @@ static const struct wacom_features wacom_features_0x33E =
9566 + { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
9567 + INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
9568 + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
9569 ++static const struct wacom_features wacom_features_0x343 =
9570 ++ { "Wacom DTK1651", 34616, 19559, 1023, 0,
9571 ++ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
9572 ++ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
9573 +
9574 + static const struct wacom_features wacom_features_HID_ANY_ID =
9575 + { "Wacom HID", .type = HID_GENERIC };
9576 +@@ -3532,6 +3547,7 @@ const struct hid_device_id wacom_ids[] = {
9577 + { USB_DEVICE_WACOM(0x33C) },
9578 + { USB_DEVICE_WACOM(0x33D) },
9579 + { USB_DEVICE_WACOM(0x33E) },
9580 ++ { USB_DEVICE_WACOM(0x343) },
9581 + { USB_DEVICE_WACOM(0x4001) },
9582 + { USB_DEVICE_WACOM(0x4004) },
9583 + { USB_DEVICE_WACOM(0x5000) },
9584 +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
9585 +index b53702ce692f..e35560b955b1 100644
9586 +--- a/drivers/hv/ring_buffer.c
9587 ++++ b/drivers/hv/ring_buffer.c
9588 +@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
9589 + * there is room for the producer to send the pending packet.
9590 + */
9591 +
9592 +-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
9593 +- struct hv_ring_buffer_info *rbi)
9594 ++static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
9595 + {
9596 + u32 cur_write_sz;
9597 + u32 r_size;
9598 +- u32 write_loc = rbi->ring_buffer->write_index;
9599 ++ u32 write_loc;
9600 + u32 read_loc = rbi->ring_buffer->read_index;
9601 +- u32 pending_sz = rbi->ring_buffer->pending_send_sz;
9602 ++ u32 pending_sz;
9603 +
9604 ++ /*
9605 ++ * Issue a full memory barrier before making the signaling decision.
9606 ++ * Here is the reason for having this barrier:
9607 ++ * If the read of pending_sz (in this function) were reordered
9608 ++ * so that it happened before we commit the new read index (in
9609 ++ * the calling function), we could have a problem. If the host
9610 ++ * were to set pending_sz after we have sampled it, and then go
9611 ++ * to sleep before we commit the read index, we could miss
9612 ++ * sending the interrupt. Issue a full memory barrier to
9613 ++ * address this.
9614 ++ */
9615 ++ mb();
9616 ++
9617 ++ pending_sz = rbi->ring_buffer->pending_send_sz;
9618 ++ write_loc = rbi->ring_buffer->write_index;
9619 + /* If the other end is not blocked on write don't bother. */
9620 + if (pending_sz == 0)
9621 + return false;
9622 +@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
9623 + cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
9624 + read_loc - write_loc;
9625 +
9626 +- if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
9627 ++ if (cur_write_sz >= pending_sz)
9628 + return true;
9629 +
9630 + return false;
9631 +@@ -458,7 +472,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
9632 + /* Update the read index */
9633 + hv_set_next_read_location(inring_info, next_read_location);
9634 +
9635 +- *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
9636 ++ *signal = hv_need_to_signal_on_read(inring_info);
9637 +
9638 + out_unlock:
9639 + spin_unlock_irqrestore(&inring_info->ring_lock, flags);
9640 +diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
9641 +index 36544c4f653c..303d0c9df907 100644
9642 +--- a/drivers/hwmon/max1111.c
9643 ++++ b/drivers/hwmon/max1111.c
9644 +@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
9645 +
9646 + int max1111_read_channel(int channel)
9647 + {
9648 ++ if (!the_max1111 || !the_max1111->spi)
9649 ++ return -ENODEV;
9650 ++
9651 + return max1111_read(&the_max1111->spi->dev, channel);
9652 + }
9653 + EXPORT_SYMBOL(max1111_read_channel);
9654 +@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
9655 + {
9656 + struct max1111_data *data = spi_get_drvdata(spi);
9657 +
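++ /* clear the global handle so max1111_read_channel() now fails with -ENODEV */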
9658 ++#ifdef CONFIG_SHARPSL_PM
9659 ++ the_max1111 = NULL;
9660 ++#endif
9661 + hwmon_device_unregister(data->hwmon_dev);
9662 + sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
9663 + sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
9664 +diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
9665 +index 83e9f591a54b..e7a348807f0c 100644
9666 +--- a/drivers/hwtracing/stm/Kconfig
9667 ++++ b/drivers/hwtracing/stm/Kconfig
9668 +@@ -1,6 +1,7 @@
9669 + config STM
9670 + tristate "System Trace Module devices"
9671 + select CONFIGFS_FS
9672 ++ select SRCU
9673 + help
9674 + A System Trace Module (STM) is a device exporting data in System
9675 + Trace Protocol (STP) format as defined by MIPI STP standards.
9676 +diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
9677 +index 714bdc837769..b167ab25310a 100644
9678 +--- a/drivers/i2c/busses/i2c-cpm.c
9679 ++++ b/drivers/i2c/busses/i2c-cpm.c
9680 +@@ -116,8 +116,8 @@ struct cpm_i2c {
9681 + cbd_t __iomem *rbase;
9682 + u_char *txbuf[CPM_MAXBD];
9683 + u_char *rxbuf[CPM_MAXBD];
9684 +- u32 txdma[CPM_MAXBD];
9685 +- u32 rxdma[CPM_MAXBD];
9686 ++ dma_addr_t txdma[CPM_MAXBD];
9687 ++ dma_addr_t rxdma[CPM_MAXBD];
9688 + };
9689 +
9690 + static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
9691 +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
9692 +index b29c7500461a..f54ece8fce78 100644
9693 +--- a/drivers/i2c/busses/i2c-exynos5.c
9694 ++++ b/drivers/i2c/busses/i2c-exynos5.c
9695 +@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
9696 + return -EIO;
9697 + }
9698 +
9699 +- clk_prepare_enable(i2c->clk);
9700 ++ ret = clk_enable(i2c->clk);
9701 ++ if (ret)
9702 ++ return ret;
9703 +
9704 + for (i = 0; i < num; i++, msgs++) {
9705 + stop = (i == num - 1);
9706 +@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
9707 + }
9708 +
9709 + out:
9710 +- clk_disable_unprepare(i2c->clk);
9711 ++ clk_disable(i2c->clk);
9712 + return ret;
9713 + }
9714 +
9715 +@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
9716 + return -ENOENT;
9717 + }
9718 +
9719 +- clk_prepare_enable(i2c->clk);
9720 ++ ret = clk_prepare_enable(i2c->clk);
9721 ++ if (ret)
9722 ++ return ret;
9723 +
9724 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
9725 + i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
9726 +@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
9727 +
9728 + platform_set_drvdata(pdev, i2c);
9729 +
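++ /* leave the clock prepared but gated; the xfer path re-enables it with clk_enable() */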
9730 ++ clk_disable(i2c->clk);
9731 ++
9732 ++ return 0;
9733 ++
9734 + err_clk:
9735 + clk_disable_unprepare(i2c->clk);
9736 + return ret;
9737 +@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
9738 +
9739 + i2c_del_adapter(&i2c->adap);
9740 +
9741 ++ clk_unprepare(i2c->clk);
9742 ++
9743 + return 0;
9744 + }
9745 +
9746 +@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
9747 +
9748 + i2c->suspended = 1;
9749 +
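++ /* drop the prepare count while suspended; resume_noirq re-prepares the clock */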
9750 ++ clk_unprepare(i2c->clk);
9751 ++
9752 + return 0;
9753 + }
9754 +
9755 +@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
9756 + struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
9757 + int ret = 0;
9758 +
9759 +- clk_prepare_enable(i2c->clk);
9760 ++ ret = clk_prepare_enable(i2c->clk);
9761 ++ if (ret)
9762 ++ return ret;
9763 +
9764 + ret = exynos5_hsi2c_clock_setup(i2c);
9765 + if (ret) {
9766 +@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
9767 + }
9768 +
9769 + exynos5_i2c_init(i2c);
9770 +- clk_disable_unprepare(i2c->clk);
9771 ++ clk_disable(i2c->clk);
9772 + i2c->suspended = 0;
9773 +
9774 + return 0;
9775 +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
9776 +index cd4510a63375..146eed70bdf4 100644
9777 +--- a/drivers/idle/intel_idle.c
9778 ++++ b/drivers/idle/intel_idle.c
9779 +@@ -65,7 +65,7 @@
9780 + #include <asm/mwait.h>
9781 + #include <asm/msr.h>
9782 +
9783 +-#define INTEL_IDLE_VERSION "0.4"
9784 ++#define INTEL_IDLE_VERSION "0.4.1"
9785 + #define PREFIX "intel_idle: "
9786 +
9787 + static struct cpuidle_driver intel_idle_driver = {
9788 +@@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
9789 + }
9790 +
9791 + /*
9792 +- * intel_idle_state_table_update()
9793 +- *
9794 +- * Update the default state_table for this CPU-id
9795 ++ * ivt_idle_state_table_update(void)
9796 + *
9797 +- * Currently used to access tuned IVT multi-socket targets
9798 ++ * Tune IVT multi-socket targets
9799 + * Assumption: num_sockets == (max_package_num + 1)
9800 + */
9801 +-void intel_idle_state_table_update(void)
9802 ++static void ivt_idle_state_table_update(void)
9803 + {
9804 + /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
9805 +- if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
9806 +- int cpu, package_num, num_sockets = 1;
9807 +-
9808 +- for_each_online_cpu(cpu) {
9809 +- package_num = topology_physical_package_id(cpu);
9810 +- if (package_num + 1 > num_sockets) {
9811 +- num_sockets = package_num + 1;
9812 +-
9813 +- if (num_sockets > 4) {
9814 +- cpuidle_state_table = ivt_cstates_8s;
9815 +- return;
9816 +- }
9817 ++ int cpu, package_num, num_sockets = 1;
9818 ++
9819 ++ for_each_online_cpu(cpu) {
9820 ++ package_num = topology_physical_package_id(cpu);
9821 ++ if (package_num + 1 > num_sockets) {
9822 ++ num_sockets = package_num + 1;
9823 ++
9824 ++ if (num_sockets > 4) {
9825 ++ cpuidle_state_table = ivt_cstates_8s;
9826 ++ return;
9827 + }
9828 + }
9829 ++ }
9830 ++
9831 ++ if (num_sockets > 2)
9832 ++ cpuidle_state_table = ivt_cstates_4s;
9833 ++
9834 ++ /* else, 1 and 2 socket systems use default ivt_cstates */
9835 ++}
9836 ++/*
9837 ++ * sklh_idle_state_table_update(void)
9838 ++ *
9839 ++ * On SKL-H (model 0x5e) disable C8 and C9 if:
9840 ++ * C10 is enabled and SGX disabled
9841 ++ */
9842 ++static void sklh_idle_state_table_update(void)
9843 ++{
9844 ++ unsigned long long msr;
9845 ++ unsigned int eax, ebx, ecx, edx;
9846 ++
9847 ++
9848 ++ /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
9849 ++ if (max_cstate <= 7)
9850 ++ return;
9851 ++
9852 ++ /* if PC10 not present in CPUID.MWAIT.EDX */
9853 ++ if ((mwait_substates & (0xF << 28)) == 0)
9854 ++ return;
9855 ++
9856 ++ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
9857 ++
9858 ++ /* PC10 is not enabled in PKG C-state limit */
9859 ++ if ((msr & 0xF) != 8)
9860 ++ return;
9861 ++
9862 ++ ecx = 0;
9863 ++ cpuid(7, &eax, &ebx, &ecx, &edx);
9864 ++
9865 ++ /* if SGX is present */
9866 ++ if (ebx & (1 << 2)) {
9867 +
9868 +- if (num_sockets > 2)
9869 +- cpuidle_state_table = ivt_cstates_4s;
9870 +- /* else, 1 and 2 socket systems use default ivt_cstates */
9871 ++ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
9872 ++
9873 ++ /* if SGX is enabled */
9874 ++ if (msr & (1 << 18))
9875 ++ return;
9876 ++ }
9877 ++
9878 ++ skl_cstates[5].disabled = 1; /* C8-SKL */
9879 ++ skl_cstates[6].disabled = 1; /* C9-SKL */
9880 ++}
9881 ++/*
9882 ++ * intel_idle_state_table_update()
9883 ++ *
9884 ++ * Update the default state_table for this CPU-id
9885 ++ */
9886 ++
9887 ++static void intel_idle_state_table_update(void)
9888 ++{
9889 ++ switch (boot_cpu_data.x86_model) {
9890 ++
9891 ++ case 0x3e: /* IVT */
9892 ++ ivt_idle_state_table_update();
9893 ++ break;
9894 ++ case 0x5e: /* SKL-H */
9895 ++ sklh_idle_state_table_update();
9896 ++ break;
9897 + }
9898 +- return;
9899 + }
9900 +
9901 + /*
9902 +@@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
9903 + if (num_substates == 0)
9904 + continue;
9905 +
9906 ++ /* if state marked as disabled, skip it */
9907 ++ if (cpuidle_state_table[cstate].disabled != 0) {
9908 ++ pr_debug(PREFIX "state %s is disabled\n",
9909 ++ cpuidle_state_table[cstate].name);
9910 ++ continue;
9911 ++ }
9912 ++
9913 ++
9914 + if (((mwait_cstate + 1) > 2) &&
9915 + !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
9916 + mark_tsc_unstable("TSC halts in idle"
9917 +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
9918 +index c73331f7782b..2072a31e813b 100644
9919 +--- a/drivers/iio/accel/bmc150-accel-core.c
9920 ++++ b/drivers/iio/accel/bmc150-accel-core.c
9921 +@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
9922 + {
9923 + int ret;
9924 + int axis = chan->scan_index;
9925 +- unsigned int raw_val;
9926 ++ __le16 raw_val;
9927 +
9928 + mutex_lock(&data->mutex);
9929 + ret = bmc150_accel_set_power_state(data, true);
9930 +@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
9931 + }
9932 +
9933 + ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
9934 +- &raw_val, 2);
9935 ++ &raw_val, sizeof(raw_val));
9936 + if (ret < 0) {
9937 + dev_err(data->dev, "Error reading axis %d\n", axis);
9938 + bmc150_accel_set_power_state(data, false);
9939 + mutex_unlock(&data->mutex);
9940 + return ret;
9941 + }
9942 +- *val = sign_extend32(raw_val >> chan->scan_type.shift,
9943 ++ *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
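++ /* the axis registers are little-endian; convert before sign extension */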
9944 + chan->scan_type.realbits - 1);
9945 + ret = bmc150_accel_set_power_state(data, false);
9946 + mutex_unlock(&data->mutex);
9947 +@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
9948 + .realbits = (bits), \
9949 + .storagebits = 16, \
9950 + .shift = 16 - (bits), \
9951 ++ .endianness = IIO_LE, \
9952 + }, \
9953 + .event_spec = &bmc150_accel_event, \
9954 + .num_event_specs = 1 \
9955 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
9956 +index bbce3b09ac45..4dac567e75b4 100644
9957 +--- a/drivers/iio/gyro/bmg160_core.c
9958 ++++ b/drivers/iio/gyro/bmg160_core.c
9959 +@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
9960 + static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
9961 + {
9962 + int ret;
9963 +- unsigned int raw_val;
9964 ++ __le16 raw_val;
9965 +
9966 + mutex_lock(&data->mutex);
9967 + ret = bmg160_set_power_state(data, true);
9968 +@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
9969 + }
9970 +
9971 + ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
9972 +- 2);
9973 ++ sizeof(raw_val));
9974 + if (ret < 0) {
9975 + dev_err(data->dev, "Error reading axis %d\n", axis);
9976 + bmg160_set_power_state(data, false);
9977 +@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
9978 + return ret;
9979 + }
9980 +
9981 +- *val = sign_extend32(raw_val, 15);
9982 ++ *val = sign_extend32(le16_to_cpu(raw_val), 15);
9983 + ret = bmg160_set_power_state(data, false);
9984 + mutex_unlock(&data->mutex);
9985 + if (ret < 0)
9986 +@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
9987 + .sign = 's', \
9988 + .realbits = 16, \
9989 + .storagebits = 16, \
9990 ++ .endianness = IIO_LE, \
9991 + }, \
9992 + .event_spec = &bmg160_event, \
9993 + .num_event_specs = 1 \
9994 +@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
9995 + mutex_unlock(&data->mutex);
9996 + goto err;
9997 + }
9998 +- data->buffer[i++] = ret;
9999 ++ data->buffer[i++] = val;
10000 + }
10001 + mutex_unlock(&data->mutex);
10002 +
10003 +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
10004 +index 139ae916225f..5b6abc541e8c 100644
10005 +--- a/drivers/iio/industrialio-buffer.c
10006 ++++ b/drivers/iio/industrialio-buffer.c
10007 +@@ -645,6 +645,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
10008 + unsigned int modes;
10009 +
10010 + memset(config, 0, sizeof(*config));
10011 ++ config->watermark = ~0;
10012 +
10013 + /*
10014 + * If there is just one buffer and we are removing it there is nothing
10015 +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
10016 +index b13936dacc78..f2a7f72f7aa6 100644
10017 +--- a/drivers/iio/magnetometer/ak8975.c
10018 ++++ b/drivers/iio/magnetometer/ak8975.c
10019 +@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
10020 + int rc;
10021 + int irq;
10022 +
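++ /* initialize this state before the IRQ is requested so an early interrupt finds it valid */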
10023 ++ init_waitqueue_head(&data->data_ready_queue);
10024 ++ clear_bit(0, &data->flags);
10025 + if (client->irq)
10026 + irq = client->irq;
10027 + else
10028 +@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
10029 + return rc;
10030 + }
10031 +
10032 +- init_waitqueue_head(&data->data_ready_queue);
10033 +- clear_bit(0, &data->flags);
10034 + data->eoc_irq = irq;
10035 +
10036 + return rc;
10037 +@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
10038 + int eoc_gpio;
10039 + int err;
10040 + const char *name = NULL;
10041 +- enum asahi_compass_chipset chipset;
10042 ++ enum asahi_compass_chipset chipset = AK_MAX_TYPE;
10043 +
10044 + /* Grab and set up the supplied GPIO. */
10045 + if (client->dev.platform_data)
10046 +diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
10047 +index 06a4d9c35581..9daca4681922 100644
10048 +--- a/drivers/iio/magnetometer/st_magn.h
10049 ++++ b/drivers/iio/magnetometer/st_magn.h
10050 +@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
10051 + static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
10052 + {
10053 + }
10054 ++#define ST_MAGN_TRIGGER_SET_STATE NULL
10055 + #endif /* CONFIG_IIO_BUFFER */
10056 +
10057 + #endif /* ST_MAGN_H */
10058 +diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
10059 +index 53343ffbff7a..1b109b2a235e 100644
10060 +--- a/drivers/infiniband/core/cache.c
10061 ++++ b/drivers/infiniband/core/cache.c
10062 +@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
10063 + NULL);
10064 +
10065 + /* Couldn't find default GID location */
10066 +- WARN_ON(ix < 0);
10067 ++ if (WARN_ON(ix < 0))
10068 ++ goto release;
10069 +
10070 + zattr_type.gid_type = gid_type;
10071 +
10072 +diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
10073 +index 6b4e8a008bc0..564adf3116e8 100644
10074 +--- a/drivers/infiniband/core/ucm.c
10075 ++++ b/drivers/infiniband/core/ucm.c
10076 +@@ -48,6 +48,7 @@
10077 +
10078 + #include <asm/uaccess.h>
10079 +
10080 ++#include <rdma/ib.h>
10081 + #include <rdma/ib_cm.h>
10082 + #include <rdma/ib_user_cm.h>
10083 + #include <rdma/ib_marshall.h>
10084 +@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
10085 + struct ib_ucm_cmd_hdr hdr;
10086 + ssize_t result;
10087 +
10088 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
10089 ++ return -EACCES;
10090 ++
10091 + if (len < sizeof(hdr))
10092 + return -EINVAL;
10093 +
10094 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
10095 +index 8b5a934e1133..886f61ea6cc7 100644
10096 +--- a/drivers/infiniband/core/ucma.c
10097 ++++ b/drivers/infiniband/core/ucma.c
10098 +@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
10099 + struct rdma_ucm_cmd_hdr hdr;
10100 + ssize_t ret;
10101 +
10102 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
10103 ++ return -EACCES;
10104 ++
10105 + if (len < sizeof(hdr))
10106 + return -EINVAL;
10107 +
10108 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
10109 +index 39680aed99dd..d3fb8aa46c59 100644
10110 +--- a/drivers/infiniband/core/uverbs_main.c
10111 ++++ b/drivers/infiniband/core/uverbs_main.c
10112 +@@ -48,6 +48,8 @@
10113 +
10114 + #include <asm/uaccess.h>
10115 +
10116 ++#include <rdma/ib.h>
10117 ++
10118 + #include "uverbs.h"
10119 +
10120 + MODULE_AUTHOR("Roland Dreier");
10121 +@@ -693,6 +695,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
10122 + int srcu_key;
10123 + ssize_t ret;
10124 +
10125 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
10126 ++ return -EACCES;
10127 ++
10128 + if (count < sizeof hdr)
10129 + return -EINVAL;
10130 +
10131 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
10132 +index cf21df4a8bf5..4e94cff5ba71 100644
10133 +--- a/drivers/infiniband/hw/cxgb4/cq.c
10134 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
10135 +@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
10136 + cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
10137 + &cq->bar2_qid,
10138 + user ? &cq->bar2_pa : NULL);
10139 +- if (user && !cq->bar2_va) {
10140 ++ if (user && !cq->bar2_pa) {
10141 + pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
10142 + pci_name(rdev->lldi.pdev), cq->cqid);
10143 + ret = -EINVAL;
10144 +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
10145 +index e99345eb875a..8ff690bf09d9 100644
10146 +--- a/drivers/infiniband/hw/cxgb4/qp.c
10147 ++++ b/drivers/infiniband/hw/cxgb4/qp.c
10148 +@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
10149 +
10150 + if (pbar2_pa)
10151 + *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
10152 ++
10153 ++ if (is_t4(rdev->lldi.adapter_type))
10154 ++ return NULL;
10155 ++
10156 + return rdev->bar2_kva + bar2_qoffset;
10157 + }
10158 +
10159 +@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
10160 + /*
10161 + * User mode must have bar2 access.
10162 + */
10163 +- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
10164 ++ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
10165 + pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
10166 + pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
10167 + goto free_dma;
10168 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
10169 +index 03c418ccbc98..eaed31d04468 100644
10170 +--- a/drivers/infiniband/hw/mlx5/main.c
10171 ++++ b/drivers/infiniband/hw/mlx5/main.c
10172 +@@ -517,7 +517,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
10173 + sizeof(struct mlx5_wqe_ctrl_seg)) /
10174 + sizeof(struct mlx5_wqe_data_seg);
10175 + props->max_sge = min(max_rq_sg, max_sq_sg);
10176 +- props->max_sge_rd = props->max_sge;
10177 ++ props->max_sge_rd = MLX5_MAX_SGE_RD;
10178 + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
10179 + props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
10180 + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
10181 +@@ -654,8 +654,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
10182 + struct mlx5_ib_dev *dev = to_mdev(ibdev);
10183 + struct mlx5_core_dev *mdev = dev->mdev;
10184 + struct mlx5_hca_vport_context *rep;
10185 +- int max_mtu;
10186 +- int oper_mtu;
10187 ++ u16 max_mtu;
10188 ++ u16 oper_mtu;
10189 + int err;
10190 + u8 ib_link_width_oper;
10191 + u8 vl_hw_cap;
10192 +diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
10193 +index e449e394963f..24f4a782e0f4 100644
10194 +--- a/drivers/infiniband/hw/qib/qib_file_ops.c
10195 ++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
10196 +@@ -45,6 +45,8 @@
10197 + #include <linux/export.h>
10198 + #include <linux/uio.h>
10199 +
10200 ++#include <rdma/ib.h>
10201 ++
10202 + #include "qib.h"
10203 + #include "qib_common.h"
10204 + #include "qib_user_sdma.h"
10205 +@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
10206 + ssize_t ret = 0;
10207 + void *dest;
10208 +
10209 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
10210 ++ return -EACCES;
10211 ++
10212 + if (count < sizeof(cmd.type)) {
10213 + ret = -EINVAL;
10214 + goto bail;
10215 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
10216 +index f121e6129339..0e1a802c3618 100644
10217 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
10218 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
10219 +@@ -65,6 +65,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
10220 + struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
10221 +
10222 + static void isert_release_work(struct work_struct *work);
10223 ++static void isert_wait4flush(struct isert_conn *isert_conn);
10224 +
10225 + static inline bool
10226 + isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
10227 +@@ -820,12 +821,31 @@ isert_put_conn(struct isert_conn *isert_conn)
10228 + kref_put(&isert_conn->kref, isert_release_kref);
10229 + }
10230 +
10231 ++static void
10232 ++isert_handle_unbound_conn(struct isert_conn *isert_conn)
10233 ++{
10234 ++ struct isert_np *isert_np = isert_conn->cm_id->context;
10235 ++
10236 ++ mutex_lock(&isert_np->mutex);
10237 ++ if (!list_empty(&isert_conn->node)) {
10238 ++ /*
10239 ++ * This means the iscsi layer doesn't know about this connection,
10240 ++ * so schedule a cleanup ourselves.
10241 ++ */
10242 ++ list_del_init(&isert_conn->node);
10243 ++ isert_put_conn(isert_conn);
10244 ++ complete(&isert_conn->wait);
10245 ++ queue_work(isert_release_wq, &isert_conn->release_work);
10246 ++ }
10247 ++ mutex_unlock(&isert_np->mutex);
10248 ++}
10249 ++
10250 + /**
10251 + * isert_conn_terminate() - Initiate connection termination
10252 + * @isert_conn: isert connection struct
10253 + *
10254 + * Notes:
10255 +- * In case the connection state is FULL_FEATURE, move state
10256 ++ * In case the connection state is BOUND, move state
10257 ++ * to TERMINATING and start the teardown sequence (rdma_disconnect).
10258 + * In case the connection state is UP, complete flush as well.
10259 + *
10260 +@@ -837,23 +857,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
10261 + {
10262 + int err;
10263 +
10264 +- switch (isert_conn->state) {
10265 +- case ISER_CONN_TERMINATING:
10266 +- break;
10267 +- case ISER_CONN_UP:
10268 +- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
10269 +- isert_info("Terminating conn %p state %d\n",
10270 +- isert_conn, isert_conn->state);
10271 +- isert_conn->state = ISER_CONN_TERMINATING;
10272 +- err = rdma_disconnect(isert_conn->cm_id);
10273 +- if (err)
10274 +- isert_warn("Failed rdma_disconnect isert_conn %p\n",
10275 +- isert_conn);
10276 +- break;
10277 +- default:
10278 +- isert_warn("conn %p teminating in state %d\n",
10279 +- isert_conn, isert_conn->state);
10280 +- }
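++ /* nothing to do if teardown has already started */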
10281 ++ if (isert_conn->state >= ISER_CONN_TERMINATING)
10282 ++ return;
10283 ++
10284 ++ isert_info("Terminating conn %p state %d\n",
10285 ++ isert_conn, isert_conn->state);
10286 ++ isert_conn->state = ISER_CONN_TERMINATING;
10287 ++ err = rdma_disconnect(isert_conn->cm_id);
10288 ++ if (err)
10289 ++ isert_warn("Failed rdma_disconnect isert_conn %p\n",
10290 ++ isert_conn);
10291 ++
10292 ++ isert_info("conn %p completing wait\n", isert_conn);
10293 ++ complete(&isert_conn->wait);
10294 + }
10295 +
10296 + static int
10297 +@@ -887,35 +903,27 @@ static int
10298 + isert_disconnected_handler(struct rdma_cm_id *cma_id,
10299 + enum rdma_cm_event_type event)
10300 + {
10301 +- struct isert_np *isert_np = cma_id->context;
10302 +- struct isert_conn *isert_conn;
10303 +- bool terminating = false;
10304 +-
10305 +- if (isert_np->cm_id == cma_id)
10306 +- return isert_np_cma_handler(cma_id->context, event);
10307 +-
10308 +- isert_conn = cma_id->qp->qp_context;
10309 ++ struct isert_conn *isert_conn = cma_id->qp->qp_context;
10310 +
10311 + mutex_lock(&isert_conn->mutex);
10312 +- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
10313 +- isert_conn_terminate(isert_conn);
10314 +- mutex_unlock(&isert_conn->mutex);
10315 +-
10316 +- isert_info("conn %p completing wait\n", isert_conn);
10317 +- complete(&isert_conn->wait);
10318 +-
10319 +- if (terminating)
10320 +- goto out;
10321 +-
10322 +- mutex_lock(&isert_np->mutex);
10323 +- if (!list_empty(&isert_conn->node)) {
10324 +- list_del_init(&isert_conn->node);
10325 +- isert_put_conn(isert_conn);
10326 +- queue_work(isert_release_wq, &isert_conn->release_work);
10327 ++ switch (isert_conn->state) {
10328 ++ case ISER_CONN_TERMINATING:
10329 ++ break;
10330 ++ case ISER_CONN_UP:
10331 ++ isert_conn_terminate(isert_conn);
10332 ++ isert_wait4flush(isert_conn);
10333 ++ isert_handle_unbound_conn(isert_conn);
10334 ++ break;
10335 ++ case ISER_CONN_BOUND:
10336 ++ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
10337 ++ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
10338 ++ break;
10339 ++ default:
10340 ++ isert_warn("conn %p terminating in state %d\n",
10341 ++ isert_conn, isert_conn->state);
10342 + }
10343 +- mutex_unlock(&isert_np->mutex);
10344 ++ mutex_unlock(&isert_conn->mutex);
10345 +
10346 +-out:
10347 + return 0;
10348 + }
10349 +
10350 +@@ -934,12 +942,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
10351 + static int
10352 + isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
10353 + {
10354 ++ struct isert_np *isert_np = cma_id->context;
10355 + int ret = 0;
10356 +
10357 + isert_info("%s (%d): status %d id %p np %p\n",
10358 + rdma_event_msg(event->event), event->event,
10359 + event->status, cma_id, cma_id->context);
10360 +
10361 ++ if (isert_np->cm_id == cma_id)
10362 ++ return isert_np_cma_handler(cma_id->context, event->event);
10363 ++
10364 + switch (event->event) {
10365 + case RDMA_CM_EVENT_CONNECT_REQUEST:
10366 + ret = isert_connect_request(cma_id, event);
10367 +@@ -985,13 +997,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
10368 + rx_wr--;
10369 + rx_wr->next = NULL; /* mark end of work requests list */
10370 +
10371 +- isert_conn->post_recv_buf_count += count;
10372 + ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
10373 + &rx_wr_failed);
10374 +- if (ret) {
10375 ++ if (ret)
10376 + isert_err("ib_post_recv() failed with ret: %d\n", ret);
10377 +- isert_conn->post_recv_buf_count -= count;
10378 +- }
10379 +
10380 + return ret;
10381 + }
10382 +@@ -1007,12 +1016,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
10383 + rx_wr.num_sge = 1;
10384 + rx_wr.next = NULL;
10385 +
10386 +- isert_conn->post_recv_buf_count++;
10387 + ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
10388 +- if (ret) {
10389 ++ if (ret)
10390 + isert_err("ib_post_recv() failed with ret: %d\n", ret);
10391 +- isert_conn->post_recv_buf_count--;
10392 +- }
10393 +
10394 + return ret;
10395 + }
10396 +@@ -1132,12 +1138,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
10397 + rx_wr.sg_list = &sge;
10398 + rx_wr.num_sge = 1;
10399 +
10400 +- isert_conn->post_recv_buf_count++;
10401 + ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
10402 +- if (ret) {
10403 ++ if (ret)
10404 + isert_err("ib_post_recv() failed: %d\n", ret);
10405 +- isert_conn->post_recv_buf_count--;
10406 +- }
10407 +
10408 + return ret;
10409 + }
10410 +@@ -1633,7 +1636,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
10411 + ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
10412 + DMA_FROM_DEVICE);
10413 +
10414 +- isert_conn->post_recv_buf_count--;
10415 + }
10416 +
10417 + static int
10418 +@@ -2048,7 +2050,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
10419 + void *start = isert_conn->rx_descs;
10420 + int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
10421 +
10422 +- if (wr_id >= start && wr_id < start + len)
10423 ++ if ((wr_id >= start && wr_id < start + len) ||
10424 ++ (wr_id == isert_conn->login_req_buf))
10425 + return false;
10426 +
10427 + return true;
10428 +@@ -2072,10 +2075,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
10429 + isert_unmap_tx_desc(desc, ib_dev);
10430 + else
10431 + isert_completion_put(desc, isert_cmd, ib_dev, true);
10432 +- } else {
10433 +- isert_conn->post_recv_buf_count--;
10434 +- if (!isert_conn->post_recv_buf_count)
10435 +- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
10436 + }
10437 + }
10438 +
10439 +@@ -3214,6 +3213,7 @@ accept_wait:
10440 +
10441 + conn->context = isert_conn;
10442 + isert_conn->conn = conn;
10443 ++ isert_conn->state = ISER_CONN_BOUND;
10444 +
10445 + isert_set_conn_info(np, conn, isert_conn);
10446 +
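
Two details make the ib_isert.c rewrite above safe. First, isert_conn_terminate() collapses the old switch into one ordered comparison, which works only because every live state sorts below ISER_CONN_TERMINATING in the enum. Second, the new ISER_CONN_BOUND value (see the ib_isert.h hunk just below) is slotted between UP and FULL_FEATURE, preserving that partition. A small sketch of the ordered-enum guard, with abbreviated state names for illustration:

#include <stdio.h>

/* Mirrors the ordering in ib_isert.h after this patch: every state that
 * may still terminate sorts below TERMINATING, so one comparison suffices. */
enum conn_state {
	CONN_INIT,
	CONN_UP,
	CONN_BOUND,		/* new state slots in before FULL_FEATURE */
	CONN_FULL_FEATURE,
	CONN_TERMINATING,
	CONN_DOWN,
};

static void terminate(enum conn_state *state)
{
	if (*state >= CONN_TERMINATING)
		return;			/* already tearing down */
	*state = CONN_TERMINATING;	/* single transition point */
	printf("disconnect issued\n");
}

int main(void)
{
	enum conn_state s = CONN_BOUND;
	terminate(&s);	/* prints once */
	terminate(&s);	/* idempotent: the guard short-circuits */
	return 0;
}
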
10447 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
10448 +index 8d50453eef66..1aa019ab9d78 100644
10449 +--- a/drivers/infiniband/ulp/isert/ib_isert.h
10450 ++++ b/drivers/infiniband/ulp/isert/ib_isert.h
10451 +@@ -84,6 +84,7 @@ enum iser_ib_op_code {
10452 + enum iser_conn_state {
10453 + ISER_CONN_INIT,
10454 + ISER_CONN_UP,
10455 ++ ISER_CONN_BOUND,
10456 + ISER_CONN_FULL_FEATURE,
10457 + ISER_CONN_TERMINATING,
10458 + ISER_CONN_DOWN,
10459 +@@ -179,7 +180,6 @@ struct isert_device;
10460 +
10461 + struct isert_conn {
10462 + enum iser_conn_state state;
10463 +- int post_recv_buf_count;
10464 + u32 responder_resources;
10465 + u32 initiator_depth;
10466 + bool pi_support;
10467 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
10468 +index 03022f6420d7..a09841abae50 100644
10469 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
10470 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
10471 +@@ -1541,7 +1541,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
10472 +
10473 + if (dev->use_fast_reg) {
10474 + state.sg = idb_sg;
10475 +- sg_set_buf(idb_sg, req->indirect_desc, idb_len);
10476 ++ sg_init_one(idb_sg, req->indirect_desc, idb_len);
10477 + idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
10478 + #ifdef CONFIG_NEED_SG_DMA_LENGTH
10479 + idb_sg->dma_length = idb_sg->length; /* hack^2 */
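
The single-line srp_map_idb() fix swaps sg_set_buf() for sg_init_one(): the former only fills in the buffer fields of an entry, while the latter first zeroes it and marks it as the terminator of a one-element table, so stale stack contents can no longer masquerade as chain/end markers. A simplified user-space mimic of the distinction — the struct layout and flag bit are invented, as the real scatterlist encodes its markers in the low bits of page_link:

#include <stdio.h>
#include <string.h>

struct toy_sg {
	void *buf;
	unsigned int length;
	unsigned int flags;	/* bit 0: end-of-table marker */
};

#define TOY_SG_END 0x1

static void toy_sg_set_buf(struct toy_sg *sg, void *buf, unsigned int len)
{
	sg->buf = buf;		/* fields only: flags left untouched */
	sg->length = len;
}

static void toy_sg_init_one(struct toy_sg *sg, void *buf, unsigned int len)
{
	memset(sg, 0, sizeof(*sg));	/* clear stale state */
	sg->flags = TOY_SG_END;		/* one-entry table: mark terminator */
	toy_sg_set_buf(sg, buf, len);
}

int main(void)
{
	char data[64];
	struct toy_sg sg = { .flags = 0xdead };	/* garbage, as on a reused stack slot */

	toy_sg_set_buf(&sg, data, sizeof(data));
	printf("set_buf leaves flags: %#x\n", sg.flags);	/* stale 0xdead */

	toy_sg_init_one(&sg, data, sizeof(data));
	printf("init_one sets flags: %#x\n", sg.flags);		/* clean END marker */
	return 0;
}
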
10480 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
10481 +index 0c37fee363b1..4328679a67a7 100644
10482 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
10483 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
10484 +@@ -1670,47 +1670,6 @@ send_sense:
10485 + return -1;
10486 + }
10487 +
10488 +-/**
10489 +- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
10490 +- * @ch: RDMA channel of the task management request.
10491 +- * @fn: Task management function to perform.
10492 +- * @req_tag: Tag of the SRP task management request.
10493 +- * @mgmt_ioctx: I/O context of the task management request.
10494 +- *
10495 +- * Returns zero if the target core will process the task management
10496 +- * request asynchronously.
10497 +- *
10498 +- * Note: It is assumed that the initiator serializes tag-based task management
10499 +- * requests.
10500 +- */
10501 +-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
10502 +-{
10503 +- struct srpt_device *sdev;
10504 +- struct srpt_rdma_ch *ch;
10505 +- struct srpt_send_ioctx *target;
10506 +- int ret, i;
10507 +-
10508 +- ret = -EINVAL;
10509 +- ch = ioctx->ch;
10510 +- BUG_ON(!ch);
10511 +- BUG_ON(!ch->sport);
10512 +- sdev = ch->sport->sdev;
10513 +- BUG_ON(!sdev);
10514 +- spin_lock_irq(&sdev->spinlock);
10515 +- for (i = 0; i < ch->rq_size; ++i) {
10516 +- target = ch->ioctx_ring[i];
10517 +- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
10518 +- target->cmd.tag == tag &&
10519 +- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
10520 +- ret = 0;
10521 +- /* now let the target core abort &target->cmd; */
10522 +- break;
10523 +- }
10524 +- }
10525 +- spin_unlock_irq(&sdev->spinlock);
10526 +- return ret;
10527 +-}
10528 +-
10529 + static int srp_tmr_to_tcm(int fn)
10530 + {
10531 + switch (fn) {
10532 +@@ -1745,7 +1704,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
10533 + struct se_cmd *cmd;
10534 + struct se_session *sess = ch->sess;
10535 + uint64_t unpacked_lun;
10536 +- uint32_t tag = 0;
10537 + int tcm_tmr;
10538 + int rc;
10539 +
10540 +@@ -1761,25 +1719,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
10541 + srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
10542 + send_ioctx->cmd.tag = srp_tsk->tag;
10543 + tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
10544 +- if (tcm_tmr < 0) {
10545 +- send_ioctx->cmd.se_tmr_req->response =
10546 +- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
10547 +- goto fail;
10548 +- }
10549 + unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
10550 + sizeof(srp_tsk->lun));
10551 +-
10552 +- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
10553 +- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
10554 +- if (rc < 0) {
10555 +- send_ioctx->cmd.se_tmr_req->response =
10556 +- TMR_TASK_DOES_NOT_EXIST;
10557 +- goto fail;
10558 +- }
10559 +- tag = srp_tsk->task_tag;
10560 +- }
10561 + rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
10562 +- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
10563 ++ srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
10564 + TARGET_SCF_ACK_KREF);
10565 + if (rc != 0) {
10566 + send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
10567 +diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
10568 +index cfd58e87da26..1c5914cae853 100644
10569 +--- a/drivers/input/misc/ati_remote2.c
10570 ++++ b/drivers/input/misc/ati_remote2.c
10571 +@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
10572 +
10573 + ar2->udev = udev;
10574 +
10575 ++ /* Sanity check, first interface must have an endpoint */
10576 ++ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
10577 ++ dev_err(&interface->dev,
10578 ++ "%s(): interface 0 must have an endpoint\n", __func__);
10579 ++ r = -ENODEV;
10580 ++ goto fail1;
10581 ++ }
10582 + ar2->intf[0] = interface;
10583 + ar2->ep[0] = &alt->endpoint[0].desc;
10584 +
10585 ++ /* Sanity check, the device must have two interfaces */
10586 + ar2->intf[1] = usb_ifnum_to_if(udev, 1);
10587 ++ if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
10588 ++ dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
10589 ++ __func__, udev->actconfig->desc.bNumInterfaces);
10590 ++ r = -ENODEV;
10591 ++ goto fail1;
10592 ++ }
10593 ++
10594 + r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
10595 + if (r)
10596 + goto fail1;
10597 ++
10598 ++ /* Sanity check, second interface must have an endpoint */
10599 + alt = ar2->intf[1]->cur_altsetting;
10600 ++ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
10601 ++ dev_err(&interface->dev,
10602 ++ "%s(): interface 1 must have an endpoint\n", __func__);
10603 ++ r = -ENODEV;
10604 ++ goto fail2;
10605 ++ }
10606 + ar2->ep[1] = &alt->endpoint[0].desc;
10607 +
10608 + r = ati_remote2_urb_init(ar2);
10609 + if (r)
10610 +- goto fail2;
10611 ++ goto fail3;
10612 +
10613 + ar2->channel_mask = channel_mask;
10614 + ar2->mode_mask = mode_mask;
10615 +
10616 + r = ati_remote2_setup(ar2, ar2->channel_mask);
10617 + if (r)
10618 +- goto fail2;
10619 ++ goto fail3;
10620 +
10621 + usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
10622 + strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
10623 +@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
10624 +
10625 + r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
10626 + if (r)
10627 +- goto fail2;
10628 ++ goto fail3;
10629 +
10630 + r = ati_remote2_input_init(ar2);
10631 + if (r)
10632 +- goto fail3;
10633 ++ goto fail4;
10634 +
10635 + usb_set_intfdata(interface, ar2);
10636 +
10637 +@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
10638 +
10639 + return 0;
10640 +
10641 +- fail3:
10642 ++ fail4:
10643 + sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
10644 +- fail2:
10645 ++ fail3:
10646 + ati_remote2_urb_cleanup(ar2);
10647 ++ fail2:
10648 + usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
10649 + fail1:
10650 + kfree(ar2);
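
Every ati_remote2 hunk above follows the same defensive shape: check that the descriptor actually advertises the interfaces and endpoints about to be dereferenced, and renumber the error labels so each new failure path unwinds exactly what has been set up so far. A compact sketch of the check-before-index pattern, with simplified stand-ins for the USB descriptor structs:

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

struct toy_endpoint { int address; };

struct toy_altsetting {
	int num_endpoints;		/* bNumEndpoints stand-in */
	struct toy_endpoint *endpoint;	/* may be NULL on a hostile device */
};

/* Return the first endpoint, or NULL if the descriptor lies about it.
 * Mirrors the bNumEndpoints < 1 || !alt->endpoint test in the patch. */
static struct toy_endpoint *first_endpoint(struct toy_altsetting *alt)
{
	if (alt->num_endpoints < 1 || !alt->endpoint)
		return NULL;
	return &alt->endpoint[0];
}

int main(void)
{
	struct toy_endpoint ep = { .address = 0x81 };
	struct toy_altsetting good = { .num_endpoints = 1, .endpoint = &ep };
	struct toy_altsetting bad  = { .num_endpoints = 0, .endpoint = NULL };

	printf("good: %p\n", (void *)first_endpoint(&good));
	printf("bad:  %p (driver would return -ENODEV: %d)\n",
	       (void *)first_endpoint(&bad), -ENODEV);
	return 0;
}
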
10651 +diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
10652 +index ac1fa5f44580..9c0ea36913b4 100644
10653 +--- a/drivers/input/misc/ims-pcu.c
10654 ++++ b/drivers/input/misc/ims-pcu.c
10655 +@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
10656 +
10657 + pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
10658 + union_desc->bMasterInterface0);
10659 ++ if (!pcu->ctrl_intf)
10660 ++ return -EINVAL;
10661 +
10662 + alt = pcu->ctrl_intf->cur_altsetting;
10663 + pcu->ep_ctrl = &alt->endpoint[0].desc;
10664 +@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
10665 +
10666 + pcu->data_intf = usb_ifnum_to_if(pcu->udev,
10667 + union_desc->bSlaveInterface0);
10668 ++ if (!pcu->data_intf)
10669 ++ return -EINVAL;
10670 +
10671 + alt = pcu->data_intf->cur_altsetting;
10672 + if (alt->desc.bNumEndpoints != 2) {
10673 +diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
10674 +index a806ba3818f7..8d6326d7e7be 100644
10675 +--- a/drivers/input/misc/max8997_haptic.c
10676 ++++ b/drivers/input/misc/max8997_haptic.c
10677 +@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
10678 + struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
10679 + const struct max8997_platform_data *pdata =
10680 + dev_get_platdata(iodev->dev);
10681 +- const struct max8997_haptic_platform_data *haptic_pdata =
10682 +- pdata->haptic_pdata;
10683 ++ const struct max8997_haptic_platform_data *haptic_pdata = NULL;
10684 + struct max8997_haptic *chip;
10685 + struct input_dev *input_dev;
10686 + int error;
10687 +
10688 ++ if (pdata)
10689 ++ haptic_pdata = pdata->haptic_pdata;
10690 ++
10691 + if (!haptic_pdata) {
10692 + dev_err(&pdev->dev, "no haptic platform data\n");
10693 + return -EINVAL;
10694 +diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
10695 +index 3f02e0e03d12..67aab86048ad 100644
10696 +--- a/drivers/input/misc/pmic8xxx-pwrkey.c
10697 ++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
10698 +@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
10699 + if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
10700 + kpd_delay = 15625;
10701 +
10702 +- if (kpd_delay > 62500 || kpd_delay == 0) {
10703 ++ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
10704 ++ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
10705 + dev_err(&pdev->dev, "invalid power key trigger delay\n");
10706 + return -EINVAL;
10707 + }
10708 +@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
10709 + pwr->name = "pmic8xxx_pwrkey";
10710 + pwr->phys = "pmic8xxx_pwrkey/input0";
10711 +
10712 +- delay = (kpd_delay << 10) / USEC_PER_SEC;
10713 +- delay = 1 + ilog2(delay);
10714 ++ delay = (kpd_delay << 6) / USEC_PER_SEC;
10715 ++ delay = ilog2(delay);
10716 +
10717 + err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
10718 + if (err < 0) {
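
The pmic8xxx-pwrkey arithmetic is easiest to verify numerically: the debounce register holds log2 of the delay measured in 1/64-second units, so the fixed code converts microseconds with (kpd_delay << 6) / USEC_PER_SEC and takes ilog2 of the result, and the new 1/64 s..2 s bounds map cleanly onto register values 0..7. A runnable check of that conversion (ilog2 reimplemented here since it is a kernel helper):

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

/* ilog2 for positive inputs: index of the highest set bit. */
static unsigned int ilog2(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Delays in microseconds spanning the new valid range,
	 * 1/64 s (15625 us) to 2 s. */
	unsigned long samples[] = { 15625, 31250, 500000, 2000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long us = samples[i];
		unsigned long units = (us << 6) / USEC_PER_SEC; /* 1/64 s units */

		printf("%8lu us -> %3lu/64 s -> register value %u\n",
		       us, units, ilog2(units));
	}
	return 0;	/* prints register values 0, 1, 5, 7 */
}
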
10719 +diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
10720 +index 63b539d3daba..84909a12ff36 100644
10721 +--- a/drivers/input/misc/powermate.c
10722 ++++ b/drivers/input/misc/powermate.c
10723 +@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
10724 + int error = -ENOMEM;
10725 +
10726 + interface = intf->cur_altsetting;
10727 ++ if (interface->desc.bNumEndpoints < 1)
10728 ++ return -EINVAL;
10729 ++
10730 + endpoint = &interface->endpoint[0].desc;
10731 + if (!usb_endpoint_is_int_in(endpoint))
10732 + return -EIO;
10733 +diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
10734 +index f2261ab54701..18663d4edae5 100644
10735 +--- a/drivers/input/misc/pwm-beeper.c
10736 ++++ b/drivers/input/misc/pwm-beeper.c
10737 +@@ -20,21 +20,40 @@
10738 + #include <linux/platform_device.h>
10739 + #include <linux/pwm.h>
10740 + #include <linux/slab.h>
10741 ++#include <linux/workqueue.h>
10742 +
10743 + struct pwm_beeper {
10744 + struct input_dev *input;
10745 + struct pwm_device *pwm;
10746 ++ struct work_struct work;
10747 + unsigned long period;
10748 + };
10749 +
10750 + #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
10751 +
10752 ++static void __pwm_beeper_set(struct pwm_beeper *beeper)
10753 ++{
10754 ++ unsigned long period = beeper->period;
10755 ++
10756 ++ if (period) {
10757 ++ pwm_config(beeper->pwm, period / 2, period);
10758 ++ pwm_enable(beeper->pwm);
10759 ++ } else
10760 ++ pwm_disable(beeper->pwm);
10761 ++}
10762 ++
10763 ++static void pwm_beeper_work(struct work_struct *work)
10764 ++{
10765 ++ struct pwm_beeper *beeper =
10766 ++ container_of(work, struct pwm_beeper, work);
10767 ++
10768 ++ __pwm_beeper_set(beeper);
10769 ++}
10770 ++
10771 + static int pwm_beeper_event(struct input_dev *input,
10772 + unsigned int type, unsigned int code, int value)
10773 + {
10774 +- int ret = 0;
10775 + struct pwm_beeper *beeper = input_get_drvdata(input);
10776 +- unsigned long period;
10777 +
10778 + if (type != EV_SND || value < 0)
10779 + return -EINVAL;
10780 +@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
10781 + return -EINVAL;
10782 + }
10783 +
10784 +- if (value == 0) {
10785 +- pwm_disable(beeper->pwm);
10786 +- } else {
10787 +- period = HZ_TO_NANOSECONDS(value);
10788 +- ret = pwm_config(beeper->pwm, period / 2, period);
10789 +- if (ret)
10790 +- return ret;
10791 +- ret = pwm_enable(beeper->pwm);
10792 +- if (ret)
10793 +- return ret;
10794 +- beeper->period = period;
10795 +- }
10796 ++ if (value == 0)
10797 ++ beeper->period = 0;
10798 ++ else
10799 ++ beeper->period = HZ_TO_NANOSECONDS(value);
10800 ++
10801 ++ schedule_work(&beeper->work);
10802 +
10803 + return 0;
10804 + }
10805 +
10806 ++static void pwm_beeper_stop(struct pwm_beeper *beeper)
10807 ++{
10808 ++ cancel_work_sync(&beeper->work);
10809 ++
10810 ++ if (beeper->period)
10811 ++ pwm_disable(beeper->pwm);
10812 ++}
10813 ++
10814 ++static void pwm_beeper_close(struct input_dev *input)
10815 ++{
10816 ++ struct pwm_beeper *beeper = input_get_drvdata(input);
10817 ++
10818 ++ pwm_beeper_stop(beeper);
10819 ++}
10820 ++
10821 + static int pwm_beeper_probe(struct platform_device *pdev)
10822 + {
10823 + unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
10824 +@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
10825 + goto err_free;
10826 + }
10827 +
10828 ++ INIT_WORK(&beeper->work, pwm_beeper_work);
10829 ++
10830 + beeper->input = input_allocate_device();
10831 + if (!beeper->input) {
10832 + dev_err(&pdev->dev, "Failed to allocate input device\n");
10833 +@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
10834 + beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
10835 +
10836 + beeper->input->event = pwm_beeper_event;
10837 ++ beeper->input->close = pwm_beeper_close;
10838 +
10839 + input_set_drvdata(beeper->input, beeper);
10840 +
10841 +@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
10842 +
10843 + input_unregister_device(beeper->input);
10844 +
10845 +- pwm_disable(beeper->pwm);
10846 + pwm_free(beeper->pwm);
10847 +
10848 + kfree(beeper);
10849 +@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
10850 + {
10851 + struct pwm_beeper *beeper = dev_get_drvdata(dev);
10852 +
10853 +- if (beeper->period)
10854 +- pwm_disable(beeper->pwm);
10855 ++ pwm_beeper_stop(beeper);
10856 +
10857 + return 0;
10858 + }
10859 +@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
10860 + {
10861 + struct pwm_beeper *beeper = dev_get_drvdata(dev);
10862 +
10863 +- if (beeper->period) {
10864 +- pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
10865 +- pwm_enable(beeper->pwm);
10866 +- }
10867 ++ if (beeper->period)
10868 ++ __pwm_beeper_set(beeper);
10869 +
10870 + return 0;
10871 + }
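
The pwm-beeper rework moves pwm_config()/pwm_enable() out of the input event callback, which may run in atomic context, into a work item that runs in process context where the PWM calls are allowed to sleep; the event handler now only records the requested period and calls schedule_work(), and the close/suspend paths cancel the work before disabling the PWM. A rough pthread-based analogue of the defer-to-worker shape — the threading plumbing below is a user-space stand-in for the kernel workqueue, and like the driver it applies only the most recently requested period:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned long period;	/* latest requested period, 0 = off */
static bool pending, stop;

/* Stand-in for the sleepable pwm_config()/pwm_disable() calls. */
static void apply_period(unsigned long p)
{
	if (p)
		printf("worker: pwm_config(%lu/2, %lu); pwm_enable()\n", p, p);
	else
		printf("worker: pwm_disable()\n");
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!pending && !stop)
			pthread_cond_wait(&cond, &lock);
		if (pending) {
			unsigned long p = period;	/* latest wins */

			pending = false;
			pthread_mutex_unlock(&lock);
			apply_period(p);	/* may sleep: off the fast path */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Analogue of pwm_beeper_event(): cheap, never sleeps. */
static void event(unsigned long new_period)
{
	pthread_mutex_lock(&lock);
	period = new_period;
	pending = true;			/* schedule_work() equivalent */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	event(1000000000UL / 440);	/* 440 Hz beep */
	event(0);			/* silence */
	usleep(100000);
	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
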
10872 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
10873 +index 6025eb430c0a..a41d8328c064 100644
10874 +--- a/drivers/input/mouse/synaptics.c
10875 ++++ b/drivers/input/mouse/synaptics.c
10876 +@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
10877 + if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
10878 + return;
10879 +
10880 +- /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
10881 +- if (SYN_ID_FULL(priv->identity) == 0x801 &&
10882 ++ /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
10883 ++ if ((SYN_ID_FULL(priv->identity) == 0x801 ||
10884 ++ SYN_ID_FULL(priv->identity) == 0x802) &&
10885 + !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
10886 + return;
10887 +
10888 +diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
10889 +index 3a7f3a4a4396..7c18249d6c8e 100644
10890 +--- a/drivers/input/tablet/gtco.c
10891 ++++ b/drivers/input/tablet/gtco.c
10892 +@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
10893 + goto err_free_buf;
10894 + }
10895 +
10896 ++ /* Sanity check that a device has an endpoint */
10897 ++ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
10898 ++ dev_err(&usbinterface->dev,
10899 ++ "Invalid number of endpoints\n");
10900 ++ error = -EINVAL;
10901 ++ goto err_free_urb;
10902 ++ }
10903 ++
10904 + /*
10905 + * The endpoint is always altsetting 0, we know this since we know
10906 + * this device only has one interrupt endpoint
10907 +@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
10908 + * HID report descriptor
10909 + */
10910 + if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
10911 +- HID_DEVICE_TYPE, &hid_desc) != 0){
10912 ++ HID_DEVICE_TYPE, &hid_desc) != 0) {
10913 + dev_err(&usbinterface->dev,
10914 + "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
10915 + "Can't retrieve extra USB descriptor to get hid report descriptor length\n");
10916 +diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
10917 +index 9bbadaaf6bc3..7b3845aa5983 100644
10918 +--- a/drivers/input/touchscreen/zforce_ts.c
10919 ++++ b/drivers/input/touchscreen/zforce_ts.c
10920 +@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
10921 + point.coord_x = point.coord_y = 0;
10922 + }
10923 +
10924 +- point.state = payload[9 * i + 5] & 0x03;
10925 +- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
10926 ++ point.state = payload[9 * i + 5] & 0x0f;
10927 ++ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
10928 +
10929 + /* determine touch major, minor and orientation */
10930 + point.area_major = max(payload[9 * i + 6],
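
The zforce_ts change widens the touch state field from two bits to four and moves the contact id to the high nibble; with the old 0x03/0xfc masks, id bits leaked into the state on firmware that uses the wider encoding. A tiny decoder contrasting both layouts on the same sample byte (the byte value is invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned char b = 0x24;	/* sample: id 2 in high nibble, state 4 in low */

	/* old layout: 2-bit state, 6-bit id — misreads this byte */
	printf("old: state=%u id=%u\n", b & 0x03, (b & 0xfc) >> 2);

	/* new layout: 4-bit state, 4-bit id */
	printf("new: state=%u id=%u\n", b & 0x0f, (b & 0xf0) >> 4);
	return 0;
}
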
10931 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
10932 +index 374c129219ef..5efadad4615b 100644
10933 +--- a/drivers/iommu/amd_iommu.c
10934 ++++ b/drivers/iommu/amd_iommu.c
10935 +@@ -92,6 +92,7 @@ struct iommu_dev_data {
10936 + struct list_head dev_data_list; /* For global dev_data_list */
10937 + struct protection_domain *domain; /* Domain the device is bound to */
10938 + u16 devid; /* PCI Device ID */
10939 ++ u16 alias; /* Alias Device ID */
10940 + bool iommu_v2; /* Device can make use of IOMMUv2 */
10941 + bool passthrough; /* Device is identity mapped */
10942 + struct {
10943 +@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
10944 + return container_of(dom, struct protection_domain, domain);
10945 + }
10946 +
10947 ++static inline u16 get_device_id(struct device *dev)
10948 ++{
10949 ++ struct pci_dev *pdev = to_pci_dev(dev);
10950 ++
10951 ++ return PCI_DEVID(pdev->bus->number, pdev->devfn);
10952 ++}
10953 ++
10954 + static struct iommu_dev_data *alloc_dev_data(u16 devid)
10955 + {
10956 + struct iommu_dev_data *dev_data;
10957 +@@ -203,6 +211,68 @@ out_unlock:
10958 + return dev_data;
10959 + }
10960 +
10961 ++static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
10962 ++{
10963 ++ *(u16 *)data = alias;
10964 ++ return 0;
10965 ++}
10966 ++
10967 ++static u16 get_alias(struct device *dev)
10968 ++{
10969 ++ struct pci_dev *pdev = to_pci_dev(dev);
10970 ++ u16 devid, ivrs_alias, pci_alias;
10971 ++
10972 ++ devid = get_device_id(dev);
10973 ++ ivrs_alias = amd_iommu_alias_table[devid];
10974 ++ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
10975 ++
10976 ++ if (ivrs_alias == pci_alias)
10977 ++ return ivrs_alias;
10978 ++
10979 ++ /*
10980 ++ * DMA alias showdown
10981 ++ *
10982 ++ * The IVRS is fairly reliable in telling us about aliases, but it
10983 ++ * can't know about every screwy device. If we don't have an IVRS
10984 ++ * reported alias, use the PCI reported alias. In that case we may
10985 ++ * still need to initialize the rlookup and dev_table entries if the
10986 ++ * alias is to a non-existent device.
10987 ++ */
10988 ++ if (ivrs_alias == devid) {
10989 ++ if (!amd_iommu_rlookup_table[pci_alias]) {
10990 ++ amd_iommu_rlookup_table[pci_alias] =
10991 ++ amd_iommu_rlookup_table[devid];
10992 ++ memcpy(amd_iommu_dev_table[pci_alias].data,
10993 ++ amd_iommu_dev_table[devid].data,
10994 ++ sizeof(amd_iommu_dev_table[pci_alias].data));
10995 ++ }
10996 ++
10997 ++ return pci_alias;
10998 ++ }
10999 ++
11000 ++ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
11001 ++ "for device %s[%04x:%04x], kernel reported alias "
11002 ++ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
11003 ++ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
11004 ++ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
11005 ++ PCI_FUNC(pci_alias));
11006 ++
11007 ++ /*
11008 ++ * If we don't have a PCI DMA alias and the IVRS alias is on the same
11009 ++ * bus, then the IVRS table may know about a quirk that we don't.
11010 ++ */
11011 ++ if (pci_alias == devid &&
11012 ++ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
11013 ++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
11014 ++ pdev->dma_alias_devfn = ivrs_alias & 0xff;
11015 ++ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
11016 ++ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
11017 ++ dev_name(dev));
11018 ++ }
11019 ++
11020 ++ return ivrs_alias;
11021 ++}
11022 ++
11023 + static struct iommu_dev_data *find_dev_data(u16 devid)
11024 + {
11025 + struct iommu_dev_data *dev_data;
11026 +@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
11027 + return dev_data;
11028 + }
11029 +
11030 +-static inline u16 get_device_id(struct device *dev)
11031 +-{
11032 +- struct pci_dev *pdev = to_pci_dev(dev);
11033 +-
11034 +- return PCI_DEVID(pdev->bus->number, pdev->devfn);
11035 +-}
11036 +-
11037 + static struct iommu_dev_data *get_dev_data(struct device *dev)
11038 + {
11039 + return dev->archdata.iommu;
11040 +@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
11041 + if (!dev_data)
11042 + return -ENOMEM;
11043 +
11044 ++ dev_data->alias = get_alias(dev);
11045 ++
11046 + if (pci_iommuv2_capable(pdev)) {
11047 + struct amd_iommu *iommu;
11048 +
11049 +@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
11050 + u16 devid, alias;
11051 +
11052 + devid = get_device_id(dev);
11053 +- alias = amd_iommu_alias_table[devid];
11054 ++ alias = get_alias(dev);
11055 +
11056 + memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
11057 + memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
11058 +@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
11059 + int ret;
11060 +
11061 + iommu = amd_iommu_rlookup_table[dev_data->devid];
11062 +- alias = amd_iommu_alias_table[dev_data->devid];
11063 ++ alias = dev_data->alias;
11064 +
11065 + ret = iommu_flush_dte(iommu, dev_data->devid);
11066 + if (!ret && alias != dev_data->devid)
11067 +@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
11068 + bool ats;
11069 +
11070 + iommu = amd_iommu_rlookup_table[dev_data->devid];
11071 +- alias = amd_iommu_alias_table[dev_data->devid];
11072 ++ alias = dev_data->alias;
11073 + ats = dev_data->ats.enabled;
11074 +
11075 + /* Update data structures */
11076 +@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
11077 + return;
11078 +
11079 + iommu = amd_iommu_rlookup_table[dev_data->devid];
11080 +- alias = amd_iommu_alias_table[dev_data->devid];
11081 ++ alias = dev_data->alias;
11082 +
11083 + /* decrease reference counters */
11084 + dev_data->domain->dev_iommu[iommu->index] -= 1;
11085 +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
11086 +index 72d6182666cb..58f2fe687a24 100644
11087 +--- a/drivers/iommu/dma-iommu.c
11088 ++++ b/drivers/iommu/dma-iommu.c
11089 +@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
11090 + unsigned int s_length = sg_dma_len(s);
11091 + unsigned int s_dma_len = s->length;
11092 +
11093 +- s->offset = s_offset;
11094 ++ s->offset += s_offset;
11095 + s->length = s_length;
11096 + sg_dma_address(s) = dma_addr + s_offset;
11097 + dma_addr += s_dma_len;
11098 +@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
11099 +
11100 + for_each_sg(sg, s, nents, i) {
11101 + if (sg_dma_address(s) != DMA_ERROR_CODE)
11102 +- s->offset = sg_dma_address(s);
11103 ++ s->offset += sg_dma_address(s);
11104 + if (sg_dma_len(s))
11105 + s->length = sg_dma_len(s);
11106 + sg_dma_address(s) = DMA_ERROR_CODE;
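
Both dma-iommu hunks change "=" to "+=" because the mapping code stashes only part of each entry's original offset, trimming s->offset accordingly; restoring must therefore add the stashed component back to what remains rather than overwrite it. The arithmetic in miniature, with invented numbers:

#include <stdio.h>

int main(void)
{
	/* An sg entry whose buffer starts 128 bytes into its page. */
	unsigned int offset = 128;

	/* During mapping, part of the offset is stashed elsewhere and
	 * s->offset is trimmed; on restore the stash must be added back. */
	unsigned int stashed = 64;

	offset -= stashed;			/* what mapping left behind: 64 */

	unsigned int buggy = stashed;		/* "=": original 128 is lost */
	unsigned int fixed = offset + stashed;	/* "+=": back to 128 */

	printf("buggy=%u fixed=%u\n", buggy, fixed);
	return 0;
}
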
11107 +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
11108 +index 0e3b0092ec92..515bb8b80952 100644
11109 +--- a/drivers/iommu/iommu.c
11110 ++++ b/drivers/iommu/iommu.c
11111 +@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
11112 + if (!group->default_domain) {
11113 + group->default_domain = __iommu_domain_alloc(dev->bus,
11114 + IOMMU_DOMAIN_DMA);
11115 +- group->domain = group->default_domain;
11116 ++ if (!group->domain)
11117 ++ group->domain = group->default_domain;
11118 + }
11119 +
11120 + ret = iommu_group_add_device(group, dev);
11121 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
11122 +index d7be6ddc34f6..2fc499a2207e 100644
11123 +--- a/drivers/irqchip/irq-gic-v3.c
11124 ++++ b/drivers/irqchip/irq-gic-v3.c
11125 +@@ -361,6 +361,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
11126 + if (static_key_true(&supports_deactivate))
11127 + gic_write_dir(irqnr);
11128 + #ifdef CONFIG_SMP
11129 ++ /*
11130 ++ * Unlike GICv2, we don't need an smp_rmb() here.
11131 ++ * The control dependency from gic_read_iar to
11132 ++ * the ISB in gic_write_eoir is enough to ensure
11133 ++ * that any shared data read by handle_IPI will
11134 ++ * be read after the ACK.
11135 ++ */
11136 + handle_IPI(irqnr, regs);
11137 + #else
11138 + WARN_ONCE(true, "Unexpected SGI received!\n");
11139 +@@ -380,6 +387,15 @@ static void __init gic_dist_init(void)
11140 + writel_relaxed(0, base + GICD_CTLR);
11141 + gic_dist_wait_for_rwp();
11142 +
11143 ++ /*
11144 ++ * Configure SPIs as non-secure Group-1. This will only matter
11145 ++ * if the GIC only has a single security state. This will not
11146 ++ * do the right thing if the kernel is running in secure mode,
11147 ++ * but that's not the intended use case anyway.
11148 ++ */
11149 ++ for (i = 32; i < gic_data.irq_nr; i += 32)
11150 ++ writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
11151 ++
11152 + gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
11153 +
11154 + /* Enable distributor with ARE, Group1 */
11155 +@@ -494,6 +510,9 @@ static void gic_cpu_init(void)
11156 +
11157 + rbase = gic_data_rdist_sgi_base();
11158 +
11159 ++ /* Configure SGIs/PPIs as non-secure Group-1 */
11160 ++ writel_relaxed(~0, rbase + GICR_IGROUPR0);
11161 ++
11162 + gic_cpu_config(rbase, gic_redist_wait_for_rwp);
11163 +
11164 + /* Give LPIs a spin */
11165 +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
11166 +index 8f9ebf714e2b..eef950046ac0 100644
11167 +--- a/drivers/irqchip/irq-gic.c
11168 ++++ b/drivers/irqchip/irq-gic.c
11169 +@@ -344,6 +344,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
11170 + if (static_key_true(&supports_deactivate))
11171 + writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
11172 + #ifdef CONFIG_SMP
11173 ++ /*
11174 ++ * Ensure any shared data written by the CPU sending
11175 ++ * the IPI is read after we've read the ACK register
11176 ++ * on the GIC.
11177 ++ *
11178 ++ * Pairs with the write barrier in gic_raise_softirq
11179 ++ */
11180 ++ smp_rmb();
11181 + handle_IPI(irqnr, regs);
11182 + #endif
11183 + continue;
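
The GICv2 comment added above pairs an smp_rmb() in the IPI receive path with the write barrier in gic_raise_softirq(): the sender writes shared data, issues the barrier, then rings the doorbell; the receiver reads the doorbell (the ACK register), issues the barrier, then reads the data. On GICv3 the control dependency plus the ISB in gic_write_eoir provides the same ordering, hence the comment-only change there. A user-space rendering of the pairing with C11 fences, where an atomic flag stands in for the ACK register:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int shared_data;		/* payload written before the IPI */
static atomic_int doorbell;	/* stands in for the GIC ACK path */

static void *sender(void *arg)
{
	(void)arg;
	shared_data = 42;				/* 1: write the data */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() analogue */
	atomic_store_explicit(&doorbell, 1, memory_order_relaxed); /* 2: ring */
	return NULL;
}

static void *receiver(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&doorbell, memory_order_relaxed))
		;					/* 3: poll the "ACK" */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() analogue */
	printf("data after ack: %d\n", shared_data);	/* 4: guaranteed 42 */
	return NULL;
}

int main(void)
{
	pthread_t s, r;

	pthread_create(&r, NULL, receiver, NULL);
	pthread_create(&s, NULL, sender, NULL);
	pthread_join(s, NULL);
	pthread_join(r, NULL);
	return 0;
}
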
11184 +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
11185 +index efe50845939d..17304705f2cf 100644
11186 +--- a/drivers/irqchip/irq-mxs.c
11187 ++++ b/drivers/irqchip/irq-mxs.c
11188 +@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
11189 + void __iomem *icoll_base;
11190 +
11191 + icoll_base = of_io_request_and_map(np, 0, np->name);
11192 +- if (!icoll_base)
11193 ++ if (IS_ERR(icoll_base))
11194 + panic("%s: unable to map resource", np->full_name);
11195 + return icoll_base;
11196 + }
11197 +diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
11198 +index 0820f67cc9a7..668730c5cb66 100644
11199 +--- a/drivers/irqchip/irq-sunxi-nmi.c
11200 ++++ b/drivers/irqchip/irq-sunxi-nmi.c
11201 +@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
11202 +
11203 + gc = irq_get_domain_generic_chip(domain, 0);
11204 + gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
11205 +- if (!gc->reg_base) {
11206 ++ if (IS_ERR(gc->reg_base)) {
11207 + pr_err("unable to map resource\n");
11208 +- ret = -ENOMEM;
11209 ++ ret = PTR_ERR(gc->reg_base);
11210 + goto fail_irqd_remove;
11211 + }
11212 +
11213 +diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
11214 +index 307db1ea22de..b7ddfb352792 100644
11215 +--- a/drivers/lightnvm/rrpc.c
11216 ++++ b/drivers/lightnvm/rrpc.c
11217 +@@ -499,12 +499,21 @@ static void rrpc_gc_queue(struct work_struct *work)
11218 + struct rrpc *rrpc = gcb->rrpc;
11219 + struct rrpc_block *rblk = gcb->rblk;
11220 + struct nvm_lun *lun = rblk->parent->lun;
11221 ++ struct nvm_block *blk = rblk->parent;
11222 + struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
11223 +
11224 + spin_lock(&rlun->lock);
11225 + list_add_tail(&rblk->prio, &rlun->prio_list);
11226 + spin_unlock(&rlun->lock);
11227 +
11228 ++ spin_lock(&lun->lock);
11229 ++ lun->nr_open_blocks--;
11230 ++ lun->nr_closed_blocks++;
11231 ++ blk->state &= ~NVM_BLK_ST_OPEN;
11232 ++ blk->state |= NVM_BLK_ST_CLOSED;
11233 ++ list_move_tail(&rblk->list, &rlun->closed_list);
11234 ++ spin_unlock(&lun->lock);
11235 ++
11236 + mempool_free(gcb, rrpc->gcb_pool);
11237 + pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
11238 + rblk->parent->id);
11239 +@@ -668,20 +677,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
11240 + lun = rblk->parent->lun;
11241 +
11242 + cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
11243 +- if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
11244 +- struct nvm_block *blk = rblk->parent;
11245 +- struct rrpc_lun *rlun = rblk->rlun;
11246 +-
11247 +- spin_lock(&lun->lock);
11248 +- lun->nr_open_blocks--;
11249 +- lun->nr_closed_blocks++;
11250 +- blk->state &= ~NVM_BLK_ST_OPEN;
11251 +- blk->state |= NVM_BLK_ST_CLOSED;
11252 +- list_move_tail(&rblk->list, &rlun->closed_list);
11253 +- spin_unlock(&lun->lock);
11254 +-
11255 ++ if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
11256 + rrpc_run_gc(rrpc, rblk);
11257 +- }
11258 + }
11259 + }
11260 +
11261 +diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
11262 +index 004926955263..b0155b05cddb 100644
11263 +--- a/drivers/mcb/mcb-parse.c
11264 ++++ b/drivers/mcb/mcb-parse.c
11265 +@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
11266 + mdev->id = GDD_DEV(reg1);
11267 + mdev->rev = GDD_REV(reg1);
11268 + mdev->var = GDD_VAR(reg1);
11269 +- mdev->bar = GDD_BAR(reg1);
11270 ++ mdev->bar = GDD_BAR(reg2);
11271 + mdev->group = GDD_GRP(reg2);
11272 + mdev->inst = GDD_INS(reg2);
11273 +
11274 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
11275 +index 8d0ead98eb6e..a296425a7270 100644
11276 +--- a/drivers/md/bcache/super.c
11277 ++++ b/drivers/md/bcache/super.c
11278 +@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
11279 + */
11280 + atomic_set(&dc->count, 1);
11281 +
11282 +- if (bch_cached_dev_writeback_start(dc))
11283 ++ /* Block writeback thread, but spawn it */
11284 ++ down_write(&dc->writeback_lock);
11285 ++ if (bch_cached_dev_writeback_start(dc)) {
11286 ++ up_write(&dc->writeback_lock);
11287 + return -ENOMEM;
11288 ++ }
11289 +
11290 + if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
11291 + bch_sectors_dirty_init(dc);
11292 +@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
11293 + bch_cached_dev_run(dc);
11294 + bcache_device_link(&dc->disk, c, "bdev");
11295 +
11296 ++ /* Allow the writeback thread to proceed */
11297 ++ up_write(&dc->writeback_lock);
11298 ++
11299 + pr_info("Caching %s as %s on set %pU",
11300 + bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
11301 + dc->disk.c->sb.set_uuid);
11302 +@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
11303 + struct btree *b;
11304 + unsigned i;
11305 +
11306 ++ if (!c)
11307 ++ closure_return(cl);
11308 ++
11309 + bch_cache_accounting_destroy(&c->accounting);
11310 +
11311 + kobject_put(&c->internal);
11312 +@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
11313 + return 0;
11314 + }
11315 +
11316 +-static void register_cache(struct cache_sb *sb, struct page *sb_page,
11317 ++static int register_cache(struct cache_sb *sb, struct page *sb_page,
11318 + struct block_device *bdev, struct cache *ca)
11319 + {
11320 + char name[BDEVNAME_SIZE];
11321 +- const char *err = "cannot allocate memory";
11322 ++ const char *err = NULL;
11323 ++ int ret = 0;
11324 +
11325 + memcpy(&ca->sb, sb, sizeof(struct cache_sb));
11326 + ca->bdev = bdev;
11327 +@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
11328 + if (blk_queue_discard(bdev_get_queue(ca->bdev)))
11329 + ca->discard = CACHE_DISCARD(&ca->sb);
11330 +
11331 +- if (cache_alloc(sb, ca) != 0)
11332 ++ ret = cache_alloc(sb, ca);
11333 ++ if (ret != 0)
11334 + goto err;
11335 +
11336 +- err = "error creating kobject";
11337 +- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
11338 +- goto err;
11339 ++ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
11340 ++ err = "error calling kobject_add";
11341 ++ ret = -ENOMEM;
11342 ++ goto out;
11343 ++ }
11344 +
11345 + mutex_lock(&bch_register_lock);
11346 + err = register_cache_set(ca);
11347 + mutex_unlock(&bch_register_lock);
11348 +
11349 +- if (err)
11350 +- goto err;
11351 ++ if (err) {
11352 ++ ret = -ENODEV;
11353 ++ goto out;
11354 ++ }
11355 +
11356 + pr_info("registered cache device %s", bdevname(bdev, name));
11357 ++
11358 + out:
11359 + kobject_put(&ca->kobj);
11360 +- return;
11361 ++
11362 + err:
11363 +- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
11364 +- goto out;
11365 ++ if (err)
11366 ++ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
11367 ++
11368 ++ return ret;
11369 + }
11370 +
11371 + /* Global interfaces/init */
11372 +@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
11373 + if (!ca)
11374 + goto err_close;
11375 +
11376 +- register_cache(sb, sb_page, bdev, ca);
11377 ++ if (register_cache(sb, sb_page, bdev, ca) != 0)
11378 ++ goto err_close;
11379 + }
11380 + out:
11381 + if (sb_page)
11382 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
11383 +index f6543f3a970f..3970cda10080 100644
11384 +--- a/drivers/md/dm-cache-metadata.c
11385 ++++ b/drivers/md/dm-cache-metadata.c
11386 +@@ -867,18 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
11387 + return 0;
11388 + }
11389 +
11390 +-#define WRITE_LOCK(cmd) \
11391 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
11392 +- return -EINVAL; \
11393 +- down_write(&cmd->root_lock)
11394 ++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
11395 ++{
11396 ++ down_write(&cmd->root_lock);
11397 ++ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
11398 ++ up_write(&cmd->root_lock);
11399 ++ return false;
11400 ++ }
11401 ++ return true;
11402 ++}
11403 ++
11404 ++#define WRITE_LOCK(cmd) \
11405 ++ do { \
11406 ++ if (!cmd_write_lock((cmd))) \
11407 ++ return -EINVAL; \
11408 ++ } while(0)
11409 +
11410 +-#define WRITE_LOCK_VOID(cmd) \
11411 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
11412 +- return; \
11413 +- down_write(&cmd->root_lock)
11414 ++#define WRITE_LOCK_VOID(cmd) \
11415 ++ do { \
11416 ++ if (!cmd_write_lock((cmd))) \
11417 ++ return; \
11418 ++ } while(0)
11419 +
11420 + #define WRITE_UNLOCK(cmd) \
11421 +- up_write(&cmd->root_lock)
11422 ++ up_write(&(cmd)->root_lock)
11423 ++
11424 ++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
11425 ++{
11426 ++ down_read(&cmd->root_lock);
11427 ++ if (cmd->fail_io) {
11428 ++ up_read(&cmd->root_lock);
11429 ++ return false;
11430 ++ }
11431 ++ return true;
11432 ++}
11433 ++
11434 ++#define READ_LOCK(cmd) \
11435 ++ do { \
11436 ++ if (!cmd_read_lock((cmd))) \
11437 ++ return -EINVAL; \
11438 ++ } while(0)
11439 ++
11440 ++#define READ_LOCK_VOID(cmd) \
11441 ++ do { \
11442 ++ if (!cmd_read_lock((cmd))) \
11443 ++ return; \
11444 ++ } while(0)
11445 ++
11446 ++#define READ_UNLOCK(cmd) \
11447 ++ up_read(&(cmd)->root_lock)
11448 +
11449 + int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
11450 + {
11451 +@@ -1015,22 +1052,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
11452 + {
11453 + int r;
11454 +
11455 +- down_read(&cmd->root_lock);
11456 ++ READ_LOCK(cmd);
11457 + r = __load_discards(cmd, fn, context);
11458 +- up_read(&cmd->root_lock);
11459 ++ READ_UNLOCK(cmd);
11460 +
11461 + return r;
11462 + }
11463 +
11464 +-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
11465 ++int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
11466 + {
11467 +- dm_cblock_t r;
11468 +-
11469 +- down_read(&cmd->root_lock);
11470 +- r = cmd->cache_blocks;
11471 +- up_read(&cmd->root_lock);
11472 ++ READ_LOCK(cmd);
11473 ++ *result = cmd->cache_blocks;
11474 ++ READ_UNLOCK(cmd);
11475 +
11476 +- return r;
11477 ++ return 0;
11478 + }
11479 +
11480 + static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
11481 +@@ -1188,9 +1223,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
11482 + {
11483 + int r;
11484 +
11485 +- down_read(&cmd->root_lock);
11486 ++ READ_LOCK(cmd);
11487 + r = __load_mappings(cmd, policy, fn, context);
11488 +- up_read(&cmd->root_lock);
11489 ++ READ_UNLOCK(cmd);
11490 +
11491 + return r;
11492 + }
11493 +@@ -1215,18 +1250,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
11494 +
11495 + void dm_cache_dump(struct dm_cache_metadata *cmd)
11496 + {
11497 +- down_read(&cmd->root_lock);
11498 ++ READ_LOCK_VOID(cmd);
11499 + __dump_mappings(cmd);
11500 +- up_read(&cmd->root_lock);
11501 ++ READ_UNLOCK(cmd);
11502 + }
11503 +
11504 + int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
11505 + {
11506 + int r;
11507 +
11508 +- down_read(&cmd->root_lock);
11509 ++ READ_LOCK(cmd);
11510 + r = cmd->changed;
11511 +- up_read(&cmd->root_lock);
11512 ++ READ_UNLOCK(cmd);
11513 +
11514 + return r;
11515 + }
11516 +@@ -1276,9 +1311,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
11517 + void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
11518 + struct dm_cache_statistics *stats)
11519 + {
11520 +- down_read(&cmd->root_lock);
11521 ++ READ_LOCK_VOID(cmd);
11522 + *stats = cmd->stats;
11523 +- up_read(&cmd->root_lock);
11524 ++ READ_UNLOCK(cmd);
11525 + }
11526 +
11527 + void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
11528 +@@ -1312,9 +1347,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
11529 + {
11530 + int r = -EINVAL;
11531 +
11532 +- down_read(&cmd->root_lock);
11533 ++ READ_LOCK(cmd);
11534 + r = dm_sm_get_nr_free(cmd->metadata_sm, result);
11535 +- up_read(&cmd->root_lock);
11536 ++ READ_UNLOCK(cmd);
11537 +
11538 + return r;
11539 + }
11540 +@@ -1324,9 +1359,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
11541 + {
11542 + int r = -EINVAL;
11543 +
11544 +- down_read(&cmd->root_lock);
11545 ++ READ_LOCK(cmd);
11546 + r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
11547 +- up_read(&cmd->root_lock);
11548 ++ READ_UNLOCK(cmd);
11549 +
11550 + return r;
11551 + }
11552 +@@ -1417,7 +1452,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
11553 +
11554 + int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
11555 + {
11556 +- return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
11557 ++ int r;
11558 ++
11559 ++ READ_LOCK(cmd);
11560 ++ r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
11561 ++ READ_UNLOCK(cmd);
11562 ++
11563 ++ return r;
11564 + }
11565 +
11566 + void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
11567 +@@ -1440,10 +1481,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
11568 + struct dm_block *sblock;
11569 + struct cache_disk_superblock *disk_super;
11570 +
11571 +- /*
11572 +- * We ignore fail_io for this function.
11573 +- */
11574 +- down_write(&cmd->root_lock);
11575 ++ WRITE_LOCK(cmd);
11576 + set_bit(NEEDS_CHECK, &cmd->flags);
11577 +
11578 + r = superblock_lock(cmd, &sblock);
11579 +@@ -1458,19 +1496,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
11580 + dm_bm_unlock(sblock);
11581 +
11582 + out:
11583 +- up_write(&cmd->root_lock);
11584 ++ WRITE_UNLOCK(cmd);
11585 + return r;
11586 + }
11587 +
11588 +-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
11589 ++int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
11590 + {
11591 +- bool needs_check;
11592 ++ READ_LOCK(cmd);
11593 ++ *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
11594 ++ READ_UNLOCK(cmd);
11595 +
11596 +- down_read(&cmd->root_lock);
11597 +- needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
11598 +- up_read(&cmd->root_lock);
11599 +-
11600 +- return needs_check;
11601 ++ return 0;
11602 + }
11603 +
11604 + int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
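
The dm-cache-metadata lock macros fix two problems at once: the old WRITE_LOCK() tested fail_io before taking the rwsem, leaving a window for the flag to flip, whereas cmd_write_lock() tests it under the lock and backs out; and wrapping the expansion in do { ... } while (0) gives the macro single-statement semantics so it behaves inside an unbraced if/else. A small demonstration of both points, with a pthread rwlock standing in for the kernel rwsem and an illustrative macro name:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t root_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool fail_io;

/* Take the lock first, then test the failure flag under it, exactly as
 * cmd_write_lock() does; back out if the metadata has failed. */
static bool write_lock(void)
{
	pthread_rwlock_wrlock(&root_lock);
	if (fail_io) {
		pthread_rwlock_unlock(&root_lock);
		return false;
	}
	return true;
}

/* Single statement after expansion: safe in any unbraced if/else. */
#define WRITE_LOCK_OR_RETURN(rv)	\
	do {				\
		if (!write_lock())	\
			return (rv);	\
	} while (0)

static int update_metadata(bool want_update)
{
	if (want_update)
		WRITE_LOCK_OR_RETURN(-1);	/* no dangling-else hazard */
	else
		return 0;

	/* ... mutate metadata under the write lock ... */
	pthread_rwlock_unlock(&root_lock);
	return 1;
}

int main(void)
{
	printf("%d\n", update_metadata(true));	/* 1 */
	fail_io = true;
	printf("%d\n", update_metadata(true));	/* -1 */
	return 0;
}
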
11605 +diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
11606 +index 2ffee21f318d..8528744195e5 100644
11607 +--- a/drivers/md/dm-cache-metadata.h
11608 ++++ b/drivers/md/dm-cache-metadata.h
11609 +@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
11610 + * origin blocks to map to.
11611 + */
11612 + int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
11613 +-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
11614 ++int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
11615 +
11616 + int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
11617 + sector_t discard_block_size,
11618 +@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
11619 + */
11620 + int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
11621 +
11622 +-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
11623 ++int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
11624 + int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
11625 + void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
11626 + void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
11627 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
11628 +index 5780accffa30..bb9b92ebbf8e 100644
11629 +--- a/drivers/md/dm-cache-target.c
11630 ++++ b/drivers/md/dm-cache-target.c
11631 +@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
11632 +
11633 + static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
11634 + {
11635 +- bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
11636 ++ bool needs_check;
11637 + enum cache_metadata_mode old_mode = get_cache_mode(cache);
11638 +
11639 ++ if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
11640 ++ DMERR("unable to read needs_check flag, setting failure mode");
11641 ++ new_mode = CM_FAIL;
11642 ++ }
11643 ++
11644 + if (new_mode == CM_WRITE && needs_check) {
11645 + DMERR("%s: unable to switch cache to write mode until repaired.",
11646 + cache_device_name(cache));
11647 +@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
11648 + char buf[BDEVNAME_SIZE];
11649 + struct cache *cache = ti->private;
11650 + dm_cblock_t residency;
11651 ++ bool needs_check;
11652 +
11653 + switch (type) {
11654 + case STATUSTYPE_INFO:
11655 +@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
11656 + else
11657 + DMEMIT("rw ");
11658 +
11659 +- if (dm_cache_metadata_needs_check(cache->cmd))
11660 ++ r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
11661 ++
11662 ++ if (r || needs_check)
11663 + DMEMIT("needs_check ");
11664 + else
11665 + DMEMIT("- ");
11666 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
11667 +index 3766386080a4..e4d1bafe78c1 100644
11668 +--- a/drivers/md/dm-snap.c
11669 ++++ b/drivers/md/dm-snap.c
11670 +@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
11671 + int i;
11672 + int r = -EINVAL;
11673 + char *origin_path, *cow_path;
11674 ++ dev_t origin_dev, cow_dev;
11675 + unsigned args_used, num_flush_bios = 1;
11676 + fmode_t origin_mode = FMODE_READ;
11677 +
11678 +@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
11679 + ti->error = "Cannot get origin device";
11680 + goto bad_origin;
11681 + }
11682 ++ origin_dev = s->origin->bdev->bd_dev;
11683 +
11684 + cow_path = argv[0];
11685 + argv++;
11686 + argc--;
11687 +
11688 ++ cow_dev = dm_get_dev_t(cow_path);
11689 ++ if (cow_dev && cow_dev == origin_dev) {
11690 ++ ti->error = "COW device cannot be the same as origin device";
11691 ++ r = -EINVAL;
11692 ++ goto bad_cow;
11693 ++ }
11694 ++
11695 + r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
11696 + if (r) {
11697 + ti->error = "Cannot get COW device";
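
snapshot_ctr() now resolves the COW path to a dev_t up front and refuses a snapshot whose COW device is its own origin, a configuration that previously failed only after activation. Device identity compares as a dev_t; a user-space sketch can mirror that with stat(2) — the paths below are purely illustrative, and the helper loosely follows dm_get_dev_t() from the dm-table.c hunk that comes next:

#include <stdio.h>
#include <sys/stat.h>

/* Return the device number backing a path, or 0 on failure. */
static dev_t get_dev_t(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return 0;
	/* block device node: the device it names; else the backing device */
	return S_ISBLK(st.st_mode) ? st.st_rdev : st.st_dev;
}

int main(void)
{
	dev_t origin = get_dev_t("/dev/sda");	/* illustrative paths */
	dev_t cow = get_dev_t("/dev/sda");

	if (cow && cow == origin)
		fprintf(stderr, "COW device cannot be the same as origin device\n");
	return 0;
}
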
11698 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
11699 +index 061152a43730..cb5d0daf53bb 100644
11700 +--- a/drivers/md/dm-table.c
11701 ++++ b/drivers/md/dm-table.c
11702 +@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
11703 + }
11704 +
11705 + /*
11706 ++ * Convert the path to a device
11707 ++ */
11708 ++dev_t dm_get_dev_t(const char *path)
11709 ++{
11710 ++ dev_t uninitialized_var(dev);
11711 ++ struct block_device *bdev;
11712 ++
11713 ++ bdev = lookup_bdev(path);
11714 ++ if (IS_ERR(bdev))
11715 ++ dev = name_to_dev_t(path);
11716 ++ else {
11717 ++ dev = bdev->bd_dev;
11718 ++ bdput(bdev);
11719 ++ }
11720 ++
11721 ++ return dev;
11722 ++}
11723 ++EXPORT_SYMBOL_GPL(dm_get_dev_t);
11724 ++
11725 ++/*
11726 + * Add a device to the list, or just increment the usage count if
11727 + * it's already present.
11728 + */
11729 +@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
11730 + struct dm_dev **result)
11731 + {
11732 + int r;
11733 +- dev_t uninitialized_var(dev);
11734 ++ dev_t dev;
11735 + struct dm_dev_internal *dd;
11736 + struct dm_table *t = ti->table;
11737 +- struct block_device *bdev;
11738 +
11739 + BUG_ON(!t);
11740 +
11741 +- /* convert the path to a device */
11742 +- bdev = lookup_bdev(path);
11743 +- if (IS_ERR(bdev)) {
11744 +- dev = name_to_dev_t(path);
11745 +- if (!dev)
11746 +- return -ENODEV;
11747 +- } else {
11748 +- dev = bdev->bd_dev;
11749 +- bdput(bdev);
11750 +- }
11751 ++ dev = dm_get_dev_t(path);
11752 ++ if (!dev)
11753 ++ return -ENODEV;
11754 +
11755 + dd = find_device(&t->devices, dev);
11756 + if (!dd) {
11757 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
11758 +index f962d6453afd..185010d9cccc 100644
11759 +--- a/drivers/md/dm-thin-metadata.c
11760 ++++ b/drivers/md/dm-thin-metadata.c
11761 +@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
11762 +
11763 + void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
11764 + {
11765 +- dm_tm_issue_prefetches(pmd->tm);
11766 ++ down_read(&pmd->root_lock);
11767 ++ if (!pmd->fail_io)
11768 ++ dm_tm_issue_prefetches(pmd->tm);
11769 ++ up_read(&pmd->root_lock);
11770 + }
11771 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
11772 +index dd834927bc66..c338aebb4ccd 100644
11773 +--- a/drivers/md/dm.c
11774 ++++ b/drivers/md/dm.c
11775 +@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
11776 + * back into ->request_fn() could deadlock attempting to grab the
11777 + * queue lock again.
11778 + */
11779 +- if (run_queue) {
11780 +- if (md->queue->mq_ops)
11781 +- blk_mq_run_hw_queues(md->queue, true);
11782 +- else
11783 +- blk_run_queue_async(md->queue);
11784 +- }
11785 ++ if (!md->queue->mq_ops && run_queue)
11786 ++ blk_run_queue_async(md->queue);
11787 +
11788 + /*
11789 + * dm_put() must be at the end of this function. See the comment above
11790 +@@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
11791 + {
11792 + int rw = rq_data_dir(rq);
11793 +
11794 ++ rq_end_stats(md, rq);
11795 + dm_unprep_request(rq);
11796 +
11797 +- rq_end_stats(md, rq);
11798 + if (!rq->q->mq_ops)
11799 + old_requeue_request(rq);
11800 + else {
11801 +@@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
11802 + struct dm_rq_target_io *tio = tio_from_request(rq);
11803 +
11804 + tio->error = error;
11805 +- blk_complete_request(rq);
11806 ++ if (!rq->q->mq_ops)
11807 ++ blk_complete_request(rq);
11808 ++ else
11809 ++ blk_mq_complete_request(rq, error);
11810 + }
11811 +
11812 + /*
11813 +diff --git a/drivers/md/md.c b/drivers/md/md.c
11814 +index e55e6cf9ec17..7551278030d8 100644
11815 +--- a/drivers/md/md.c
11816 ++++ b/drivers/md/md.c
11817 +@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
11818 + * go away inside make_request
11819 + */
11820 + sectors = bio_sectors(bio);
11821 ++ /* bio could be mergeable after passing to underlayer */
11822 ++ bio->bi_rw &= ~REQ_NOMERGE;
11823 + mddev->pers->make_request(mddev, bio);
11824 +
11825 + cpu = part_stat_lock();
11826 +diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
11827 +index 0a72ab6e6c20..dd483bb2e111 100644
11828 +--- a/drivers/md/multipath.c
11829 ++++ b/drivers/md/multipath.c
11830 +@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
11831 + }
11832 + multipath = conf->multipaths + mp_bh->path;
11833 +
11834 +- mp_bh->bio = *bio;
11835 ++ bio_init(&mp_bh->bio);
11836 ++ __bio_clone_fast(&mp_bh->bio, bio);
11837 ++
11838 + mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
11839 + mp_bh->bio.bi_bdev = multipath->rdev->bdev;
11840 + mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
11841 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
11842 +index 4e3843f7d245..bb5bce059eb4 100644
11843 +--- a/drivers/md/raid1.c
11844 ++++ b/drivers/md/raid1.c
11845 +@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
11846 + if (fail) {
11847 + spin_lock_irq(&conf->device_lock);
11848 + list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
11849 ++ conf->nr_queued++;
11850 + spin_unlock_irq(&conf->device_lock);
11851 + md_wakeup_thread(conf->mddev->thread);
11852 + } else {
11853 +@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
11854 + LIST_HEAD(tmp);
11855 + spin_lock_irqsave(&conf->device_lock, flags);
11856 + if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
11857 +- list_add(&tmp, &conf->bio_end_io_list);
11858 +- list_del_init(&conf->bio_end_io_list);
11859 ++ while (!list_empty(&conf->bio_end_io_list)) {
11860 ++ list_move(conf->bio_end_io_list.prev, &tmp);
11861 ++ conf->nr_queued--;
11862 ++ }
11863 + }
11864 + spin_unlock_irqrestore(&conf->device_lock, flags);
11865 + while (!list_empty(&tmp)) {
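Bios parked on bio_end_io_list were invisible to the nr_queued accounting, so
freeze_array() could wait forever for I/O that had already completed. The fix
counts them on add and drains them one entry at a time so the counter stays in
step with the list; raid10 gets the identical treatment in the next hunk. The
drain pattern in isolation:

    LIST_HEAD(tmp);

    spin_lock_irqsave(&conf->device_lock, flags);
    if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
            while (!list_empty(&conf->bio_end_io_list)) {
                    /* move the oldest entry; nr_queued tracks the list
                     * exactly while device_lock is held */
                    list_move(conf->bio_end_io_list.prev, &tmp);
                    conf->nr_queued--;
            }
    }
    spin_unlock_irqrestore(&conf->device_lock, flags);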
11866 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
11867 +index 1c1447dd3417..e3fd725d5c4d 100644
11868 +--- a/drivers/md/raid10.c
11869 ++++ b/drivers/md/raid10.c
11870 +@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
11871 + if (fail) {
11872 + spin_lock_irq(&conf->device_lock);
11873 + list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
11874 ++ conf->nr_queued++;
11875 + spin_unlock_irq(&conf->device_lock);
11876 + md_wakeup_thread(conf->mddev->thread);
11877 + } else {
11878 +@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
11879 + LIST_HEAD(tmp);
11880 + spin_lock_irqsave(&conf->device_lock, flags);
11881 + if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
11882 +- list_add(&tmp, &conf->bio_end_io_list);
11883 +- list_del_init(&conf->bio_end_io_list);
11884 ++ while (!list_empty(&conf->bio_end_io_list)) {
11885 ++ list_move(conf->bio_end_io_list.prev, &tmp);
11886 ++ conf->nr_queued--;
11887 ++ }
11888 + }
11889 + spin_unlock_irqrestore(&conf->device_lock, flags);
11890 + while (!list_empty(&tmp)) {
11891 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
11892 +index b4f02c9959f2..32d52878f182 100644
11893 +--- a/drivers/md/raid5.c
11894 ++++ b/drivers/md/raid5.c
11895 +@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
11896 + int hash)
11897 + {
11898 + int size;
11899 +- unsigned long do_wakeup = 0;
11900 +- int i = 0;
11901 ++ bool do_wakeup = false;
11902 + unsigned long flags;
11903 +
11904 + if (hash == NR_STRIPE_HASH_LOCKS) {
11905 +@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
11906 + !list_empty(list))
11907 + atomic_dec(&conf->empty_inactive_list_nr);
11908 + list_splice_tail_init(list, conf->inactive_list + hash);
11909 +- do_wakeup |= 1 << hash;
11910 ++ do_wakeup = true;
11911 + spin_unlock_irqrestore(conf->hash_locks + hash, flags);
11912 + }
11913 + size--;
11914 + hash--;
11915 + }
11916 +
11917 +- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
11918 +- if (do_wakeup & (1 << i))
11919 +- wake_up(&conf->wait_for_stripe[i]);
11920 +- }
11921 +-
11922 + if (do_wakeup) {
11923 ++ wake_up(&conf->wait_for_stripe);
11924 + if (atomic_read(&conf->active_stripes) == 0)
11925 + wake_up(&conf->wait_for_quiescent);
11926 + if (conf->retry_read_aligned)
11927 +@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
11928 + if (!sh) {
11929 + set_bit(R5_INACTIVE_BLOCKED,
11930 + &conf->cache_state);
11931 +- wait_event_exclusive_cmd(
11932 +- conf->wait_for_stripe[hash],
11933 ++ wait_event_lock_irq(
11934 ++ conf->wait_for_stripe,
11935 + !list_empty(conf->inactive_list + hash) &&
11936 + (atomic_read(&conf->active_stripes)
11937 + < (conf->max_nr_stripes * 3 / 4)
11938 + || !test_bit(R5_INACTIVE_BLOCKED,
11939 + &conf->cache_state)),
11940 +- spin_unlock_irq(conf->hash_locks + hash),
11941 +- spin_lock_irq(conf->hash_locks + hash));
11942 ++ *(conf->hash_locks + hash));
11943 + clear_bit(R5_INACTIVE_BLOCKED,
11944 + &conf->cache_state);
11945 + } else {
11946 +@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
11947 + }
11948 + } while (sh == NULL);
11949 +
11950 +- if (!list_empty(conf->inactive_list + hash))
11951 +- wake_up(&conf->wait_for_stripe[hash]);
11952 +-
11953 + spin_unlock_irq(conf->hash_locks + hash);
11954 + return sh;
11955 + }
11956 +@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
11957 + unsigned long cpu;
11958 + int err = 0;
11959 +
11960 ++ /*
11961 ++ * Never shrink. And mddev_suspend() could deadlock if this is called
11962 ++ * from raid5d. In that case, scribble_disks and scribble_sectors
11963 ++	 * should equal new_disks and new_sectors.
11964 ++ */
11965 ++ if (conf->scribble_disks >= new_disks &&
11966 ++ conf->scribble_sectors >= new_sectors)
11967 ++ return 0;
11968 + mddev_suspend(conf->mddev);
11969 + get_online_cpus();
11970 + for_each_present_cpu(cpu) {
11971 +@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
11972 + }
11973 + put_online_cpus();
11974 + mddev_resume(conf->mddev);
11975 ++ if (!err) {
11976 ++ conf->scribble_disks = new_disks;
11977 ++ conf->scribble_sectors = new_sectors;
11978 ++ }
11979 + return err;
11980 + }
11981 +
11982 +@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
11983 + cnt = 0;
11984 + list_for_each_entry(nsh, &newstripes, lru) {
11985 + lock_device_hash_lock(conf, hash);
11986 +- wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
11987 ++ wait_event_cmd(conf->wait_for_stripe,
11988 + !list_empty(conf->inactive_list + hash),
11989 + unlock_device_hash_lock(conf, hash),
11990 + lock_device_hash_lock(conf, hash));
11991 +@@ -4236,7 +4239,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
11992 + WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
11993 + (1 << STRIPE_SYNCING) |
11994 + (1 << STRIPE_REPLACED) |
11995 +- (1 << STRIPE_PREREAD_ACTIVE) |
11996 + (1 << STRIPE_DELAYED) |
11997 + (1 << STRIPE_BIT_DELAY) |
11998 + (1 << STRIPE_FULL_WRITE) |
11999 +@@ -4251,6 +4253,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
12000 + (1 << STRIPE_REPLACED)));
12001 +
12002 + set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
12003 ++ (1 << STRIPE_PREREAD_ACTIVE) |
12004 + (1 << STRIPE_DEGRADED)),
12005 + head_sh->state & (1 << STRIPE_INSYNC));
12006 +
12007 +@@ -6413,6 +6416,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
12008 + }
12009 + put_online_cpus();
12010 +
12011 ++ if (!err) {
12012 ++ conf->scribble_disks = max(conf->raid_disks,
12013 ++ conf->previous_raid_disks);
12014 ++ conf->scribble_sectors = max(conf->chunk_sectors,
12015 ++ conf->prev_chunk_sectors);
12016 ++ }
12017 + return err;
12018 + }
12019 +
12020 +@@ -6503,9 +6512,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
12021 + seqcount_init(&conf->gen_lock);
12022 + mutex_init(&conf->cache_size_mutex);
12023 + init_waitqueue_head(&conf->wait_for_quiescent);
12024 +- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
12025 +- init_waitqueue_head(&conf->wait_for_stripe[i]);
12026 +- }
12027 ++ init_waitqueue_head(&conf->wait_for_stripe);
12028 + init_waitqueue_head(&conf->wait_for_overlap);
12029 + INIT_LIST_HEAD(&conf->handle_list);
12030 + INIT_LIST_HEAD(&conf->hold_list);
12031 +@@ -7014,8 +7021,8 @@ static int raid5_run(struct mddev *mddev)
12032 + }
12033 +
12034 + if (discard_supported &&
12035 +- mddev->queue->limits.max_discard_sectors >= stripe &&
12036 +- mddev->queue->limits.discard_granularity >= stripe)
12037 ++ mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
12038 ++ mddev->queue->limits.discard_granularity >= stripe)
12039 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
12040 + mddev->queue);
12041 + else
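resize_chunks() calls mddev_suspend(), which can deadlock when invoked from
raid5d itself; the fix makes the per-CPU scribble buffers grow-only by caching
the geometry they were last allocated for. The shape of that guard as a
standalone sketch (names hypothetical):

    struct scribble_cache { int disks; int sectors; };

    /* Return early when the existing allocation already covers the
     * request; record the new geometry only after a successful resize. */
    static int resize_grow_only(struct scribble_cache *c,
                                int disks, int sectors,
                                int (*do_resize)(int disks, int sectors))
    {
            int err;

            if (c->disks >= disks && c->sectors >= sectors)
                    return 0;                       /* never shrink */
            err = do_resize(disks, sectors);
            if (!err) {
                    c->disks = disks;
                    c->sectors = sectors;
            }
            return err;
    }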
12042 +diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
12043 +index a415e1cd39b8..517d4b68a1be 100644
12044 +--- a/drivers/md/raid5.h
12045 ++++ b/drivers/md/raid5.h
12046 +@@ -510,6 +510,8 @@ struct r5conf {
12047 + * conversions
12048 + */
12049 + } __percpu *percpu;
12050 ++ int scribble_disks;
12051 ++ int scribble_sectors;
12052 + #ifdef CONFIG_HOTPLUG_CPU
12053 + struct notifier_block cpu_notify;
12054 + #endif
12055 +@@ -522,7 +524,7 @@ struct r5conf {
12056 + atomic_t empty_inactive_list_nr;
12057 + struct llist_head released_stripes;
12058 + wait_queue_head_t wait_for_quiescent;
12059 +- wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
12060 ++ wait_queue_head_t wait_for_stripe;
12061 + wait_queue_head_t wait_for_overlap;
12062 + unsigned long cache_state;
12063 + #define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
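The per-hash array of exclusive wait queues is collapsed back into a single
wait_for_stripe: exclusive waits could miss wakeups and hang
raid5_get_active_stripe(). wait_event_lock_irq() preserves the per-hash
spinlock semantics by dropping the lock while sleeping and retaking it before
re-testing the condition. Usage pattern (condition simplified from the hunk):

    spin_lock_irq(conf->hash_locks + hash);
    /* sleeps with the lock released; on wakeup the lock is reacquired
     * before the condition is evaluated again */
    wait_event_lock_irq(conf->wait_for_stripe,
                        !list_empty(conf->inactive_list + hash),
                        *(conf->hash_locks + hash));
    /* lock held here, condition true */
    spin_unlock_irq(conf->hash_locks + hash);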
12064 +diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
12065 +index 471fd23b5c5c..08d2c6bf7341 100644
12066 +--- a/drivers/media/i2c/adv7511.c
12067 ++++ b/drivers/media/i2c/adv7511.c
12068 +@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
12069 + }
12070 + }
12071 +
12072 ++static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
12073 ++{
12074 ++ struct adv7511_state *state = get_adv7511_state(sd);
12075 ++ struct adv7511_edid_detect ed;
12076 ++
12077 ++ /* We failed to read the EDID, so send an event for this. */
12078 ++ ed.present = false;
12079 ++ ed.segment = adv7511_rd(sd, 0xc4);
12080 ++ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
12081 ++ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
12082 ++}
12083 ++
12084 + static void adv7511_edid_handler(struct work_struct *work)
12085 + {
12086 + struct delayed_work *dwork = to_delayed_work(work);
12087 + struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
12088 + struct v4l2_subdev *sd = &state->sd;
12089 +- struct adv7511_edid_detect ed;
12090 +
12091 + v4l2_dbg(1, debug, sd, "%s:\n", __func__);
12092 +
12093 +@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
12094 + }
12095 +
12096 + /* We failed to read the EDID, so send an event for this. */
12097 +- ed.present = false;
12098 +- ed.segment = adv7511_rd(sd, 0xc4);
12099 +- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
12100 ++ adv7511_notify_no_edid(sd);
12101 + v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
12102 + }
12103 +
12104 +@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
12105 + /* update read only ctrls */
12106 + v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
12107 + v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
12108 +- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
12109 +
12110 + if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
12111 + v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
12112 +@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
12113 + }
12114 + adv7511_s_power(sd, false);
12115 + memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
12116 ++ adv7511_notify_no_edid(sd);
12117 + }
12118 + }
12119 +
12120 +@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
12121 + }
12122 + /* one more segment read ok */
12123 + state->edid.segments = segment + 1;
12124 ++ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
12125 + if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
12126 + /* Request next EDID segment */
12127 + v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
12128 +@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
12129 + ed.present = true;
12130 + ed.segment = 0;
12131 + state->edid_detect_counter++;
12132 +- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
12133 + v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
12134 + return ed.present;
12135 + }
12136 +diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
12137 +index 9400e996087b..bedbd51fb77c 100644
12138 +--- a/drivers/media/pci/bt8xx/bttv-driver.c
12139 ++++ b/drivers/media/pci/bt8xx/bttv-driver.c
12140 +@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
12141 + return 0;
12142 + }
12143 +
12144 ++static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
12145 ++ unsigned int *width_mask,
12146 ++ unsigned int *width_bias)
12147 ++{
12148 ++ if (fmt->flags & FORMAT_FLAGS_PLANAR) {
12149 ++ *width_mask = ~15; /* width must be a multiple of 16 pixels */
12150 ++ *width_bias = 8; /* nearest */
12151 ++ } else {
12152 ++ *width_mask = ~3; /* width must be a multiple of 4 pixels */
12153 ++ *width_bias = 2; /* nearest */
12154 ++ }
12155 ++}
12156 ++
12157 + static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
12158 + struct v4l2_format *f)
12159 + {
12160 +@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
12161 + enum v4l2_field field;
12162 + __s32 width, height;
12163 + __s32 height2;
12164 ++ unsigned int width_mask, width_bias;
12165 + int rc;
12166 +
12167 + fmt = format_by_fourcc(f->fmt.pix.pixelformat);
12168 +@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
12169 + width = f->fmt.pix.width;
12170 + height = f->fmt.pix.height;
12171 +
12172 ++ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
12173 + rc = limit_scaled_size_lock(fh, &width, &height, field,
12174 +- /* width_mask: 4 pixels */ ~3,
12175 +- /* width_bias: nearest */ 2,
12176 ++ width_mask, width_bias,
12177 + /* adjust_size */ 1,
12178 + /* adjust_crop */ 0);
12179 + if (0 != rc)
12180 +@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
12181 + struct bttv_fh *fh = priv;
12182 + struct bttv *btv = fh->btv;
12183 + __s32 width, height;
12184 ++ unsigned int width_mask, width_bias;
12185 + enum v4l2_field field;
12186 +
12187 + retval = bttv_switch_type(fh, f->type);
12188 +@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
12189 + height = f->fmt.pix.height;
12190 + field = f->fmt.pix.field;
12191 +
12192 ++ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
12193 ++ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
12194 + retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
12195 +- /* width_mask: 4 pixels */ ~3,
12196 +- /* width_bias: nearest */ 2,
12197 ++ width_mask, width_bias,
12198 + /* adjust_size */ 1,
12199 + /* adjust_crop */ 1);
12200 + if (0 != retval)
12201 +@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
12202 +
12203 + f->fmt.pix.field = field;
12204 +
12205 +- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
12206 +-
12207 + 	/* update our state information */
12208 + fh->fmt = fmt;
12209 + fh->cap.field = f->fmt.pix.field;
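Planar pixel formats on bt8xx need line widths that are multiples of 16, while
packed formats only need multiples of 4; the old code always applied the packed
constraint. The mask/bias pair rounds a requested width to the nearest allowed
value, i.e. (assuming the driver applies them as) width = (width + bias) & mask.
A quick standalone check of the arithmetic:

    #include <stdio.h>

    static unsigned int round_width(unsigned int w,
                                    unsigned int mask, unsigned int bias)
    {
            return (w + bias) & mask;   /* bias = step/2 rounds to nearest */
    }

    int main(void)
    {
            printf("%u\n", round_width(645, ~15u, 8)); /* planar: 640 */
            printf("%u\n", round_width(653, ~15u, 8)); /* planar: 656 */
            printf("%u\n", round_width(645, ~3u,  2)); /* packed: 644 */
            return 0;
    }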
12210 +diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
12211 +index a63c1366a64e..1293563b7dce 100644
12212 +--- a/drivers/media/pci/saa7134/saa7134-video.c
12213 ++++ b/drivers/media/pci/saa7134/saa7134-video.c
12214 +@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
12215 + f->fmt.pix.height = dev->height;
12216 + f->fmt.pix.field = dev->field;
12217 + f->fmt.pix.pixelformat = dev->fmt->fourcc;
12218 +- f->fmt.pix.bytesperline =
12219 +- (f->fmt.pix.width * dev->fmt->depth) >> 3;
12220 ++ if (dev->fmt->planar)
12221 ++ f->fmt.pix.bytesperline = f->fmt.pix.width;
12222 ++ else
12223 ++ f->fmt.pix.bytesperline =
12224 ++ (f->fmt.pix.width * dev->fmt->depth) / 8;
12225 + f->fmt.pix.sizeimage =
12226 +- f->fmt.pix.height * f->fmt.pix.bytesperline;
12227 ++ (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
12228 + f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
12229 + return 0;
12230 + }
12231 +@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
12232 + if (f->fmt.pix.height > maxh)
12233 + f->fmt.pix.height = maxh;
12234 + f->fmt.pix.width &= ~0x03;
12235 +- f->fmt.pix.bytesperline =
12236 +- (f->fmt.pix.width * fmt->depth) >> 3;
12237 ++ if (fmt->planar)
12238 ++ f->fmt.pix.bytesperline = f->fmt.pix.width;
12239 ++ else
12240 ++ f->fmt.pix.bytesperline =
12241 ++ (f->fmt.pix.width * fmt->depth) / 8;
12242 + f->fmt.pix.sizeimage =
12243 +- f->fmt.pix.height * f->fmt.pix.bytesperline;
12244 ++ (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
12245 + f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
12246 +
12247 + return 0;
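For packed formats, bytesperline covers every component of a line, so
height * bytesperline equals the image size; for planar formats bytesperline
describes only the first (Y) plane, and sizeimage must be derived from the
format's total depth instead. Sketch of the corrected computation (fields as
in the hunks above):

    if (fmt->planar)
            pix->bytesperline = pix->width;                  /* Y plane only */
    else
            pix->bytesperline = pix->width * fmt->depth / 8;
    /* total size always follows from the full per-pixel depth */
    pix->sizeimage = pix->height * pix->width * fmt->depth / 8;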
12248 +diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
12249 +index 7d28899f89ce..6efe9d002961 100644
12250 +--- a/drivers/media/platform/coda/coda-bit.c
12251 ++++ b/drivers/media/platform/coda/coda-bit.c
12252 +@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
12253 +
12254 + /* Calculate bytesused field */
12255 + if (dst_buf->sequence == 0) {
12256 +- vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
12257 ++ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
12258 + ctx->vpu_header_size[0] +
12259 + ctx->vpu_header_size[1] +
12260 + ctx->vpu_header_size[2]);
12261 +diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
12262 +index 2d782ce94a67..7ae89c684783 100644
12263 +--- a/drivers/media/platform/coda/coda-common.c
12264 ++++ b/drivers/media/platform/coda/coda-common.c
12265 +@@ -2118,14 +2118,12 @@ static int coda_probe(struct platform_device *pdev)
12266 +
12267 + pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
12268 +
12269 +- if (of_id) {
12270 ++ if (of_id)
12271 + dev->devtype = of_id->data;
12272 +- } else if (pdev_id) {
12273 ++ else if (pdev_id)
12274 + dev->devtype = &coda_devdata[pdev_id->driver_data];
12275 +- } else {
12276 +- ret = -EINVAL;
12277 +- goto err_v4l2_register;
12278 +- }
12279 ++ else
12280 ++ return -EINVAL;
12281 +
12282 + spin_lock_init(&dev->irqlock);
12283 + INIT_LIST_HEAD(&dev->instances);
12284 +diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
12285 +index 6310acab60e7..d41ae950d1a1 100644
12286 +--- a/drivers/media/platform/vsp1/vsp1_sru.c
12287 ++++ b/drivers/media/platform/vsp1/vsp1_sru.c
12288 +@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
12289 + mutex_lock(sru->ctrls.lock);
12290 + ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
12291 + & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
12292 ++ vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
12293 + mutex_unlock(sru->ctrls.lock);
12294 +
12295 + vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
12296 +diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
12297 +index 9e29e70a78d7..d8988801dde0 100644
12298 +--- a/drivers/media/usb/au0828/au0828-core.c
12299 ++++ b/drivers/media/usb/au0828/au0828-core.c
12300 +@@ -192,7 +192,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
12301 + Set the status so poll routines can check and avoid
12302 + access after disconnect.
12303 + */
12304 +- dev->dev_state = DEV_DISCONNECTED;
12305 ++ set_bit(DEV_DISCONNECTED, &dev->dev_state);
12306 +
12307 + au0828_rc_unregister(dev);
12308 + /* Digital TV */
12309 +diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
12310 +index b0f067971979..3d6687f0407d 100644
12311 +--- a/drivers/media/usb/au0828/au0828-input.c
12312 ++++ b/drivers/media/usb/au0828/au0828-input.c
12313 +@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
12314 + bool first = true;
12315 +
12316 + /* do nothing if device is disconnected */
12317 +- if (ir->dev->dev_state == DEV_DISCONNECTED)
12318 ++ if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
12319 + return 0;
12320 +
12321 + /* Check IR int */
12322 +@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
12323 + cancel_delayed_work_sync(&ir->work);
12324 +
12325 + /* do nothing if device is disconnected */
12326 +- if (ir->dev->dev_state != DEV_DISCONNECTED) {
12327 ++ if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
12328 + /* Disable IR */
12329 + au8522_rc_clear(ir, 0xe0, 1 << 4);
12330 + }
12331 +diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
12332 +index a13625722848..8bc69af874a0 100644
12333 +--- a/drivers/media/usb/au0828/au0828-video.c
12334 ++++ b/drivers/media/usb/au0828/au0828-video.c
12335 +@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
12336 +
12337 + static int check_dev(struct au0828_dev *dev)
12338 + {
12339 +- if (dev->dev_state & DEV_DISCONNECTED) {
12340 ++ if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
12341 + pr_info("v4l2 ioctl: device not present\n");
12342 + return -ENODEV;
12343 + }
12344 +
12345 +- if (dev->dev_state & DEV_MISCONFIGURED) {
12346 +- pr_info("v4l2 ioctl: device is misconfigured; "
12347 +- "close and open it again\n");
12348 ++ if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
12349 ++ pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
12350 + return -EIO;
12351 + }
12352 + return 0;
12353 +@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
12354 + if (!dev)
12355 + return 0;
12356 +
12357 +- if ((dev->dev_state & DEV_DISCONNECTED) ||
12358 +- (dev->dev_state & DEV_MISCONFIGURED))
12359 ++ if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
12360 ++ test_bit(DEV_MISCONFIGURED, &dev->dev_state))
12361 + return 0;
12362 +
12363 + if (urb->status < 0) {
12364 +@@ -822,10 +821,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
12365 + int ret = 0;
12366 +
12367 + dev->stream_state = STREAM_INTERRUPT;
12368 +- if (dev->dev_state == DEV_DISCONNECTED)
12369 ++ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
12370 + return -ENODEV;
12371 + else if (ret) {
12372 +- dev->dev_state = DEV_MISCONFIGURED;
12373 ++ set_bit(DEV_MISCONFIGURED, &dev->dev_state);
12374 + dprintk(1, "%s device is misconfigured!\n", __func__);
12375 + return ret;
12376 + }
12377 +@@ -1014,7 +1013,7 @@ static int au0828_v4l2_open(struct file *filp)
12378 + int ret;
12379 +
12380 + dprintk(1,
12381 +- "%s called std_set %d dev_state %d stream users %d users %d\n",
12382 ++ "%s called std_set %d dev_state %ld stream users %d users %d\n",
12383 + __func__, dev->std_set_in_tuner_core, dev->dev_state,
12384 + dev->streaming_users, dev->users);
12385 +
12386 +@@ -1033,7 +1032,7 @@ static int au0828_v4l2_open(struct file *filp)
12387 + au0828_analog_stream_enable(dev);
12388 + au0828_analog_stream_reset(dev);
12389 + dev->stream_state = STREAM_OFF;
12390 +- dev->dev_state |= DEV_INITIALIZED;
12391 ++ set_bit(DEV_INITIALIZED, &dev->dev_state);
12392 + }
12393 + dev->users++;
12394 + mutex_unlock(&dev->lock);
12395 +@@ -1047,7 +1046,7 @@ static int au0828_v4l2_close(struct file *filp)
12396 + struct video_device *vdev = video_devdata(filp);
12397 +
12398 + dprintk(1,
12399 +- "%s called std_set %d dev_state %d stream users %d users %d\n",
12400 ++ "%s called std_set %d dev_state %ld stream users %d users %d\n",
12401 + __func__, dev->std_set_in_tuner_core, dev->dev_state,
12402 + dev->streaming_users, dev->users);
12403 +
12404 +@@ -1063,7 +1062,7 @@ static int au0828_v4l2_close(struct file *filp)
12405 + del_timer_sync(&dev->vbi_timeout);
12406 + }
12407 +
12408 +- if (dev->dev_state == DEV_DISCONNECTED)
12409 ++ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
12410 + goto end;
12411 +
12412 + if (dev->users == 1) {
12413 +@@ -1092,7 +1091,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
12414 + .type = V4L2_TUNER_ANALOG_TV,
12415 + };
12416 +
12417 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12418 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12419 + dev->std_set_in_tuner_core, dev->dev_state);
12420 +
12421 + if (dev->std_set_in_tuner_core)
12422 +@@ -1164,7 +1163,7 @@ static int vidioc_querycap(struct file *file, void *priv,
12423 + struct video_device *vdev = video_devdata(file);
12424 + struct au0828_dev *dev = video_drvdata(file);
12425 +
12426 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12427 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12428 + dev->std_set_in_tuner_core, dev->dev_state);
12429 +
12430 + strlcpy(cap->driver, "au0828", sizeof(cap->driver));
12431 +@@ -1207,7 +1206,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
12432 + {
12433 + struct au0828_dev *dev = video_drvdata(file);
12434 +
12435 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12436 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12437 + dev->std_set_in_tuner_core, dev->dev_state);
12438 +
12439 + f->fmt.pix.width = dev->width;
12440 +@@ -1226,7 +1225,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
12441 + {
12442 + struct au0828_dev *dev = video_drvdata(file);
12443 +
12444 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12445 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12446 + dev->std_set_in_tuner_core, dev->dev_state);
12447 +
12448 + return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
12449 +@@ -1238,7 +1237,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
12450 + struct au0828_dev *dev = video_drvdata(file);
12451 + int rc;
12452 +
12453 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12454 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12455 + dev->std_set_in_tuner_core, dev->dev_state);
12456 +
12457 + rc = check_dev(dev);
12458 +@@ -1260,7 +1259,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
12459 + {
12460 + struct au0828_dev *dev = video_drvdata(file);
12461 +
12462 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12463 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12464 + dev->std_set_in_tuner_core, dev->dev_state);
12465 +
12466 + if (norm == dev->std)
12467 +@@ -1292,7 +1291,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
12468 + {
12469 + struct au0828_dev *dev = video_drvdata(file);
12470 +
12471 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12472 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12473 + dev->std_set_in_tuner_core, dev->dev_state);
12474 +
12475 + *norm = dev->std;
12476 +@@ -1315,7 +1314,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
12477 + [AU0828_VMUX_DEBUG] = "tv debug"
12478 + };
12479 +
12480 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12481 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12482 + dev->std_set_in_tuner_core, dev->dev_state);
12483 +
12484 + tmp = input->index;
12485 +@@ -1345,7 +1344,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
12486 + {
12487 + struct au0828_dev *dev = video_drvdata(file);
12488 +
12489 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12490 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12491 + dev->std_set_in_tuner_core, dev->dev_state);
12492 +
12493 + *i = dev->ctrl_input;
12494 +@@ -1356,7 +1355,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
12495 + {
12496 + int i;
12497 +
12498 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12499 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12500 + dev->std_set_in_tuner_core, dev->dev_state);
12501 +
12502 + switch (AUVI_INPUT(index).type) {
12503 +@@ -1441,7 +1440,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
12504 + {
12505 + struct au0828_dev *dev = video_drvdata(file);
12506 +
12507 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12508 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12509 + dev->std_set_in_tuner_core, dev->dev_state);
12510 +
12511 + a->index = dev->ctrl_ainput;
12512 +@@ -1461,7 +1460,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
12513 + if (a->index != dev->ctrl_ainput)
12514 + return -EINVAL;
12515 +
12516 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12517 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12518 + dev->std_set_in_tuner_core, dev->dev_state);
12519 + return 0;
12520 + }
12521 +@@ -1473,7 +1472,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
12522 + if (t->index != 0)
12523 + return -EINVAL;
12524 +
12525 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12526 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12527 + dev->std_set_in_tuner_core, dev->dev_state);
12528 +
12529 + strcpy(t->name, "Auvitek tuner");
12530 +@@ -1493,7 +1492,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
12531 + if (t->index != 0)
12532 + return -EINVAL;
12533 +
12534 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12535 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12536 + dev->std_set_in_tuner_core, dev->dev_state);
12537 +
12538 + au0828_init_tuner(dev);
12539 +@@ -1515,7 +1514,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
12540 +
12541 + if (freq->tuner != 0)
12542 + return -EINVAL;
12543 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12544 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12545 + dev->std_set_in_tuner_core, dev->dev_state);
12546 + freq->frequency = dev->ctrl_freq;
12547 + return 0;
12548 +@@ -1530,7 +1529,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
12549 + if (freq->tuner != 0)
12550 + return -EINVAL;
12551 +
12552 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12553 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12554 + dev->std_set_in_tuner_core, dev->dev_state);
12555 +
12556 + au0828_init_tuner(dev);
12557 +@@ -1556,7 +1555,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
12558 + {
12559 + struct au0828_dev *dev = video_drvdata(file);
12560 +
12561 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12562 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12563 + dev->std_set_in_tuner_core, dev->dev_state);
12564 +
12565 + format->fmt.vbi.samples_per_line = dev->vbi_width;
12566 +@@ -1582,7 +1581,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
12567 + if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
12568 + return -EINVAL;
12569 +
12570 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12571 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12572 + dev->std_set_in_tuner_core, dev->dev_state);
12573 +
12574 + cc->bounds.left = 0;
12575 +@@ -1604,7 +1603,7 @@ static int vidioc_g_register(struct file *file, void *priv,
12576 + {
12577 + struct au0828_dev *dev = video_drvdata(file);
12578 +
12579 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12580 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12581 + dev->std_set_in_tuner_core, dev->dev_state);
12582 +
12583 + reg->val = au0828_read(dev, reg->reg);
12584 +@@ -1617,7 +1616,7 @@ static int vidioc_s_register(struct file *file, void *priv,
12585 + {
12586 + struct au0828_dev *dev = video_drvdata(file);
12587 +
12588 +- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
12589 ++ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
12590 + dev->std_set_in_tuner_core, dev->dev_state);
12591 +
12592 + return au0828_writereg(dev, reg->reg, reg->val);
12593 +diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
12594 +index 8276072bc55a..b28a05d76618 100644
12595 +--- a/drivers/media/usb/au0828/au0828.h
12596 ++++ b/drivers/media/usb/au0828/au0828.h
12597 +@@ -21,6 +21,7 @@
12598 +
12599 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12600 +
12601 ++#include <linux/bitops.h>
12602 + #include <linux/usb.h>
12603 + #include <linux/i2c.h>
12604 + #include <linux/i2c-algo-bit.h>
12605 +@@ -122,9 +123,9 @@ enum au0828_stream_state {
12606 +
12607 + /* device state */
12608 + enum au0828_dev_state {
12609 +- DEV_INITIALIZED = 0x01,
12610 +- DEV_DISCONNECTED = 0x02,
12611 +- DEV_MISCONFIGURED = 0x04
12612 ++ DEV_INITIALIZED = 0,
12613 ++ DEV_DISCONNECTED = 1,
12614 ++ DEV_MISCONFIGURED = 2
12615 + };
12616 +
12617 + struct au0828_dev;
12618 +@@ -248,7 +249,7 @@ struct au0828_dev {
12619 + int input_type;
12620 + int std_set_in_tuner_core;
12621 + unsigned int ctrl_input;
12622 +- enum au0828_dev_state dev_state;
12623 ++	unsigned long dev_state; /* defined in enum au0828_dev_state */
12624 + enum au0828_stream_state stream_state;
12625 + wait_queue_head_t open;
12626 +
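dev_state was an enum used both as a plain value (dev_state == DEV_DISCONNECTED)
and as a bitmask (dev_state |= DEV_INITIALIZED), and the non-atomic updates
raced with disconnect. The conversion turns the enum values into bit numbers in
an unsigned long and funnels every access through the atomic bitops. The
resulting idiom, pieced together from the hunks above:

    /* writer side (USB disconnect): atomic read-modify-write */
    set_bit(DEV_DISCONNECTED, &dev->dev_state);

    /* reader side (ioctl paths): safe against concurrent disconnect */
    static int check_dev(struct au0828_dev *dev)
    {
            if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
                    return -ENODEV;
            if (test_bit(DEV_MISCONFIGURED, &dev->dev_state))
                    return -EIO;
            return 0;
    }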
12627 +diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
12628 +index 086cf1c7bd7d..18aed5dd325e 100644
12629 +--- a/drivers/media/usb/pwc/pwc-if.c
12630 ++++ b/drivers/media/usb/pwc/pwc-if.c
12631 +@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
12632 + { USB_DEVICE(0x0471, 0x0312) },
12633 + { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
12634 + { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
12635 ++ { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
12636 + { USB_DEVICE(0x069A, 0x0001) }, /* Askey */
12637 + { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
12638 + { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
12639 +@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
12640 + name = "Philips SPC 900NC webcam";
12641 + type_id = 740;
12642 + break;
12643 ++ case 0x032C:
12644 ++ PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
12645 ++ name = "Philips SPC 880NC webcam";
12646 ++ type_id = 740;
12647 ++ break;
12648 + default:
12649 + return -ENODEV;
12650 + break;
12651 +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
12652 +index de9ff3bb8edd..6996ab8db108 100644
12653 +--- a/drivers/media/usb/usbvision/usbvision-video.c
12654 ++++ b/drivers/media/usb/usbvision/usbvision-video.c
12655 +@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
12656 + printk(KERN_INFO "%s: %s found\n", __func__,
12657 + usbvision_device_data[model].model_string);
12658 +
12659 +- /*
12660 +- * this is a security check.
12661 +- * an exploit using an incorrect bInterfaceNumber is known
12662 +- */
12663 +- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
12664 +- return -ENODEV;
12665 +-
12666 + if (usbvision_device_data[model].interface >= 0)
12667 + interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
12668 + else if (ifnum < dev->actconfig->desc.bNumInterfaces)
12669 +diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
12670 +index 8fd84a67478a..019644ff627d 100644
12671 +--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
12672 ++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
12673 +@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
12674 + get_user(kp->index, &up->index) ||
12675 + get_user(kp->type, &up->type) ||
12676 + get_user(kp->flags, &up->flags) ||
12677 +- get_user(kp->memory, &up->memory))
12678 ++ get_user(kp->memory, &up->memory) ||
12679 ++ get_user(kp->length, &up->length))
12680 + return -EFAULT;
12681 +
12682 + if (V4L2_TYPE_IS_OUTPUT(kp->type))
12683 +@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
12684 + return -EFAULT;
12685 +
12686 + if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
12687 +- if (get_user(kp->length, &up->length))
12688 +- return -EFAULT;
12689 +-
12690 + num_planes = kp->length;
12691 + if (num_planes == 0) {
12692 + kp->m.planes = NULL;
12693 +@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
12694 + } else {
12695 + switch (kp->memory) {
12696 + case V4L2_MEMORY_MMAP:
12697 +- if (get_user(kp->length, &up->length) ||
12698 +- get_user(kp->m.offset, &up->m.offset))
12699 ++ if (get_user(kp->m.offset, &up->m.offset))
12700 + return -EFAULT;
12701 + break;
12702 + case V4L2_MEMORY_USERPTR:
12703 + {
12704 + compat_long_t tmp;
12705 +
12706 +- if (get_user(kp->length, &up->length) ||
12707 +- get_user(tmp, &up->m.userptr))
12708 ++ if (get_user(tmp, &up->m.userptr))
12709 + return -EFAULT;
12710 +
12711 + kp->m.userptr = (unsigned long)compat_ptr(tmp);
12712 +@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
12713 + copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
12714 + put_user(kp->sequence, &up->sequence) ||
12715 + put_user(kp->reserved2, &up->reserved2) ||
12716 +- put_user(kp->reserved, &up->reserved))
12717 ++ put_user(kp->reserved, &up->reserved) ||
12718 ++ put_user(kp->length, &up->length))
12719 + return -EFAULT;
12720 +
12721 + if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
12722 +@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
12723 + } else {
12724 + switch (kp->memory) {
12725 + case V4L2_MEMORY_MMAP:
12726 +- if (put_user(kp->length, &up->length) ||
12727 +- put_user(kp->m.offset, &up->m.offset))
12728 ++ if (put_user(kp->m.offset, &up->m.offset))
12729 + return -EFAULT;
12730 + break;
12731 + case V4L2_MEMORY_USERPTR:
12732 +- if (put_user(kp->length, &up->length) ||
12733 +- put_user(kp->m.userptr, &up->m.userptr))
12734 ++ if (put_user(kp->m.userptr, &up->m.userptr))
12735 + return -EFAULT;
12736 + break;
12737 + case V4L2_MEMORY_OVERLAY:
12738 +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
12739 +index ff8953ae52d1..d7d7c52a3060 100644
12740 +--- a/drivers/media/v4l2-core/videobuf2-core.c
12741 ++++ b/drivers/media/v4l2-core/videobuf2-core.c
12742 +@@ -1643,7 +1643,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
12743 + * Will sleep if required for nonblocking == false.
12744 + */
12745 + static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
12746 +- int nonblocking)
12747 ++ void *pb, int nonblocking)
12748 + {
12749 + unsigned long flags;
12750 + int ret;
12751 +@@ -1664,10 +1664,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
12752 + /*
12753 + * Only remove the buffer from done_list if v4l2_buffer can handle all
12754 + * the planes.
12755 +- * Verifying planes is NOT necessary since it already has been checked
12756 +- * before the buffer is queued/prepared. So it can never fail.
12757 + */
12758 +- list_del(&(*vb)->done_entry);
12759 ++ ret = call_bufop(q, verify_planes_array, *vb, pb);
12760 ++ if (!ret)
12761 ++ list_del(&(*vb)->done_entry);
12762 + spin_unlock_irqrestore(&q->done_lock, flags);
12763 +
12764 + return ret;
12765 +@@ -1746,7 +1746,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
12766 + struct vb2_buffer *vb = NULL;
12767 + int ret;
12768 +
12769 +- ret = __vb2_get_done_vb(q, &vb, nonblocking);
12770 ++ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
12771 + if (ret < 0)
12772 + return ret;
12773 +
12774 +@@ -2293,6 +2293,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
12775 + return POLLERR;
12776 +
12777 + /*
12778 ++ * If this quirk is set and QBUF hasn't been called yet then
12779 ++ * return POLLERR as well. This only affects capture queues, output
12780 ++ * queues will always initialize waiting_for_buffers to false.
12781 ++ * This quirk is set by V4L2 for backwards compatibility reasons.
12782 ++ */
12783 ++ if (q->quirk_poll_must_check_waiting_for_buffers &&
12784 ++ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
12785 ++ return POLLERR;
12786 ++
12787 ++ /*
12788 + * For output streams you can call write() as long as there are fewer
12789 + * buffers queued than there are buffers available.
12790 + */
12791 +diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
12792 +index dbec5923fcf0..3c3b517f1d1c 100644
12793 +--- a/drivers/media/v4l2-core/videobuf2-memops.c
12794 ++++ b/drivers/media/v4l2-core/videobuf2-memops.c
12795 +@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
12796 + vec = frame_vector_create(nr);
12797 + if (!vec)
12798 + return ERR_PTR(-ENOMEM);
12799 +- ret = get_vaddr_frames(start, nr, write, 1, vec);
12800 ++ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
12801 + if (ret < 0)
12802 + goto out_destroy;
12803 + /* We accept only complete set of PFNs */
12804 +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
12805 +index 91f552124050..0b1b8c7b6ce5 100644
12806 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
12807 ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
12808 +@@ -765,6 +765,12 @@ int vb2_queue_init(struct vb2_queue *q)
12809 + q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
12810 + q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
12811 + == V4L2_BUF_FLAG_TIMESTAMP_COPY;
12812 ++ /*
12813 ++ * For compatibility with vb1: if QBUF hasn't been called yet, then
12814 ++ * return POLLERR as well. This only affects capture queues, output
12815 ++ * queues will always initialize waiting_for_buffers to false.
12816 ++ */
12817 ++ q->quirk_poll_must_check_waiting_for_buffers = true;
12818 +
12819 + return vb2_core_queue_init(q);
12820 + }
12821 +@@ -818,14 +824,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
12822 + poll_wait(file, &fh->wait, wait);
12823 + }
12824 +
12825 +- /*
12826 +- * For compatibility with vb1: if QBUF hasn't been called yet, then
12827 +- * return POLLERR as well. This only affects capture queues, output
12828 +- * queues will always initialize waiting_for_buffers to false.
12829 +- */
12830 +- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
12831 +- return POLLERR;
12832 +-
12833 + return res | vb2_core_poll(q, file, wait);
12834 + }
12835 + EXPORT_SYMBOL_GPL(vb2_poll);
12836 +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
12837 +index 1743788f1595..1bbbe877ba7e 100644
12838 +--- a/drivers/mfd/intel-lpss.c
12839 ++++ b/drivers/mfd/intel-lpss.c
12840 +@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
12841 + err_remove_ltr:
12842 + intel_lpss_debugfs_remove(lpss);
12843 + intel_lpss_ltr_hide(lpss);
12844 ++ intel_lpss_unregister_clock(lpss);
12845 +
12846 + err_clk_register:
12847 + ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
12848 +diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
12849 +index b7b3e8ee64f2..c30290f33430 100644
12850 +--- a/drivers/mfd/omap-usb-tll.c
12851 ++++ b/drivers/mfd/omap-usb-tll.c
12852 +@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
12853 +
12854 + if (IS_ERR(tll->ch_clk[i]))
12855 + dev_dbg(dev, "can't get clock : %s\n", clkname);
12856 ++ else
12857 ++ clk_prepare(tll->ch_clk[i]);
12858 + }
12859 +
12860 + pm_runtime_put_sync(dev);
12861 +@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
12862 + tll_dev = NULL;
12863 + spin_unlock(&tll_lock);
12864 +
12865 +- for (i = 0; i < tll->nch; i++)
12866 +- if (!IS_ERR(tll->ch_clk[i]))
12867 ++ for (i = 0; i < tll->nch; i++) {
12868 ++ if (!IS_ERR(tll->ch_clk[i])) {
12869 ++ clk_unprepare(tll->ch_clk[i]);
12870 + clk_put(tll->ch_clk[i]);
12871 ++ }
12872 ++ }
12873 +
12874 + pm_runtime_disable(&pdev->dev);
12875 + return 0;
12876 +@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
12877 + if (IS_ERR(tll->ch_clk[i]))
12878 + continue;
12879 +
12880 +- r = clk_prepare_enable(tll->ch_clk[i]);
12881 ++ r = clk_enable(tll->ch_clk[i]);
12882 + if (r) {
12883 + dev_err(tll_dev,
12884 + "Error enabling ch %d clock: %d\n", i, r);
12885 +@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
12886 + for (i = 0; i < tll->nch; i++) {
12887 + if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
12888 + if (!IS_ERR(tll->ch_clk[i]))
12889 +- clk_disable_unprepare(tll->ch_clk[i]);
12890 ++ clk_disable(tll->ch_clk[i]);
12891 + }
12892 + }
12893 +
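omap_tll_enable()/omap_tll_disable() run under the tll_lock spinlock, but
clk_prepare_enable() may sleep. The fix splits the pair: clk_prepare() and
clk_unprepare() (which may sleep) move to probe/remove, leaving only the
atomic-context-safe clk_enable()/clk_disable() on the runtime path. Sketch of
the split for a single channel clock:

    /* probe: may sleep */
    tll->ch_clk[i] = clk_get(dev, clkname);
    if (!IS_ERR(tll->ch_clk[i]))
            clk_prepare(tll->ch_clk[i]);

    /* runtime enable/disable: called under a spinlock, must not sleep */
    clk_enable(tll->ch_clk[i]);
    clk_disable(tll->ch_clk[i]);

    /* remove: may sleep again */
    clk_unprepare(tll->ch_clk[i]);
    clk_put(tll->ch_clk[i]);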
12894 +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
12895 +index 054fc10cb3b6..b22c03264270 100644
12896 +--- a/drivers/misc/Kconfig
12897 ++++ b/drivers/misc/Kconfig
12898 +@@ -440,7 +440,7 @@ config ARM_CHARLCD
12899 + still useful.
12900 +
12901 + config BMP085
12902 +- bool
12903 ++ tristate
12904 + depends on SYSFS
12905 +
12906 + config BMP085_I2C
12907 +diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
12908 +index 15e88078ba1e..f1a0b99f5a9a 100644
12909 +--- a/drivers/misc/ad525x_dpot.c
12910 ++++ b/drivers/misc/ad525x_dpot.c
12911 +@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
12912 + */
12913 + value = swab16(value);
12914 +
12915 +- if (dpot->uid == DPOT_UID(AD5271_ID))
12916 ++ if (dpot->uid == DPOT_UID(AD5274_ID))
12917 + value = value >> 2;
12918 + return value;
12919 + default:
12920 +diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
12921 +index 09a406058c46..efbb6945eb18 100644
12922 +--- a/drivers/misc/cxl/irq.c
12923 ++++ b/drivers/misc/cxl/irq.c
12924 +@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
12925 + void cxl_unmap_irq(unsigned int virq, void *cookie)
12926 + {
12927 + free_irq(virq, cookie);
12928 +- irq_dispose_mapping(virq);
12929 + }
12930 +
12931 + static int cxl_register_one_irq(struct cxl *adapter,
12932 +diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
12933 +index cd0403f09267..e79c0371ee6f 100644
12934 +--- a/drivers/misc/mei/amthif.c
12935 ++++ b/drivers/misc/mei/amthif.c
12936 +@@ -417,8 +417,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
12937 +
12938 + dev = cl->dev;
12939 +
12940 +- if (dev->iamthif_state != MEI_IAMTHIF_READING)
12941 ++ if (dev->iamthif_state != MEI_IAMTHIF_READING) {
12942 ++ mei_irq_discard_msg(dev, mei_hdr);
12943 + return 0;
12944 ++ }
12945 +
12946 + ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
12947 + if (ret)
12948 +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
12949 +index 0b05aa938799..a77643954523 100644
12950 +--- a/drivers/misc/mei/bus.c
12951 ++++ b/drivers/misc/mei/bus.c
12952 +@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
12953 + bus = cl->dev;
12954 +
12955 + mutex_lock(&bus->device_lock);
12956 ++ if (bus->dev_state != MEI_DEV_ENABLED) {
12957 ++ rets = -ENODEV;
12958 ++ goto out;
12959 ++ }
12960 ++
12961 + if (!mei_cl_is_connected(cl)) {
12962 + rets = -ENODEV;
12963 + goto out;
12964 +@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
12965 + bus = cl->dev;
12966 +
12967 + mutex_lock(&bus->device_lock);
12968 ++ if (bus->dev_state != MEI_DEV_ENABLED) {
12969 ++ rets = -ENODEV;
12970 ++ goto out;
12971 ++ }
12972 +
12973 + cb = mei_cl_read_cb(cl, NULL);
12974 + if (cb)
12975 +@@ -213,17 +222,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv);
12976 + static void mei_cl_bus_event_work(struct work_struct *work)
12977 + {
12978 + struct mei_cl_device *cldev;
12979 ++ struct mei_device *bus;
12980 +
12981 + cldev = container_of(work, struct mei_cl_device, event_work);
12982 +
12983 ++ bus = cldev->bus;
12984 ++
12985 + if (cldev->event_cb)
12986 + cldev->event_cb(cldev, cldev->events, cldev->event_context);
12987 +
12988 + cldev->events = 0;
12989 +
12990 + /* Prepare for the next read */
12991 +- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
12992 ++ if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
12993 ++ mutex_lock(&bus->device_lock);
12994 + mei_cl_read_start(cldev->cl, 0, NULL);
12995 ++ mutex_unlock(&bus->device_lock);
12996 ++ }
12997 + }
12998 +
12999 + /**
13000 +@@ -287,6 +302,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
13001 + unsigned long events_mask,
13002 + mei_cldev_event_cb_t event_cb, void *context)
13003 + {
13004 ++ struct mei_device *bus = cldev->bus;
13005 + int ret;
13006 +
13007 + if (cldev->event_cb)
13008 +@@ -299,15 +315,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
13009 + INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
13010 +
13011 + if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
13012 ++ mutex_lock(&bus->device_lock);
13013 + ret = mei_cl_read_start(cldev->cl, 0, NULL);
13014 ++ mutex_unlock(&bus->device_lock);
13015 + if (ret && ret != -EBUSY)
13016 + return ret;
13017 + }
13018 +
13019 + if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
13020 +- mutex_lock(&cldev->cl->dev->device_lock);
13021 ++ mutex_lock(&bus->device_lock);
13022 + ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
13023 +- mutex_unlock(&cldev->cl->dev->device_lock);
13024 ++ mutex_unlock(&bus->device_lock);
13025 + if (ret)
13026 + return ret;
13027 + }
13028 +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
13029 +index a6c87c713193..958af84884b5 100644
13030 +--- a/drivers/misc/mei/client.c
13031 ++++ b/drivers/misc/mei/client.c
13032 +@@ -1735,6 +1735,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
13033 + wake_up(&cl->wait);
13034 +
13035 + break;
13036 ++ case MEI_FOP_DISCONNECT_RSP:
13037 ++ mei_io_cb_free(cb);
13038 ++ mei_cl_set_disconnected(cl);
13039 ++ break;
13040 + default:
13041 + BUG_ON(0);
13042 + }
13043 +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
13044 +index e7b7aad0999b..fd8a9f057ea6 100644
13045 +--- a/drivers/misc/mei/hbm.c
13046 ++++ b/drivers/misc/mei/hbm.c
13047 +@@ -873,8 +873,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
13048 + cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
13049 + if (!cb)
13050 + return -ENOMEM;
13051 +- cl_dbg(dev, cl, "add disconnect response as first\n");
13052 +- list_add(&cb->list, &dev->ctrl_wr_list.list);
13053 ++ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
13054 + }
13055 + return 0;
13056 + }
13057 +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
13058 +index 64b568a0268d..d1df797c7568 100644
13059 +--- a/drivers/misc/mei/interrupt.c
13060 ++++ b/drivers/misc/mei/interrupt.c
13061 +@@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
13062 + * @dev: mei device
13063 + * @hdr: message header
13064 + */
13065 +-static inline
13066 + void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
13067 + {
13068 + /*
13069 +@@ -184,10 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
13070 + return -EMSGSIZE;
13071 +
13072 + ret = mei_hbm_cl_disconnect_rsp(dev, cl);
13073 +- mei_cl_set_disconnected(cl);
13074 +- mei_io_cb_free(cb);
13075 +- mei_me_cl_put(cl->me_cl);
13076 +- cl->me_cl = NULL;
13077 ++ list_move_tail(&cb->list, &cmpl_list->list);
13078 +
13079 + return ret;
13080 + }
13081 +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
13082 +index 4250555d5e72..1b06e2fd6858 100644
13083 +--- a/drivers/misc/mei/mei_dev.h
13084 ++++ b/drivers/misc/mei/mei_dev.h
13085 +@@ -782,6 +782,8 @@ bool mei_hbuf_acquire(struct mei_device *dev);
13086 +
13087 + bool mei_write_is_idle(struct mei_device *dev);
13088 +
13089 ++void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
13090 ++
13091 + #if IS_ENABLED(CONFIG_DEBUG_FS)
13092 + int mei_dbgfs_register(struct mei_device *dev, const char *name);
13093 + void mei_dbgfs_deregister(struct mei_device *dev);
13094 +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
13095 +index 8310b4dbff06..6a451bd65bf3 100644
13096 +--- a/drivers/misc/mic/scif/scif_rma.c
13097 ++++ b/drivers/misc/mic/scif/scif_rma.c
13098 +@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
13099 + if ((map_flags & SCIF_MAP_FIXED) &&
13100 + ((ALIGN(offset, PAGE_SIZE) != offset) ||
13101 + (offset < 0) ||
13102 +- (offset + (off_t)len < offset)))
13103 ++ (len > LONG_MAX - offset)))
13104 + return -EINVAL;
13105 +
13106 + might_sleep();
13107 +@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
13108 + if ((map_flags & SCIF_MAP_FIXED) &&
13109 + ((ALIGN(offset, PAGE_SIZE) != offset) ||
13110 + (offset < 0) ||
13111 +- (offset + (off_t)len < offset)))
13112 ++ (len > LONG_MAX - offset)))
13113 + return -EINVAL;
13114 +
13115 + /* Unsupported protection requested */
13116 +@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
13117 +
13118 + /* Offset is not page aligned or offset+len wraps around */
13119 + if ((ALIGN(offset, PAGE_SIZE) != offset) ||
13120 +- (offset + (off_t)len < offset))
13121 ++ (offset < 0) ||
13122 ++ (len > LONG_MAX - offset))
13123 + return -EINVAL;
13124 +
13125 + err = scif_verify_epd(ep);
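The old `offset + (off_t)len < offset` test relies on signed overflow, which is
undefined behavior and free for the compiler to delete; `len > LONG_MAX - offset`
checks the same condition without ever overflowing, once offset is known to be
non-negative. A standalone illustration (assuming off_t is long, as the driver
does):

    #include <limits.h>
    #include <stdbool.h>
    #include <sys/types.h>

    /* true when [offset, offset + len) fits in the non-negative off_t range */
    static bool window_fits(off_t offset, size_t len)
    {
            if (offset < 0)
                    return false;
            return len <= (size_t)(LONG_MAX - offset); /* no overflow possible */
    }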
13126 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
13127 +index fe207e542032..7630b6b7eb75 100644
13128 +--- a/drivers/mmc/card/block.c
13129 ++++ b/drivers/mmc/card/block.c
13130 +@@ -86,7 +86,6 @@ static int max_devices;
13131 +
13132 + /* TODO: Replace these with struct ida */
13133 + static DECLARE_BITMAP(dev_use, MAX_DEVICES);
13134 +-static DECLARE_BITMAP(name_use, MAX_DEVICES);
13135 +
13136 + /*
13137 + * There is one mmc_blk_data per slot.
13138 +@@ -105,7 +104,6 @@ struct mmc_blk_data {
13139 + unsigned int usage;
13140 + unsigned int read_only;
13141 + unsigned int part_type;
13142 +- unsigned int name_idx;
13143 + unsigned int reset_done;
13144 + #define MMC_BLK_READ BIT(0)
13145 + #define MMC_BLK_WRITE BIT(1)
13146 +@@ -589,6 +587,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
13147 + struct mmc_card *card;
13148 + int err = 0, ioc_err = 0;
13149 +
13150 ++ /*
13151 ++ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
13152 ++ * whole block device, not on a partition. This prevents overspray
13153 ++ * between sibling partitions.
13154 ++ */
13155 ++ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
13156 ++ return -EPERM;
13157 ++
13158 + idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
13159 + if (IS_ERR(idata))
13160 + return PTR_ERR(idata);
13161 +@@ -631,6 +637,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
13162 + int i, err = 0, ioc_err = 0;
13163 + __u64 num_of_cmds;
13164 +
13165 ++ /*
13166 ++ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
13167 ++ * whole block device, not on a partition. This prevents overspray
13168 ++ * between sibling partitions.
13169 ++ */
13170 ++ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
13171 ++ return -EPERM;
13172 ++
13173 + if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
13174 + sizeof(num_of_cmds)))
13175 + return -EFAULT;
13176 +@@ -688,14 +702,6 @@ cmd_err:
13177 + static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
13178 + unsigned int cmd, unsigned long arg)
13179 + {
13180 +- /*
13181 +- * The caller must have CAP_SYS_RAWIO, and must be calling this on the
13182 +- * whole block device, not on a partition. This prevents overspray
13183 +- * between sibling partitions.
13184 +- */
13185 +- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
13186 +- return -EPERM;
13187 +-
13188 + switch (cmd) {
13189 + case MMC_IOC_CMD:
13190 + return mmc_blk_ioctl_cmd(bdev,
13191 +@@ -2194,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
13192 + goto out;
13193 + }
13194 +
13195 +- /*
13196 +- * !subname implies we are creating main mmc_blk_data that will be
13197 +- * associated with mmc_card with dev_set_drvdata. Due to device
13198 +- * partitions, devidx will not coincide with a per-physical card
13199 +- * index anymore so we keep track of a name index.
13200 +- */
13201 +- if (!subname) {
13202 +- md->name_idx = find_first_zero_bit(name_use, max_devices);
13203 +- __set_bit(md->name_idx, name_use);
13204 +- } else
13205 +- md->name_idx = ((struct mmc_blk_data *)
13206 +- dev_to_disk(parent)->private_data)->name_idx;
13207 +-
13208 + md->area_type = area_type;
13209 +
13210 + /*
13211 +@@ -2256,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
13212 + */
13213 +
13214 + snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
13215 +- "mmcblk%u%s", md->name_idx, subname ? subname : "");
13216 ++ "mmcblk%u%s", card->host->index, subname ? subname : "");
13217 +
13218 + if (mmc_card_mmc(card))
13219 + blk_queue_logical_block_size(md->queue.queue,
13220 +@@ -2410,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
13221 + struct list_head *pos, *q;
13222 + struct mmc_blk_data *part_md;
13223 +
13224 +- __clear_bit(md->name_idx, name_use);
13225 + list_for_each_safe(pos, q, &md->part) {
13226 + part_md = list_entry(pos, struct mmc_blk_data, part);
13227 + list_del(pos);
13228 +@@ -2502,11 +2494,12 @@ static const struct mmc_fixup blk_fixups[] =
13229 + MMC_QUIRK_BLK_NO_CMD23),
13230 +
13231 + /*
13232 +- * Some Micron MMC cards needs longer data read timeout than
13233 +- * indicated in CSD.
13234 ++ * Some MMC cards need longer data read timeout than indicated in CSD.
13235 + */
13236 + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
13237 + MMC_QUIRK_LONG_READ_TIME),
13238 ++ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
13239 ++ MMC_QUIRK_LONG_READ_TIME),
13240 +
13241 + /*
13242 + * On these Samsung MoviNAND parts, performing secure erase or
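
The two hunks above move the CAP_SYS_RAWIO / whole-device check out of the
mmc_blk_ioctl() dispatcher and repeat it at the top of both
mmc_blk_ioctl_cmd() and mmc_blk_ioctl_multi_cmd(), so each handler enforces
the policy no matter how it is reached. A minimal userspace sketch of that
defense-in-depth pattern follows; caller_is_privileged() and
is_whole_device() are hypothetical stand-ins for capable(CAP_SYS_RAWIO) and
the bdev == bdev->bd_contains test:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for capable(CAP_SYS_RAWIO) and "is the whole device". */
static bool caller_is_privileged(void) { return false; }
static bool is_whole_device(void)      { return true;  }

/* Every externally reachable handler repeats the check, so a future
 * caller that bypasses the common dispatcher still cannot skip it. */
static int ioctl_single_cmd(void)
{
    if (!caller_is_privileged() || !is_whole_device())
        return -EPERM;
    return 0; /* ... issue one command ... */
}

static int ioctl_multi_cmd(void)
{
    if (!caller_is_privileged() || !is_whole_device())
        return -EPERM;
    return 0; /* ... issue a batch of commands ... */
}

int main(void)
{
    printf("%d %d\n", ioctl_single_cmd(), ioctl_multi_cmd());
    return 0;
}
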
13243 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
13244 +index f95d41ffc766..228a81bcea49 100644
13245 +--- a/drivers/mmc/core/core.c
13246 ++++ b/drivers/mmc/core/core.c
13247 +@@ -868,11 +868,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
13248 + /*
13249 + * Some cards require longer data read timeout than indicated in CSD.
13250 + * Address this by setting the read timeout to a "reasonably high"
13251 +- * value. For the cards tested, 300ms has proven enough. If necessary,
13252 ++ * value. For the cards tested, 600ms has proven enough. If necessary,
13253 + * this value can be increased if other problematic cards require this.
13254 + */
13255 + if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
13256 +- data->timeout_ns = 300000000;
13257 ++ data->timeout_ns = 600000000;
13258 + data->timeout_clks = 0;
13259 + }
13260 +
13261 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
13262 +index bf49e44571f2..07a419fedd43 100644
13263 +--- a/drivers/mmc/core/mmc.c
13264 ++++ b/drivers/mmc/core/mmc.c
13265 +@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
13266 + }
13267 + }
13268 +
13269 ++/* Minimum partition switch timeout in milliseconds */
13270 ++#define MMC_MIN_PART_SWITCH_TIME 300
13271 ++
13272 + /*
13273 + * Decode extended CSD.
13274 + */
13275 +@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
13276 +
13277 + /* EXT_CSD value is in units of 10ms, but we store in ms */
13278 + card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
13279 ++ /* Some eMMC set the value too low so set a minimum */
13280 ++ if (card->ext_csd.part_time &&
13281 ++ card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
13282 ++ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
13283 +
13284 + /* Sleep / awake timeout in 100ns units */
13285 + if (sa_shift > 0 && sa_shift <= 0x17)
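
The hunk above clamps the device-reported EXT_CSD partition-switch time to
a 300 ms floor, since some eMMC parts advertise a value that is too small
to be trusted. A self-contained sketch of the same clamp; the constant and
the 10 ms unit come from the hunk, the function name is made up:

#include <stdio.h>

#define MIN_PART_SWITCH_TIME_MS 300 /* floor taken from the hunk */

/* EXT_CSD_PART_SWITCH_TIME is in 10 ms units; clamp nonzero but
 * implausibly small values up to a safe minimum. */
static unsigned int part_switch_time_ms(unsigned char ext_csd_val)
{
    unsigned int ms = 10u * ext_csd_val;

    if (ms && ms < MIN_PART_SWITCH_TIME_MS)
        ms = MIN_PART_SWITCH_TIME_MS;
    return ms;
}

int main(void)
{
    printf("%u\n", part_switch_time_ms(1));  /* 10 -> clamped to 300 */
    printf("%u\n", part_switch_time_ms(50)); /* 500 stays 500        */
    printf("%u\n", part_switch_time_ms(0));  /* 0 = "not specified"  */
    return 0;
}
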
13286 +diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
13287 +index 1526b8a10b09..3b944fc70eec 100644
13288 +--- a/drivers/mmc/host/Kconfig
13289 ++++ b/drivers/mmc/host/Kconfig
13290 +@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
13291 + config MMC_SDHCI_ACPI
13292 + tristate "SDHCI support for ACPI enumerated SDHCI controllers"
13293 + depends on MMC_SDHCI && ACPI
13294 ++ select IOSF_MBI if X86
13295 + help
13296 + This selects support for ACPI enumerated SDHCI controllers,
13297 + identified by ACPI Compatibility ID PNP0D40 or specific
13298 +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
13299 +index 851ccd9ac868..25c179592125 100644
13300 +--- a/drivers/mmc/host/atmel-mci.c
13301 ++++ b/drivers/mmc/host/atmel-mci.c
13302 +@@ -2443,7 +2443,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
13303 + struct mci_platform_data *pdata = host->pdev->dev.platform_data;
13304 + dma_cap_mask_t mask;
13305 +
13306 +- if (!pdata->dma_filter)
13307 ++ if (!pdata || !pdata->dma_filter)
13308 + return -ENODEV;
13309 +
13310 + dma_cap_zero(mask);
13311 +diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
13312 +index 3446097a43c0..e77d79c8cd9f 100644
13313 +--- a/drivers/mmc/host/mmc_spi.c
13314 ++++ b/drivers/mmc/host/mmc_spi.c
13315 +@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
13316 + host->pdata->cd_debounce);
13317 + if (status != 0)
13318 + goto fail_add_host;
13319 ++
13320 ++ /* The platform has a CD GPIO signal that may support
13321 ++ * interrupts, so let mmc_gpiod_request_cd_irq() decide
13322 ++ * if polling is needed or not.
13323 ++ */
13324 ++ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
13325 + mmc_gpiod_request_cd_irq(mmc);
13326 + }
13327 +
13328 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
13329 +index a5cda926d38e..e517be7f03bf 100644
13330 +--- a/drivers/mmc/host/sdhci-acpi.c
13331 ++++ b/drivers/mmc/host/sdhci-acpi.c
13332 +@@ -41,6 +41,11 @@
13333 + #include <linux/mmc/pm.h>
13334 + #include <linux/mmc/slot-gpio.h>
13335 +
13336 ++#ifdef CONFIG_X86
13337 ++#include <asm/cpu_device_id.h>
13338 ++#include <asm/iosf_mbi.h>
13339 ++#endif
13340 ++
13341 + #include "sdhci.h"
13342 +
13343 + enum {
13344 +@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
13345 + .ops = &sdhci_acpi_ops_int,
13346 + };
13347 +
13348 ++#ifdef CONFIG_X86
13349 ++
13350 ++static bool sdhci_acpi_byt(void)
13351 ++{
13352 ++ static const struct x86_cpu_id byt[] = {
13353 ++ { X86_VENDOR_INTEL, 6, 0x37 },
13354 ++ {}
13355 ++ };
13356 ++
13357 ++ return x86_match_cpu(byt);
13358 ++}
13359 ++
13360 ++#define BYT_IOSF_SCCEP 0x63
13361 ++#define BYT_IOSF_OCP_NETCTRL0 0x1078
13362 ++#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
13363 ++
13364 ++static void sdhci_acpi_byt_setting(struct device *dev)
13365 ++{
13366 ++ u32 val = 0;
13367 ++
13368 ++ if (!sdhci_acpi_byt())
13369 ++ return;
13370 ++
13371 ++ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
13372 ++ &val)) {
13373 ++ dev_err(dev, "%s read error\n", __func__);
13374 ++ return;
13375 ++ }
13376 ++
13377 ++ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
13378 ++ return;
13379 ++
13380 ++ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
13381 ++
13382 ++ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
13383 ++ val)) {
13384 ++ dev_err(dev, "%s write error\n", __func__);
13385 ++ return;
13386 ++ }
13387 ++
13388 ++ dev_dbg(dev, "%s completed\n", __func__);
13389 ++}
13390 ++
13391 ++static bool sdhci_acpi_byt_defer(struct device *dev)
13392 ++{
13393 ++ if (!sdhci_acpi_byt())
13394 ++ return false;
13395 ++
13396 ++ if (!iosf_mbi_available())
13397 ++ return true;
13398 ++
13399 ++ sdhci_acpi_byt_setting(dev);
13400 ++
13401 ++ return false;
13402 ++}
13403 ++
13404 ++#else
13405 ++
13406 ++static inline void sdhci_acpi_byt_setting(struct device *dev)
13407 ++{
13408 ++}
13409 ++
13410 ++static inline bool sdhci_acpi_byt_defer(struct device *dev)
13411 ++{
13412 ++ return false;
13413 ++}
13414 ++
13415 ++#endif
13416 ++
13417 + static int bxt_get_cd(struct mmc_host *mmc)
13418 + {
13419 + int gpio_cd = mmc_gpio_get_cd(mmc);
13420 +@@ -233,7 +307,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
13421 + .chip = &sdhci_acpi_chip_int,
13422 + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
13423 + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
13424 +- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
13425 ++ MMC_CAP_WAIT_WHILE_BUSY,
13426 + .caps2 = MMC_CAP2_HC_ERASE_SZ,
13427 + .flags = SDHCI_ACPI_RUNTIME_PM,
13428 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
13429 +@@ -248,7 +322,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
13430 + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
13431 + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
13432 + .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
13433 +- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
13434 ++ MMC_CAP_WAIT_WHILE_BUSY,
13435 + .flags = SDHCI_ACPI_RUNTIME_PM,
13436 + .pm_caps = MMC_PM_KEEP_POWER,
13437 + .probe_slot = sdhci_acpi_sdio_probe_slot,
13438 +@@ -260,7 +334,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
13439 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
13440 + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
13441 + SDHCI_QUIRK2_STOP_WITH_TC,
13442 +- .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
13443 ++ .caps = MMC_CAP_WAIT_WHILE_BUSY,
13444 + .probe_slot = sdhci_acpi_sd_probe_slot,
13445 + };
13446 +
13447 +@@ -322,7 +396,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
13448 + {
13449 + struct device *dev = &pdev->dev;
13450 + acpi_handle handle = ACPI_HANDLE(dev);
13451 +- struct acpi_device *device;
13452 ++ struct acpi_device *device, *child;
13453 + struct sdhci_acpi_host *c;
13454 + struct sdhci_host *host;
13455 + struct resource *iomem;
13456 +@@ -334,9 +408,17 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
13457 + if (acpi_bus_get_device(handle, &device))
13458 + return -ENODEV;
13459 +
13460 ++ /* Power on the SDHCI controller and its children */
13461 ++ acpi_device_fix_up_power(device);
13462 ++ list_for_each_entry(child, &device->children, node)
13463 ++ acpi_device_fix_up_power(child);
13464 ++
13465 + if (acpi_bus_get_status(device) || !device->status.present)
13466 + return -ENODEV;
13467 +
13468 ++ if (sdhci_acpi_byt_defer(dev))
13469 ++ return -EPROBE_DEFER;
13470 ++
13471 + hid = acpi_device_hid(device);
13472 + uid = device->pnp.unique_id;
13473 +
13474 +@@ -460,6 +542,8 @@ static int sdhci_acpi_resume(struct device *dev)
13475 + {
13476 + struct sdhci_acpi_host *c = dev_get_drvdata(dev);
13477 +
13478 ++ sdhci_acpi_byt_setting(&c->pdev->dev);
13479 ++
13480 + return sdhci_resume_host(c->host);
13481 + }
13482 +
13483 +@@ -483,6 +567,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
13484 + {
13485 + struct sdhci_acpi_host *c = dev_get_drvdata(dev);
13486 +
13487 ++ sdhci_acpi_byt_setting(&c->pdev->dev);
13488 ++
13489 + return sdhci_runtime_resume_host(c->host);
13490 + }
13491 +
13492 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
13493 +index df3b8eced8c4..807f930a7c14 100644
13494 +--- a/drivers/mmc/host/sdhci-pci-core.c
13495 ++++ b/drivers/mmc/host/sdhci-pci-core.c
13496 +@@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
13497 + {
13498 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
13499 + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
13500 +- MMC_CAP_BUS_WIDTH_TEST |
13501 + MMC_CAP_WAIT_WHILE_BUSY;
13502 + slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
13503 + slot->hw_reset = sdhci_pci_int_hw_reset;
13504 +@@ -377,19 +376,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
13505 + static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
13506 + {
13507 + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
13508 +- MMC_CAP_BUS_WIDTH_TEST |
13509 + MMC_CAP_WAIT_WHILE_BUSY;
13510 + return 0;
13511 + }
13512 +
13513 + static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
13514 + {
13515 +- slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
13516 +- MMC_CAP_WAIT_WHILE_BUSY;
13517 ++ slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
13518 + slot->cd_con_id = NULL;
13519 + slot->cd_idx = 0;
13520 + slot->cd_override_level = true;
13521 + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
13522 ++ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
13523 + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
13524 + slot->host->mmc_host_ops.get_cd = bxt_get_cd;
13525 +
13526 +@@ -1173,6 +1171,30 @@ static const struct pci_device_id pci_ids[] = {
13527 +
13528 + {
13529 + .vendor = PCI_VENDOR_ID_INTEL,
13530 ++ .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
13531 ++ .subvendor = PCI_ANY_ID,
13532 ++ .subdevice = PCI_ANY_ID,
13533 ++ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
13534 ++ },
13535 ++
13536 ++ {
13537 ++ .vendor = PCI_VENDOR_ID_INTEL,
13538 ++ .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
13539 ++ .subvendor = PCI_ANY_ID,
13540 ++ .subdevice = PCI_ANY_ID,
13541 ++ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
13542 ++ },
13543 ++
13544 ++ {
13545 ++ .vendor = PCI_VENDOR_ID_INTEL,
13546 ++ .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
13547 ++ .subvendor = PCI_ANY_ID,
13548 ++ .subdevice = PCI_ANY_ID,
13549 ++ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
13550 ++ },
13551 ++
13552 ++ {
13553 ++ .vendor = PCI_VENDOR_ID_INTEL,
13554 + .device = PCI_DEVICE_ID_INTEL_APL_EMMC,
13555 + .subvendor = PCI_ANY_ID,
13556 + .subdevice = PCI_ANY_ID,
13557 +diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
13558 +index d1a0b4db60db..89e7151684a1 100644
13559 +--- a/drivers/mmc/host/sdhci-pci.h
13560 ++++ b/drivers/mmc/host/sdhci-pci.h
13561 +@@ -28,6 +28,9 @@
13562 + #define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
13563 + #define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
13564 + #define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
13565 ++#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
13566 ++#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
13567 ++#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
13568 + #define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
13569 + #define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
13570 + #define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
13571 +diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
13572 +index f5edf9d3a18a..452bf500c82e 100644
13573 +--- a/drivers/mmc/host/sdhci-pxav3.c
13574 ++++ b/drivers/mmc/host/sdhci-pxav3.c
13575 +@@ -137,6 +137,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
13576 +
13577 + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
13578 + host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
13579 ++
13580 ++ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
13581 ++ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
13582 ++
13583 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
13584 + "conf-sdio3");
13585 + if (res) {
13586 +@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
13587 + * Configuration register, if the adjustment is not done,
13588 + * remove them from the capabilities.
13589 + */
13590 +- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
13591 + host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
13592 +
13593 + dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
13594 +@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
13595 + * controller has different capabilities than the ones shown
13596 + * in its registers
13597 + */
13598 +- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
13599 + if (of_property_read_bool(np, "no-1-8-v")) {
13600 + host->caps &= ~SDHCI_CAN_VDD_180;
13601 + host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
13602 +@@ -307,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
13603 + __func__, uhs, ctrl_2);
13604 + }
13605 +
13606 ++static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
13607 ++ unsigned short vdd)
13608 ++{
13609 ++ struct mmc_host *mmc = host->mmc;
13610 ++ u8 pwr = host->pwr;
13611 ++
13612 ++ sdhci_set_power(host, mode, vdd);
13613 ++
13614 ++ if (host->pwr == pwr)
13615 ++ return;
13616 ++
13617 ++ if (host->pwr == 0)
13618 ++ vdd = 0;
13619 ++
13620 ++ if (!IS_ERR(mmc->supply.vmmc)) {
13621 ++ spin_unlock_irq(&host->lock);
13622 ++ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
13623 ++ spin_lock_irq(&host->lock);
13624 ++ }
13625 ++}
13626 ++
13627 + static const struct sdhci_ops pxav3_sdhci_ops = {
13628 + .set_clock = sdhci_set_clock,
13629 ++ .set_power = pxav3_set_power,
13630 + .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
13631 + .get_max_clock = sdhci_pltfm_clk_get_max_clock,
13632 + .set_bus_width = sdhci_set_bus_width,
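
pxav3_set_power() above runs with host->lock held, but
mmc_regulator_set_ocr() may sleep, so the hook drops the spinlock around
the regulator call and retakes it afterwards. A userspace sketch of the
unlock-around-blocking-call pattern using a pthread mutex in place of the
spinlock; all names are hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void blocking_regulator_call(int vdd) { (void)vdd; /* may sleep */ }

/* Called with `lock` held, like pxav3_set_power() with host->lock:
 * drop the lock around the call that can sleep, then retake it. */
static void set_power_locked(int old_pwr, int new_pwr, int vdd)
{
    if (new_pwr == old_pwr)
        return;                      /* nothing changed: skip regulator */
    if (new_pwr == 0)
        vdd = 0;                     /* powering off: request 0 volts */

    pthread_mutex_unlock(&lock);
    blocking_regulator_call(vdd);    /* safe: lock not held */
    pthread_mutex_lock(&lock);
}

int main(void)
{
    pthread_mutex_lock(&lock);
    set_power_locked(0, 1, 3300);
    pthread_mutex_unlock(&lock);
    puts("done");
    return 0;
}
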
13633 +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
13634 +index 83c4bf7bc16c..0004721cd213 100644
13635 +--- a/drivers/mmc/host/sdhci-tegra.c
13636 ++++ b/drivers/mmc/host/sdhci-tegra.c
13637 +@@ -147,10 +147,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
13638 + /* Advertise UHS modes as supported by host */
13639 + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
13640 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
13641 ++ else
13642 ++ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
13643 + if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
13644 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
13645 ++ else
13646 ++ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
13647 + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
13648 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
13649 ++ else
13650 ++ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
13651 + sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
13652 +
13653 + clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
13654 +@@ -188,7 +194,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
13655 + unsigned long host_clk;
13656 +
13657 + if (!clock)
13658 +- return;
13659 ++ return sdhci_set_clock(host, clock);
13660 +
13661 + host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
13662 + clk_set_rate(pltfm_host->clk, host_clk);
13663 +@@ -335,6 +341,10 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
13664 +
13665 + static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
13666 + .pdata = &sdhci_tegra114_pdata,
13667 ++};
13668 ++
13669 ++static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
13670 ++ .pdata = &sdhci_tegra114_pdata,
13671 + .nvquirks = NVQUIRK_ENABLE_SDR50 |
13672 + NVQUIRK_ENABLE_DDR50 |
13673 + NVQUIRK_ENABLE_SDR104,
13674 +@@ -357,7 +367,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
13675 +
13676 + static const struct of_device_id sdhci_tegra_dt_match[] = {
13677 + { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
13678 +- { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
13679 ++ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
13680 + { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
13681 + { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
13682 + { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
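
The Tegra hunk splits soc_data_tegra124 out of soc_data_tegra114 so the
UHS quirks apply only to the tegra124 compatible while both still share
the same pdata. A small sketch of the match-table idiom, where each
compatible string carries a pointer to per-SoC data; the strings are
copied from the hunk, the rest is illustrative:

#include <stdio.h>
#include <string.h>

struct soc_data { const char *pdata; unsigned int nvquirks; };

static const struct soc_data tegra114 = { "tegra114_pdata", 0x0 };
static const struct soc_data tegra124 = { "tegra114_pdata", 0x7 }; /* shared pdata, own quirks */

struct of_match { const char *compatible; const struct soc_data *data; };

static const struct of_match matches[] = {
    { "nvidia,tegra124-sdhci", &tegra124 },
    { "nvidia,tegra114-sdhci", &tegra114 },
    { NULL, NULL }
};

static const struct soc_data *match(const char *compat)
{
    const struct of_match *m;

    for (m = matches; m->compatible; m++)
        if (!strcmp(m->compatible, compat))
            return m->data;
    return NULL;
}

int main(void)
{
    printf("quirks = %#x\n", match("nvidia,tegra124-sdhci")->nvquirks);
    return 0;
}
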
13683 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
13684 +index add9fdfd1d8f..6d485b5fa5ca 100644
13685 +--- a/drivers/mmc/host/sdhci.c
13686 ++++ b/drivers/mmc/host/sdhci.c
13687 +@@ -465,8 +465,6 @@ static void sdhci_adma_mark_end(void *desc)
13688 + static int sdhci_adma_table_pre(struct sdhci_host *host,
13689 + struct mmc_data *data)
13690 + {
13691 +- int direction;
13692 +-
13693 + void *desc;
13694 + void *align;
13695 + dma_addr_t addr;
13696 +@@ -483,20 +481,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
13697 + * We currently guess that it is LE.
13698 + */
13699 +
13700 +- if (data->flags & MMC_DATA_READ)
13701 +- direction = DMA_FROM_DEVICE;
13702 +- else
13703 +- direction = DMA_TO_DEVICE;
13704 +-
13705 +- host->align_addr = dma_map_single(mmc_dev(host->mmc),
13706 +- host->align_buffer, host->align_buffer_sz, direction);
13707 +- if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
13708 +- goto fail;
13709 +- BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
13710 +-
13711 + host->sg_count = sdhci_pre_dma_transfer(host, data);
13712 + if (host->sg_count < 0)
13713 +- goto unmap_align;
13714 ++ return -EINVAL;
13715 +
13716 + desc = host->adma_table;
13717 + align = host->align_buffer;
13718 +@@ -570,22 +557,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
13719 + /* nop, end, valid */
13720 + sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
13721 + }
13722 +-
13723 +- /*
13724 +- * Resync align buffer as we might have changed it.
13725 +- */
13726 +- if (data->flags & MMC_DATA_WRITE) {
13727 +- dma_sync_single_for_device(mmc_dev(host->mmc),
13728 +- host->align_addr, host->align_buffer_sz, direction);
13729 +- }
13730 +-
13731 + return 0;
13732 +-
13733 +-unmap_align:
13734 +- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
13735 +- host->align_buffer_sz, direction);
13736 +-fail:
13737 +- return -EINVAL;
13738 + }
13739 +
13740 + static void sdhci_adma_table_post(struct sdhci_host *host,
13741 +@@ -605,9 +577,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
13742 + else
13743 + direction = DMA_TO_DEVICE;
13744 +
13745 +- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
13746 +- host->align_buffer_sz, direction);
13747 +-
13748 + /* Do a quick scan of the SG list for any unaligned mappings */
13749 + has_unaligned = false;
13750 + for_each_sg(data->sg, sg, host->sg_count, i)
13751 +@@ -666,9 +635,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
13752 + if (!data)
13753 + target_timeout = cmd->busy_timeout * 1000;
13754 + else {
13755 +- target_timeout = data->timeout_ns / 1000;
13756 +- if (host->clock)
13757 +- target_timeout += data->timeout_clks / host->clock;
13758 ++ target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
13759 ++ if (host->clock && data->timeout_clks) {
13760 ++ unsigned long long val;
13761 ++
13762 ++ /*
13763 ++ * data->timeout_clks is in units of clock cycles.
13764 ++ * host->clock is in Hz. target_timeout is in us.
13765 ++ * Hence, us = 1000000 * cycles / Hz. Round up.
13766 ++ */
13767 ++ val = 1000000 * data->timeout_clks;
13768 ++ if (do_div(val, host->clock))
13769 ++ target_timeout++;
13770 ++ target_timeout += val;
13771 ++ }
13772 + }
13773 +
13774 + /*
13775 +@@ -1003,6 +983,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
13776 +
13777 + WARN_ON(host->cmd);
13778 +
13779 ++ /* Initially, a command has no error */
13780 ++ cmd->error = 0;
13781 ++
13782 + /* Wait max 10 ms */
13783 + timeout = 10;
13784 +
13785 +@@ -1097,8 +1080,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
13786 + }
13787 + }
13788 +
13789 +- host->cmd->error = 0;
13790 +-
13791 + /* Finished CMD23, now send actual command. */
13792 + if (host->cmd == host->mrq->sbc) {
13793 + host->cmd = NULL;
13794 +@@ -1269,10 +1250,24 @@ clock_set:
13795 + }
13796 + EXPORT_SYMBOL_GPL(sdhci_set_clock);
13797 +
13798 +-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
13799 +- unsigned short vdd)
13800 ++static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
13801 ++ unsigned short vdd)
13802 + {
13803 + struct mmc_host *mmc = host->mmc;
13804 ++
13805 ++ spin_unlock_irq(&host->lock);
13806 ++ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
13807 ++ spin_lock_irq(&host->lock);
13808 ++
13809 ++ if (mode != MMC_POWER_OFF)
13810 ++ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
13811 ++ else
13812 ++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
13813 ++}
13814 ++
13815 ++void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
13816 ++ unsigned short vdd)
13817 ++{
13818 + u8 pwr = 0;
13819 +
13820 + if (mode != MMC_POWER_OFF) {
13821 +@@ -1304,7 +1299,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
13822 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
13823 + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
13824 + sdhci_runtime_pm_bus_off(host);
13825 +- vdd = 0;
13826 + } else {
13827 + /*
13828 + * Spec says that we should clear the power reg before setting
13829 +@@ -1335,12 +1329,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
13830 + if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
13831 + mdelay(10);
13832 + }
13833 ++}
13834 ++EXPORT_SYMBOL_GPL(sdhci_set_power);
13835 +
13836 +- if (!IS_ERR(mmc->supply.vmmc)) {
13837 +- spin_unlock_irq(&host->lock);
13838 +- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
13839 +- spin_lock_irq(&host->lock);
13840 +- }
13841 ++static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
13842 ++ unsigned short vdd)
13843 ++{
13844 ++ struct mmc_host *mmc = host->mmc;
13845 ++
13846 ++ if (host->ops->set_power)
13847 ++ host->ops->set_power(host, mode, vdd);
13848 ++ else if (!IS_ERR(mmc->supply.vmmc))
13849 ++ sdhci_set_power_reg(host, mode, vdd);
13850 ++ else
13851 ++ sdhci_set_power(host, mode, vdd);
13852 + }
13853 +
13854 + /*****************************************************************************\
13855 +@@ -1490,7 +1492,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
13856 + }
13857 + }
13858 +
13859 +- sdhci_set_power(host, ios->power_mode, ios->vdd);
13860 ++ __sdhci_set_power(host, ios->power_mode, ios->vdd);
13861 +
13862 + if (host->ops->platform_send_init_74_clocks)
13863 + host->ops->platform_send_init_74_clocks(host, ios->power_mode);
13864 +@@ -2114,14 +2116,13 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
13865 + struct sdhci_host *host = mmc_priv(mmc);
13866 + struct mmc_data *data = mrq->data;
13867 +
13868 +- if (host->flags & SDHCI_REQ_USE_DMA) {
13869 +- if (data->host_cookie == COOKIE_GIVEN ||
13870 +- data->host_cookie == COOKIE_MAPPED)
13871 +- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
13872 +- data->flags & MMC_DATA_WRITE ?
13873 +- DMA_TO_DEVICE : DMA_FROM_DEVICE);
13874 +- data->host_cookie = COOKIE_UNMAPPED;
13875 +- }
13876 ++ if (data->host_cookie == COOKIE_GIVEN ||
13877 ++ data->host_cookie == COOKIE_MAPPED)
13878 ++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
13879 ++ data->flags & MMC_DATA_WRITE ?
13880 ++ DMA_TO_DEVICE : DMA_FROM_DEVICE);
13881 ++
13882 ++ data->host_cookie = COOKIE_UNMAPPED;
13883 + }
13884 +
13885 + static int sdhci_pre_dma_transfer(struct sdhci_host *host,
13886 +@@ -2238,6 +2239,22 @@ static void sdhci_tasklet_finish(unsigned long param)
13887 + mrq = host->mrq;
13888 +
13889 + /*
13890 ++ * Always unmap the data buffers if they were mapped by
13891 ++ * sdhci_prepare_data() whenever we finish with a request.
13892 ++ * This avoids leaking DMA mappings on error.
13893 ++ */
13894 ++ if (host->flags & SDHCI_REQ_USE_DMA) {
13895 ++ struct mmc_data *data = mrq->data;
13896 ++
13897 ++ if (data && data->host_cookie == COOKIE_MAPPED) {
13898 ++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
13899 ++ (data->flags & MMC_DATA_READ) ?
13900 ++ DMA_FROM_DEVICE : DMA_TO_DEVICE);
13901 ++ data->host_cookie = COOKIE_UNMAPPED;
13902 ++ }
13903 ++ }
13904 ++
13905 ++ /*
13906 + * The controller needs a reset of internal state machines
13907 + * upon error conditions.
13908 + */
13909 +@@ -2322,13 +2339,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
13910 + return;
13911 + }
13912 +
13913 +- if (intmask & SDHCI_INT_TIMEOUT)
13914 +- host->cmd->error = -ETIMEDOUT;
13915 +- else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
13916 +- SDHCI_INT_INDEX))
13917 +- host->cmd->error = -EILSEQ;
13918 ++ if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
13919 ++ SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
13920 ++ if (intmask & SDHCI_INT_TIMEOUT)
13921 ++ host->cmd->error = -ETIMEDOUT;
13922 ++ else
13923 ++ host->cmd->error = -EILSEQ;
13924 ++
13925 ++ /*
13926 ++ * If this command initiates a data phase and a response
13927 ++ * CRC error is signalled, the card can start transferring
13928 ++ * data - the card may have received the command without
13929 ++ * error. We must not terminate the mmc_request early.
13930 ++ *
13931 ++ * If the card did not receive the command or returned an
13932 ++ * error which prevented it sending data, the data phase
13933 ++ * will time out.
13934 ++ */
13935 ++ if (host->cmd->data &&
13936 ++ (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
13937 ++ SDHCI_INT_CRC) {
13938 ++ host->cmd = NULL;
13939 ++ return;
13940 ++ }
13941 +
13942 +- if (host->cmd->error) {
13943 + tasklet_schedule(&host->finish_tasklet);
13944 + return;
13945 + }
13946 +@@ -2967,14 +3001,21 @@ int sdhci_add_host(struct sdhci_host *host)
13947 + &host->adma_addr,
13948 + GFP_KERNEL);
13949 + host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
13950 +- host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
13951 ++ host->align_buffer = dma_alloc_coherent(mmc_dev(mmc),
13952 ++ host->align_buffer_sz,
13953 ++ &host->align_addr,
13954 ++ GFP_KERNEL);
13955 + if (!host->adma_table || !host->align_buffer) {
13956 + if (host->adma_table)
13957 + dma_free_coherent(mmc_dev(mmc),
13958 + host->adma_table_sz,
13959 + host->adma_table,
13960 + host->adma_addr);
13961 +- kfree(host->align_buffer);
13962 ++ if (host->align_buffer)
13963 ++ dma_free_coherent(mmc_dev(mmc),
13964 ++ host->align_buffer_sz,
13965 ++ host->align_buffer,
13966 ++ host->align_addr);
13967 + pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
13968 + mmc_hostname(mmc));
13969 + host->flags &= ~SDHCI_USE_ADMA;
13970 +@@ -2986,10 +3027,14 @@ int sdhci_add_host(struct sdhci_host *host)
13971 + host->flags &= ~SDHCI_USE_ADMA;
13972 + dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
13973 + host->adma_table, host->adma_addr);
13974 +- kfree(host->align_buffer);
13975 ++ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
13976 ++ host->align_buffer, host->align_addr);
13977 + host->adma_table = NULL;
13978 + host->align_buffer = NULL;
13979 + }
13980 ++
13981 ++ /* dma_alloc_coherent returns page aligned and sized buffers */
13982 ++ BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
13983 + }
13984 +
13985 + /*
13986 +@@ -3072,14 +3117,14 @@ int sdhci_add_host(struct sdhci_host *host)
13987 + if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
13988 + host->timeout_clk *= 1000;
13989 +
13990 ++ if (override_timeout_clk)
13991 ++ host->timeout_clk = override_timeout_clk;
13992 ++
13993 + mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
13994 + host->ops->get_max_timeout_count(host) : 1 << 27;
13995 + mmc->max_busy_timeout /= host->timeout_clk;
13996 + }
13997 +
13998 +- if (override_timeout_clk)
13999 +- host->timeout_clk = override_timeout_clk;
14000 +-
14001 + mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
14002 + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
14003 +
14004 +@@ -3452,7 +3497,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
14005 + if (host->adma_table)
14006 + dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
14007 + host->adma_table, host->adma_addr);
14008 +- kfree(host->align_buffer);
14009 ++ if (host->align_buffer)
14010 ++ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
14011 ++ host->align_buffer, host->align_addr);
14012 +
14013 + host->adma_table = NULL;
14014 + host->align_buffer = NULL;
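
Among the sdhci.c changes above, the timeout fix converts
data->timeout_clks into microseconds with 64-bit math and rounds up; the
old code computed timeout_clks / host->clock in integer arithmetic, which
is almost always zero for realistic clock rates. A standalone version of
the corrected conversion, using plain C division as a stand-in for the
kernel's do_div():

#include <stdint.h>
#include <stdio.h>

/* Convert a cycle count at a given clock into microseconds, rounding
 * up, in 64-bit math so 1000000 * cycles cannot overflow 32 bits. */
static uint64_t cycles_to_us_round_up(uint32_t cycles, uint32_t hz)
{
    uint64_t val = 1000000ull * cycles;

    return (val + hz - 1) / hz;  /* = do_div() plus the round-up step */
}

int main(void)
{
    /* 1000 cycles at 52 MHz is 19.23 us and rounds up to 20 us; the
     * old truncating division 1000 / 52000000 contributed 0. */
    printf("%llu us\n",
           (unsigned long long)cycles_to_us_round_up(1000, 52000000));
    return 0;
}
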
14015 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
14016 +index 0115e9907bf8..033d72b5bbd5 100644
14017 +--- a/drivers/mmc/host/sdhci.h
14018 ++++ b/drivers/mmc/host/sdhci.h
14019 +@@ -529,6 +529,8 @@ struct sdhci_ops {
14020 + #endif
14021 +
14022 + void (*set_clock)(struct sdhci_host *host, unsigned int clock);
14023 ++ void (*set_power)(struct sdhci_host *host, unsigned char mode,
14024 ++ unsigned short vdd);
14025 +
14026 + int (*enable_dma)(struct sdhci_host *host);
14027 + unsigned int (*get_max_clock)(struct sdhci_host *host);
14028 +@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
14029 + }
14030 +
14031 + void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
14032 ++void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
14033 ++ unsigned short vdd);
14034 + void sdhci_set_bus_width(struct sdhci_host *host, int width);
14035 + void sdhci_reset(struct sdhci_host *host, u8 mask);
14036 + void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
14037 +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
14038 +index 844fc07d22cd..f7009c1cb90c 100644
14039 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c
14040 ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
14041 +@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
14042 + [BRCMNAND_FC_BASE] = 0x400,
14043 + };
14044 +
14045 ++/* BRCMNAND v7.1 */
14046 ++static const u16 brcmnand_regs_v71[] = {
14047 ++ [BRCMNAND_CMD_START] = 0x04,
14048 ++ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
14049 ++ [BRCMNAND_CMD_ADDRESS] = 0x0c,
14050 ++ [BRCMNAND_INTFC_STATUS] = 0x14,
14051 ++ [BRCMNAND_CS_SELECT] = 0x18,
14052 ++ [BRCMNAND_CS_XOR] = 0x1c,
14053 ++ [BRCMNAND_LL_OP] = 0x20,
14054 ++ [BRCMNAND_CS0_BASE] = 0x50,
14055 ++ [BRCMNAND_CS1_BASE] = 0,
14056 ++ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
14057 ++ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
14058 ++ [BRCMNAND_UNCORR_COUNT] = 0xfc,
14059 ++ [BRCMNAND_CORR_COUNT] = 0x100,
14060 ++ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
14061 ++ [BRCMNAND_CORR_ADDR] = 0x110,
14062 ++ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
14063 ++ [BRCMNAND_UNCORR_ADDR] = 0x118,
14064 ++ [BRCMNAND_SEMAPHORE] = 0x150,
14065 ++ [BRCMNAND_ID] = 0x194,
14066 ++ [BRCMNAND_ID_EXT] = 0x198,
14067 ++ [BRCMNAND_LL_RDATA] = 0x19c,
14068 ++ [BRCMNAND_OOB_READ_BASE] = 0x200,
14069 ++ [BRCMNAND_OOB_READ_10_BASE] = 0,
14070 ++ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
14071 ++ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
14072 ++ [BRCMNAND_FC_BASE] = 0x400,
14073 ++};
14074 ++
14075 + enum brcmnand_cs_reg {
14076 + BRCMNAND_CS_CFG_EXT = 0,
14077 + BRCMNAND_CS_CFG,
14078 +@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
14079 + }
14080 +
14081 + /* Register offsets */
14082 +- if (ctrl->nand_version >= 0x0600)
14083 ++ if (ctrl->nand_version >= 0x0701)
14084 ++ ctrl->reg_offsets = brcmnand_regs_v71;
14085 ++ else if (ctrl->nand_version >= 0x0600)
14086 + ctrl->reg_offsets = brcmnand_regs_v60;
14087 + else if (ctrl->nand_version >= 0x0500)
14088 + ctrl->reg_offsets = brcmnand_regs_v50;
14089 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
14090 +index f2c8ff398d6c..171d146645ba 100644
14091 +--- a/drivers/mtd/nand/nand_base.c
14092 ++++ b/drivers/mtd/nand/nand_base.c
14093 +@@ -4021,7 +4021,6 @@ static int nand_dt_init(struct nand_chip *chip)
14094 + * This is the first phase of the normal nand_scan() function. It reads the
14095 + * flash ID and sets up MTD fields accordingly.
14096 + *
14097 +- * The mtd->owner field must be set to the module of the caller.
14098 + */
14099 + int nand_scan_ident(struct mtd_info *mtd, int maxchips,
14100 + struct nand_flash_dev *table)
14101 +@@ -4443,19 +4442,12 @@ EXPORT_SYMBOL(nand_scan_tail);
14102 + *
14103 + * This fills out all the uninitialized function pointers with the defaults.
14104 + * The flash ID is read and the mtd/chip structures are filled with the
14105 +- * appropriate values. The mtd->owner field must be set to the module of the
14106 +- * caller.
14107 ++ * appropriate values.
14108 + */
14109 + int nand_scan(struct mtd_info *mtd, int maxchips)
14110 + {
14111 + int ret;
14112 +
14113 +- /* Many callers got this wrong, so check for it for a while... */
14114 +- if (!mtd->owner && caller_is_module()) {
14115 +- pr_crit("%s called with NULL mtd->owner!\n", __func__);
14116 +- BUG();
14117 +- }
14118 +-
14119 + ret = nand_scan_ident(mtd, maxchips, NULL);
14120 + if (!ret)
14121 + ret = nand_scan_tail(mtd);
14122 +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
14123 +index 86fc245dc71a..fd78644469fa 100644
14124 +--- a/drivers/mtd/nand/pxa3xx_nand.c
14125 ++++ b/drivers/mtd/nand/pxa3xx_nand.c
14126 +@@ -1738,7 +1738,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
14127 + if (ret < 0)
14128 + return ret;
14129 +
14130 +- if (use_dma) {
14131 ++ if (!np && use_dma) {
14132 + r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
14133 + if (r == NULL) {
14134 + dev_err(&pdev->dev,
14135 +diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
14136 +index 43b3392ffee7..652d01832873 100644
14137 +--- a/drivers/mtd/onenand/onenand_base.c
14138 ++++ b/drivers/mtd/onenand/onenand_base.c
14139 +@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
14140 + */
14141 + static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
14142 + {
14143 ++ struct onenand_chip *this = mtd->priv;
14144 + int ret;
14145 +
14146 + ret = onenand_block_isbad(mtd, ofs);
14147 +@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
14148 + }
14149 +
14150 + onenand_get_device(mtd, FL_WRITING);
14151 +- ret = mtd_block_markbad(mtd, ofs);
14152 ++ ret = this->block_markbad(mtd, ofs);
14153 + onenand_release_device(mtd);
14154 + return ret;
14155 + }
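
The onenand hunk replaces a call to the generic mtd_block_markbad() with a
direct call to the chip's own block_markbad hook: the generic entry point
dispatches straight back into onenand_block_markbad(), so routing through
it from inside the handler re-enters the handler itself. A toy model of
the re-entry bug and its fix, with made-up names:

#include <stdio.h>

struct chip;
typedef int (*markbad_fn)(struct chip *c, long ofs);

struct chip {
    markbad_fn handler;        /* what the generic layer dispatches to */
    markbad_fn low_level;      /* the raw operation                    */
};

static int generic_markbad(struct chip *c, long ofs)
{
    return c->handler(c, ofs); /* like mtd_block_markbad() */
}

static int low_level_markbad(struct chip *c, long ofs)
{
    (void)c;
    printf("marked block at %ld\n", ofs);
    return 0;
}

static int driver_markbad(struct chip *c, long ofs)
{
    /* BUG:  return generic_markbad(c, ofs);  -- re-enters this function */
    return c->low_level(c, ofs); /* FIX: invoke the raw op directly */
}

int main(void)
{
    struct chip c = { .handler = driver_markbad,
                      .low_level = low_level_markbad };
    return generic_markbad(&c, 4096);
}
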
14156 +diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
14157 +index ed0c19c558b5..3028c06547c1 100644
14158 +--- a/drivers/mtd/spi-nor/spi-nor.c
14159 ++++ b/drivers/mtd/spi-nor/spi-nor.c
14160 +@@ -1100,45 +1100,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
14161 + return 0;
14162 + }
14163 +
14164 +-static int micron_quad_enable(struct spi_nor *nor)
14165 +-{
14166 +- int ret;
14167 +- u8 val;
14168 +-
14169 +- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
14170 +- if (ret < 0) {
14171 +- dev_err(nor->dev, "error %d reading EVCR\n", ret);
14172 +- return ret;
14173 +- }
14174 +-
14175 +- write_enable(nor);
14176 +-
14177 +- /* set EVCR, enable quad I/O */
14178 +- nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
14179 +- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
14180 +- if (ret < 0) {
14181 +- dev_err(nor->dev, "error while writing EVCR register\n");
14182 +- return ret;
14183 +- }
14184 +-
14185 +- ret = spi_nor_wait_till_ready(nor);
14186 +- if (ret)
14187 +- return ret;
14188 +-
14189 +- /* read EVCR and check it */
14190 +- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
14191 +- if (ret < 0) {
14192 +- dev_err(nor->dev, "error %d reading EVCR\n", ret);
14193 +- return ret;
14194 +- }
14195 +- if (val & EVCR_QUAD_EN_MICRON) {
14196 +- dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
14197 +- return -EINVAL;
14198 +- }
14199 +-
14200 +- return 0;
14201 +-}
14202 +-
14203 + static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
14204 + {
14205 + int status;
14206 +@@ -1152,12 +1113,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
14207 + }
14208 + return status;
14209 + case SNOR_MFR_MICRON:
14210 +- status = micron_quad_enable(nor);
14211 +- if (status) {
14212 +- dev_err(nor->dev, "Micron quad-read not enabled\n");
14213 +- return -EINVAL;
14214 +- }
14215 +- return status;
14216 ++ return 0;
14217 + default:
14218 + status = spansion_quad_enable(nor);
14219 + if (status) {
14220 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
14221 +index 5b9834cf2820..96fddb016bf1 100644
14222 +--- a/drivers/mtd/ubi/eba.c
14223 ++++ b/drivers/mtd/ubi/eba.c
14224 +@@ -426,8 +426,25 @@ retry:
14225 + pnum, vol_id, lnum);
14226 + err = -EBADMSG;
14227 + } else {
14228 +- err = -EINVAL;
14229 +- ubi_ro_mode(ubi);
14230 ++ /*
14231 ++ * Ending up here in the non-Fastmap case
14232 ++ * is a clear bug as the VID header had to
14233 ++ * be present at scan time to have it referenced.
14234 ++ * With fastmap the story is more complicated.
14235 ++ * Fastmap has the mapping info without the need
14236 ++ * of a full scan. So the LEB could have been
14237 ++ * unmapped, Fastmap cannot know this and keeps
14238 ++ * the LEB referenced.
14239 ++ * This is valid and works as the layer above UBI
14240 ++ * has to do bookkeeping about used/referenced
14241 ++ * LEBs in any case.
14242 ++ */
14243 ++ if (ubi->fast_attach) {
14244 ++ err = -EBADMSG;
14245 ++ } else {
14246 ++ err = -EINVAL;
14247 ++ ubi_ro_mode(ubi);
14248 ++ }
14249 + }
14250 + }
14251 + goto out_free;
14252 +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
14253 +index 263b439e21a8..990898b9dc72 100644
14254 +--- a/drivers/mtd/ubi/fastmap.c
14255 ++++ b/drivers/mtd/ubi/fastmap.c
14256 +@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
14257 + ubi_msg(ubi, "fastmap WL pool size: %d",
14258 + ubi->fm_wl_pool.max_size);
14259 + ubi->fm_disabled = 0;
14260 ++ ubi->fast_attach = 1;
14261 +
14262 + ubi_free_vid_hdr(ubi, vh);
14263 + kfree(ech);
14264 +diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
14265 +index 2974b67f6c6c..de1ea2e4c37d 100644
14266 +--- a/drivers/mtd/ubi/ubi.h
14267 ++++ b/drivers/mtd/ubi/ubi.h
14268 +@@ -462,6 +462,7 @@ struct ubi_debug_info {
14269 + * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
14270 + * @fm_work: fastmap work queue
14271 + * @fm_work_scheduled: non-zero if fastmap work was scheduled
14272 ++ * @fast_attach: non-zero if UBI was attached by fastmap
14273 + *
14274 + * @used: RB-tree of used physical eraseblocks
14275 + * @erroneous: RB-tree of erroneous used physical eraseblocks
14276 +@@ -570,6 +571,7 @@ struct ubi_device {
14277 + size_t fm_size;
14278 + struct work_struct fm_work;
14279 + int fm_work_scheduled;
14280 ++ int fast_attach;
14281 +
14282 + /* Wear-leveling sub-system's stuff */
14283 + struct rb_root used;
14284 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
14285 +index b7f1a9919033..5ec8195b02c0 100644
14286 +--- a/drivers/net/bonding/bond_main.c
14287 ++++ b/drivers/net/bonding/bond_main.c
14288 +@@ -3308,6 +3308,30 @@ static int bond_close(struct net_device *bond_dev)
14289 + return 0;
14290 + }
14291 +
14292 ++/* fold stats, assuming all rtnl_link_stats64 fields are u64, while
14293 ++ * noting that some drivers can provide 32bit values only.
14294 ++ */
14295 ++static void bond_fold_stats(struct rtnl_link_stats64 *_res,
14296 ++ const struct rtnl_link_stats64 *_new,
14297 ++ const struct rtnl_link_stats64 *_old)
14298 ++{
14299 ++ const u64 *new = (const u64 *)_new;
14300 ++ const u64 *old = (const u64 *)_old;
14301 ++ u64 *res = (u64 *)_res;
14302 ++ int i;
14303 ++
14304 ++ for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
14305 ++ u64 nv = new[i];
14306 ++ u64 ov = old[i];
14307 ++
14308 ++ /* detects if this particular field is 32bit only */
14309 ++ if (((nv | ov) >> 32) == 0)
14310 ++ res[i] += (u32)nv - (u32)ov;
14311 ++ else
14312 ++ res[i] += nv - ov;
14313 ++ }
14314 ++}
14315 ++
14316 + static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
14317 + struct rtnl_link_stats64 *stats)
14318 + {
14319 +@@ -3316,43 +3340,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
14320 + struct list_head *iter;
14321 + struct slave *slave;
14322 +
14323 ++ spin_lock(&bond->stats_lock);
14324 + memcpy(stats, &bond->bond_stats, sizeof(*stats));
14325 +
14326 +- bond_for_each_slave(bond, slave, iter) {
14327 +- const struct rtnl_link_stats64 *sstats =
14328 ++ rcu_read_lock();
14329 ++ bond_for_each_slave_rcu(bond, slave, iter) {
14330 ++ const struct rtnl_link_stats64 *new =
14331 + dev_get_stats(slave->dev, &temp);
14332 +- struct rtnl_link_stats64 *pstats = &slave->slave_stats;
14333 +-
14334 +- stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
14335 +- stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
14336 +- stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
14337 +- stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
14338 +-
14339 +- stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
14340 +- stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
14341 +- stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
14342 +- stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
14343 +-
14344 +- stats->multicast += sstats->multicast - pstats->multicast;
14345 +- stats->collisions += sstats->collisions - pstats->collisions;
14346 +-
14347 +- stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
14348 +- stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
14349 +- stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
14350 +- stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
14351 +- stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
14352 +- stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
14353 +-
14354 +- stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
14355 +- stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
14356 +- stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
14357 +- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
14358 +- stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
14359 ++
14360 ++ bond_fold_stats(stats, new, &slave->slave_stats);
14361 +
14362 + /* save off the slave stats for the next run */
14363 +- memcpy(pstats, sstats, sizeof(*sstats));
14364 ++ memcpy(&slave->slave_stats, new, sizeof(*new));
14365 + }
14366 ++ rcu_read_unlock();
14367 ++
14368 + memcpy(&bond->bond_stats, stats, sizeof(*stats));
14369 ++ spin_unlock(&bond->stats_lock);
14370 +
14371 + return stats;
14372 + }
14373 +@@ -4166,6 +4170,7 @@ void bond_setup(struct net_device *bond_dev)
14374 + struct bonding *bond = netdev_priv(bond_dev);
14375 +
14376 + spin_lock_init(&bond->mode_lock);
14377 ++ spin_lock_init(&bond->stats_lock);
14378 + bond->params = bonding_defaults;
14379 +
14380 + /* Initialize pointers */
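
bond_fold_stats() above accumulates per-slave deltas while tolerating
drivers whose counters are really 32-bit: when neither sample has bits
above bit 31, the subtraction is done in u32, so a counter that wrapped
past 2^32 still yields the right delta. A runnable reduction of the same
trick, with two illustrative fields instead of the full
rtnl_link_stats64:

#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_packets, rx_bytes; }; /* all fields u64 */

static void fold_stats(struct stats *res, const struct stats *cur,
                       const struct stats *old)
{
    const uint64_t *n = (const uint64_t *)cur;
    const uint64_t *o = (const uint64_t *)old;
    uint64_t *r = (uint64_t *)res;
    unsigned int i;

    for (i = 0; i < sizeof(*res) / sizeof(uint64_t); i++) {
        uint64_t nv = n[i], ov = o[i];

        /* No bits above 31 in either sample: treat the counter as
         * 32-bit, so u32 subtraction wraps correctly across 2^32. */
        if (((nv | ov) >> 32) == 0)
            r[i] += (uint32_t)nv - (uint32_t)ov;
        else
            r[i] += nv - ov;
    }
}

int main(void)
{
    struct stats total = {0};
    struct stats old = { .rx_packets = 0xfffffff0u };
    struct stats cur = { .rx_packets = 0x10 }; /* wrapped past 2^32 */

    fold_stats(&total, &cur, &old);
    printf("delta = %llu\n", (unsigned long long)total.rx_packets); /* 32 */
    return 0;
}
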
14381 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
14382 +index 141c2a42d7ed..910c12e2638e 100644
14383 +--- a/drivers/net/can/dev.c
14384 ++++ b/drivers/net/can/dev.c
14385 +@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
14386 + /* allow change of MTU according to the CANFD ability of the device */
14387 + switch (new_mtu) {
14388 + case CAN_MTU:
14389 ++ /* 'CANFD-only' controllers can not switch to CAN_MTU */
14390 ++ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
14391 ++ return -EINVAL;
14392 ++
14393 + priv->ctrlmode &= ~CAN_CTRLMODE_FD;
14394 + break;
14395 +
14396 + case CANFD_MTU:
14397 +- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
14398 ++ /* check for potential CANFD ability */
14399 ++ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
14400 ++ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
14401 + return -EINVAL;
14402 +
14403 + priv->ctrlmode |= CAN_CTRLMODE_FD;
14404 +@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
14405 + = { .len = sizeof(struct can_bittiming_const) },
14406 + };
14407 +
14408 ++static int can_validate(struct nlattr *tb[], struct nlattr *data[])
14409 ++{
14410 ++ bool is_can_fd = false;
14411 ++
14412 ++ /* Make sure that valid CAN FD configurations always consist of
14413 ++ * - nominal/arbitration bittiming
14414 ++ * - data bittiming
14415 ++ * - control mode with CAN_CTRLMODE_FD set
14416 ++ */
14417 ++
14418 ++ if (data[IFLA_CAN_CTRLMODE]) {
14419 ++ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
14420 ++
14421 ++ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
14422 ++ }
14423 ++
14424 ++ if (is_can_fd) {
14425 ++ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
14426 ++ return -EOPNOTSUPP;
14427 ++ }
14428 ++
14429 ++ if (data[IFLA_CAN_DATA_BITTIMING]) {
14430 ++ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
14431 ++ return -EOPNOTSUPP;
14432 ++ }
14433 ++
14434 ++ return 0;
14435 ++}
14436 ++
14437 + static int can_changelink(struct net_device *dev,
14438 + struct nlattr *tb[], struct nlattr *data[])
14439 + {
14440 +@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
14441 +
14442 + if (data[IFLA_CAN_CTRLMODE]) {
14443 + struct can_ctrlmode *cm;
14444 ++ u32 ctrlstatic;
14445 ++ u32 maskedflags;
14446 +
14447 + /* Do not allow changing controller mode while running */
14448 + if (dev->flags & IFF_UP)
14449 + return -EBUSY;
14450 + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
14451 ++ ctrlstatic = priv->ctrlmode_static;
14452 ++ maskedflags = cm->flags & cm->mask;
14453 ++
14454 ++ /* check whether provided bits are allowed to be passed */
14455 ++ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
14456 ++ return -EOPNOTSUPP;
14457 ++
14458 ++ /* do not check for static fd-non-iso if 'fd' is disabled */
14459 ++ if (!(maskedflags & CAN_CTRLMODE_FD))
14460 ++ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
14461 +
14462 +- /* check whether changed bits are allowed to be modified */
14463 +- if (cm->mask & ~priv->ctrlmode_supported)
14464 ++ /* make sure static options are provided by configuration */
14465 ++ if ((maskedflags & ctrlstatic) != ctrlstatic)
14466 + return -EOPNOTSUPP;
14467 +
14468 + /* clear bits to be modified and copy the flag values */
14469 + priv->ctrlmode &= ~cm->mask;
14470 +- priv->ctrlmode |= (cm->flags & cm->mask);
14471 ++ priv->ctrlmode |= maskedflags;
14472 +
14473 + /* CAN_CTRLMODE_FD can only be set when driver supports FD */
14474 + if (priv->ctrlmode & CAN_CTRLMODE_FD)
14475 +@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
14476 + .maxtype = IFLA_CAN_MAX,
14477 + .policy = can_policy,
14478 + .setup = can_setup,
14479 ++ .validate = can_validate,
14480 + .newlink = can_newlink,
14481 + .changelink = can_changelink,
14482 + .get_size = can_get_size,
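
can_validate() above is wired in as the rtnl_link_ops .validate hook so
that inconsistent attribute sets are rejected before any state changes:
FD mode needs both bittiming tables, and a data-bittiming table is only
meaningful when FD is being enabled. A compact model of that
cross-attribute check:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct cfg {
    bool has_bittiming, has_data_bittiming, fd_requested;
};

/* Mirrors the rules in can_validate(): reject incomplete FD setups
 * and data bittiming supplied without FD mode. */
static int validate(const struct cfg *c)
{
    if (c->fd_requested &&
        (!c->has_bittiming || !c->has_data_bittiming))
        return -EOPNOTSUPP;
    if (c->has_data_bittiming &&
        (!c->fd_requested || !c->has_bittiming))
        return -EOPNOTSUPP;
    return 0;
}

int main(void)
{
    struct cfg bad = { .has_data_bittiming = true };

    printf("%d\n", validate(&bad)); /* rejected: data timing without FD */
    return 0;
}
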
14483 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
14484 +index 39cf911f7a1e..195f15edb32e 100644
14485 +--- a/drivers/net/can/m_can/m_can.c
14486 ++++ b/drivers/net/can/m_can/m_can.c
14487 +@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
14488 + priv->can.do_get_berr_counter = m_can_get_berr_counter;
14489 +
14490 + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
14491 +- priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
14492 ++ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
14493 +
14494 + /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
14495 + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
14496 +diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
14497 +index 8f76f4558a88..2ff465848b65 100644
14498 +--- a/drivers/net/ethernet/atheros/atlx/atl2.c
14499 ++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
14500 +@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14501 +
14502 + err = -EIO;
14503 +
14504 +- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
14505 ++ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
14506 + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
14507 +
14508 + /* Init PHY as early as possible due to power saving issue */
14509 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
14510 +index d7e01a74e927..6746fd03cb3a 100644
14511 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
14512 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
14513 +@@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
14514 + dev->stats.tx_bytes += tx_cb_ptr->skb->len;
14515 + dma_unmap_single(&dev->dev,
14516 + dma_unmap_addr(tx_cb_ptr, dma_addr),
14517 +- tx_cb_ptr->skb->len,
14518 ++ dma_unmap_len(tx_cb_ptr, dma_len),
14519 + DMA_TO_DEVICE);
14520 + bcmgenet_free_cb(tx_cb_ptr);
14521 + } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
14522 +@@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
14523 + }
14524 +
14525 + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
14526 +- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
14527 ++ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
14528 + length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
14529 + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
14530 + DMA_TX_APPEND_CRC;
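
The bcmgenet fix records the exact mapped length via
dma_unmap_len_set(..., skb_len) at map time and uses it at unmap time,
instead of unmapping with skb->len, which can differ from what was
actually mapped. A tiny sketch of the record-what-you-mapped idiom, with
hypothetical types and fake DMA helpers:

#include <stdint.h>
#include <stdio.h>

/* Control block: keep the mapped address *and* length together. */
struct tx_cb {
    uint64_t dma_addr;
    unsigned int dma_len;
};

static uint64_t map(const void *buf, unsigned int len)
{
    (void)buf; (void)len;
    return 0x1000;                  /* fake bus address */
}

static void unmap(uint64_t addr, unsigned int len)
{
    printf("unmap %#llx len %u\n", (unsigned long long)addr, len);
}

int main(void)
{
    char pkt[128];
    unsigned int mapped_len = 60;   /* may differ from sizeof(pkt) */
    struct tx_cb cb;

    cb.dma_addr = map(pkt, mapped_len);
    cb.dma_len = mapped_len;        /* like dma_unmap_len_set() */

    unmap(cb.dma_addr, cb.dma_len); /* not skb->len / sizeof(pkt) */
    return 0;
}
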
14531 +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
14532 +index 767347b1f631..f50bdbfaae7c 100644
14533 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
14534 ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
14535 +@@ -519,6 +519,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
14536 + nicvf_config_vlan_stripping(nic, nic->netdev->features);
14537 +
14538 + /* Enable Receive queue */
14539 ++ memset(&rq_cfg, 0, sizeof(struct rq_cfg));
14540 + rq_cfg.ena = 1;
14541 + rq_cfg.tcp_ena = 0;
14542 + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
14543 +@@ -551,6 +552,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
14544 + qidx, (u64)(cq->dmem.phys_base));
14545 +
14546 + /* Enable Completion queue */
14547 ++ memset(&cq_cfg, 0, sizeof(struct cq_cfg));
14548 + cq_cfg.ena = 1;
14549 + cq_cfg.reset = 0;
14550 + cq_cfg.caching = 0;
14551 +@@ -599,6 +601,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
14552 + qidx, (u64)(sq->dmem.phys_base));
14553 +
14554 + /* Enable send queue & set queue size */
14555 ++ memset(&sq_cfg, 0, sizeof(struct sq_cfg));
14556 + sq_cfg.ena = 1;
14557 + sq_cfg.reset = 0;
14558 + sq_cfg.ldwb = 0;
14559 +@@ -635,6 +638,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
14560 +
14561 + /* Enable RBDR & set queue size */
14562 + /* Buffer size should be in multiples of 128 bytes */
14563 ++ memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
14564 + rbdr_cfg.ena = 1;
14565 + rbdr_cfg.reset = 0;
14566 + rbdr_cfg.ldwb = 0;
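
Each nicvf hunk zeroes an on-stack hardware config struct before filling
in fields: the struct is written to a register as a raw u64 (the
*(u64 *)&rq_cfg casts visible above), so any field left untouched would
leak stack garbage into the hardware. A minimal illustration of the fix;
uint64_t bitfields mirror the kernel's layout but are a GCC/clang
extension:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 64 bits of config, laid out as bitfields like the kernel struct. */
struct rq_cfg {
    uint64_t ena:1;
    uint64_t tcp_ena:1;
    uint64_t reserved:62;   /* never written by the driver */
};

int main(void)
{
    struct rq_cfg rq;
    uint64_t raw;

    memset(&rq, 0, sizeof(rq)); /* the fix: zero before setting fields */
    rq.ena = 1;
    rq.tcp_ena = 0;

    memcpy(&raw, &rq, sizeof(raw)); /* like *(u64 *)&rq_cfg */
    printf("raw = %#llx\n", (unsigned long long)raw);
    return 0;
}
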
14567 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
14568 +index 41c81f6ec630..0d6e8c177650 100644
14569 +--- a/drivers/net/ethernet/freescale/fec_main.c
14570 ++++ b/drivers/net/ethernet/freescale/fec_main.c
14571 +@@ -1566,9 +1566,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
14572 + struct fec_enet_private *fep = netdev_priv(ndev);
14573 +
14574 + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
14575 +- clear_bit(queue_id, &fep->work_rx);
14576 +- pkt_received += fec_enet_rx_queue(ndev,
14577 ++ int ret;
14578 ++
14579 ++ ret = fec_enet_rx_queue(ndev,
14580 + budget - pkt_received, queue_id);
14581 ++
14582 ++ if (ret < budget - pkt_received)
14583 ++ clear_bit(queue_id, &fep->work_rx);
14584 ++
14585 ++ pkt_received += ret;
14586 + }
14587 + return pkt_received;
14588 + }
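
The fec change clears a queue's pending bit only after a poll pass that
came back under budget, which proves the queue was drained; clearing it
up front, as before, could park frames until the next interrupt whenever
the budget ran out mid-queue. The rule in isolation, against a fake
queue:

#include <stdio.h>

static int backlog = 25;            /* frames waiting in the queue */

static int poll_queue(int budget)
{
    int done = backlog < budget ? backlog : budget;

    backlog -= done;
    return done;
}

int main(void)
{
    int budget = 16;
    int ret = poll_queue(budget);

    /* ret == budget means we may have stopped early: keep the
     * pending bit set so the next pass services this queue again. */
    int drained = (ret < budget);

    printf("ret=%d drained=%d backlog=%d\n", ret, drained, backlog);
    return 0;
}
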
14589 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
14590 +index b0ae69f84493..2ee05cebea75 100644
14591 +--- a/drivers/net/ethernet/marvell/mvneta.c
14592 ++++ b/drivers/net/ethernet/marvell/mvneta.c
14593 +@@ -3070,17 +3070,17 @@ static int mvneta_stop(struct net_device *dev)
14594 + struct mvneta_port *pp = netdev_priv(dev);
14595 +
14596 + /* Inform that we are stopping so we don't want to setup the
14597 +- * driver for new CPUs in the notifiers
14598 ++ * driver for new CPUs in the notifiers. The code of the
14599 ++ * notifier for CPU online is protected by the same spinlock,
14600 ++ * so when we get the lock, the notifer work is done.
14601 + */
14602 + spin_lock(&pp->lock);
14603 + pp->is_stopped = true;
14604 ++ spin_unlock(&pp->lock);
14605 ++
14606 + mvneta_stop_dev(pp);
14607 + mvneta_mdio_remove(pp);
14608 + unregister_cpu_notifier(&pp->cpu_notifier);
14609 +- /* Now that the notifier are unregistered, we can release le
14610 +- * lock
14611 +- */
14612 +- spin_unlock(&pp->lock);
14613 + on_each_cpu(mvneta_percpu_disable, pp, true);
14614 + free_percpu_irq(dev->irq, pp->ports);
14615 + mvneta_cleanup_rxqs(pp);
14616 +@@ -3612,6 +3612,7 @@ static int mvneta_probe(struct platform_device *pdev)
14617 + dev->ethtool_ops = &mvneta_eth_tool_ops;
14618 +
14619 + pp = netdev_priv(dev);
14620 ++ spin_lock_init(&pp->lock);
14621 + pp->phy_node = phy_node;
14622 + pp->phy_interface = phy_mode;
14623 +
14624 +@@ -3720,7 +3721,7 @@ static int mvneta_probe(struct platform_device *pdev)
14625 + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
14626 + dev->hw_features |= dev->features;
14627 + dev->vlan_features |= dev->features;
14628 +- dev->priv_flags |= IFF_UNICAST_FLT;
14629 ++ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
14630 + dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
14631 +
14632 + err = register_netdev(dev);
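
The mvneta fix initializes pp->lock in probe and releases it right after
publishing is_stopped, instead of holding it across the whole teardown:
because the CPU-online notifier takes the same lock, once stop() drops it
the notifier has either already finished or will bail out on the flag. A
pthread rendering of that handshake, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_stopped;

/* Hotplug-notifier side: bail out if the device is stopping. */
static void cpu_online_notifier(void)
{
    pthread_mutex_lock(&lock);
    if (!is_stopped) {
        /* ... set up per-CPU resources ... */
    }
    pthread_mutex_unlock(&lock);
}

static void stop(void)
{
    pthread_mutex_lock(&lock);
    is_stopped = true;
    pthread_mutex_unlock(&lock); /* released before the slow teardown */
    /* ... teardown that must not race with the notifier ... */
}

int main(void)
{
    cpu_online_notifier();
    stop();
    puts("stopped");
    return 0;
}
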
14633 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
14634 +index 41440b2b20a3..03ef9aca21e4 100644
14635 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
14636 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
14637 +@@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
14638 +
14639 + if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
14640 + return -1;
14641 +- hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
14642 ++ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
14643 +
14644 + csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
14645 + sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
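
The mlx4 checksum fix above folds the IPv6 nexthdr byte into the sum as a 16-bit network-order value. For a one-byte value, nexthdr << 8 produces the same bit pattern as htons(nexthdr) only on little-endian hosts, which is why the shift worked on x86 but broke big-endian machines. A small demonstration:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t nexthdr = 58;	/* e.g. ICMPv6 */
		uint16_t shifted = (uint16_t)(nexthdr << 8);
		uint16_t net     = htons(nexthdr);

		printf("nexthdr<<8 = 0x%04x, htons(nexthdr) = 0x%04x%s\n",
		       shifted, net,
		       shifted == net ? " (little-endian host)"
				      : " (they differ)");
		return 0;
	}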
14646 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
14647 +index e0946ab22010..0debb611da8b 100644
14648 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
14649 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
14650 +@@ -402,7 +402,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
14651 + u32 packets = 0;
14652 + u32 bytes = 0;
14653 + int factor = priv->cqe_factor;
14654 +- u64 timestamp = 0;
14655 + int done = 0;
14656 + int budget = priv->tx_work_limit;
14657 + u32 last_nr_txbb;
14658 +@@ -442,9 +441,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
14659 + new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
14660 +
14661 + do {
14662 ++ u64 timestamp = 0;
14663 ++
14664 + txbbs_skipped += last_nr_txbb;
14665 + ring_index = (ring_index + last_nr_txbb) & size_mask;
14666 +- if (ring->tx_info[ring_index].ts_requested)
14667 ++
14668 ++ if (unlikely(ring->tx_info[ring_index].ts_requested))
14669 + timestamp = mlx4_en_get_cqe_ts(cqe);
14670 +
14671 + /* free next descriptor */
14672 +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
14673 +index 25ce1b030a00..cd9b2b28df88 100644
14674 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
14675 ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
14676 +@@ -3141,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
14677 + case QP_TRANS_RTS2RTS:
14678 + case QP_TRANS_SQD2SQD:
14679 + case QP_TRANS_SQD2RTS:
14680 +- if (slave != mlx4_master_func_num(dev))
14681 ++ if (slave != mlx4_master_func_num(dev)) {
14682 + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
14683 + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
14684 + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
14685 +@@ -3160,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
14686 + if (qp_ctx->alt_path.mgid_index >= num_gids)
14687 + return -EINVAL;
14688 + }
14689 ++ }
14690 + break;
14691 + default:
14692 + break;
14693 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
14694 +index 402994bf7e16..e293a2ec2775 100644
14695 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
14696 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
14697 +@@ -1389,24 +1389,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
14698 + return 0;
14699 + }
14700 +
14701 +-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
14702 ++static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
14703 + {
14704 +- struct mlx5e_priv *priv = netdev_priv(netdev);
14705 + struct mlx5_core_dev *mdev = priv->mdev;
14706 +- int hw_mtu;
14707 ++ u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
14708 + int err;
14709 +
14710 +- err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
14711 ++ err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
14712 + if (err)
14713 + return err;
14714 +
14715 +- mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
14716 ++ /* Update vport context MTU */
14717 ++ mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
14718 ++ return 0;
14719 ++}
14720 ++
14721 ++static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
14722 ++{
14723 ++ struct mlx5_core_dev *mdev = priv->mdev;
14724 ++ u16 hw_mtu = 0;
14725 ++ int err;
14726 ++
14727 ++ err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
14728 ++ if (err || !hw_mtu) /* fallback to port oper mtu */
14729 ++ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
14730 ++
14731 ++ *mtu = MLX5E_HW2SW_MTU(hw_mtu);
14732 ++}
14733 ++
14734 ++static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
14735 ++{
14736 ++ struct mlx5e_priv *priv = netdev_priv(netdev);
14737 ++ u16 mtu;
14738 ++ int err;
14739 +
14740 +- if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
14741 +- netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
14742 +- __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
14743 ++ err = mlx5e_set_mtu(priv, netdev->mtu);
14744 ++ if (err)
14745 ++ return err;
14746 +
14747 +- netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
14748 ++ mlx5e_query_mtu(priv, &mtu);
14749 ++ if (mtu != netdev->mtu)
14750 ++ netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
14751 ++ __func__, mtu, netdev->mtu);
14752 ++
14753 ++ netdev->mtu = mtu;
14754 + return 0;
14755 + }
14756 +
14757 +@@ -1906,22 +1932,27 @@ static int mlx5e_set_features(struct net_device *netdev,
14758 + return err;
14759 + }
14760 +
14761 ++#define MXL5_HW_MIN_MTU 64
14762 ++#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
14763 ++
14764 + static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
14765 + {
14766 + struct mlx5e_priv *priv = netdev_priv(netdev);
14767 + struct mlx5_core_dev *mdev = priv->mdev;
14768 + bool was_opened;
14769 +- int max_mtu;
14770 ++ u16 max_mtu;
14771 ++ u16 min_mtu;
14772 + int err = 0;
14773 +
14774 + mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
14775 +
14776 + max_mtu = MLX5E_HW2SW_MTU(max_mtu);
14777 ++ min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
14778 +
14779 +- if (new_mtu > max_mtu) {
14780 ++ if (new_mtu > max_mtu || new_mtu < min_mtu) {
14781 + netdev_err(netdev,
14782 +- "%s: Bad MTU (%d) > (%d) Max\n",
14783 +- __func__, new_mtu, max_mtu);
14784 ++ "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
14785 ++ __func__, new_mtu, min_mtu, max_mtu);
14786 + return -EINVAL;
14787 + }
14788 +
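
The mlx5e rework above splits MTU handling into a setter that programs both the port and the vport context and a getter that reads back the effective value, with mlx5e_change_mtu validating against [min_mtu..max_mtu] first. A standalone sketch of the set-then-verify shape (set_hw_mtu()/query_hw_mtu() are stand-ins, not mlx5 API):

	#include <stdio.h>

	static int hw_mtu = 1500;

	static int set_hw_mtu(int mtu)	{ hw_mtu = mtu > 9000 ? 9000 : mtu; return 0; }
	static int query_hw_mtu(void)	{ return hw_mtu; }

	static int change_mtu(int *netdev_mtu, int new_mtu)
	{
		int err = set_hw_mtu(new_mtu);
		if (err)
			return err;

		int effective = query_hw_mtu();
		if (effective != new_mtu)
			fprintf(stderr, "warning: asked for %d, device uses %d\n",
				new_mtu, effective);
		*netdev_mtu = effective;	/* keep netdev in sync with reality */
		return 0;
	}

	int main(void)
	{
		int mtu = 1500;
		change_mtu(&mtu, 9600);
		printf("mtu=%d\n", mtu);
		return 0;
	}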
14789 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
14790 +index 6f68dba8d7ed..cc901852f1a9 100644
14791 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
14792 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
14793 +@@ -957,33 +957,6 @@ unlock_fg:
14794 + return rule;
14795 + }
14796 +
14797 +-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
14798 +- u8 match_criteria_enable,
14799 +- u32 *match_criteria,
14800 +- u32 *match_value,
14801 +- u8 action,
14802 +- u32 flow_tag,
14803 +- struct mlx5_flow_destination *dest)
14804 +-{
14805 +- struct mlx5_flow_rule *rule;
14806 +- struct mlx5_flow_group *g;
14807 +-
14808 +- g = create_autogroup(ft, match_criteria_enable, match_criteria);
14809 +- if (IS_ERR(g))
14810 +- return (void *)g;
14811 +-
14812 +- rule = add_rule_fg(g, match_value,
14813 +- action, flow_tag, dest);
14814 +- if (IS_ERR(rule)) {
14815 +- /* Remove assumes refcount > 0 and autogroup creates a group
14816 +- * with a refcount = 0.
14817 +- */
14818 +- tree_get_node(&g->node);
14819 +- tree_remove_node(&g->node);
14820 +- }
14821 +- return rule;
14822 +-}
14823 +-
14824 + struct mlx5_flow_rule *
14825 + mlx5_add_flow_rule(struct mlx5_flow_table *ft,
14826 + u8 match_criteria_enable,
14827 +@@ -1008,8 +981,23 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
14828 + goto unlock;
14829 + }
14830 +
14831 +- rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
14832 +- match_value, action, flow_tag, dest);
14833 ++ g = create_autogroup(ft, match_criteria_enable, match_criteria);
14834 ++ if (IS_ERR(g)) {
14835 ++ rule = (void *)g;
14836 ++ goto unlock;
14837 ++ }
14838 ++
14839 ++ rule = add_rule_fg(g, match_value,
14840 ++ action, flow_tag, dest);
14841 ++ if (IS_ERR(rule)) {
14842 ++ /* Remove assumes refcount > 0 and autogroup creates a group
14843 ++ * with a refcount = 0.
14844 ++ */
14845 ++ unlock_ref_node(&ft->node);
14846 ++ tree_get_node(&g->node);
14847 ++ tree_remove_node(&g->node);
14848 ++ return rule;
14849 ++ }
14850 + unlock:
14851 + unlock_ref_node(&ft->node);
14852 + return rule;
14853 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
14854 +index 1545a944c309..b86fe50d5d93 100644
14855 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
14856 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
14857 +@@ -423,6 +423,10 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
14858 + HCA_CAP_OPMOD_GET_CUR);
14859 + if (err)
14860 + return err;
14861 ++ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
14862 ++ HCA_CAP_OPMOD_GET_MAX);
14863 ++ if (err)
14864 ++ return err;
14865 + } else {
14866 + return 0;
14867 + }
14868 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
14869 +index a87e773e93f3..53a793bc2e3d 100644
14870 +--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
14871 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
14872 +@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
14873 + }
14874 + EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
14875 +
14876 +-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
14877 +- int *max_mtu, int *oper_mtu, u8 port)
14878 ++static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
14879 ++ u16 *max_mtu, u16 *oper_mtu, u8 port)
14880 + {
14881 + u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
14882 + u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
14883 +@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
14884 + *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
14885 + }
14886 +
14887 +-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
14888 ++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
14889 + {
14890 + u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
14891 + u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
14892 +@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
14893 + }
14894 + EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
14895 +
14896 +-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
14897 ++void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
14898 + u8 port)
14899 + {
14900 + mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
14901 + }
14902 + EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
14903 +
14904 +-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
14905 ++void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
14906 + u8 port)
14907 + {
14908 + mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
14909 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
14910 +index c7398b95aecd..6d5f56e73b5d 100644
14911 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
14912 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
14913 +@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
14914 + }
14915 + EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
14916 +
14917 ++int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
14918 ++{
14919 ++ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
14920 ++ u32 *out;
14921 ++ int err;
14922 ++
14923 ++ out = mlx5_vzalloc(outlen);
14924 ++ if (!out)
14925 ++ return -ENOMEM;
14926 ++
14927 ++ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
14928 ++ if (!err)
14929 ++ *mtu = MLX5_GET(query_nic_vport_context_out, out,
14930 ++ nic_vport_context.mtu);
14931 ++
14932 ++ kvfree(out);
14933 ++ return err;
14934 ++}
14935 ++EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
14936 ++
14937 ++int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
14938 ++{
14939 ++ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
14940 ++ void *in;
14941 ++ int err;
14942 ++
14943 ++ in = mlx5_vzalloc(inlen);
14944 ++ if (!in)
14945 ++ return -ENOMEM;
14946 ++
14947 ++ MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
14948 ++ MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
14949 ++
14950 ++ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
14951 ++
14952 ++ kvfree(in);
14953 ++ return err;
14954 ++}
14955 ++EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
14956 ++
14957 + int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
14958 + u32 vport,
14959 + enum mlx5_list_type list_type,
14960 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
14961 +index 3b89ed2f3c76..65a115fc0c96 100644
14962 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
14963 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
14964 +@@ -118,6 +118,8 @@ struct mlxsw_sp {
14965 + #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
14966 + unsigned int interval; /* ms */
14967 + } fdb_notify;
14968 ++#define MLXSW_SP_MIN_AGEING_TIME 10
14969 ++#define MLXSW_SP_MAX_AGEING_TIME 1000000
14970 + #define MLXSW_SP_DEFAULT_AGEING_TIME 300
14971 + u32 ageing_time;
14972 + struct mlxsw_sp_upper master_bridge;
14973 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
14974 +index 7b56098acc58..e1c74efff51a 100644
14975 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
14976 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
14977 +@@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
14978 + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
14979 + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
14980 +
14981 +- if (switchdev_trans_ph_prepare(trans))
14982 +- return 0;
14983 ++ if (switchdev_trans_ph_prepare(trans)) {
14984 ++ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
14985 ++ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
14986 ++ return -ERANGE;
14987 ++ else
14988 ++ return 0;
14989 ++ }
14990 +
14991 + return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
14992 + }
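
The mlxsw hunk above moves validation into switchdev's prepare phase: an out-of-range ageing time is rejected with -ERANGE before anything is committed, so the commit phase cannot fail. A standalone model of the two-phase shape (not the mlxsw code):

	#include <stdio.h>

	#define MIN_AGEING 10
	#define MAX_AGEING 1000000

	enum phase { PREPARE, COMMIT };

	static int set_ageing(enum phase ph, unsigned int ageing, unsigned int *hw)
	{
		if (ph == PREPARE)
			return (ageing < MIN_AGEING || ageing > MAX_AGEING)
				? -34 /* -ERANGE */ : 0;
		*hw = ageing;			/* commit cannot fail */
		return 0;
	}

	int main(void)
	{
		unsigned int hw = 300;

		if (set_ageing(PREPARE, 5, &hw))
			printf("rejected 5 in prepare, hw still %u\n", hw);
		if (!set_ageing(PREPARE, 600, &hw))
			set_ageing(COMMIT, 600, &hw);
		printf("hw=%u\n", hw);
		return 0;
	}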
14993 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
14994 +index 46bbea8e023c..55007f1e6bbc 100644
14995 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
14996 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
14997 +@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
14998 + u64 tx_dma_map_error;
14999 + u64 spurious_intr;
15000 + u64 mac_filter_limit_overrun;
15001 ++ u64 mbx_spurious_intr;
15002 + };
15003 +
15004 + /*
15005 +@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
15006 + unsigned long status;
15007 + spinlock_t queue_lock; /* Mailbox queue lock */
15008 + spinlock_t aen_lock; /* Mailbox response/AEN lock */
15009 +- atomic_t rsp_status;
15010 ++ u32 rsp_status;
15011 + u32 num_cmds;
15012 + };
15013 +
15014 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
15015 +index 37a731be7d39..f9640d5ce6ba 100644
15016 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
15017 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
15018 +@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
15019 +
15020 + static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
15021 + {
15022 +- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
15023 ++ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
15024 + complete(&mbx->completion);
15025 + }
15026 +
15027 +@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
15028 + if (event & QLCNIC_MBX_ASYNC_EVENT) {
15029 + __qlcnic_83xx_process_aen(adapter);
15030 + } else {
15031 +- if (atomic_read(&mbx->rsp_status) != rsp_status)
15032 ++ if (mbx->rsp_status != rsp_status)
15033 + qlcnic_83xx_notify_mbx_response(mbx);
15034 + }
15035 + out:
15036 +@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
15037 + if (event & QLCNIC_MBX_ASYNC_EVENT) {
15038 + __qlcnic_83xx_process_aen(adapter);
15039 + } else {
15040 +- if (atomic_read(&mbx->rsp_status) != rsp_status)
15041 ++ if (mbx->rsp_status != rsp_status)
15042 + qlcnic_83xx_notify_mbx_response(mbx);
15043 + }
15044 + }
15045 +@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
15046 +
15047 + static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
15048 + {
15049 ++ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
15050 + struct qlcnic_adapter *adapter = data;
15051 + struct qlcnic_mailbox *mbx;
15052 +- u32 mask, resp, event;
15053 + unsigned long flags;
15054 +
15055 + mbx = adapter->ahw->mailbox;
15056 +@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
15057 + goto out;
15058 +
15059 + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
15060 +- if (event & QLCNIC_MBX_ASYNC_EVENT)
15061 ++ if (event & QLCNIC_MBX_ASYNC_EVENT) {
15062 + __qlcnic_83xx_process_aen(adapter);
15063 +- else
15064 +- qlcnic_83xx_notify_mbx_response(mbx);
15065 ++ } else {
15066 ++ if (mbx->rsp_status != rsp_status)
15067 ++ qlcnic_83xx_notify_mbx_response(mbx);
15068 ++ else
15069 ++ adapter->stats.mbx_spurious_intr++;
15070 ++ }
15071 +
15072 + out:
15073 + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
15074 +@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
15075 + struct qlcnic_adapter *adapter = mbx->adapter;
15076 + const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
15077 + struct device *dev = &adapter->pdev->dev;
15078 +- atomic_t *rsp_status = &mbx->rsp_status;
15079 + struct list_head *head = &mbx->cmd_q;
15080 + struct qlcnic_hardware_context *ahw;
15081 + struct qlcnic_cmd_args *cmd = NULL;
15082 ++ unsigned long flags;
15083 +
15084 + ahw = adapter->ahw;
15085 +
15086 +@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
15087 + return;
15088 + }
15089 +
15090 +- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
15091 ++ spin_lock_irqsave(&mbx->aen_lock, flags);
15092 ++ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
15093 ++ spin_unlock_irqrestore(&mbx->aen_lock, flags);
15094 +
15095 + spin_lock(&mbx->queue_lock);
15096 +
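
The qlcnic change above demotes rsp_status from an atomic_t to a plain u32: atomic ops only make the individual load or store atomic, while the interrupt handler must compare the status and signal the waiter as one unit, which the existing aen_lock already provides (a wakeup with nobody waiting is now counted as spurious). A pthread model of that compare-then-notify section (illustrative, not driver code):

	#include <pthread.h>
	#include <stdio.h>

	#define RESPONSE_WAIT    1
	#define RESPONSE_ARRIVED 2

	static pthread_mutex_t aen_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int rsp_status = RESPONSE_WAIT;
	static unsigned int spurious;

	static void irq_handler(void)
	{
		pthread_mutex_lock(&aen_lock);
		if (rsp_status != RESPONSE_ARRIVED)
			rsp_status = RESPONSE_ARRIVED;	/* notify waiter */
		else
			spurious++;			/* nobody is waiting */
		pthread_mutex_unlock(&aen_lock);
	}

	int main(void)
	{
		irq_handler();
		irq_handler();
		printf("status=%u spurious=%u\n", rsp_status, spurious);
		return 0;
	}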
15097 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
15098 +index 494e8105adee..0a2318cad34d 100644
15099 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
15100 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
15101 +@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
15102 + QLC_OFF(stats.mac_filter_limit_overrun)},
15103 + {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
15104 + QLC_OFF(stats.spurious_intr)},
15105 +-
15106 ++ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
15107 ++ QLC_OFF(stats.mbx_spurious_intr)},
15108 + };
15109 +
15110 + static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
15111 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
15112 +index 997976426799..b28e73ea2c25 100644
15113 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
15114 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
15115 +@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
15116 + return;
15117 + }
15118 + skb_reserve(new_skb, NET_IP_ALIGN);
15119 ++
15120 ++ pci_dma_sync_single_for_cpu(qdev->pdev,
15121 ++ dma_unmap_addr(sbq_desc, mapaddr),
15122 ++ dma_unmap_len(sbq_desc, maplen),
15123 ++ PCI_DMA_FROMDEVICE);
15124 ++
15125 + memcpy(skb_put(new_skb, length), skb->data, length);
15126 ++
15127 ++ pci_dma_sync_single_for_device(qdev->pdev,
15128 ++ dma_unmap_addr(sbq_desc, mapaddr),
15129 ++ dma_unmap_len(sbq_desc, maplen),
15130 ++ PCI_DMA_FROMDEVICE);
15131 + skb = new_skb;
15132 +
15133 + /* Frame error, so drop the packet. */
15134 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
15135 +index 738449992876..01f6d5bbd420 100644
15136 +--- a/drivers/net/ethernet/renesas/sh_eth.c
15137 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
15138 +@@ -1136,11 +1136,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
15139 + break;
15140 + sh_eth_set_receive_align(skb);
15141 +
15142 +- /* RX descriptor */
15143 +- rxdesc = &mdp->rx_ring[i];
15144 + /* The size of the buffer is a multiple of 32 bytes. */
15145 + buf_len = ALIGN(mdp->rx_buf_sz, 32);
15146 +- rxdesc->len = cpu_to_le32(buf_len << 16);
15147 + dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
15148 + DMA_FROM_DEVICE);
15149 + if (dma_mapping_error(&ndev->dev, dma_addr)) {
15150 +@@ -1148,6 +1145,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
15151 + break;
15152 + }
15153 + mdp->rx_skbuff[i] = skb;
15154 ++
15155 ++ /* RX descriptor */
15156 ++ rxdesc = &mdp->rx_ring[i];
15157 ++ rxdesc->len = cpu_to_le32(buf_len << 16);
15158 + rxdesc->addr = cpu_to_le32(dma_addr);
15159 + rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
15160 +
15161 +@@ -1163,7 +1164,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
15162 + mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
15163 +
15164 + /* Mark the last entry as wrapping the ring. */
15165 +- rxdesc->status |= cpu_to_le32(RD_RDLE);
15166 ++ if (rxdesc)
15167 ++ rxdesc->status |= cpu_to_le32(RD_RDLE);
15168 +
15169 + memset(mdp->tx_ring, 0, tx_ringsize);
15170 +
15171 +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
15172 +index 166a7fc87e2f..f39e7198e818 100644
15173 +--- a/drivers/net/ethernet/rocker/rocker.c
15174 ++++ b/drivers/net/ethernet/rocker/rocker.c
15175 +@@ -239,6 +239,7 @@ struct rocker {
15176 + struct {
15177 + u64 id;
15178 + } hw;
15179 ++ unsigned long ageing_time;
15180 + spinlock_t cmd_ring_lock; /* for cmd ring accesses */
15181 + struct rocker_dma_ring_info cmd_ring;
15182 + struct rocker_dma_ring_info event_ring;
15183 +@@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
15184 + struct rocker_port *rocker_port;
15185 + struct rocker_fdb_tbl_entry *entry;
15186 + struct hlist_node *tmp;
15187 +- unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
15188 ++ unsigned long next_timer = jiffies + rocker->ageing_time;
15189 + unsigned long expires;
15190 + unsigned long lock_flags;
15191 + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
15192 +@@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
15193 + struct switchdev_trans *trans,
15194 + u32 ageing_time)
15195 + {
15196 ++ struct rocker *rocker = rocker_port->rocker;
15197 ++
15198 + if (!switchdev_trans_ph_prepare(trans)) {
15199 + rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
15200 ++ if (rocker_port->ageing_time < rocker->ageing_time)
15201 ++ rocker->ageing_time = rocker_port->ageing_time;
15202 + mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
15203 + }
15204 +
15205 +@@ -5206,10 +5211,11 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
15206 + goto err_init_tbls;
15207 + }
15208 +
15209 ++ rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
15210 + setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
15211 + (unsigned long) rocker);
15212 + mod_timer(&rocker->fdb_cleanup_timer, jiffies);
15213 +
15216 + err = rocker_probe_ports(rocker);
15217 + if (err) {
15218 + dev_err(&pdev->dev, "failed to probe ports\n");
15219 +diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
15220 +index 696852eb23c3..7a3f990c1935 100644
15221 +--- a/drivers/net/irda/irtty-sir.c
15222 ++++ b/drivers/net/irda/irtty-sir.c
15223 +@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
15224 +
15225 + /* Module stuff handled via irda_ldisc.owner - Jean II */
15226 +
15227 +- /* First make sure we're not already connected. */
15228 +- if (tty->disc_data != NULL) {
15229 +- priv = tty->disc_data;
15230 +- if (priv && priv->magic == IRTTY_MAGIC) {
15231 +- ret = -EEXIST;
15232 +- goto out;
15233 +- }
15234 +- tty->disc_data = NULL; /* ### */
15235 +- }
15236 +-
15237 + /* stop the underlying driver */
15238 + irtty_stop_receiver(tty, TRUE);
15239 + if (tty->ops->stop)
15240 +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
15241 +index d636d051fac8..95394edd1ed5 100644
15242 +--- a/drivers/net/macvtap.c
15243 ++++ b/drivers/net/macvtap.c
15244 +@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
15245 + macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
15246 + if (copylen > good_linear)
15247 + copylen = good_linear;
15248 ++ else if (copylen < ETH_HLEN)
15249 ++ copylen = ETH_HLEN;
15250 + linear = copylen;
15251 + i = *from;
15252 + iov_iter_advance(&i, copylen);
15253 +@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
15254 +
15255 + if (!zerocopy) {
15256 + copylen = len;
15257 +- if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
15258 ++ linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
15259 ++ if (linear > good_linear)
15260 + linear = good_linear;
15261 +- else
15262 +- linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
15263 ++ else if (linear < ETH_HLEN)
15264 ++ linear = ETH_HLEN;
15265 + }
15266 +
15267 + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
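
Both macvtap hunks above bound the linear part of the skb from both sides: never more than good_linear, never less than an Ethernet header, so a hostile vnet_hdr.hdr_len can no longer produce a headerless skb. The clamp in isolation (illustrative values; ETH_HLEN is 14):

	#include <stdio.h>

	#define ETH_HLEN 14

	static int clamp_linear(int claimed, int good_linear)
	{
		if (claimed > good_linear)
			return good_linear;
		if (claimed < ETH_HLEN)
			return ETH_HLEN;
		return claimed;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       clamp_linear(0, 512),	/* 14  */
		       clamp_linear(100, 512),	/* 100 */
		       clamp_linear(4096, 512));	/* 512 */
		return 0;
	}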
15268 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
15269 +index d61da9ece3ba..aafe237b25ac 100644
15270 +--- a/drivers/net/ppp/ppp_generic.c
15271 ++++ b/drivers/net/ppp/ppp_generic.c
15272 +@@ -575,7 +575,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
15273 +
15274 + static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15275 + {
15276 +- struct ppp_file *pf = file->private_data;
15277 ++ struct ppp_file *pf;
15278 + struct ppp *ppp;
15279 + int err = -EFAULT, val, val2, i;
15280 + struct ppp_idle idle;
15281 +@@ -585,9 +585,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15282 + void __user *argp = (void __user *)arg;
15283 + int __user *p = argp;
15284 +
15285 +- if (!pf)
15286 +- return ppp_unattached_ioctl(current->nsproxy->net_ns,
15287 +- pf, file, cmd, arg);
15288 ++ mutex_lock(&ppp_mutex);
15289 ++
15290 ++ pf = file->private_data;
15291 ++ if (!pf) {
15292 ++ err = ppp_unattached_ioctl(current->nsproxy->net_ns,
15293 ++ pf, file, cmd, arg);
15294 ++ goto out;
15295 ++ }
15296 +
15297 + if (cmd == PPPIOCDETACH) {
15298 + /*
15299 +@@ -602,7 +607,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15300 + * this fd and reopening /dev/ppp.
15301 + */
15302 + err = -EINVAL;
15303 +- mutex_lock(&ppp_mutex);
15304 + if (pf->kind == INTERFACE) {
15305 + ppp = PF_TO_PPP(pf);
15306 + rtnl_lock();
15307 +@@ -616,15 +620,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15308 + } else
15309 + pr_warn("PPPIOCDETACH file->f_count=%ld\n",
15310 + atomic_long_read(&file->f_count));
15311 +- mutex_unlock(&ppp_mutex);
15312 +- return err;
15313 ++ goto out;
15314 + }
15315 +
15316 + if (pf->kind == CHANNEL) {
15317 + struct channel *pch;
15318 + struct ppp_channel *chan;
15319 +
15320 +- mutex_lock(&ppp_mutex);
15321 + pch = PF_TO_CHANNEL(pf);
15322 +
15323 + switch (cmd) {
15324 +@@ -646,17 +648,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15325 + err = chan->ops->ioctl(chan, cmd, arg);
15326 + up_read(&pch->chan_sem);
15327 + }
15328 +- mutex_unlock(&ppp_mutex);
15329 +- return err;
15330 ++ goto out;
15331 + }
15332 +
15333 + if (pf->kind != INTERFACE) {
15334 + /* can't happen */
15335 + pr_err("PPP: not interface or channel??\n");
15336 +- return -EINVAL;
15337 ++ err = -EINVAL;
15338 ++ goto out;
15339 + }
15340 +
15341 +- mutex_lock(&ppp_mutex);
15342 + ppp = PF_TO_PPP(pf);
15343 + switch (cmd) {
15344 + case PPPIOCSMRU:
15345 +@@ -831,7 +832,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15346 + default:
15347 + err = -ENOTTY;
15348 + }
15349 ++
15350 ++out:
15351 + mutex_unlock(&ppp_mutex);
15352 ++
15353 + return err;
15354 + }
15355 +
15356 +@@ -844,7 +848,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
15357 + struct ppp_net *pn;
15358 + int __user *p = (int __user *)arg;
15359 +
15360 +- mutex_lock(&ppp_mutex);
15361 + switch (cmd) {
15362 + case PPPIOCNEWUNIT:
15363 + /* Create a new ppp unit */
15364 +@@ -894,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
15365 + default:
15366 + err = -ENOTTY;
15367 + }
15368 +- mutex_unlock(&ppp_mutex);
15369 ++
15370 + return err;
15371 + }
15372 +
15373 +@@ -2304,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
15374 +
15375 + pch->ppp = NULL;
15376 + pch->chan = chan;
15377 +- pch->chan_net = net;
15378 ++ pch->chan_net = get_net(net);
15379 + chan->ppp = pch;
15380 + init_ppp_file(&pch->file, CHANNEL);
15381 + pch->file.hdrlen = chan->hdrlen;
15382 +@@ -2401,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
15383 + spin_lock_bh(&pn->all_channels_lock);
15384 + list_del(&pch->list);
15385 + spin_unlock_bh(&pn->all_channels_lock);
15386 ++ put_net(pch->chan_net);
15387 ++ pch->chan_net = NULL;
15388 +
15389 + pch->file.dead = 1;
15390 + wake_up_interruptible(&pch->file.rwait);
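
The ppp_ioctl rework above takes ppp_mutex before dereferencing file->private_data and funnels every branch through a single out: label, so the unattached, channel, and interface paths all hold the lock for their whole run and unlock exactly once. The single-exit shape in miniature (generic C, not the ppp code):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

	static int do_ioctl(int cmd)
	{
		int err;

		pthread_mutex_lock(&big_lock);

		switch (cmd) {
		case 1:
			err = -1;	/* some failure */
			goto out;
		case 2:
			/* ... work under the lock ... */
			break;
		default:
			err = -25;	/* -ENOTTY */
			goto out;
		}

		err = 0;
	out:
		pthread_mutex_unlock(&big_lock);
		return err;
	}

	int main(void)
	{
		printf("%d %d %d\n", do_ioctl(1), do_ioctl(2), do_ioctl(3));
		return 0;
	}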
15391 +diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
15392 +index 01f08a7751f7..e7034c55e796 100644
15393 +--- a/drivers/net/rionet.c
15394 ++++ b/drivers/net/rionet.c
15395 +@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
15396 + struct net_device *ndev = dev_id;
15397 + struct rionet_private *rnet = netdev_priv(ndev);
15398 +
15399 +- spin_lock(&rnet->lock);
15400 ++ spin_lock(&rnet->tx_lock);
15401 +
15402 + if (netif_msg_intr(rnet))
15403 + printk(KERN_INFO
15404 +@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
15405 + if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
15406 + netif_wake_queue(ndev);
15407 +
15408 +- spin_unlock(&rnet->lock);
15409 ++ spin_unlock(&rnet->tx_lock);
15410 + }
15411 +
15412 + static int rionet_open(struct net_device *ndev)
15413 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
15414 +index 88bb8cc3555b..81ecc2ed8137 100644
15415 +--- a/drivers/net/tun.c
15416 ++++ b/drivers/net/tun.c
15417 +@@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
15418 +
15419 + /* Re-attach the filter to persist device */
15420 + if (!skip_filter && (tun->filter_attached == true)) {
15421 +- err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
15422 ++ err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
15423 ++ lockdep_rtnl_is_held());
15424 + if (!err)
15425 + goto out;
15426 + }
15427 +@@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
15428 + /* Zero header length */
15429 + dev->type = ARPHRD_NONE;
15430 + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
15431 +- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
15432 + break;
15433 +
15434 + case IFF_TAP:
15435 +@@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
15436 +
15437 + eth_hw_addr_random(dev);
15438 +
15439 +- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
15440 + break;
15441 + }
15442 + }
15443 +@@ -1466,6 +1465,8 @@ static void tun_setup(struct net_device *dev)
15444 +
15445 + dev->ethtool_ops = &tun_ethtool_ops;
15446 + dev->destructor = tun_free_netdev;
15447 ++ /* We prefer our own queue length */
15448 ++ dev->tx_queue_len = TUN_READQ_SIZE;
15449 + }
15450 +
15451 + /* Trivial set of netlink ops to allow deleting tun or tap
15452 +@@ -1807,7 +1808,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
15453 +
15454 + for (i = 0; i < n; i++) {
15455 + tfile = rtnl_dereference(tun->tfiles[i]);
15456 +- sk_detach_filter(tfile->socket.sk);
15457 ++ __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
15458 + }
15459 +
15460 + tun->filter_attached = false;
15461 +@@ -1820,7 +1821,8 @@ static int tun_attach_filter(struct tun_struct *tun)
15462 +
15463 + for (i = 0; i < tun->numqueues; i++) {
15464 + tfile = rtnl_dereference(tun->tfiles[i]);
15465 +- ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
15466 ++ ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
15467 ++ lockdep_rtnl_is_held());
15468 + if (ret) {
15469 + tun_detach_filter(tun, i);
15470 + return ret;
15471 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
15472 +index 0c5c22b84da8..7de5ab589e4e 100644
15473 +--- a/drivers/net/usb/asix_common.c
15474 ++++ b/drivers/net/usb/asix_common.c
15475 +@@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
15476 + * buffer.
15477 + */
15478 + if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
15479 +- offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
15480 ++ offset = ((rx->remaining + 1) & 0xfffe);
15481 + rx->header = get_unaligned_le32(skb->data + offset);
15482 + offset = 0;
15483 +
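
The asix fix above drops the stray sizeof(u32): the next 32-bit packet header sits at the leftover byte count rounded up to a 16-bit boundary, and the old "+ 4" pointed past it. The rounding in isolation:

	/* (n + 1) & 0xfffe rounds n up to an even value, which is where
	 * the next packet header begins in the concatenated URB buffer. */
	#include <stdio.h>

	int main(void)
	{
		for (unsigned n = 0; n < 6; n++)
			printf("remaining=%u -> header offset=%u\n",
			       n, (n + 1) & 0xfffe);
		return 0;
	}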
15484 +diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
15485 +index bdd83d95ec0a..96a5028621c8 100644
15486 +--- a/drivers/net/usb/cdc_mbim.c
15487 ++++ b/drivers/net/usb/cdc_mbim.c
15488 +@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
15489 + { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
15490 + .driver_info = (unsigned long)&cdc_mbim_info,
15491 + },
15492 +- /* Huawei E3372 fails unless NDP comes after the IP packets */
15493 +- { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
15494 ++
15495 ++ /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
15496 ++ * (12d1:157d), are known to fail unless the NDP is placed
15497 ++ * after the IP packets. Applying the quirk to all Huawei
15498 ++ * devices is broader than necessary, but harmless.
15499 ++ */
15500 ++ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
15501 + .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
15502 + },
15503 + /* default entry */
15504 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
15505 +index a3a4ccf7cf52..1232a8c608b4 100644
15506 +--- a/drivers/net/usb/qmi_wwan.c
15507 ++++ b/drivers/net/usb/qmi_wwan.c
15508 +@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
15509 + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
15510 + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
15511 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
15512 ++ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
15513 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
15514 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
15515 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
15516 +diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
15517 +index 44541dbc5c28..69b994f3b8c5 100644
15518 +--- a/drivers/net/wan/farsync.c
15519 ++++ b/drivers/net/wan/farsync.c
15520 +@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
15521 + dev->mem_start = card->phys_mem
15522 + + BUF_OFFSET ( txBuffer[i][0][0]);
15523 + dev->mem_end = card->phys_mem
15524 +- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
15525 ++ + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
15526 + dev->base_addr = card->pci_conf;
15527 + dev->irq = card->irq;
15528 +
15529 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
15530 +index 91afa3ae414c..a192d451dab2 100644
15531 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
15532 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
15533 +@@ -2143,11 +2143,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
15534 + void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
15535 + struct sk_buff *skb)
15536 + {
15537 +- struct ath10k_pktlog_10_4_hdr *hdr =
15538 +- (struct ath10k_pktlog_10_4_hdr *)skb->data;
15539 +-
15540 +- trace_ath10k_htt_pktlog(ar, hdr->payload,
15541 +- sizeof(*hdr) + __le16_to_cpu(hdr->size));
15542 ++ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
15543 + dev_kfree_skb_any(skb);
15544 + }
15545 + EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
15546 +diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
15547 +index 8f8793004b9f..1b271b99c49e 100644
15548 +--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
15549 ++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
15550 +@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
15551 + };
15552 + static const int inc[4] = { 0, 100, 0, 0 };
15553 +
15554 ++ memset(&mask_m, 0, sizeof(int8_t) * 123);
15555 ++ memset(&mask_p, 0, sizeof(int8_t) * 123);
15556 ++
15557 + cur_bin = -6000;
15558 + upper = bin + 100;
15559 + lower = bin - 100;
15560 +@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
15561 + int tmp, new;
15562 + int i;
15563 +
15564 +- int8_t mask_m[123];
15565 +- int8_t mask_p[123];
15566 + int cur_bb_spur;
15567 + bool is2GHz = IS_CHAN_2GHZ(chan);
15568 +
15569 +- memset(&mask_m, 0, sizeof(int8_t) * 123);
15570 +- memset(&mask_p, 0, sizeof(int8_t) * 123);
15571 +-
15572 + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
15573 + cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
15574 + if (AR_NO_SPUR == cur_bb_spur)
15575 +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
15576 +index db6624527d99..53d7445a5d12 100644
15577 +--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
15578 ++++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
15579 +@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
15580 + int i;
15581 + struct chan_centers centers;
15582 +
15583 +- int8_t mask_m[123];
15584 +- int8_t mask_p[123];
15585 + int cur_bb_spur;
15586 + bool is2GHz = IS_CHAN_2GHZ(chan);
15587 +
15588 +- memset(&mask_m, 0, sizeof(int8_t) * 123);
15589 +- memset(&mask_p, 0, sizeof(int8_t) * 123);
15590 +-
15591 + ath9k_hw_get_channel_centers(ah, chan, &centers);
15592 + freq = centers.synth_center;
15593 +
15594 +diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
15595 +index 73fb4232f9f2..a794157a147d 100644
15596 +--- a/drivers/net/wireless/ath/ath9k/eeprom.c
15597 ++++ b/drivers/net/wireless/ath/ath9k/eeprom.c
15598 +@@ -477,10 +477,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
15599 +
15600 + if (match) {
15601 + if (AR_SREV_9287(ah)) {
15602 +- /* FIXME: array overrun? */
15603 + for (i = 0; i < numXpdGains; i++) {
15604 + minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
15605 +- maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
15606 ++ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
15607 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
15608 + data_9287[idxL].pwrPdg[i],
15609 + data_9287[idxL].vpdPdg[i],
15610 +@@ -490,7 +489,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
15611 + } else if (eeprom_4k) {
15612 + for (i = 0; i < numXpdGains; i++) {
15613 + minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
15614 +- maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
15615 ++ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
15616 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
15617 + data_4k[idxL].pwrPdg[i],
15618 + data_4k[idxL].vpdPdg[i],
15619 +@@ -500,7 +499,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
15620 + } else {
15621 + for (i = 0; i < numXpdGains; i++) {
15622 + minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
15623 +- maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
15624 ++ maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
15625 + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
15626 + data_def[idxL].pwrPdg[i],
15627 + data_def[idxL].vpdPdg[i],
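
The eeprom hunk above resolves the old FIXME: the last calibration point of a row with "intercepts" entries lives at index intercepts - 1, and the hard-coded [4] read out of bounds whenever a chip family provides fewer points. In miniature:

	#include <stdio.h>

	int main(void)
	{
		int intercepts = 4;
		int pwr[4] = { 3, 7, 11, 15 };

		/* old code read pwr[4] -- one past the end; new code: */
		printf("max = %d\n", pwr[intercepts - 1]);
		return 0;
	}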
15628 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
15629 +index 2ca783fa50cf..7e269f9aa607 100644
15630 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
15631 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
15632 +@@ -32,7 +32,7 @@
15633 + #define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256)
15634 + #define BRCMF_FLOWRING_INVALID_IFIDX 0xff
15635 +
15636 +-#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
15637 ++#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
15638 + #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
15639 +
15640 + static const u8 brcmf_flowring_prio2fifo[] = {
15641 +@@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15642 + u8 prio, u8 ifidx)
15643 + {
15644 + struct brcmf_flowring_hash *hash;
15645 +- u8 hash_idx;
15646 ++ u16 hash_idx;
15647 + u32 i;
15648 + bool found;
15649 + bool sta;
15650 +@@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15651 + }
15652 + hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
15653 + BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
15654 ++ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
15655 + found = false;
15656 + hash = flow->hash;
15657 + for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
15658 +@@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15659 + break;
15660 + }
15661 + hash_idx++;
15662 ++ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
15663 + }
15664 + if (found)
15665 + return hash[hash_idx].flowid;
15666 +@@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15667 + {
15668 + struct brcmf_flowring_ring *ring;
15669 + struct brcmf_flowring_hash *hash;
15670 +- u8 hash_idx;
15671 ++ u16 hash_idx;
15672 + u32 i;
15673 + bool found;
15674 + u8 fifo;
15675 +@@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15676 + }
15677 + hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
15678 + BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
15679 ++ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
15680 + found = false;
15681 + hash = flow->hash;
15682 + for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
15683 +@@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15684 + break;
15685 + }
15686 + hash_idx++;
15687 ++ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
15688 + }
15689 + if (found) {
15690 + for (i = 0; i < flow->nrofrings; i++) {
15691 +@@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15692 + }
15693 +
15694 +
15695 +-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
15696 ++u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
15697 + {
15698 + struct brcmf_flowring_ring *ring;
15699 +
15700 +@@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
15701 + }
15702 +
15703 +
15704 +-static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
15705 ++static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
15706 + bool blocked)
15707 + {
15708 + struct brcmf_flowring_ring *ring;
15709 +@@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
15710 + }
15711 +
15712 +
15713 +-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
15714 ++void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
15715 + {
15716 + struct brcmf_flowring_ring *ring;
15717 +- u8 hash_idx;
15718 ++ u16 hash_idx;
15719 + struct sk_buff *skb;
15720 +
15721 + ring = flow->rings[flowid];
15722 +@@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
15723 + }
15724 +
15725 +
15726 +-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
15727 ++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
15728 + struct sk_buff *skb)
15729 + {
15730 + struct brcmf_flowring_ring *ring;
15731 +@@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
15732 + }
15733 +
15734 +
15735 +-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
15736 ++struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
15737 + {
15738 + struct brcmf_flowring_ring *ring;
15739 + struct sk_buff *skb;
15740 +@@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
15741 + }
15742 +
15743 +
15744 +-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
15745 ++void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
15746 + struct sk_buff *skb)
15747 + {
15748 + struct brcmf_flowring_ring *ring;
15749 +@@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
15750 + }
15751 +
15752 +
15753 +-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
15754 ++u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
15755 + {
15756 + struct brcmf_flowring_ring *ring;
15757 +
15758 +@@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
15759 + }
15760 +
15761 +
15762 +-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
15763 ++void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
15764 + {
15765 + struct brcmf_flowring_ring *ring;
15766 +
15767 +@@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
15768 + }
15769 +
15770 +
15771 +-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
15772 ++u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
15773 + {
15774 + struct brcmf_flowring_ring *ring;
15775 +- u8 hash_idx;
15776 ++ u16 hash_idx;
15777 +
15778 + ring = flow->rings[flowid];
15779 + hash_idx = ring->hash_id;
15780 +@@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
15781 + struct brcmf_pub *drvr = bus_if->drvr;
15782 + struct brcmf_flowring_tdls_entry *search;
15783 + struct brcmf_flowring_tdls_entry *remove;
15784 +- u8 flowid;
15785 ++ u16 flowid;
15786 +
15787 + for (flowid = 0; flowid < flow->nrofrings; flowid++) {
15788 + if (flow->rings[flowid])
15789 +@@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
15790 + struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
15791 + struct brcmf_pub *drvr = bus_if->drvr;
15792 + u32 i;
15793 +- u8 flowid;
15794 ++ u16 flowid;
15795 +
15796 + if (flow->addr_mode[ifidx] != addr_mode) {
15797 + for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
15798 +@@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
15799 + struct brcmf_flowring_tdls_entry *prev;
15800 + struct brcmf_flowring_tdls_entry *search;
15801 + u32 i;
15802 +- u8 flowid;
15803 ++ u16 flowid;
15804 + bool sta;
15805 +
15806 + sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
15807 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
15808 +index 95fd1c9675d1..068e68d94999 100644
15809 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
15810 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
15811 +@@ -16,7 +16,7 @@
15812 + #define BRCMFMAC_FLOWRING_H
15813 +
15814 +
15815 +-#define BRCMF_FLOWRING_HASHSIZE 256
15816 ++#define BRCMF_FLOWRING_HASHSIZE 512 /* has to be 2^x */
15817 + #define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF
15818 +
15819 +
15820 +@@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
15821 + u8 mac[ETH_ALEN];
15822 + u8 fifo;
15823 + u8 ifidx;
15824 +- u8 flowid;
15825 ++ u16 flowid;
15826 + };
15827 +
15828 + enum ring_status {
15829 +@@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15830 + u8 prio, u8 ifidx);
15831 + u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
15832 + u8 prio, u8 ifidx);
15833 +-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
15834 +-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
15835 +-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
15836 +-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
15837 ++void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
15838 ++void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
15839 ++u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
15840 ++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
15841 + struct sk_buff *skb);
15842 +-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
15843 +-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
15844 ++struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
15845 ++void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
15846 + struct sk_buff *skb);
15847 +-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
15848 +-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
15849 ++u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
15850 ++u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
15851 + struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
15852 + void brcmf_flowring_detach(struct brcmf_flowring *flow);
15853 + void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
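
The flowring series above widens flowid/hash_idx to u16 and doubles the hash table; since BRCMF_FLOWRING_HASHSIZE stays a power of two, wrap-around during linear probing becomes an AND with HASHSIZE - 1 instead of relying on a u8 index overflowing at 256. A standalone sketch of the masked probe:

	#include <stdio.h>

	#define HASHSIZE 512	/* has to be 2^x */

	int main(void)
	{
		unsigned short idx = 510;	/* start near the table's end */

		for (int i = 0; i < 4; i++) {
			printf("probe slot %u\n", idx);
			idx = (idx + 1) & (HASHSIZE - 1);	/* 511 wraps to 0 */
		}
		return 0;
	}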
15854 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
15855 +index c2bdb91746cf..922966734a7f 100644
15856 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
15857 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
15858 +@@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
15859 + }
15860 +
15861 +
15862 +-static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
15863 ++static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
15864 + {
15865 + struct brcmf_flowring *flow = msgbuf->flow;
15866 + struct brcmf_commonring *commonring;
15867 +@@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
15868 + }
15869 +
15870 +
15871 +-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
15872 ++void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
15873 + {
15874 + struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
15875 + struct msgbuf_tx_flowring_delete_req *delete;
15876 +@@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
15877 + u32 count;
15878 +
15879 + if_msgbuf = drvr->bus_if->msgbuf;
15880 ++
15881 ++ if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
15882 ++ brcmf_err("driver not configured for this many flowrings %d\n",
15883 ++ if_msgbuf->nrof_flowrings);
15884 ++ if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
15885 ++ }
15886 ++
15887 + msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
15888 + if (!msgbuf)
15889 + goto fail;
15890 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
15891 +index 3d513e407e3d..ee6906a3c3f6 100644
15892 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
15893 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
15894 +@@ -33,7 +33,7 @@
15895 +
15896 +
15897 + int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
15898 +-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
15899 ++void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
15900 + int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
15901 + void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
15902 + #else
15903 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
15904 +index d70a1716f3e0..1486f33a743e 100644
15905 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
15906 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
15907 +@@ -1143,6 +1143,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
15908 + /* the fw is stopped, the aux sta is dead: clean up driver state */
15909 + iwl_mvm_del_aux_sta(mvm);
15910 +
15911 ++ iwl_free_fw_paging(mvm);
15912 ++
15913 + /*
15914 + * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
15915 + * won't be called in this case).
15916 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
15917 +index e80be9a59520..89ea70deeb84 100644
15918 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
15919 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
15920 +@@ -684,8 +684,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
15921 + for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
15922 + kfree(mvm->nvm_sections[i].data);
15923 +
15924 +- iwl_free_fw_paging(mvm);
15925 +-
15926 + iwl_mvm_tof_clean(mvm);
15927 +
15928 + ieee80211_free_hw(mvm->hw);
15929 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
15930 +index 5a854c609477..1198caac35c8 100644
15931 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
15932 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
15933 +@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
15934 + */
15935 + val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
15936 + if (val & (BIT(1) | BIT(17))) {
15937 +- IWL_INFO(trans,
15938 +- "can't access the RSA semaphore it is write protected\n");
15939 ++ IWL_DEBUG_INFO(trans,
15940 ++ "can't access the RSA semaphore it is write protected\n");
15941 + return 0;
15942 + }
15943 +
15944 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
15945 +index ff3ee9dfbbd5..23bae87d4d3d 100644
15946 +--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
15947 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
15948 +@@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
15949 +
15950 + case EVENT_PS_AWAKE:
15951 + mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
15952 +- if (!adapter->pps_uapsd_mode && priv->port_open &&
15953 ++ if (!adapter->pps_uapsd_mode &&
15954 ++ (priv->port_open ||
15955 ++ (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
15956 + priv->media_connected && adapter->sleep_period.period) {
15957 +- adapter->pps_uapsd_mode = true;
15958 +- mwifiex_dbg(adapter, EVENT,
15959 +- "event: PPS/UAPSD mode activated\n");
15960 ++ adapter->pps_uapsd_mode = true;
15961 ++ mwifiex_dbg(adapter, EVENT,
15962 ++ "event: PPS/UAPSD mode activated\n");
15963 + }
15964 + adapter->tx_lock_flag = false;
15965 + if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
15966 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
15967 +index 6a4fc5d183cf..d7db6f23e728 100644
15968 +--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
15969 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
15970 +@@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
15971 + mwifiex_dbg(adapter, ERROR,
15972 + "Attempt to reconnect on csa closed chan(%d)\n",
15973 + bss_desc->channel);
15974 ++ ret = -1;
15975 + goto done;
15976 + }
15977 +
15978 +diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
15979 +index acccd6734e3b..499e5a741c62 100644
15980 +--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
15981 ++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
15982 +@@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
15983 + priv = adapter->priv[i];
15984 + if (!priv)
15985 + continue;
15986 +- if (!priv->port_open)
15987 ++ if (!priv->port_open &&
15988 ++ (priv->bss_mode != NL80211_IFTYPE_ADHOC))
15989 + continue;
15990 + if (adapter->if_ops.is_port_ready &&
15991 + !adapter->if_ops.is_port_ready(priv))
15992 +@@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
15993 +
15994 + priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
15995 +
15996 +- if (!priv_tmp->port_open ||
15997 ++ if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
15998 ++ !priv_tmp->port_open) ||
15999 + (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
16000 + continue;
16001 +
16002 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
16003 +index bf9afbf46c1b..4b0bb6b4f6f1 100644
16004 +--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
16005 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
16006 +@@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
16007 + { USB_DEVICE(0x0411, 0x01a2) },
16008 + { USB_DEVICE(0x0411, 0x01ee) },
16009 + { USB_DEVICE(0x0411, 0x01a8) },
16010 ++ { USB_DEVICE(0x0411, 0x01fd) },
16011 + /* Corega */
16012 + { USB_DEVICE(0x07aa, 0x002f) },
16013 + { USB_DEVICE(0x07aa, 0x003c) },
16014 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
16015 +index 28f7010e7108..1aca77719521 100644
16016 +--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
16017 ++++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
16018 +@@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
16019 + struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
16020 + struct rtl_phy *rtlphy = &(rtlpriv->phy);
16021 + struct rtl_sta_info *sta_entry = NULL;
16022 +- u8 wireless_mode = 0;
16023 ++ u16 wireless_mode = 0;
16024 +
16025 + /*
16026 + *this rate is no use for true rate, firmware
16027 +@@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
16028 + {
16029 + struct rtl_mac *mac = rtl_mac(rtlpriv);
16030 + struct rtl_sta_info *sta_entry = NULL;
16031 +- u8 wireless_mode = 0;
16032 ++ u16 wireless_mode = 0;
16033 + u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
16034 +
16035 + if (sta) {
16036 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
16037 +index bbb789f8990b..5e5719b26774 100644
16038 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
16039 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
16040 +@@ -3855,7 +3855,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
16041 + {
16042 + struct rtl_priv *rtlpriv = rtl_priv(hw);
16043 + struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
16044 +- u8 wireless_mode = mac->mode;
16045 ++ u16 wireless_mode = mac->mode;
16046 + u8 sifs_timer, r2t_sifs;
16047 +
16048 + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
16049 +diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
16050 +index 4544752a2ba8..84397b190cc0 100644
16051 +--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
16052 ++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
16053 +@@ -1323,14 +1323,13 @@ struct rtl_tid_data {
16054 +
16055 + struct rtl_sta_info {
16056 + struct list_head list;
16057 +- u8 ratr_index;
16058 +- u8 wireless_mode;
16059 +- u8 mimo_ps;
16060 +- u8 mac_addr[ETH_ALEN];
16061 + struct rtl_tid_data tids[MAX_TID_COUNT];
16062 +-
16063 + /* just used for ap adhoc or mesh*/
16064 + struct rssi_sta rssi_stat;
16065 ++ u16 wireless_mode;
16066 ++ u8 ratr_index;
16067 ++ u8 mimo_ps;
16068 ++ u8 mac_addr[ETH_ALEN];
16069 + } __packed;
16070 +
16071 + struct rtl_priv;
16072 +diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
16073 +index c96405498bf4..4b59f67724de 100644
16074 +--- a/drivers/net/wireless/ti/wlcore/event.c
16075 ++++ b/drivers/net/wireless/ti/wlcore/event.c
16076 +@@ -38,7 +38,7 @@
16077 +
16078 + int wlcore_event_fw_logger(struct wl1271 *wl)
16079 + {
16080 +- u32 ret;
16081 ++ int ret;
16082 + struct fw_logger_information fw_log;
16083 + u8 *buffer;
16084 + u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
16085 +diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
16086 +index 1f44a151d206..d5a099b022e4 100644
16087 +--- a/drivers/nfc/st21nfca/i2c.c
16088 ++++ b/drivers/nfc/st21nfca/i2c.c
16089 +@@ -524,8 +524,10 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
16090 + /* Get EN GPIO from ACPI */
16091 + gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
16092 + GPIOD_OUT_LOW);
16093 +- if (!IS_ERR(gpiod_ena))
16094 +- phy->gpio_ena = desc_to_gpio(gpiod_ena);
16095 ++ if (IS_ERR(gpiod_ena)) {
16096 ++ nfc_err(dev, "Unable to get ENABLE GPIO\n");
16097 ++ return -ENODEV;
16098 ++ }
16099 +
16100 + phy->gpio_ena = desc_to_gpio(gpiod_ena);
16101 +
16102 +diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
16103 +index 588803ad6847..6ccba0d862df 100644
16104 +--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
16105 ++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
16106 +@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
16107 + return 0;
16108 + }
16109 +
16110 +-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
16111 +- phys_addr_t *db_addr,
16112 +- resource_size_t *db_size)
16113 +-{
16114 +- struct amd_ntb_dev *ndev = ntb_ndev(ntb);
16115 +-
16116 +- if (db_addr)
16117 +- *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
16118 +- if (db_size)
16119 +- *db_size = sizeof(u32);
16120 +-
16121 +- return 0;
16122 +-}
16123 +-
16124 + static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
16125 + {
16126 + struct amd_ntb_dev *ndev = ntb_ndev(ntb);
16127 +@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
16128 + return 0;
16129 + }
16130 +
16131 +-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
16132 +- phys_addr_t *spad_addr)
16133 +-{
16134 +- struct amd_ntb_dev *ndev = ntb_ndev(ntb);
16135 +-
16136 +- if (idx < 0 || idx >= ndev->spad_count)
16137 +- return -EINVAL;
16138 +-
16139 +- if (spad_addr)
16140 +- *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
16141 +- ndev->peer_spad + (idx << 2));
16142 +- return 0;
16143 +-}
16144 +-
16145 + static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
16146 + {
16147 + struct amd_ntb_dev *ndev = ntb_ndev(ntb);
16148 +@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
16149 + .db_clear = amd_ntb_db_clear,
16150 + .db_set_mask = amd_ntb_db_set_mask,
16151 + .db_clear_mask = amd_ntb_db_clear_mask,
16152 +- .peer_db_addr = amd_ntb_peer_db_addr,
16153 + .peer_db_set = amd_ntb_peer_db_set,
16154 + .spad_count = amd_ntb_spad_count,
16155 + .spad_read = amd_ntb_spad_read,
16156 + .spad_write = amd_ntb_spad_write,
16157 +- .peer_spad_addr = amd_ntb_peer_spad_addr,
16158 + .peer_spad_read = amd_ntb_peer_spad_read,
16159 + .peer_spad_write = amd_ntb_peer_spad_write,
16160 + };
16161 +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
16162 +index c8a37ba4b4f9..6bdc1e7b7503 100644
16163 +--- a/drivers/ntb/test/ntb_perf.c
16164 ++++ b/drivers/ntb/test/ntb_perf.c
16165 +@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
16166 + atomic_dec(&pctx->dma_sync);
16167 + }
16168 +
16169 +-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
16170 ++static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
16171 + char *src, size_t size)
16172 + {
16173 + struct perf_ctx *perf = pctx->perf;
16174 +@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
16175 + dma_cookie_t cookie;
16176 + size_t src_off, dst_off;
16177 + struct perf_mw *mw = &perf->mw;
16178 +- u64 vbase, dst_vaddr;
16179 ++ void __iomem *vbase;
16180 ++ void __iomem *dst_vaddr;
16181 + dma_addr_t dst_phys;
16182 + int retries = 0;
16183 +
16184 +@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
16185 + }
16186 +
16187 + device = chan->device;
16188 +- src_off = (size_t)src & ~PAGE_MASK;
16189 +- dst_off = (size_t)dst & ~PAGE_MASK;
16190 ++ src_off = (uintptr_t)src & ~PAGE_MASK;
16191 ++ dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
16192 +
16193 + if (!is_dma_copy_aligned(device, src_off, dst_off, size))
16194 + return -ENODEV;
16195 +
16196 +- vbase = (u64)(u64 *)mw->vbase;
16197 +- dst_vaddr = (u64)(u64 *)dst;
16198 ++ vbase = mw->vbase;
16199 ++ dst_vaddr = dst;
16200 + dst_phys = mw->phys_addr + (dst_vaddr - vbase);
16201 +
16202 + unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
16203 +@@ -261,13 +262,13 @@ err_get_unmap:
16204 + return 0;
16205 + }
16206 +
16207 +-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
16208 ++static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
16209 + u64 buf_size, u64 win_size, u64 total)
16210 + {
16211 + int chunks, total_chunks, i;
16212 + int copied_chunks = 0;
16213 + u64 copied = 0, result;
16214 +- char *tmp = dst;
16215 ++ char __iomem *tmp = dst;
16216 + u64 perf, diff_us;
16217 + ktime_t kstart, kstop, kdiff;
16218 +
16219 +@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
16220 + struct perf_ctx *perf = pctx->perf;
16221 + struct pci_dev *pdev = perf->ntb->pdev;
16222 + struct perf_mw *mw = &perf->mw;
16223 +- char *dst;
16224 ++ char __iomem *dst;
16225 + u64 win_size, buf_size, total;
16226 + void *src;
16227 + int rc, node, i;
16228 +@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
16229 + if (buf_size > MAX_TEST_SIZE)
16230 + buf_size = MAX_TEST_SIZE;
16231 +
16232 +- dst = (char *)mw->vbase;
16233 ++ dst = (char __iomem *)mw->vbase;
16234 +
16235 + atomic_inc(&perf->tsync);
16236 + while (atomic_read(&perf->tsync) != perf->perf_threads)
16237 +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
16238 +index 5d28e9405f32..cdbab06bf74f 100644
16239 +--- a/drivers/nvdimm/bus.c
16240 ++++ b/drivers/nvdimm/bus.c
16241 +@@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
16242 + [ND_CMD_IMPLEMENTED] = { },
16243 + [ND_CMD_SMART] = {
16244 + .out_num = 2,
16245 +- .out_sizes = { 4, 8, },
16246 ++ .out_sizes = { 4, 128, },
16247 + },
16248 + [ND_CMD_SMART_THRESHOLD] = {
16249 + .out_num = 2,
16250 +@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
16251 +
16252 + /* fail write commands (when read-only) */
16253 + if (read_only)
16254 +- switch (ioctl_cmd) {
16255 +- case ND_IOCTL_VENDOR:
16256 +- case ND_IOCTL_SET_CONFIG_DATA:
16257 +- case ND_IOCTL_ARS_START:
16258 ++ switch (cmd) {
16259 ++ case ND_CMD_VENDOR:
16260 ++ case ND_CMD_SET_CONFIG_DATA:
16261 ++ case ND_CMD_ARS_START:
16262 + dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
16263 + nvdimm ? nvdimm_cmd_name(cmd)
16264 + : nvdimm_bus_cmd_name(cmd));
16265 +diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
16266 +index ae81a2f1da50..f0b56b3aac4d 100644
16267 +--- a/drivers/nvdimm/pfn_devs.c
16268 ++++ b/drivers/nvdimm/pfn_devs.c
16269 +@@ -315,7 +315,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
16270 + } else {
16271 + /* from init we validate */
16272 + if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
16273 +- return -EINVAL;
16274 ++ return -ENODEV;
16275 + }
16276 +
16277 + if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
16278 +diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
16279 +index 8d0b54670184..02c574f8ccb3 100644
16280 +--- a/drivers/nvdimm/pmem.c
16281 ++++ b/drivers/nvdimm/pmem.c
16282 +@@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
16283 + unsigned int len, unsigned int off, int rw,
16284 + sector_t sector)
16285 + {
16286 ++ int rc = 0;
16287 + void *mem = kmap_atomic(page);
16288 + phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
16289 + void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
16290 +
16291 + if (rw == READ) {
16292 + if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
16293 +- return -EIO;
16294 +- memcpy_from_pmem(mem + off, pmem_addr, len);
16295 +- flush_dcache_page(page);
16296 ++ rc = -EIO;
16297 ++ else {
16298 ++ memcpy_from_pmem(mem + off, pmem_addr, len);
16299 ++ flush_dcache_page(page);
16300 ++ }
16301 + } else {
16302 + flush_dcache_page(page);
16303 + memcpy_to_pmem(pmem_addr, mem + off, len);
16304 + }
16305 +
16306 + kunmap_atomic(mem);
16307 +- return 0;
16308 ++ return rc;
16309 + }
16310 +
16311 + static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
16312 +@@ -311,9 +314,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
16313 + * implementation will limit the pfns advertised through
16314 + * ->direct_access() to those that are included in the memmap.
16315 + */
16316 +- if (nd_pfn->mode == PFN_MODE_PMEM)
16317 +- offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
16318 +- else if (nd_pfn->mode == PFN_MODE_RAM)
16319 ++ if (nd_pfn->mode == PFN_MODE_PMEM) {
16320 ++ unsigned long memmap_size;
16321 ++
16322 ++ /*
16323 ++ * vmemmap_populate_hugepages() allocates the memmap array in
16324 ++ * HPAGE_SIZE chunks.
16325 ++ */
16326 ++ memmap_size = ALIGN(64 * npfns, PMD_SIZE);
16327 ++ offset = ALIGN(SZ_8K + memmap_size, nd_pfn->align);
16328 ++ } else if (nd_pfn->mode == PFN_MODE_RAM)
16329 + offset = ALIGN(SZ_8K, nd_pfn->align);
16330 + else
16331 + goto err;
16332 +diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
16333 +index 8ba19bba3156..2bb3c5799ac4 100644
16334 +--- a/drivers/nvmem/mxs-ocotp.c
16335 ++++ b/drivers/nvmem/mxs-ocotp.c
16336 +@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
16337 + if (ret)
16338 + goto close_banks;
16339 +
16340 +- while (val_size) {
16341 ++ while (val_size >= reg_size) {
16342 + if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
16343 + /* fill up non-data register */
16344 + *buf = 0;
16345 +@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
16346 + }
16347 +
16348 + buf++;
16349 +- val_size--;
16350 ++ val_size -= reg_size;
16351 + offset += reg_size;
16352 + }
16353 +
16354 +diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
16355 +index 1a3556a9e9ea..ed01c0172e4a 100644
16356 +--- a/drivers/of/of_reserved_mem.c
16357 ++++ b/drivers/of/of_reserved_mem.c
16358 +@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
16359 + phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
16360 + phys_addr_t *res_base)
16361 + {
16362 ++ phys_addr_t base;
16363 + /*
16364 + * We use __memblock_alloc_base() because memblock_alloc_base()
16365 + * panic()s on allocation failure.
16366 + */
16367 +- phys_addr_t base = __memblock_alloc_base(size, align, end);
16368 ++ end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
16369 ++ base = __memblock_alloc_base(size, align, end);
16370 + if (!base)
16371 + return -ENOMEM;
16372 +
16373 +diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
16374 +index fe600964fa50..88ccfeaa49c7 100644
16375 +--- a/drivers/pci/host/pci-imx6.c
16376 ++++ b/drivers/pci/host/pci-imx6.c
16377 +@@ -32,7 +32,7 @@
16378 + #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
16379 +
16380 + struct imx6_pcie {
16381 +- struct gpio_desc *reset_gpio;
16382 ++ int reset_gpio;
16383 + struct clk *pcie_bus;
16384 + struct clk *pcie_phy;
16385 + struct clk *pcie;
16386 +@@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
16387 + usleep_range(200, 500);
16388 +
16389 + /* Some boards don't have PCIe reset GPIO. */
16390 +- if (imx6_pcie->reset_gpio) {
16391 +- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
16392 ++ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
16393 ++ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
16394 + msleep(100);
16395 +- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
16396 ++ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
16397 + }
16398 + return 0;
16399 +
16400 +@@ -561,6 +561,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
16401 + {
16402 + struct imx6_pcie *imx6_pcie;
16403 + struct pcie_port *pp;
16404 ++ struct device_node *np = pdev->dev.of_node;
16405 + struct resource *dbi_base;
16406 + int ret;
16407 +
16408 +@@ -581,8 +582,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
16409 + return PTR_ERR(pp->dbi_base);
16410 +
16411 + /* Fetch GPIOs */
16412 +- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
16413 +- GPIOD_OUT_LOW);
16414 ++ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
16415 ++ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
16416 ++ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
16417 ++ GPIOF_OUT_INIT_LOW, "PCIe reset");
16418 ++ if (ret) {
16419 ++ dev_err(&pdev->dev, "unable to get reset gpio\n");
16420 ++ return ret;
16421 ++ }
16422 ++ }
16423 +
16424 + /* Fetch clocks */
16425 + imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
16426 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
16427 +index 6d7ab9bb0d5a..6b0056e9c33e 100644
16428 +--- a/drivers/pci/probe.c
16429 ++++ b/drivers/pci/probe.c
16430 +@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
16431 + u16 orig_cmd;
16432 + struct pci_bus_region region, inverted_region;
16433 +
16434 ++ if (dev->non_compliant_bars)
16435 ++ return 0;
16436 ++
16437 + mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
16438 +
16439 + /* No printks while decoding is disabled! */
16440 +@@ -1171,6 +1174,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
16441 + int pci_setup_device(struct pci_dev *dev)
16442 + {
16443 + u32 class;
16444 ++ u16 cmd;
16445 + u8 hdr_type;
16446 + int pos = 0;
16447 + struct pci_bus_region region;
16448 +@@ -1214,6 +1218,16 @@ int pci_setup_device(struct pci_dev *dev)
16449 + /* device class may be changed after fixup */
16450 + class = dev->class >> 8;
16451 +
16452 ++ if (dev->non_compliant_bars) {
16453 ++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
16454 ++ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
16455 ++ dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
16456 ++ cmd &= ~PCI_COMMAND_IO;
16457 ++ cmd &= ~PCI_COMMAND_MEMORY;
16458 ++ pci_write_config_word(dev, PCI_COMMAND, cmd);
16459 ++ }
16460 ++ }
16461 ++
16462 + switch (dev->hdr_type) { /* header type */
16463 + case PCI_HEADER_TYPE_NORMAL: /* standard header */
16464 + if (class == PCI_CLASS_BRIDGE_PCI)
16465 +diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
16466 +index 4c2fa05b4589..944674ee3464 100644
16467 +--- a/drivers/pcmcia/db1xxx_ss.c
16468 ++++ b/drivers/pcmcia/db1xxx_ss.c
16469 +@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
16470 + int stschg_irq; /* card-status-change irq */
16471 + int card_irq; /* card irq */
16472 + int eject_irq; /* db1200/pb1200 have these */
16473 ++ int insert_gpio; /* db1000 carddetect gpio */
16474 +
16475 + #define BOARD_TYPE_DEFAULT 0 /* most boards */
16476 + #define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
16477 +@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
16478 + /* carddetect gpio: low-active */
16479 + static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
16480 + {
16481 +- return !gpio_get_value(irq_to_gpio(sock->insert_irq));
16482 ++ return !gpio_get_value(sock->insert_gpio);
16483 + }
16484 +
16485 + static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
16486 +@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
16487 + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
16488 + sock->card_irq = r ? r->start : 0;
16489 +
16490 +- /* insert: irq which triggers on card insertion/ejection */
16491 ++ /* insert: irq which triggers on card insertion/ejection
16492 ++ * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
16493 ++ */
16494 + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
16495 + sock->insert_irq = r ? r->start : -1;
16496 ++ if (sock->board_type == BOARD_TYPE_DEFAULT) {
16497 ++ sock->insert_gpio = r ? r->start : -1;
16498 ++ sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
16499 ++ }
16500 +
16501 + /* stschg: irq which trigger on card status change (optional) */
16502 + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
16503 +diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
16504 +index 0f5997ceb494..08b1d93da9fe 100644
16505 +--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
16506 ++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
16507 +@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
16508 + }
16509 + if (num_pulls) {
16510 + err = of_property_read_u32_index(np, "brcm,pull",
16511 +- (num_funcs > 1) ? i : 0, &pull);
16512 ++ (num_pulls > 1) ? i : 0, &pull);
16513 + if (err)
16514 + goto out;
16515 + err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
16516 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
16517 +index a5bb93987378..1029aa7889b5 100644
16518 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
16519 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
16520 +@@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
16521 +
16522 + if (of_property_read_bool(dev_np, "fsl,input-sel")) {
16523 + np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
16524 +- if (np) {
16525 +- ipctl->input_sel_base = of_iomap(np, 0);
16526 +- if (IS_ERR(ipctl->input_sel_base)) {
16527 +- of_node_put(np);
16528 +- dev_err(&pdev->dev,
16529 +- "iomuxc input select base address not found\n");
16530 +- return PTR_ERR(ipctl->input_sel_base);
16531 +- }
16532 +- } else {
16533 ++ if (!np) {
16534 + dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
16535 + return -EINVAL;
16536 + }
16537 ++
16538 ++ ipctl->input_sel_base = of_iomap(np, 0);
16539 + of_node_put(np);
16540 ++ if (!ipctl->input_sel_base) {
16541 ++ dev_err(&pdev->dev,
16542 ++ "iomuxc input select base address not found\n");
16543 ++ return -ENOMEM;
16544 ++ }
16545 + }
16546 +
16547 + imx_pinctrl_desc.name = dev_name(&pdev->dev);
16548 +diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
16549 +index e96e86d2e745..3878d23ca7a8 100644
16550 +--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
16551 ++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
16552 +@@ -949,7 +949,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
16553 + struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
16554 + int eint_num, virq, eint_offset;
16555 + unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
16556 +- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
16557 ++ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
16558 ++ 128000, 256000};
16559 + const struct mtk_desc_pin *pin;
16560 + struct irq_data *d;
16561 +
16562 +@@ -967,9 +968,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
16563 + if (!mtk_eint_can_en_debounce(pctl, eint_num))
16564 + return -ENOSYS;
16565 +
16566 +- dbnc = ARRAY_SIZE(dbnc_arr);
16567 +- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
16568 +- if (debounce <= dbnc_arr[i]) {
16569 ++ dbnc = ARRAY_SIZE(debounce_time);
16570 ++ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
16571 ++ if (debounce <= debounce_time[i]) {
16572 + dbnc = i;
16573 + break;
16574 + }
16575 +diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
16576 +index 352406108fa0..c8969dd49449 100644
16577 +--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
16578 ++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
16579 +@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
16580 + int val;
16581 +
16582 + if (pull)
16583 +- pullidx = data_out ? 1 : 2;
16584 ++ pullidx = data_out ? 2 : 1;
16585 +
16586 + seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
16587 + gpio,
16588 +diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
16589 +index ee69db6ae1c7..e1c0d4e1bb33 100644
16590 +--- a/drivers/pinctrl/pinctrl-at91-pio4.c
16591 ++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
16592 +@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
16593 + break;
16594 + case PIN_CONFIG_BIAS_PULL_UP:
16595 + conf |= ATMEL_PIO_PUEN_MASK;
16596 ++ conf &= (~ATMEL_PIO_PDEN_MASK);
16597 + break;
16598 + case PIN_CONFIG_BIAS_PULL_DOWN:
16599 + conf |= ATMEL_PIO_PDEN_MASK;
16600 ++ conf &= (~ATMEL_PIO_PUEN_MASK);
16601 + break;
16602 + case PIN_CONFIG_DRIVE_OPEN_DRAIN:
16603 + if (arg == 0)
16604 +diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
16605 +index 856f736cb1a6..2673cd9d106e 100644
16606 +--- a/drivers/pinctrl/pinctrl-pistachio.c
16607 ++++ b/drivers/pinctrl/pinctrl-pistachio.c
16608 +@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
16609 + "mfio83",
16610 + };
16611 +
16612 +-static const char * const pistachio_sys_pll_lock_groups[] = {
16613 ++static const char * const pistachio_audio_pll_lock_groups[] = {
16614 + "mfio84",
16615 + };
16616 +
16617 +-static const char * const pistachio_wifi_pll_lock_groups[] = {
16618 ++static const char * const pistachio_rpu_v_pll_lock_groups[] = {
16619 + "mfio85",
16620 + };
16621 +
16622 +-static const char * const pistachio_bt_pll_lock_groups[] = {
16623 ++static const char * const pistachio_rpu_l_pll_lock_groups[] = {
16624 + "mfio86",
16625 + };
16626 +
16627 +-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
16628 ++static const char * const pistachio_sys_pll_lock_groups[] = {
16629 + "mfio87",
16630 + };
16631 +
16632 +-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
16633 ++static const char * const pistachio_wifi_pll_lock_groups[] = {
16634 + "mfio88",
16635 + };
16636 +
16637 +-static const char * const pistachio_audio_pll_lock_groups[] = {
16638 ++static const char * const pistachio_bt_pll_lock_groups[] = {
16639 + "mfio89",
16640 + };
16641 +
16642 +@@ -559,12 +559,12 @@ enum pistachio_mux_option {
16643 + PISTACHIO_FUNCTION_DREQ4,
16644 + PISTACHIO_FUNCTION_DREQ5,
16645 + PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
16646 ++ PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
16647 ++ PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
16648 ++ PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
16649 + PISTACHIO_FUNCTION_SYS_PLL_LOCK,
16650 + PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
16651 + PISTACHIO_FUNCTION_BT_PLL_LOCK,
16652 +- PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
16653 +- PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
16654 +- PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
16655 + PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
16656 + PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
16657 + PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
16658 +@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
16659 + FUNCTION(dreq4),
16660 + FUNCTION(dreq5),
16661 + FUNCTION(mips_pll_lock),
16662 ++ FUNCTION(audio_pll_lock),
16663 ++ FUNCTION(rpu_v_pll_lock),
16664 ++ FUNCTION(rpu_l_pll_lock),
16665 + FUNCTION(sys_pll_lock),
16666 + FUNCTION(wifi_pll_lock),
16667 + FUNCTION(bt_pll_lock),
16668 +- FUNCTION(rpu_v_pll_lock),
16669 +- FUNCTION(rpu_l_pll_lock),
16670 +- FUNCTION(audio_pll_lock),
16671 + FUNCTION(debug_raw_cca_ind),
16672 + FUNCTION(debug_ed_sec20_cca_ind),
16673 + FUNCTION(debug_ed_sec40_cca_ind),
16674 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
16675 +index d24e5f1d1525..bd2e657163b8 100644
16676 +--- a/drivers/pinctrl/pinctrl-single.c
16677 ++++ b/drivers/pinctrl/pinctrl-single.c
16678 +@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
16679 +
16680 + /* Parse pins in each row from LSB */
16681 + while (mask) {
16682 +- bit_pos = ffs(mask);
16683 ++ bit_pos = __ffs(mask);
16684 + pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
16685 +- mask_pos = ((pcs->fmask) << (bit_pos - 1));
16686 ++ mask_pos = ((pcs->fmask) << bit_pos);
16687 + val_pos = val & mask_pos;
16688 + submask = mask & mask_pos;
16689 +
16690 +@@ -1844,7 +1844,7 @@ static int pcs_probe(struct platform_device *pdev)
16691 + ret = of_property_read_u32(np, "pinctrl-single,function-mask",
16692 + &pcs->fmask);
16693 + if (!ret) {
16694 +- pcs->fshift = ffs(pcs->fmask) - 1;
16695 ++ pcs->fshift = __ffs(pcs->fmask);
16696 + pcs->fmax = pcs->fmask >> pcs->fshift;
16697 + } else {
16698 + /* If mask property doesn't exist, function mux is invalid. */
16699 +diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
16700 +index 181ea98a63b7..2b0d70217bbd 100644
16701 +--- a/drivers/pinctrl/sh-pfc/core.c
16702 ++++ b/drivers/pinctrl/sh-pfc/core.c
16703 +@@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
16704 + return ret;
16705 + }
16706 +
16707 +- pinctrl_provide_dummies();
16708 ++ /* Enable dummy states for those platforms without pinctrl support */
16709 ++ if (!of_have_populated_dt())
16710 ++ pinctrl_provide_dummies();
16711 +
16712 + ret = sh_pfc_init_ranges(pfc);
16713 + if (ret < 0)
16714 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
16715 +index 00265f0435a7..8b381d69df86 100644
16716 +--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
16717 ++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
16718 +@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
16719 + .pins = sun8i_a33_pins,
16720 + .npins = ARRAY_SIZE(sun8i_a33_pins),
16721 + .irq_banks = 2,
16722 ++ .irq_bank_base = 1,
16723 + };
16724 +
16725 + static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
16726 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
16727 +index 7a2465f5e71e..884c2b314567 100644
16728 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
16729 ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
16730 +@@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
16731 + static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
16732 + {
16733 + struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
16734 +- u32 reg = sunxi_irq_cfg_reg(d->hwirq);
16735 ++ u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
16736 + u8 index = sunxi_irq_cfg_offset(d->hwirq);
16737 + unsigned long flags;
16738 + u32 regval;
16739 +@@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
16740 + static void sunxi_pinctrl_irq_ack(struct irq_data *d)
16741 + {
16742 + struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
16743 +- u32 status_reg = sunxi_irq_status_reg(d->hwirq);
16744 ++ u32 status_reg = sunxi_irq_status_reg(d->hwirq,
16745 ++ pctl->desc->irq_bank_base);
16746 + u8 status_idx = sunxi_irq_status_offset(d->hwirq);
16747 +
16748 + /* Clear the IRQ */
16749 +@@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
16750 + static void sunxi_pinctrl_irq_mask(struct irq_data *d)
16751 + {
16752 + struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
16753 +- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
16754 ++ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
16755 + u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
16756 + unsigned long flags;
16757 + u32 val;
16758 +@@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
16759 + static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
16760 + {
16761 + struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
16762 +- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
16763 ++ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
16764 + u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
16765 + unsigned long flags;
16766 + u32 val;
16767 +@@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
16768 + if (bank == pctl->desc->irq_banks)
16769 + return;
16770 +
16771 +- reg = sunxi_irq_status_reg_from_bank(bank);
16772 ++ reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
16773 + val = readl(pctl->membase + reg);
16774 +
16775 + if (val) {
16776 +@@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
16777 +
16778 + for (i = 0; i < pctl->desc->irq_banks; i++) {
16779 + /* Mask and clear all IRQs before registering a handler */
16780 +- writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
16781 ++ writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
16782 ++ pctl->desc->irq_bank_base));
16783 + writel(0xffffffff,
16784 +- pctl->membase + sunxi_irq_status_reg_from_bank(i));
16785 ++ pctl->membase + sunxi_irq_status_reg_from_bank(i,
16786 ++ pctl->desc->irq_bank_base));
16787 +
16788 + irq_set_chained_handler_and_data(pctl->irq[i],
16789 + sunxi_pinctrl_irq_handler,
16790 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
16791 +index e248e81a0f9e..0afce1ab12d0 100644
16792 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
16793 ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
16794 +@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
16795 + int npins;
16796 + unsigned pin_base;
16797 + unsigned irq_banks;
16798 ++ unsigned irq_bank_base;
16799 + bool irq_read_needs_mux;
16800 + };
16801 +
16802 +@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
16803 + return pin_num * PULL_PINS_BITS;
16804 + }
16805 +
16806 +-static inline u32 sunxi_irq_cfg_reg(u16 irq)
16807 ++static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
16808 + {
16809 + u8 bank = irq / IRQ_PER_BANK;
16810 + u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
16811 +
16812 +- return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
16813 ++ return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
16814 + }
16815 +
16816 + static inline u32 sunxi_irq_cfg_offset(u16 irq)
16817 +@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
16818 + return irq_num * IRQ_CFG_IRQ_BITS;
16819 + }
16820 +
16821 +-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
16822 ++static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
16823 + {
16824 +- return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
16825 ++ return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
16826 + }
16827 +
16828 +-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
16829 ++static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
16830 + {
16831 + u8 bank = irq / IRQ_PER_BANK;
16832 +
16833 +- return sunxi_irq_ctrl_reg_from_bank(bank);
16834 ++ return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
16835 + }
16836 +
16837 + static inline u32 sunxi_irq_ctrl_offset(u16 irq)
16838 +@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
16839 + return irq_num * IRQ_CTRL_IRQ_BITS;
16840 + }
16841 +
16842 +-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
16843 ++static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
16844 + {
16845 +- return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
16846 ++ return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
16847 + }
16848 +
16849 +-static inline u32 sunxi_irq_status_reg(u16 irq)
16850 ++static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
16851 + {
16852 + u8 bank = irq / IRQ_PER_BANK;
16853 +
16854 +- return sunxi_irq_status_reg_from_bank(bank);
16855 ++ return sunxi_irq_status_reg_from_bank(bank, bank_base);
16856 + }
16857 +
16858 + static inline u32 sunxi_irq_status_offset(u16 irq)
16859 +diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
16860 +index cd410e392550..d33e9ad3218f 100644
16861 +--- a/drivers/platform/x86/dell-rbtn.c
16862 ++++ b/drivers/platform/x86/dell-rbtn.c
16863 +@@ -28,6 +28,7 @@ struct rbtn_data {
16864 + enum rbtn_type type;
16865 + struct rfkill *rfkill;
16866 + struct input_dev *input_dev;
16867 ++ bool suspended;
16868 + };
16869 +
16870 +
16871 +@@ -220,9 +221,55 @@ static const struct acpi_device_id rbtn_ids[] = {
16872 + { "", 0 },
16873 + };
16874 +
16875 ++#ifdef CONFIG_PM_SLEEP
16876 ++static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
16877 ++{
16878 ++ struct rbtn_data *rbtn_data = context;
16879 ++
16880 ++ rbtn_data->suspended = false;
16881 ++}
16882 ++
16883 ++static int rbtn_suspend(struct device *dev)
16884 ++{
16885 ++ struct acpi_device *device = to_acpi_device(dev);
16886 ++ struct rbtn_data *rbtn_data = acpi_driver_data(device);
16887 ++
16888 ++ rbtn_data->suspended = true;
16889 ++
16890 ++ return 0;
16891 ++}
16892 ++
16893 ++static int rbtn_resume(struct device *dev)
16894 ++{
16895 ++ struct acpi_device *device = to_acpi_device(dev);
16896 ++ struct rbtn_data *rbtn_data = acpi_driver_data(device);
16897 ++ acpi_status status;
16898 ++
16899 ++ /*
16900 ++ * Upon resume, some BIOSes send an ACPI notification that triggers
16901 ++ * an unwanted input event. In order to ignore it, we use a flag
16902 ++ * that we set at suspend and clear once we have received the extra
16903 ++ * ACPI notification. Since ACPI notifications are delivered
16904 ++ * asynchronously to drivers, we clear the flag from the workqueue
16905 ++ * used to deliver the notifications. This should be enough
16906 ++ * to have the flag cleared only after we received the extra
16907 ++ * notification, if any.
16908 ++ */
16909 ++ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
16910 ++ rbtn_clear_suspended_flag, rbtn_data);
16911 ++ if (ACPI_FAILURE(status))
16912 ++ rbtn_clear_suspended_flag(rbtn_data);
16913 ++
16914 ++ return 0;
16915 ++}
16916 ++#endif
16917 ++
16918 ++static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
16919 ++
16920 + static struct acpi_driver rbtn_driver = {
16921 + .name = "dell-rbtn",
16922 + .ids = rbtn_ids,
16923 ++ .drv.pm = &rbtn_pm_ops,
16924 + .ops = {
16925 + .add = rbtn_add,
16926 + .remove = rbtn_remove,
16927 +@@ -384,6 +431,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
16928 + {
16929 + struct rbtn_data *rbtn_data = device->driver_data;
16930 +
16931 ++ /*
16932 ++ * Some BIOSes send a notification at resume.
16933 ++ * Ignore it to prevent unwanted input events.
16934 ++ */
16935 ++ if (rbtn_data->suspended) {
16936 ++ dev_dbg(&device->dev, "ACPI notification ignored\n");
16937 ++ return;
16938 ++ }
16939 ++
16940 + if (event != 0x80) {
16941 + dev_info(&device->dev, "Received unknown event (0x%x)\n",
16942 + event);
16943 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
16944 +index d78ee151c9e4..be3bc2f4edd4 100644
16945 +--- a/drivers/platform/x86/ideapad-laptop.c
16946 ++++ b/drivers/platform/x86/ideapad-laptop.c
16947 +@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
16948 + },
16949 + },
16950 + {
16951 ++ .ident = "Lenovo ideapad Y700-15ISK",
16952 ++ .matches = {
16953 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
16954 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
16955 ++ },
16956 ++ },
16957 ++ {
16958 ++ .ident = "Lenovo ideapad Y700 Touch-15ISK",
16959 ++ .matches = {
16960 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
16961 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
16962 ++ },
16963 ++ },
16964 ++ {
16965 + .ident = "Lenovo ideapad Y700-17ISK",
16966 + .matches = {
16967 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
16968 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
16969 +index 73833079bac8..d6baea6a7544 100644
16970 +--- a/drivers/platform/x86/toshiba_acpi.c
16971 ++++ b/drivers/platform/x86/toshiba_acpi.c
16972 +@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
16973 + /* Field definitions */
16974 + #define HCI_ACCEL_MASK 0x7fff
16975 + #define HCI_HOTKEY_DISABLE 0x0b
16976 +-#define HCI_HOTKEY_ENABLE 0x01
16977 ++#define HCI_HOTKEY_ENABLE 0x09
16978 + #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
16979 + #define HCI_LCD_BRIGHTNESS_BITS 3
16980 + #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
16981 +diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
16982 +index 423ce087cd9c..5d5adee16886 100644
16983 +--- a/drivers/pwm/pwm-brcmstb.c
16984 ++++ b/drivers/pwm/pwm-brcmstb.c
16985 +@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
16986 +
16987 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
16988 + p->base = devm_ioremap_resource(&pdev->dev, res);
16989 +- if (!p->base) {
16990 +- ret = -ENOMEM;
16991 ++ if (IS_ERR(p->base)) {
16992 ++ ret = PTR_ERR(p->base);
16993 + goto out_clk;
16994 + }
16995 +
16996 +diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
16997 +index 826634ec0d5c..e0679eb399f6 100644
16998 +--- a/drivers/pwm/pwm-omap-dmtimer.c
16999 ++++ b/drivers/pwm/pwm-omap-dmtimer.c
17000 +@@ -31,6 +31,7 @@
17001 + #include <linux/time.h>
17002 +
17003 + #define DM_TIMER_LOAD_MIN 0xfffffffe
17004 ++#define DM_TIMER_MAX 0xffffffff
17005 +
17006 + struct pwm_omap_dmtimer_chip {
17007 + struct pwm_chip chip;
17008 +@@ -46,13 +47,9 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
17009 + return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
17010 + }
17011 +
17012 +-static int pwm_omap_dmtimer_calc_value(unsigned long clk_rate, int ns)
17013 ++static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
17014 + {
17015 +- u64 c = (u64)clk_rate * ns;
17016 +-
17017 +- do_div(c, NSEC_PER_SEC);
17018 +-
17019 +- return DM_TIMER_LOAD_MIN - c;
17020 ++ return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
17021 + }
17022 +
17023 + static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
17024 +@@ -99,7 +96,8 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
17025 + int duty_ns, int period_ns)
17026 + {
17027 + struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
17028 +- int load_value, match_value;
17029 ++ u32 period_cycles, duty_cycles;
17030 ++ u32 load_value, match_value;
17031 + struct clk *fclk;
17032 + unsigned long clk_rate;
17033 + bool timer_active;
17034 +@@ -117,15 +115,13 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
17035 + fclk = omap->pdata->get_fclk(omap->dm_timer);
17036 + if (!fclk) {
17037 + dev_err(chip->dev, "invalid pmtimer fclk\n");
17038 +- mutex_unlock(&omap->mutex);
17039 +- return -EINVAL;
17040 ++ goto err_einval;
17041 + }
17042 +
17043 + clk_rate = clk_get_rate(fclk);
17044 + if (!clk_rate) {
17045 + dev_err(chip->dev, "invalid pmtimer fclk rate\n");
17046 +- mutex_unlock(&omap->mutex);
17047 +- return -EINVAL;
17048 ++ goto err_einval;
17049 + }
17050 +
17051 + dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
17052 +@@ -133,11 +129,45 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
17053 + /*
17054 + * Calculate the appropriate load and match values based on the
17055 + * specified period and duty cycle. The load value determines the
17056 +- * cycle time and the match value determines the duty cycle.
17057 ++ * period time and the match value determines the duty time.
17058 ++ *
17059 ++ * The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
17060 ++ * Similarly, the active time lasts (match_value-load_value+1) cycles.
17061 ++ * The non-active time is the remainder: (DM_TIMER_MAX-match_value)
17062 ++ * clock cycles.
17063 ++ *
17064 ++ * NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
17065 ++ *
17066 ++ * References:
17067 ++ * OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
17068 ++ * AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
17069 + */
17070 +- load_value = pwm_omap_dmtimer_calc_value(clk_rate, period_ns);
17071 +- match_value = pwm_omap_dmtimer_calc_value(clk_rate,
17072 +- period_ns - duty_ns);
17073 ++ period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
17074 ++ duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
17075 ++
17076 ++ if (period_cycles < 2) {
17077 ++ dev_info(chip->dev,
17078 ++ "period %d ns too short for clock rate %lu Hz\n",
17079 ++ period_ns, clk_rate);
17080 ++ goto err_einval;
17081 ++ }
17082 ++
17083 ++ if (duty_cycles < 1) {
17084 ++ dev_dbg(chip->dev,
17085 ++ "duty cycle %d ns is too short for clock rate %lu Hz\n",
17086 ++ duty_ns, clk_rate);
17087 ++ dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
17088 ++ duty_cycles = 1;
17089 ++ } else if (duty_cycles >= period_cycles) {
17090 ++ dev_dbg(chip->dev,
17091 ++ "duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
17092 ++ duty_ns, period_ns, clk_rate);
17093 ++ dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
17094 ++ duty_cycles = period_cycles - 1;
17095 ++ }
17096 ++
17097 ++ load_value = (DM_TIMER_MAX - period_cycles) + 1;
17098 ++ match_value = load_value + duty_cycles - 1;
17099 +
17100 + /*
17101 + * We MUST stop the associated dual-mode timer before attempting to
17102 +@@ -166,6 +196,11 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
17103 + mutex_unlock(&omap->mutex);
17104 +
17105 + return 0;
17106 ++
17107 ++err_einval:
17108 ++ mutex_unlock(&omap->mutex);
17109 ++
17110 ++ return -EINVAL;
17111 + }
17112 +
17113 + static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
17114 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
17115 +index f2e1a39ce0f3..5cf4a97e0304 100644
17116 +--- a/drivers/regulator/axp20x-regulator.c
17117 ++++ b/drivers/regulator/axp20x-regulator.c
17118 +@@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
17119 + AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
17120 + AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
17121 + AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
17122 +- AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
17123 ++ AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
17124 + AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
17125 + AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
17126 +- AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
17127 ++ AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
17128 + AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
17129 + AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
17130 + AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
17131 +diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
17132 +index 3242ffc0cb25..09dce49609c1 100644
17133 +--- a/drivers/regulator/s2mps11.c
17134 ++++ b/drivers/regulator/s2mps11.c
17135 +@@ -306,7 +306,7 @@ static struct regulator_ops s2mps11_buck_ops = {
17136 + .enable_mask = S2MPS11_ENABLE_MASK \
17137 + }
17138 +
17139 +-#define regulator_desc_s2mps11_buck6_10(num, min, step) { \
17140 ++#define regulator_desc_s2mps11_buck67810(num, min, step) { \
17141 + .name = "BUCK"#num, \
17142 + .id = S2MPS11_BUCK##num, \
17143 + .ops = &s2mps11_buck_ops, \
17144 +@@ -322,6 +322,22 @@ static struct regulator_ops s2mps11_buck_ops = {
17145 + .enable_mask = S2MPS11_ENABLE_MASK \
17146 + }
17147 +
17148 ++#define regulator_desc_s2mps11_buck9 { \
17149 ++ .name = "BUCK9", \
17150 ++ .id = S2MPS11_BUCK9, \
17151 ++ .ops = &s2mps11_buck_ops, \
17152 ++ .type = REGULATOR_VOLTAGE, \
17153 ++ .owner = THIS_MODULE, \
17154 ++ .min_uV = MIN_3000_MV, \
17155 ++ .uV_step = STEP_25_MV, \
17156 ++ .n_voltages = S2MPS11_BUCK9_N_VOLTAGES, \
17157 ++ .ramp_delay = S2MPS11_RAMP_DELAY, \
17158 ++ .vsel_reg = S2MPS11_REG_B9CTRL2, \
17159 ++ .vsel_mask = S2MPS11_BUCK9_VSEL_MASK, \
17160 ++ .enable_reg = S2MPS11_REG_B9CTRL1, \
17161 ++ .enable_mask = S2MPS11_ENABLE_MASK \
17162 ++}
17163 ++
17164 + static const struct regulator_desc s2mps11_regulators[] = {
17165 + regulator_desc_s2mps11_ldo(1, STEP_25_MV),
17166 + regulator_desc_s2mps11_ldo(2, STEP_50_MV),
17167 +@@ -366,11 +382,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
17168 + regulator_desc_s2mps11_buck1_4(3),
17169 + regulator_desc_s2mps11_buck1_4(4),
17170 + regulator_desc_s2mps11_buck5,
17171 +- regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
17172 +- regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
17173 +- regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
17174 +- regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
17175 +- regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
17176 ++ regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
17177 ++ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
17178 ++ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
17179 ++ regulator_desc_s2mps11_buck9,
17180 ++ regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
17181 + };
17182 +
17183 + static struct regulator_ops s2mps14_reg_ops;
17184 +diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
17185 +index 58f5d3b8e981..27343e1c43ef 100644
17186 +--- a/drivers/regulator/s5m8767.c
17187 ++++ b/drivers/regulator/s5m8767.c
17188 +@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
17189 + }
17190 + }
17191 +
17192 +- if (i < s5m8767->num_regulators)
17193 +- *enable_ctrl =
17194 +- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
17195 ++ if (i >= s5m8767->num_regulators)
17196 ++ return -EINVAL;
17197 ++
17198 ++ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
17199 +
17200 + return 0;
17201 + }
17202 +@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
17203 + else
17204 + regulators[id].vsel_mask = 0xff;
17205 +
17206 +- s5m8767_get_register(s5m8767, id, &enable_reg,
17207 ++ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
17208 + &enable_val);
17209 ++ if (ret) {
17210 ++ dev_err(s5m8767->dev, "error reading registers\n");
17211 ++ return ret;
17212 ++ }
17213 + regulators[id].enable_reg = enable_reg;
17214 + regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
17215 + regulators[id].enable_val = enable_val;
17216 +diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
17217 +index 535050fc5e9f..08e0ff8c786a 100644
17218 +--- a/drivers/rtc/rtc-ds1685.c
17219 ++++ b/drivers/rtc/rtc-ds1685.c
17220 +@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
17221 + * Only use this where you are certain another lock will not be held.
17222 + */
17223 + static inline void
17224 +-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
17225 ++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
17226 + {
17227 +- spin_lock_irqsave(&rtc->lock, flags);
17228 ++ spin_lock_irqsave(&rtc->lock, *flags);
17229 + ds1685_rtc_switch_to_bank1(rtc);
17230 + }
17231 +
17232 +@@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
17233 + {
17234 + struct ds1685_priv *rtc = dev_get_drvdata(dev);
17235 + u8 reg = 0, bit = 0, tmp;
17236 +- unsigned long flags = 0;
17237 ++ unsigned long flags;
17238 + long int val = 0;
17239 + const struct ds1685_rtc_ctrl_regs *reg_info =
17240 + ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
17241 +@@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
17242 + bit = reg_info->bit;
17243 +
17244 + /* Safe to spinlock during a write. */
17245 +- ds1685_rtc_begin_ctrl_access(rtc, flags);
17246 ++ ds1685_rtc_begin_ctrl_access(rtc, &flags);
17247 + tmp = rtc->read(rtc, reg);
17248 + rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
17249 + ds1685_rtc_end_ctrl_access(rtc, flags);
17250 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
17251 +index 097325d96db5..b1b4746a0eab 100644
17252 +--- a/drivers/rtc/rtc-hym8563.c
17253 ++++ b/drivers/rtc/rtc-hym8563.c
17254 +@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
17255 + * it does not seem to carry it over a subsequent write/read.
17256 + * So we'll limit ourself to 100 years, starting at 2000 for now.
17257 + */
17258 +- buf[6] = tm->tm_year - 100;
17259 ++ buf[6] = bin2bcd(tm->tm_year - 100);
17260 +
17261 + /*
17262 + * CTL1 only contains TEST-mode bits apart from stop,
17263 +diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
17264 +index 7184a0eda793..725dccae24e7 100644
17265 +--- a/drivers/rtc/rtc-max77686.c
17266 ++++ b/drivers/rtc/rtc-max77686.c
17267 +@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
17268 +
17269 + info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
17270 + MAX77686_RTCIRQ_RTCA1);
17271 +- if (!info->virq) {
17272 ++ if (info->virq <= 0) {
17273 + ret = -ENXIO;
17274 + goto err_rtc;
17275 + }
17276 +diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
17277 +index bd911bafb809..17341feadad1 100644
17278 +--- a/drivers/rtc/rtc-rx8025.c
17279 ++++ b/drivers/rtc/rtc-rx8025.c
17280 +@@ -65,7 +65,6 @@
17281 +
17282 + static const struct i2c_device_id rx8025_id[] = {
17283 + { "rx8025", 0 },
17284 +- { "rv8803", 1 },
17285 + { }
17286 + };
17287 + MODULE_DEVICE_TABLE(i2c, rx8025_id);
17288 +diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
17289 +index f64c282275b3..e1b86bb01062 100644
17290 +--- a/drivers/rtc/rtc-vr41xx.c
17291 ++++ b/drivers/rtc/rtc-vr41xx.c
17292 +@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
17293 + }
17294 +
17295 + static const struct rtc_class_ops vr41xx_rtc_ops = {
17296 +- .release = vr41xx_rtc_release,
17297 +- .ioctl = vr41xx_rtc_ioctl,
17298 +- .read_time = vr41xx_rtc_read_time,
17299 +- .set_time = vr41xx_rtc_set_time,
17300 +- .read_alarm = vr41xx_rtc_read_alarm,
17301 +- .set_alarm = vr41xx_rtc_set_alarm,
17302 ++ .release = vr41xx_rtc_release,
17303 ++ .ioctl = vr41xx_rtc_ioctl,
17304 ++ .read_time = vr41xx_rtc_read_time,
17305 ++ .set_time = vr41xx_rtc_set_time,
17306 ++ .read_alarm = vr41xx_rtc_read_alarm,
17307 ++ .set_alarm = vr41xx_rtc_set_alarm,
17308 ++ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
17309 + };
17310 +
17311 + static int rtc_probe(struct platform_device *pdev)
17312 +diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
17313 +index d72867257346..3eff2a69fe08 100644
17314 +--- a/drivers/scsi/NCR5380.c
17315 ++++ b/drivers/scsi/NCR5380.c
17316 +@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
17317 + struct NCR5380_cmd *ncmd;
17318 + struct scsi_cmnd *cmd;
17319 +
17320 +- if (list_empty(&hostdata->autosense)) {
17321 ++ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
17322 + list_for_each_entry(ncmd, &hostdata->unissued, list) {
17323 + cmd = NCR5380_to_scmd(ncmd);
17324 + dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
17325 +@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
17326 + struct NCR5380_hostdata *hostdata = shost_priv(instance);
17327 + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
17328 +
17329 +- if (hostdata->sensing) {
17330 ++ if (hostdata->sensing == cmd) {
17331 + scsi_eh_restore_cmnd(cmd, &hostdata->ses);
17332 + list_add(&ncmd->list, &hostdata->autosense);
17333 + hostdata->sensing = NULL;
17334 +@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
17335 + struct NCR5380_hostdata *hostdata =
17336 + container_of(work, struct NCR5380_hostdata, main_task);
17337 + struct Scsi_Host *instance = hostdata->host;
17338 +- struct scsi_cmnd *cmd;
17339 + int done;
17340 +
17341 + do {
17342 + done = 1;
17343 +
17344 + spin_lock_irq(&hostdata->lock);
17345 +- while (!hostdata->connected &&
17346 +- (cmd = dequeue_next_cmd(instance))) {
17347 ++ while (!hostdata->connected && !hostdata->selecting) {
17348 ++ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
17349 ++
17350 ++ if (!cmd)
17351 ++ break;
17352 +
17353 + dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
17354 +
17355 +@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
17356 + * entire unit.
17357 + */
17358 +
17359 +- cmd = NCR5380_select(instance, cmd);
17360 +- if (!cmd) {
17361 ++ if (!NCR5380_select(instance, cmd)) {
17362 + dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
17363 + } else {
17364 + dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
17365 +@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
17366 + /* Reselection interrupt */
17367 + goto out;
17368 + }
17369 ++ if (!hostdata->selecting) {
17370 ++ /* Command was aborted */
17371 ++ NCR5380_write(MODE_REG, MR_BASE);
17372 ++ goto out;
17373 ++ }
17374 + if (err < 0) {
17375 + NCR5380_write(MODE_REG, MR_BASE);
17376 + shost_printk(KERN_ERR, instance,
17377 +@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17378 + unsigned char msgout = NOP;
17379 + int sink = 0;
17380 + int len;
17381 +-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
17382 + int transfersize;
17383 +-#endif
17384 + unsigned char *data;
17385 + unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
17386 + struct scsi_cmnd *cmd;
17387 +@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17388 + do_abort(instance);
17389 + cmd->result = DID_ERROR << 16;
17390 + complete_cmd(instance, cmd);
17391 ++ hostdata->connected = NULL;
17392 + return;
17393 + #endif
17394 + case PHASE_DATAIN:
17395 +@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17396 + sink = 1;
17397 + do_abort(instance);
17398 + cmd->result = DID_ERROR << 16;
17399 +- complete_cmd(instance, cmd);
17400 + /* XXX - need to source or sink data here, as appropriate */
17401 + } else
17402 + cmd->SCp.this_residual -= transfersize - len;
17403 + } else
17404 + #endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
17405 + {
17406 +- spin_unlock_irq(&hostdata->lock);
17407 +- NCR5380_transfer_pio(instance, &phase,
17408 +- (int *)&cmd->SCp.this_residual,
17409 ++ /* Break up transfer into 3 ms chunks,
17410 ++ * presuming 6 accesses per handshake.
17411 ++ */
17412 ++ transfersize = min((unsigned long)cmd->SCp.this_residual,
17413 ++ hostdata->accesses_per_ms / 2);
17414 ++ len = transfersize;
17415 ++ NCR5380_transfer_pio(instance, &phase, &len,
17416 + (unsigned char **)&cmd->SCp.ptr);
17417 +- spin_lock_irq(&hostdata->lock);
17418 ++ cmd->SCp.this_residual -= transfersize - len;
17419 + }
17420 +- break;
17421 ++ return;
17422 + case PHASE_MSGIN:
17423 + len = 1;
17424 + data = &tmp;
17425 +@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
17426 + * [disconnected -> connected ->]...
17427 + * [autosense -> connected ->] done
17428 + *
17429 +- * If cmd is unissued then just remove it.
17430 +- * If cmd is disconnected, try to select the target.
17431 +- * If cmd is connected, try to send an abort message.
17432 +- * If cmd is waiting for autosense, give it a chance to complete but check
17433 +- * that it isn't left connected.
17434 + * If cmd was not found at all then presumably it has already been completed,
17435 + * in which case return SUCCESS to try to avoid further EH measures.
17436 ++ *
17437 + * If the command has not completed yet, we must not fail to find it.
17438 ++ * We have no option but to forget the aborted command (even if it still
17439 ++ * lacks sense data). The mid-layer may re-issue a command that is in error
17440 ++ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
17441 ++ * this driver are such that a command can appear on one queue only.
17442 ++ *
17443 ++ * The lock protects driver data structures, but EH handlers also use it
17444 ++ * to serialize their own execution and prevent their own re-entry.
17445 + */
17446 +
17447 + static int NCR5380_abort(struct scsi_cmnd *cmd)
17448 +@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
17449 + "abort: removed %p from issue queue\n", cmd);
17450 + cmd->result = DID_ABORT << 16;
17451 + cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
17452 ++ goto out;
17453 + }
17454 +
17455 + if (hostdata->selecting == cmd) {
17456 +@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
17457 + if (list_del_cmd(&hostdata->disconnected, cmd)) {
17458 + dsprintk(NDEBUG_ABORT, instance,
17459 + "abort: removed %p from disconnected list\n", cmd);
17460 +- cmd->result = DID_ERROR << 16;
17461 +- if (!hostdata->connected)
17462 +- NCR5380_select(instance, cmd);
17463 +- if (hostdata->connected != cmd) {
17464 +- complete_cmd(instance, cmd);
17465 +- result = FAILED;
17466 +- goto out;
17467 +- }
17468 ++ /* Can't call NCR5380_select() and send ABORT because that
17469 ++ * means releasing the lock. Need a bus reset.
17470 ++ */
17471 ++ set_host_byte(cmd, DID_ERROR);
17472 ++ complete_cmd(instance, cmd);
17473 ++ result = FAILED;
17474 ++ goto out;
17475 + }
17476 +
17477 + if (hostdata->connected == cmd) {
17478 + dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
17479 + hostdata->connected = NULL;
17480 +- if (do_abort(instance)) {
17481 +- set_host_byte(cmd, DID_ERROR);
17482 +- complete_cmd(instance, cmd);
17483 +- result = FAILED;
17484 +- goto out;
17485 +- }
17486 +- set_host_byte(cmd, DID_ABORT);
17487 + #ifdef REAL_DMA
17488 + hostdata->dma_len = 0;
17489 + #endif
17490 +- if (cmd->cmnd[0] == REQUEST_SENSE)
17491 +- complete_cmd(instance, cmd);
17492 +- else {
17493 +- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
17494 +-
17495 +- /* Perform autosense for this command */
17496 +- list_add(&ncmd->list, &hostdata->autosense);
17497 +- }
17498 +- }
17499 +-
17500 +- if (list_find_cmd(&hostdata->autosense, cmd)) {
17501 +- dsprintk(NDEBUG_ABORT, instance,
17502 +- "abort: found %p on sense queue\n", cmd);
17503 +- spin_unlock_irqrestore(&hostdata->lock, flags);
17504 +- queue_work(hostdata->work_q, &hostdata->main_task);
17505 +- msleep(1000);
17506 +- spin_lock_irqsave(&hostdata->lock, flags);
17507 +- if (list_del_cmd(&hostdata->autosense, cmd)) {
17508 +- dsprintk(NDEBUG_ABORT, instance,
17509 +- "abort: removed %p from sense queue\n", cmd);
17510 +- set_host_byte(cmd, DID_ABORT);
17511 +- complete_cmd(instance, cmd);
17512 +- goto out;
17513 +- }
17514 +- }
17515 +-
17516 +- if (hostdata->connected == cmd) {
17517 +- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
17518 +- hostdata->connected = NULL;
17519 + if (do_abort(instance)) {
17520 + set_host_byte(cmd, DID_ERROR);
17521 + complete_cmd(instance, cmd);
17522 +@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
17523 + goto out;
17524 + }
17525 + set_host_byte(cmd, DID_ABORT);
17526 +-#ifdef REAL_DMA
17527 +- hostdata->dma_len = 0;
17528 +-#endif
17529 ++ complete_cmd(instance, cmd);
17530 ++ goto out;
17531 ++ }
17532 ++
17533 ++ if (list_del_cmd(&hostdata->autosense, cmd)) {
17534 ++ dsprintk(NDEBUG_ABORT, instance,
17535 ++ "abort: removed %p from sense queue\n", cmd);
17536 ++ set_host_byte(cmd, DID_ERROR);
17537 + complete_cmd(instance, cmd);
17538 + }
17539 +
17540 +@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
17541 + * commands!
17542 + */
17543 +
17544 +- hostdata->selecting = NULL;
17545 ++ if (list_del_cmd(&hostdata->unissued, cmd)) {
17546 ++ cmd->result = DID_RESET << 16;
17547 ++ cmd->scsi_done(cmd);
17548 ++ }
17549 ++
17550 ++ if (hostdata->selecting) {
17551 ++ hostdata->selecting->result = DID_RESET << 16;
17552 ++ complete_cmd(instance, hostdata->selecting);
17553 ++ hostdata->selecting = NULL;
17554 ++ }
17555 +
17556 + list_for_each_entry(ncmd, &hostdata->disconnected, list) {
17557 + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
17558 +@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
17559 + set_host_byte(cmd, DID_RESET);
17560 + cmd->scsi_done(cmd);
17561 + }
17562 ++ INIT_LIST_HEAD(&hostdata->disconnected);
17563 +
17564 + list_for_each_entry(ncmd, &hostdata->autosense, list) {
17565 + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
17566 +@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
17567 + set_host_byte(cmd, DID_RESET);
17568 + cmd->scsi_done(cmd);
17569 + }
17570 ++ INIT_LIST_HEAD(&hostdata->autosense);
17571 +
17572 + if (hostdata->connected) {
17573 + set_host_byte(hostdata->connected, DID_RESET);
17574 +@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
17575 + hostdata->connected = NULL;
17576 + }
17577 +
17578 +- if (hostdata->sensing) {
17579 +- set_host_byte(hostdata->connected, DID_RESET);
17580 +- complete_cmd(instance, hostdata->sensing);
17581 +- hostdata->sensing = NULL;
17582 +- }
17583 +-
17584 + for (i = 0; i < 8; ++i)
17585 + hostdata->busy[i] = 0;
17586 + #ifdef REAL_DMA
17587 +diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
17588 +index 074878b55a0b..d044f3f273be 100644
17589 +--- a/drivers/scsi/aacraid/aacraid.h
17590 ++++ b/drivers/scsi/aacraid/aacraid.h
17591 +@@ -944,6 +944,7 @@ struct fib {
17592 + */
17593 + struct list_head fiblink;
17594 + void *data;
17595 ++ u32 vector_no;
17596 + struct hw_fib *hw_fib_va; /* Actual shared object */
17597 + dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
17598 + };
17599 +@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
17600 + int aac_acquire_irq(struct aac_dev *dev);
17601 + void aac_free_irq(struct aac_dev *dev);
17602 + const char *aac_driverinfo(struct Scsi_Host *);
17603 ++void aac_fib_vector_assign(struct aac_dev *dev);
17604 + struct fib *aac_fib_alloc(struct aac_dev *dev);
17605 + int aac_fib_setup(struct aac_dev *dev);
17606 + void aac_fib_map_free(struct aac_dev *dev);
17607 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
17608 +index a1f90fe849c9..4cbf54928640 100644
17609 +--- a/drivers/scsi/aacraid/commsup.c
17610 ++++ b/drivers/scsi/aacraid/commsup.c
17611 +@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
17612 +
17613 + void aac_fib_map_free(struct aac_dev *dev)
17614 + {
17615 +- pci_free_consistent(dev->pdev,
17616 +- dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
17617 +- dev->hw_fib_va, dev->hw_fib_pa);
17618 ++ if (dev->hw_fib_va && dev->max_fib_size) {
17619 ++ pci_free_consistent(dev->pdev,
17620 ++ (dev->max_fib_size *
17621 ++ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
17622 ++ dev->hw_fib_va, dev->hw_fib_pa);
17623 ++ }
17624 + dev->hw_fib_va = NULL;
17625 + dev->hw_fib_pa = 0;
17626 + }
17627 +
17628 ++void aac_fib_vector_assign(struct aac_dev *dev)
17629 ++{
17630 ++ u32 i = 0;
17631 ++ u32 vector = 1;
17632 ++ struct fib *fibptr = NULL;
17633 ++
17634 ++ for (i = 0, fibptr = &dev->fibs[i];
17635 ++ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
17636 ++ i++, fibptr++) {
17637 ++ if ((dev->max_msix == 1) ||
17638 ++ (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
17639 ++ - dev->vector_cap))) {
17640 ++ fibptr->vector_no = 0;
17641 ++ } else {
17642 ++ fibptr->vector_no = vector;
17643 ++ vector++;
17644 ++ if (vector == dev->max_msix)
17645 ++ vector = 1;
17646 ++ }
17647 ++ }
17648 ++}
17649 ++
17650 + /**
17651 + * aac_fib_setup - setup the fibs
17652 + * @dev: Adapter to set up
17653 +@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
17654 + hw_fib_pa = hw_fib_pa +
17655 + dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
17656 + }
17657 ++
17658 ++ /*
17659 ++ * Assign vector numbers to fibs
17660 ++ */
17661 ++ aac_fib_vector_assign(dev);
17662 ++
17663 + /*
17664 + * Add the fib chain to the free list
17665 + */
17666 +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
17667 +index 76eaa38ffd6e..8a8e84548d64 100644
17668 +--- a/drivers/scsi/aacraid/linit.c
17669 ++++ b/drivers/scsi/aacraid/linit.c
17670 +@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
17671 +
17672 + aac_adapter_enable_int(dev);
17673 +
17674 +- if (!dev->sync_mode)
17675 ++ /* max msix may change after EEH
17676 ++ * Re-assign vectors to fibs
17677 ++ */
17678 ++ aac_fib_vector_assign(dev);
17679 ++
17680 ++ if (!dev->sync_mode) {
17681 ++ /* After EEH recovery or suspend resume, max_msix count
17682 ++ * may change, therefore updating in init as well.
17683 ++ */
17684 + aac_adapter_start(dev);
17685 ++ dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
17686 ++ }
17687 + return 0;
17688 +
17689 + error_iounmap:
17690 +diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
17691 +index 2aa34ea8ceb1..bc0203f3d243 100644
17692 +--- a/drivers/scsi/aacraid/src.c
17693 ++++ b/drivers/scsi/aacraid/src.c
17694 +@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
17695 + break;
17696 + if (dev->msi_enabled && dev->max_msix > 1)
17697 + atomic_dec(&dev->rrq_outstanding[vector_no]);
17698 +- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
17699 + dev->host_rrq[index++] = 0;
17700 ++ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
17701 + if (index == (vector_no + 1) * dev->vector_cap)
17702 + index = vector_no * dev->vector_cap;
17703 + dev->host_rrq_idx[vector_no] = index;
17704 +@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
17705 + #endif
17706 +
17707 + u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
17708 ++ u16 vector_no;
17709 +
17710 + atomic_inc(&q->numpending);
17711 +
17712 + if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
17713 + dev->max_msix > 1) {
17714 +- u_int16_t vector_no, first_choice = 0xffff;
17715 +-
17716 +- vector_no = dev->fibs_pushed_no % dev->max_msix;
17717 +- do {
17718 +- vector_no += 1;
17719 +- if (vector_no == dev->max_msix)
17720 +- vector_no = 1;
17721 +- if (atomic_read(&dev->rrq_outstanding[vector_no]) <
17722 +- dev->vector_cap)
17723 +- break;
17724 +- if (0xffff == first_choice)
17725 +- first_choice = vector_no;
17726 +- else if (vector_no == first_choice)
17727 +- break;
17728 +- } while (1);
17729 +- if (vector_no == first_choice)
17730 +- vector_no = 0;
17731 +- atomic_inc(&dev->rrq_outstanding[vector_no]);
17732 +- if (dev->fibs_pushed_no == 0xffffffff)
17733 +- dev->fibs_pushed_no = 0;
17734 +- else
17735 +- dev->fibs_pushed_no++;
17736 ++ vector_no = fib->vector_no;
17737 + fib->hw_fib_va->header.Handle += (vector_no << 16);
17738 ++ } else {
17739 ++ vector_no = 0;
17740 + }
17741 +
17742 ++ atomic_inc(&dev->rrq_outstanding[vector_no]);
17743 ++
17744 + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
17745 + /* Calculate the amount to the fibsize bits */
17746 + fibsize = (hdr_size + 127) / 128 - 1;
17747 +diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
17748 +index b846a4683562..fc6a83188c1e 100644
17749 +--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
17750 ++++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
17751 +@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
17752 + case AHC_DEV_Q_TAGGED:
17753 + scsi_change_queue_depth(sdev,
17754 + dev->openings + dev->active);
17755 ++ break;
17756 + default:
17757 + /*
17758 + * We allow the OS to queue 2 untagged transactions to
17759 +diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
17760 +index e65478651ca9..389825ba5d96 100644
17761 +--- a/drivers/scsi/atari_NCR5380.c
17762 ++++ b/drivers/scsi/atari_NCR5380.c
17763 +@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
17764 + struct NCR5380_cmd *ncmd;
17765 + struct scsi_cmnd *cmd;
17766 +
17767 +- if (list_empty(&hostdata->autosense)) {
17768 ++ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
17769 + list_for_each_entry(ncmd, &hostdata->unissued, list) {
17770 + cmd = NCR5380_to_scmd(ncmd);
17771 + dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
17772 +@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
17773 + struct NCR5380_hostdata *hostdata = shost_priv(instance);
17774 + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
17775 +
17776 +- if (hostdata->sensing) {
17777 ++ if (hostdata->sensing == cmd) {
17778 + scsi_eh_restore_cmnd(cmd, &hostdata->ses);
17779 + list_add(&ncmd->list, &hostdata->autosense);
17780 + hostdata->sensing = NULL;
17781 +@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
17782 + struct NCR5380_hostdata *hostdata =
17783 + container_of(work, struct NCR5380_hostdata, main_task);
17784 + struct Scsi_Host *instance = hostdata->host;
17785 +- struct scsi_cmnd *cmd;
17786 + int done;
17787 +
17788 + /*
17789 +@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
17790 + done = 1;
17791 +
17792 + spin_lock_irq(&hostdata->lock);
17793 +- while (!hostdata->connected &&
17794 +- (cmd = dequeue_next_cmd(instance))) {
17795 ++ while (!hostdata->connected && !hostdata->selecting) {
17796 ++ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
17797 ++
17798 ++ if (!cmd)
17799 ++ break;
17800 +
17801 + dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
17802 +
17803 +@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
17804 + #ifdef SUPPORT_TAGS
17805 + cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
17806 + #endif
17807 +- cmd = NCR5380_select(instance, cmd);
17808 +- if (!cmd) {
17809 ++ if (!NCR5380_select(instance, cmd)) {
17810 + dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
17811 + maybe_release_dma_irq(instance);
17812 + } else {
17813 +@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
17814 + /* Reselection interrupt */
17815 + goto out;
17816 + }
17817 ++ if (!hostdata->selecting) {
17818 ++ /* Command was aborted */
17819 ++ NCR5380_write(MODE_REG, MR_BASE);
17820 ++ goto out;
17821 ++ }
17822 + if (err < 0) {
17823 + NCR5380_write(MODE_REG, MR_BASE);
17824 + shost_printk(KERN_ERR, instance,
17825 +@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17826 + unsigned char msgout = NOP;
17827 + int sink = 0;
17828 + int len;
17829 +-#if defined(REAL_DMA)
17830 + int transfersize;
17831 +-#endif
17832 + unsigned char *data;
17833 + unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
17834 + struct scsi_cmnd *cmd;
17835 +@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17836 + do_abort(instance);
17837 + cmd->result = DID_ERROR << 16;
17838 + complete_cmd(instance, cmd);
17839 ++ hostdata->connected = NULL;
17840 + return;
17841 + #endif
17842 + case PHASE_DATAIN:
17843 +@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17844 + sink = 1;
17845 + do_abort(instance);
17846 + cmd->result = DID_ERROR << 16;
17847 +- complete_cmd(instance, cmd);
17848 + /* XXX - need to source or sink data here, as appropriate */
17849 + } else {
17850 + #ifdef REAL_DMA
17851 +@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
17852 + } else
17853 + #endif /* defined(REAL_DMA) */
17854 + {
17855 +- spin_unlock_irq(&hostdata->lock);
17856 +- NCR5380_transfer_pio(instance, &phase,
17857 +- (int *)&cmd->SCp.this_residual,
17858 ++ /* Break up transfer into 3 ms chunks,
17859 ++ * presuming 6 accesses per handshake.
17860 ++ */
17861 ++ transfersize = min((unsigned long)cmd->SCp.this_residual,
17862 ++ hostdata->accesses_per_ms / 2);
17863 ++ len = transfersize;
17864 ++ NCR5380_transfer_pio(instance, &phase, &len,
17865 + (unsigned char **)&cmd->SCp.ptr);
17866 +- spin_lock_irq(&hostdata->lock);
17867 ++ cmd->SCp.this_residual -= transfersize - len;
17868 + }
17869 + #if defined(CONFIG_SUN3) && defined(REAL_DMA)
17870 + /* if we had intended to dma that command clear it */
17871 + if (sun3_dma_setup_done == cmd)
17872 + sun3_dma_setup_done = NULL;
17873 + #endif
17874 +- break;
17875 ++ return;
17876 + case PHASE_MSGIN:
17877 + len = 1;
17878 + data = &tmp;
17879 +@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
17880 + * [disconnected -> connected ->]...
17881 + * [autosense -> connected ->] done
17882 + *
17883 +- * If cmd is unissued then just remove it.
17884 +- * If cmd is disconnected, try to select the target.
17885 +- * If cmd is connected, try to send an abort message.
17886 +- * If cmd is waiting for autosense, give it a chance to complete but check
17887 +- * that it isn't left connected.
17888 + * If cmd was not found at all then presumably it has already been completed,
17889 + * in which case return SUCCESS to try to avoid further EH measures.
17890 ++ *
17891 + * If the command has not completed yet, we must not fail to find it.
17892 ++ * We have no option but to forget the aborted command (even if it still
17893 ++ * lacks sense data). The mid-layer may re-issue a command that is in error
17894 ++ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
17895 ++ * this driver are such that a command can appear on one queue only.
17896 ++ *
17897 ++ * The lock protects driver data structures, but EH handlers also use it
17898 ++ * to serialize their own execution and prevent their own re-entry.
17899 + */
17900 +
17901 + static int NCR5380_abort(struct scsi_cmnd *cmd)
17902 +@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
17903 + "abort: removed %p from issue queue\n", cmd);
17904 + cmd->result = DID_ABORT << 16;
17905 + cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
17906 ++ goto out;
17907 + }
17908 +
17909 + if (hostdata->selecting == cmd) {
17910 +@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
17911 + if (list_del_cmd(&hostdata->disconnected, cmd)) {
17912 + dsprintk(NDEBUG_ABORT, instance,
17913 + "abort: removed %p from disconnected list\n", cmd);
17914 +- cmd->result = DID_ERROR << 16;
17915 +- if (!hostdata->connected)
17916 +- NCR5380_select(instance, cmd);
17917 +- if (hostdata->connected != cmd) {
17918 +- complete_cmd(instance, cmd);
17919 +- result = FAILED;
17920 +- goto out;
17921 +- }
17922 ++ /* Can't call NCR5380_select() and send ABORT because that
17923 ++ * means releasing the lock. Need a bus reset.
17924 ++ */
17925 ++ set_host_byte(cmd, DID_ERROR);
17926 ++ complete_cmd(instance, cmd);
17927 ++ result = FAILED;
17928 ++ goto out;
17929 + }
17930 +
17931 + if (hostdata->connected == cmd) {
17932 + dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
17933 + hostdata->connected = NULL;
17934 +- if (do_abort(instance)) {
17935 +- set_host_byte(cmd, DID_ERROR);
17936 +- complete_cmd(instance, cmd);
17937 +- result = FAILED;
17938 +- goto out;
17939 +- }
17940 +- set_host_byte(cmd, DID_ABORT);
17941 + #ifdef REAL_DMA
17942 + hostdata->dma_len = 0;
17943 + #endif
17944 +- if (cmd->cmnd[0] == REQUEST_SENSE)
17945 +- complete_cmd(instance, cmd);
17946 +- else {
17947 +- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
17948 +-
17949 +- /* Perform autosense for this command */
17950 +- list_add(&ncmd->list, &hostdata->autosense);
17951 +- }
17952 +- }
17953 +-
17954 +- if (list_find_cmd(&hostdata->autosense, cmd)) {
17955 +- dsprintk(NDEBUG_ABORT, instance,
17956 +- "abort: found %p on sense queue\n", cmd);
17957 +- spin_unlock_irqrestore(&hostdata->lock, flags);
17958 +- queue_work(hostdata->work_q, &hostdata->main_task);
17959 +- msleep(1000);
17960 +- spin_lock_irqsave(&hostdata->lock, flags);
17961 +- if (list_del_cmd(&hostdata->autosense, cmd)) {
17962 +- dsprintk(NDEBUG_ABORT, instance,
17963 +- "abort: removed %p from sense queue\n", cmd);
17964 +- set_host_byte(cmd, DID_ABORT);
17965 +- complete_cmd(instance, cmd);
17966 +- goto out;
17967 +- }
17968 +- }
17969 +-
17970 +- if (hostdata->connected == cmd) {
17971 +- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
17972 +- hostdata->connected = NULL;
17973 + if (do_abort(instance)) {
17974 + set_host_byte(cmd, DID_ERROR);
17975 + complete_cmd(instance, cmd);
17976 +@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
17977 + goto out;
17978 + }
17979 + set_host_byte(cmd, DID_ABORT);
17980 +-#ifdef REAL_DMA
17981 +- hostdata->dma_len = 0;
17982 +-#endif
17983 ++ complete_cmd(instance, cmd);
17984 ++ goto out;
17985 ++ }
17986 ++
17987 ++ if (list_del_cmd(&hostdata->autosense, cmd)) {
17988 ++ dsprintk(NDEBUG_ABORT, instance,
17989 ++ "abort: removed %p from sense queue\n", cmd);
17990 ++ set_host_byte(cmd, DID_ERROR);
17991 + complete_cmd(instance, cmd);
17992 + }
17993 +
17994 +@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
17995 + * commands!
17996 + */
17997 +
17998 +- hostdata->selecting = NULL;
17999 ++ if (list_del_cmd(&hostdata->unissued, cmd)) {
18000 ++ cmd->result = DID_RESET << 16;
18001 ++ cmd->scsi_done(cmd);
18002 ++ }
18003 ++
18004 ++ if (hostdata->selecting) {
18005 ++ hostdata->selecting->result = DID_RESET << 16;
18006 ++ complete_cmd(instance, hostdata->selecting);
18007 ++ hostdata->selecting = NULL;
18008 ++ }
18009 +
18010 + list_for_each_entry(ncmd, &hostdata->disconnected, list) {
18011 + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
18012 +@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
18013 + set_host_byte(cmd, DID_RESET);
18014 + cmd->scsi_done(cmd);
18015 + }
18016 ++ INIT_LIST_HEAD(&hostdata->disconnected);
18017 +
18018 + list_for_each_entry(ncmd, &hostdata->autosense, list) {
18019 + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
18020 +@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
18021 + set_host_byte(cmd, DID_RESET);
18022 + cmd->scsi_done(cmd);
18023 + }
18024 ++ INIT_LIST_HEAD(&hostdata->autosense);
18025 +
18026 + if (hostdata->connected) {
18027 + set_host_byte(hostdata->connected, DID_RESET);
18028 +@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
18029 + hostdata->connected = NULL;
18030 + }
18031 +
18032 +- if (hostdata->sensing) {
18033 +- set_host_byte(hostdata->connected, DID_RESET);
18034 +- complete_cmd(instance, hostdata->sensing);
18035 +- hostdata->sensing = NULL;
18036 +- }
18037 +-
18038 + #ifdef SUPPORT_TAGS
18039 + free_all_tags(hostdata);
18040 + #endif
18041 +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
18042 +index cb9072a841be..069e5c50abd0 100644
18043 +--- a/drivers/scsi/be2iscsi/be_main.c
18044 ++++ b/drivers/scsi/be2iscsi/be_main.c
18045 +@@ -4468,6 +4468,7 @@ put_shost:
18046 + scsi_host_put(phba->shost);
18047 + free_kset:
18048 + iscsi_boot_destroy_kset(phba->boot_kset);
18049 ++ phba->boot_kset = NULL;
18050 + return -ENOMEM;
18051 + }
18052 +
18053 +diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
18054 +index e5647d59224f..0b331c9c0a8f 100644
18055 +--- a/drivers/scsi/device_handler/Kconfig
18056 ++++ b/drivers/scsi/device_handler/Kconfig
18057 +@@ -13,13 +13,13 @@ menuconfig SCSI_DH
18058 +
18059 + config SCSI_DH_RDAC
18060 + tristate "LSI RDAC Device Handler"
18061 +- depends on SCSI_DH
18062 ++ depends on SCSI_DH && SCSI
18063 + help
18064 + If you have a LSI RDAC select y. Otherwise, say N.
18065 +
18066 + config SCSI_DH_HP_SW
18067 + tristate "HP/COMPAQ MSA Device Handler"
18068 +- depends on SCSI_DH
18069 ++ depends on SCSI_DH && SCSI
18070 + help
18071 + If you have a HP/COMPAQ MSA device that requires START_STOP to
18072 + be sent to start it and cannot upgrade the firmware then select y.
18073 +@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
18074 +
18075 + config SCSI_DH_EMC
18076 + tristate "EMC CLARiiON Device Handler"
18077 +- depends on SCSI_DH
18078 ++ depends on SCSI_DH && SCSI
18079 + help
18080 + If you have a EMC CLARiiON select y. Otherwise, say N.
18081 +
18082 + config SCSI_DH_ALUA
18083 + tristate "SPC-3 ALUA Device Handler"
18084 +- depends on SCSI_DH
18085 ++ depends on SCSI_DH && SCSI
18086 + help
18087 + SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
18088 + Access (ALUA).
18089 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
18090 +index a544366a367e..f57d02c3b6cf 100644
18091 +--- a/drivers/scsi/lpfc/lpfc_init.c
18092 ++++ b/drivers/scsi/lpfc/lpfc_init.c
18093 +@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
18094 + }
18095 +
18096 + vports = lpfc_create_vport_work_array(phba);
18097 +- if (vports != NULL)
18098 ++ if (vports != NULL) {
18099 + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
18100 + struct Scsi_Host *shost;
18101 + shost = lpfc_shost_from_vport(vports[i]);
18102 +@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
18103 + }
18104 + spin_unlock_irq(shost->host_lock);
18105 + }
18106 +- lpfc_destroy_vport_work_array(phba, vports);
18107 ++ }
18108 ++ lpfc_destroy_vport_work_array(phba, vports);
18109 +
18110 + lpfc_unblock_mgmt_io(phba);
18111 + return 0;
18112 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
18113 +index 97a1c1c33b05..00ce3e269a43 100644
18114 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
18115 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
18116 +@@ -6282,12 +6282,13 @@ out:
18117 + }
18118 +
18119 + for (i = 0; i < ioc->sge_count; i++) {
18120 +- if (kbuff_arr[i])
18121 ++ if (kbuff_arr[i]) {
18122 + dma_free_coherent(&instance->pdev->dev,
18123 + le32_to_cpu(kern_sge32[i].length),
18124 + kbuff_arr[i],
18125 + le32_to_cpu(kern_sge32[i].phys_addr));
18126 + kbuff_arr[i] = NULL;
18127 ++ }
18128 + }
18129 +
18130 + megasas_return_cmd(instance, cmd);
18131 +diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
18132 +index 5d0ec42a9317..634254a52301 100644
18133 +--- a/drivers/scsi/qla1280.c
18134 ++++ b/drivers/scsi/qla1280.c
18135 +@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
18136 + .eh_bus_reset_handler = qla1280_eh_bus_reset,
18137 + .eh_host_reset_handler = qla1280_eh_adapter_reset,
18138 + .bios_param = qla1280_biosparam,
18139 +- .can_queue = 0xfffff,
18140 ++ .can_queue = MAX_OUTSTANDING_COMMANDS,
18141 + .this_id = -1,
18142 + .sg_tablesize = SG_ALL,
18143 + .use_clustering = ENABLE_CLUSTERING,
18144 +diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
18145 +index b1bf42b93fcc..1deb6adc411f 100644
18146 +--- a/drivers/scsi/scsi.c
18147 ++++ b/drivers/scsi/scsi.c
18148 +@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
18149 + int pg83_supported = 0;
18150 + unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
18151 +
18152 +- if (sdev->skip_vpd_pages)
18153 ++ if (!scsi_device_supports_vpd(sdev))
18154 + return;
18155 ++
18156 + retry_pg0:
18157 + vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
18158 + if (!vpd_buf)
18159 +diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
18160 +index c126966130ab..ce79de822e46 100644
18161 +--- a/drivers/scsi/scsi_common.c
18162 ++++ b/drivers/scsi/scsi_common.c
18163 +@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
18164 + ucp[3] = 0;
18165 + put_unaligned_be64(info, &ucp[4]);
18166 + } else if ((buf[0] & 0x7f) == 0x70) {
18167 +- buf[0] |= 0x80;
18168 +- put_unaligned_be64(info, &buf[3]);
18169 ++ /*
18170 ++ * Only set the 'VALID' bit if we can represent the value
18171 ++ * correctly; otherwise just fill out the lower bytes and
18172 ++ * clear the 'VALID' flag.
18173 ++ */
18174 ++ if (info <= 0xffffffffUL)
18175 ++ buf[0] |= 0x80;
18176 ++ else
18177 ++ buf[0] &= 0x7f;
18178 ++ put_unaligned_be32((u32)info, &buf[3]);
18179 + }
18180 +
18181 + return 0;
18182 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
18183 +index 6a820668d442..b7cc6027cb7b 100644
18184 +--- a/drivers/scsi/scsi_scan.c
18185 ++++ b/drivers/scsi/scsi_scan.c
18186 +@@ -315,6 +315,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
18187 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
18188 + unsigned long flags;
18189 +
18190 ++ BUG_ON(starget->state == STARGET_DEL);
18191 + starget->state = STARGET_DEL;
18192 + transport_destroy_device(dev);
18193 + spin_lock_irqsave(shost->host_lock, flags);
18194 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
18195 +index 00bc7218a7f8..9e5f893aa3ad 100644
18196 +--- a/drivers/scsi/scsi_sysfs.c
18197 ++++ b/drivers/scsi/scsi_sysfs.c
18198 +@@ -1272,18 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
18199 + void scsi_remove_target(struct device *dev)
18200 + {
18201 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
18202 +- struct scsi_target *starget, *last_target = NULL;
18203 ++ struct scsi_target *starget;
18204 + unsigned long flags;
18205 +
18206 + restart:
18207 + spin_lock_irqsave(shost->host_lock, flags);
18208 + list_for_each_entry(starget, &shost->__targets, siblings) {
18209 + if (starget->state == STARGET_DEL ||
18210 +- starget == last_target)
18211 ++ starget->state == STARGET_REMOVE)
18212 + continue;
18213 + if (starget->dev.parent == dev || &starget->dev == dev) {
18214 + kref_get(&starget->reap_ref);
18215 +- last_target = starget;
18216 ++ starget->state = STARGET_REMOVE;
18217 + spin_unlock_irqrestore(shost->host_lock, flags);
18218 + __scsi_remove_target(starget);
18219 + scsi_target_reap(starget);
18220 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
18221 +index d749da765df1..974ca5b45f8d 100644
18222 +--- a/drivers/scsi/sd.c
18223 ++++ b/drivers/scsi/sd.c
18224 +@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
18225 + */
18226 + if (sdkp->lbprz) {
18227 + q->limits.discard_alignment = 0;
18228 +- q->limits.discard_granularity = 1;
18229 ++ q->limits.discard_granularity = logical_block_size;
18230 + } else {
18231 + q->limits.discard_alignment = sdkp->unmap_alignment *
18232 + logical_block_size;
18233 +@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
18234 + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
18235 + struct scsi_device *sdp = sdkp->device;
18236 + struct Scsi_Host *host = sdp->host;
18237 ++ sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
18238 + int diskinfo[4];
18239 +
18240 + /* default to most commonly used values */
18241 +- diskinfo[0] = 0x40; /* 1 << 6 */
18242 +- diskinfo[1] = 0x20; /* 1 << 5 */
18243 +- diskinfo[2] = sdkp->capacity >> 11;
18244 +-
18245 ++ diskinfo[0] = 0x40; /* 1 << 6 */
18246 ++ diskinfo[1] = 0x20; /* 1 << 5 */
18247 ++ diskinfo[2] = capacity >> 11;
18248 ++
18249 + /* override with calculated, extended default, or driver values */
18250 + if (host->hostt->bios_param)
18251 +- host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
18252 ++ host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
18253 + else
18254 +- scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
18255 ++ scsicam_bios_param(bdev, capacity, diskinfo);
18256 +
18257 + geo->heads = diskinfo[0];
18258 + geo->sectors = diskinfo[1];
18259 +@@ -2337,14 +2338,6 @@ got_data:
18260 + if (sdkp->capacity > 0xffffffff)
18261 + sdp->use_16_for_rw = 1;
18262 +
18263 +- /* Rescale capacity to 512-byte units */
18264 +- if (sector_size == 4096)
18265 +- sdkp->capacity <<= 3;
18266 +- else if (sector_size == 2048)
18267 +- sdkp->capacity <<= 2;
18268 +- else if (sector_size == 1024)
18269 +- sdkp->capacity <<= 1;
18270 +-
18271 + blk_queue_physical_block_size(sdp->request_queue,
18272 + sdkp->physical_block_size);
18273 + sdkp->device->sector_size = sector_size;
18274 +@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
18275 + sdkp->ws10 = 1;
18276 + }
18277 +
18278 +-static int sd_try_extended_inquiry(struct scsi_device *sdp)
18279 +-{
18280 +- /* Attempt VPD inquiry if the device blacklist explicitly calls
18281 +- * for it.
18282 +- */
18283 +- if (sdp->try_vpd_pages)
18284 +- return 1;
18285 +- /*
18286 +- * Although VPD inquiries can go to SCSI-2 type devices,
18287 +- * some USB ones crash on receiving them, and the pages
18288 +- * we currently ask for are for SPC-3 and beyond
18289 +- */
18290 +- if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
18291 +- return 1;
18292 +- return 0;
18293 +-}
18294 +-
18295 +-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
18296 +-{
18297 +- return blocks << (ilog2(sdev->sector_size) - 9);
18298 +-}
18299 +-
18300 + /**
18301 + * sd_revalidate_disk - called the first time a new disk is seen,
18302 + * performs disk spin up, read_capacity, etc.
18303 +@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
18304 + if (sdkp->media_present) {
18305 + sd_read_capacity(sdkp, buffer);
18306 +
18307 +- if (sd_try_extended_inquiry(sdp)) {
18308 ++ if (scsi_device_supports_vpd(sdp)) {
18309 + sd_read_block_provisioning(sdkp);
18310 + sd_read_block_limits(sdkp);
18311 + sd_read_block_characteristics(sdkp);
18312 +@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
18313 + /* Combine with controller limits */
18314 + q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
18315 +
18316 +- set_capacity(disk, sdkp->capacity);
18317 ++ set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
18318 + sd_config_write_same(sdkp);
18319 + kfree(buffer);
18320 +
18321 +diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
18322 +index 5f2a84aff29f..654630bb7d0e 100644
18323 +--- a/drivers/scsi/sd.h
18324 ++++ b/drivers/scsi/sd.h
18325 +@@ -65,7 +65,7 @@ struct scsi_disk {
18326 + struct device dev;
18327 + struct gendisk *disk;
18328 + atomic_t openers;
18329 +- sector_t capacity; /* size in 512-byte sectors */
18330 ++ sector_t capacity; /* size in logical blocks */
18331 + u32 max_xfer_blocks;
18332 + u32 opt_xfer_blocks;
18333 + u32 max_ws_blocks;
18334 +@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
18335 + return 0;
18336 + }
18337 +
18338 ++static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
18339 ++{
18340 ++ return blocks << (ilog2(sdev->sector_size) - 9);
18341 ++}
18342 ++
18343 + /*
18344 + * A DIF-capable target device can be formatted with different
18345 + * protection schemes. Currently 0 through 3 are defined:
18346 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
18347 +index 5e820674432c..ae7d9bdf409c 100644
18348 +--- a/drivers/scsi/sg.c
18349 ++++ b/drivers/scsi/sg.c
18350 +@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
18351 + else
18352 + hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
18353 + hp->dxfer_len = mxsize;
18354 +- if (hp->dxfer_direction == SG_DXFER_TO_DEV)
18355 ++ if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
18356 ++ (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
18357 + hp->dxferp = (char __user *)buf + cmd_size;
18358 + else
18359 + hp->dxferp = NULL;
18360 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
18361 +index 292c04eec9ad..3ddcabb790a8 100644
18362 +--- a/drivers/scsi/storvsc_drv.c
18363 ++++ b/drivers/scsi/storvsc_drv.c
18364 +@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
18365 + do_work = true;
18366 + process_err_fn = storvsc_remove_lun;
18367 + break;
18368 +- case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
18369 +- if ((asc == 0x2a) && (ascq == 0x9)) {
18370 ++ case SRB_STATUS_ABORTED:
18371 ++ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
18372 ++ (asc == 0x2a) && (ascq == 0x9)) {
18373 + do_work = true;
18374 + process_err_fn = storvsc_device_scan;
18375 + /*
18376 +diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
18377 +index 534c58937a56..4a65c5bda146 100644
18378 +--- a/drivers/soc/rockchip/pm_domains.c
18379 ++++ b/drivers/soc/rockchip/pm_domains.c
18380 +@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
18381 + if (error) {
18382 + dev_err(dev, "failed to handle node %s: %d\n",
18383 + node->name, error);
18384 ++ of_node_put(node);
18385 + goto err_out;
18386 + }
18387 + }
18388 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
18389 +index ab9914ad8365..64232ecbb821 100644
18390 +--- a/drivers/spi/spi-pxa2xx.c
18391 ++++ b/drivers/spi/spi-pxa2xx.c
18392 +@@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
18393 + .reg_general = -1,
18394 + .reg_ssp = 0x20,
18395 + .reg_cs_ctrl = 0x24,
18396 +- .reg_capabilities = 0xfc,
18397 ++ .reg_capabilities = -1,
18398 + .rx_threshold = 1,
18399 + .tx_threshold_lo = 32,
18400 + .tx_threshold_hi = 56,
18401 +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
18402 +index 7cb1b2d710c1..475fb44c1883 100644
18403 +--- a/drivers/spi/spi-rockchip.c
18404 ++++ b/drivers/spi/spi-rockchip.c
18405 +@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
18406 + static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
18407 + {
18408 + u32 ser;
18409 +- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
18410 ++ struct spi_master *master = spi->master;
18411 ++ struct rockchip_spi *rs = spi_master_get_devdata(master);
18412 ++
18413 ++ pm_runtime_get_sync(rs->dev);
18414 +
18415 + ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
18416 +
18417 +@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
18418 + ser &= ~(1 << spi->chip_select);
18419 +
18420 + writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
18421 ++
18422 ++ pm_runtime_put_sync(rs->dev);
18423 + }
18424 +
18425 + static int rockchip_spi_prepare_message(struct spi_master *master,
18426 +diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
18427 +index 64318fcfacf2..5044c6198332 100644
18428 +--- a/drivers/spi/spi-ti-qspi.c
18429 ++++ b/drivers/spi/spi-ti-qspi.c
18430 +@@ -94,6 +94,7 @@ struct ti_qspi {
18431 + #define QSPI_FLEN(n) ((n - 1) << 0)
18432 + #define QSPI_WLEN_MAX_BITS 128
18433 + #define QSPI_WLEN_MAX_BYTES 16
18434 ++#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
18435 +
18436 + /* STATUS REGISTER */
18437 + #define BUSY 0x01
18438 +@@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
18439 + return -ETIMEDOUT;
18440 + }
18441 +
18442 +-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18443 ++static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
18444 ++ int count)
18445 + {
18446 +- int wlen, count, xfer_len;
18447 ++ int wlen, xfer_len;
18448 + unsigned int cmd;
18449 + const u8 *txbuf;
18450 + u32 data;
18451 +
18452 + txbuf = t->tx_buf;
18453 + cmd = qspi->cmd | QSPI_WR_SNGL;
18454 +- count = t->len;
18455 + wlen = t->bits_per_word >> 3; /* in bytes */
18456 + xfer_len = wlen;
18457 +
18458 +@@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18459 + return 0;
18460 + }
18461 +
18462 +-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18463 ++static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
18464 ++ int count)
18465 + {
18466 +- int wlen, count;
18467 ++ int wlen;
18468 + unsigned int cmd;
18469 + u8 *rxbuf;
18470 +
18471 +@@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18472 + cmd |= QSPI_RD_SNGL;
18473 + break;
18474 + }
18475 +- count = t->len;
18476 + wlen = t->bits_per_word >> 3; /* in bytes */
18477 +
18478 + while (count) {
18479 +@@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18480 + return 0;
18481 + }
18482 +
18483 +-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18484 ++static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
18485 ++ int count)
18486 + {
18487 + int ret;
18488 +
18489 + if (t->tx_buf) {
18490 +- ret = qspi_write_msg(qspi, t);
18491 ++ ret = qspi_write_msg(qspi, t, count);
18492 + if (ret) {
18493 + dev_dbg(qspi->dev, "Error while writing\n");
18494 + return ret;
18495 +@@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
18496 + }
18497 +
18498 + if (t->rx_buf) {
18499 +- ret = qspi_read_msg(qspi, t);
18500 ++ ret = qspi_read_msg(qspi, t, count);
18501 + if (ret) {
18502 + dev_dbg(qspi->dev, "Error while reading\n");
18503 + return ret;
18504 +@@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
18505 + struct spi_device *spi = m->spi;
18506 + struct spi_transfer *t;
18507 + int status = 0, ret;
18508 +- int frame_length;
18509 ++ unsigned int frame_len_words, transfer_len_words;
18510 ++ int wlen;
18511 +
18512 + /* setup device control reg */
18513 + qspi->dc = 0;
18514 +@@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
18515 + if (spi->mode & SPI_CS_HIGH)
18516 + qspi->dc |= QSPI_CSPOL(spi->chip_select);
18517 +
18518 +- frame_length = (m->frame_length << 3) / spi->bits_per_word;
18519 +-
18520 +- frame_length = clamp(frame_length, 0, QSPI_FRAME);
18521 ++ frame_len_words = 0;
18522 ++ list_for_each_entry(t, &m->transfers, transfer_list)
18523 ++ frame_len_words += t->len / (t->bits_per_word >> 3);
18524 ++ frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
18525 +
18526 + /* setup command reg */
18527 + qspi->cmd = 0;
18528 + qspi->cmd |= QSPI_EN_CS(spi->chip_select);
18529 +- qspi->cmd |= QSPI_FLEN(frame_length);
18530 ++ qspi->cmd |= QSPI_FLEN(frame_len_words);
18531 +
18532 + ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
18533 +
18534 + mutex_lock(&qspi->list_lock);
18535 +
18536 + list_for_each_entry(t, &m->transfers, transfer_list) {
18537 +- qspi->cmd |= QSPI_WLEN(t->bits_per_word);
18538 ++ qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
18539 ++ QSPI_WLEN(t->bits_per_word));
18540 ++
18541 ++ wlen = t->bits_per_word >> 3;
18542 ++ transfer_len_words = min(t->len / wlen, frame_len_words);
18543 +
18544 +- ret = qspi_transfer_msg(qspi, t);
18545 ++ ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
18546 + if (ret) {
18547 + dev_dbg(qspi->dev, "transfer message failed\n");
18548 + mutex_unlock(&qspi->list_lock);
18549 + return -EINVAL;
18550 + }
18551 +
18552 +- m->actual_length += t->len;
18553 ++ m->actual_length += transfer_len_words * wlen;
18554 ++ frame_len_words -= transfer_len_words;
18555 ++ if (frame_len_words == 0)
18556 ++ break;
18557 + }
18558 +
18559 + mutex_unlock(&qspi->list_lock);
18560 +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
18561 +index e237e9f3312d..df560216d702 100644
18562 +--- a/drivers/staging/android/ion/ion.c
18563 ++++ b/drivers/staging/android/ion/ion.c
18564 +@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
18565 + * memory coming from the heaps is ready for dma, ie if it has a
18566 + * cached mapping that mapping has been invalidated
18567 + */
18568 +- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
18569 ++ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
18570 + sg_dma_address(sg) = sg_phys(sg);
18571 ++ sg_dma_len(sg) = sg->length;
18572 ++ }
18573 + mutex_lock(&dev->buffer_lock);
18574 + ion_buffer_add(dev, buffer);
18575 + mutex_unlock(&dev->buffer_lock);
18576 +diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
18577 +index b8dcf5a26cc4..58d46893e5ff 100644
18578 +--- a/drivers/staging/android/ion/ion_test.c
18579 ++++ b/drivers/staging/android/ion/ion_test.c
18580 +@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
18581 + {
18582 + ion_test_pdev = platform_device_register_simple("ion-test",
18583 + -1, NULL, 0);
18584 +- if (!ion_test_pdev)
18585 +- return -ENODEV;
18586 ++ if (IS_ERR(ion_test_pdev))
18587 ++ return PTR_ERR(ion_test_pdev);
18588 +
18589 + return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
18590 + }
18591 +diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
18592 +index 940781183fac..3be10963f98b 100644
18593 +--- a/drivers/staging/comedi/drivers/das1800.c
18594 ++++ b/drivers/staging/comedi/drivers/das1800.c
18595 +@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
18596 + struct comedi_isadma_desc *desc;
18597 + int i;
18598 +
18599 +- outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */
18600 +- outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */
18601 +- outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */
18602 +-
18603 +- for (i = 0; i < 2; i++) {
18604 +- desc = &dma->desc[i];
18605 +- if (desc->chan)
18606 +- comedi_isadma_disable(desc->chan);
18607 ++ /* disable and stop conversions */
18608 ++ outb(0x0, dev->iobase + DAS1800_STATUS);
18609 ++ outb(0x0, dev->iobase + DAS1800_CONTROL_B);
18610 ++ outb(0x0, dev->iobase + DAS1800_CONTROL_A);
18611 ++
18612 ++ if (dma) {
18613 ++ for (i = 0; i < 2; i++) {
18614 ++ desc = &dma->desc[i];
18615 ++ if (desc->chan)
18616 ++ comedi_isadma_disable(desc->chan);
18617 ++ }
18618 + }
18619 +
18620 + return 0;
18621 +@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
18622 + {
18623 + struct das1800_private *devpriv = dev->private;
18624 + struct comedi_isadma *dma = devpriv->dma;
18625 +- struct comedi_isadma_desc *desc = &dma->desc[0];
18626 ++ struct comedi_isadma_desc *desc;
18627 + unsigned int bytes;
18628 +
18629 + if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
18630 + return;
18631 +
18632 + dma->cur_dma = 0;
18633 ++ desc = &dma->desc[0];
18634 +
18635 + /* determine a dma transfer size to fill buffer in 0.3 sec */
18636 + bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
18637 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
18638 +index 5e8130a7d670..0e9f77924e26 100644
18639 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
18640 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
18641 +@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
18642 + {
18643 + if (dev->mmio)
18644 + writel(data, dev->mmio + reg);
18645 +-
18646 +- outl(data, dev->iobase + reg);
18647 ++ else
18648 ++ outl(data, dev->iobase + reg);
18649 + }
18650 +
18651 + static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
18652 + {
18653 + if (dev->mmio)
18654 + writew(data, dev->mmio + reg);
18655 +-
18656 +- outw(data, dev->iobase + reg);
18657 ++ else
18658 ++ outw(data, dev->iobase + reg);
18659 + }
18660 +
18661 + static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
18662 + {
18663 + if (dev->mmio)
18664 + writeb(data, dev->mmio + reg);
18665 +-
18666 +- outb(data, dev->iobase + reg);
18667 ++ else
18668 ++ outb(data, dev->iobase + reg);
18669 + }
18670 +
18671 + static uint32_t ni_readl(struct comedi_device *dev, int reg)
18672 +diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
18673 +index 437f723bb34d..823e47910004 100644
18674 +--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
18675 ++++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
18676 +@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
18677 + unsigned long flags;
18678 + int ret = 0;
18679 +
18680 +- if (trig_num != cmd->start_src)
18681 ++ if (trig_num != cmd->start_arg)
18682 + return -EINVAL;
18683 +
18684 + spin_lock_irqsave(&counter->lock, flags);
18685 +diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
18686 +index 05de0dad8762..4c6f1d7d2eaf 100644
18687 +--- a/drivers/staging/rdma/hfi1/TODO
18688 ++++ b/drivers/staging/rdma/hfi1/TODO
18689 +@@ -3,4 +3,4 @@ July, 2015
18690 + - Remove unneeded file entries in sysfs
18691 + - Remove software processing of IB protocol and place in library for use
18692 + by qib, ipath (if still present), hfi1, and eventually soft-roce
18693 +-
18694 ++- Replace incorrect uAPI
18695 +diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
18696 +index d57d549052c8..29ae777556d2 100644
18697 +--- a/drivers/staging/rdma/hfi1/file_ops.c
18698 ++++ b/drivers/staging/rdma/hfi1/file_ops.c
18699 +@@ -52,6 +52,8 @@
18700 + #include <linux/vmalloc.h>
18701 + #include <linux/io.h>
18702 +
18703 ++#include <rdma/ib.h>
18704 ++
18705 + #include "hfi.h"
18706 + #include "pio.h"
18707 + #include "device.h"
18708 +@@ -194,6 +196,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
18709 + int uctxt_required = 1;
18710 + int must_be_root = 0;
18711 +
18712 ++ /* FIXME: This interface cannot continue out of staging */
18713 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
18714 ++ return -EACCES;
18715 ++
18716 + if (count < sizeof(cmd)) {
18717 + ret = -EINVAL;
18718 + goto bail;
18719 +diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
18720 +index 86de50c9f7f5..b3d6541b3896 100644
18721 +--- a/drivers/staging/wilc1000/wilc_spi.c
18722 ++++ b/drivers/staging/wilc1000/wilc_spi.c
18723 +@@ -120,8 +120,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
18724 +
18725 + #define USE_SPI_DMA 0
18726 +
18727 +-static const struct wilc1000_ops wilc1000_spi_ops;
18728 +-
18729 + static int wilc_bus_probe(struct spi_device *spi)
18730 + {
18731 + int ret, gpio;
18732 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
18733 +index 867bc6d0a68a..43d8b42c0f22 100644
18734 +--- a/drivers/target/target_core_transport.c
18735 ++++ b/drivers/target/target_core_transport.c
18736 +@@ -2596,8 +2596,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
18737 +
18738 + list_for_each_entry_safe(se_cmd, tmp_cmd,
18739 + &se_sess->sess_wait_list, se_cmd_list) {
18740 +- list_del_init(&se_cmd->se_cmd_list);
18741 +-
18742 + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
18743 + " %d\n", se_cmd, se_cmd->t_state,
18744 + se_cmd->se_tfo->get_cmd_state(se_cmd));
18745 +diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
18746 +index b58e3fb9b311..433085a97626 100644
18747 +--- a/drivers/thermal/rockchip_thermal.c
18748 ++++ b/drivers/thermal/rockchip_thermal.c
18749 +@@ -693,15 +693,14 @@ static int rockchip_configure_from_dt(struct device *dev,
18750 + thermal->chip->tshut_temp);
18751 + thermal->tshut_temp = thermal->chip->tshut_temp;
18752 + } else {
18753 ++ if (shut_temp > INT_MAX) {
18754 ++ dev_err(dev, "Invalid tshut temperature specified: %d\n",
18755 ++ shut_temp);
18756 ++ return -ERANGE;
18757 ++ }
18758 + thermal->tshut_temp = shut_temp;
18759 + }
18760 +
18761 +- if (thermal->tshut_temp > INT_MAX) {
18762 +- dev_err(dev, "Invalid tshut temperature specified: %d\n",
18763 +- thermal->tshut_temp);
18764 +- return -ERANGE;
18765 +- }
18766 +-
18767 + if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
18768 + dev_warn(dev,
18769 + "Missing tshut mode property, using default (%s)\n",
18770 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
18771 +index a0a8fd1235e2..d4b54653ecf8 100644
18772 +--- a/drivers/thermal/thermal_core.c
18773 ++++ b/drivers/thermal/thermal_core.c
18774 +@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
18775 + {
18776 + enum thermal_trip_type type;
18777 +
18778 ++ /* Ignore disabled trip points */
18779 ++ if (test_bit(trip, &tz->trips_disabled))
18780 ++ return;
18781 ++
18782 + tz->ops->get_trip_type(tz, trip, &type);
18783 +
18784 + if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
18785 +@@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
18786 + {
18787 + struct thermal_zone_device *tz;
18788 + enum thermal_trip_type trip_type;
18789 ++ int trip_temp;
18790 + int result;
18791 + int count;
18792 + int passive = 0;
18793 +@@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
18794 + goto unregister;
18795 +
18796 + for (count = 0; count < trips; count++) {
18797 +- tz->ops->get_trip_type(tz, count, &trip_type);
18798 ++ if (tz->ops->get_trip_type(tz, count, &trip_type))
18799 ++ set_bit(count, &tz->trips_disabled);
18800 + if (trip_type == THERMAL_TRIP_PASSIVE)
18801 + passive = 1;
18802 ++ if (tz->ops->get_trip_temp(tz, count, &trip_temp))
18803 ++ set_bit(count, &tz->trips_disabled);
18804 ++ /* Check for bogus trip points */
18805 ++ if (trip_temp == 0)
18806 ++ set_bit(count, &tz->trips_disabled);
18807 + }
18808 +
18809 + if (!passive) {
18810 +diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
18811 +index 0dde34e3a7c5..545c60c826a1 100644
18812 +--- a/drivers/thunderbolt/eeprom.c
18813 ++++ b/drivers/thunderbolt/eeprom.c
18814 +@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
18815 + return tb_drom_parse_entries(sw);
18816 + err:
18817 + kfree(sw->drom);
18818 ++ sw->drom = NULL;
18819 + return -EIO;
18820 +
18821 + }
18822 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
18823 +index c3fe026d3168..9aff37186246 100644
18824 +--- a/drivers/tty/n_gsm.c
18825 ++++ b/drivers/tty/n_gsm.c
18826 +@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
18827 + }
18828 + }
18829 + spin_unlock(&gsm_mux_lock);
18830 +- WARN_ON(i == MAX_MUX);
18831 ++ /* open failed before registering => nothing to do */
18832 ++ if (i == MAX_MUX)
18833 ++ return;
18834 +
18835 + /* In theory disconnecting DLCI 0 is sufficient but for some
18836 + modems this is apparently not the case. */
18837 +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
18838 +index bbc4ce66c2c1..644ddb841d9f 100644
18839 +--- a/drivers/tty/n_hdlc.c
18840 ++++ b/drivers/tty/n_hdlc.c
18841 +@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
18842 + add_wait_queue(&tty->read_wait, &wait);
18843 +
18844 + for (;;) {
18845 +- if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
18846 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
18847 + ret = -EIO;
18848 + break;
18849 + }
18850 +@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
18851 + /* set bits for operations that won't block */
18852 + if (n_hdlc->rx_buf_list.head)
18853 + mask |= POLLIN | POLLRDNORM; /* readable */
18854 +- if (test_bit(TTY_OTHER_DONE, &tty->flags))
18855 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
18856 + mask |= POLLHUP;
18857 + if (tty_hung_up_p(filp))
18858 + mask |= POLLHUP;
18859 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
18860 +index b280abaad91b..c12def71ed37 100644
18861 +--- a/drivers/tty/n_tty.c
18862 ++++ b/drivers/tty/n_tty.c
18863 +@@ -1963,18 +1963,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
18864 + return ldata->commit_head - ldata->read_tail >= amt;
18865 + }
18866 +
18867 +-static inline int check_other_done(struct tty_struct *tty)
18868 +-{
18869 +- int done = test_bit(TTY_OTHER_DONE, &tty->flags);
18870 +- if (done) {
18871 +- /* paired with cmpxchg() in check_other_closed(); ensures
18872 +- * read buffer head index is not stale
18873 +- */
18874 +- smp_mb__after_atomic();
18875 +- }
18876 +- return done;
18877 +-}
18878 +-
18879 + /**
18880 + * copy_from_read_buf - copy read data directly
18881 + * @tty: terminal device
18882 +@@ -2170,7 +2158,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
18883 + struct n_tty_data *ldata = tty->disc_data;
18884 + unsigned char __user *b = buf;
18885 + DEFINE_WAIT_FUNC(wait, woken_wake_function);
18886 +- int c, done;
18887 ++ int c;
18888 + int minimum, time;
18889 + ssize_t retval = 0;
18890 + long timeout;
18891 +@@ -2238,32 +2226,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
18892 + ((minimum - (b - buf)) >= 1))
18893 + ldata->minimum_to_wake = (minimum - (b - buf));
18894 +
18895 +- done = check_other_done(tty);
18896 +-
18897 + if (!input_available_p(tty, 0)) {
18898 +- if (done) {
18899 +- retval = -EIO;
18900 +- break;
18901 +- }
18902 +- if (tty_hung_up_p(file))
18903 +- break;
18904 +- if (!timeout)
18905 +- break;
18906 +- if (file->f_flags & O_NONBLOCK) {
18907 +- retval = -EAGAIN;
18908 +- break;
18909 +- }
18910 +- if (signal_pending(current)) {
18911 +- retval = -ERESTARTSYS;
18912 +- break;
18913 +- }
18914 + up_read(&tty->termios_rwsem);
18915 ++ tty_buffer_flush_work(tty->port);
18916 ++ down_read(&tty->termios_rwsem);
18917 ++ if (!input_available_p(tty, 0)) {
18918 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
18919 ++ retval = -EIO;
18920 ++ break;
18921 ++ }
18922 ++ if (tty_hung_up_p(file))
18923 ++ break;
18924 ++ if (!timeout)
18925 ++ break;
18926 ++ if (file->f_flags & O_NONBLOCK) {
18927 ++ retval = -EAGAIN;
18928 ++ break;
18929 ++ }
18930 ++ if (signal_pending(current)) {
18931 ++ retval = -ERESTARTSYS;
18932 ++ break;
18933 ++ }
18934 ++ up_read(&tty->termios_rwsem);
18935 +
18936 +- timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
18937 +- timeout);
18938 ++ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
18939 ++ timeout);
18940 +
18941 +- down_read(&tty->termios_rwsem);
18942 +- continue;
18943 ++ down_read(&tty->termios_rwsem);
18944 ++ continue;
18945 ++ }
18946 + }
18947 +
18948 + if (ldata->icanon && !L_EXTPROC(tty)) {
18949 +@@ -2445,12 +2436,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
18950 +
18951 + poll_wait(file, &tty->read_wait, wait);
18952 + poll_wait(file, &tty->write_wait, wait);
18953 +- if (check_other_done(tty))
18954 +- mask |= POLLHUP;
18955 + if (input_available_p(tty, 1))
18956 + mask |= POLLIN | POLLRDNORM;
18957 ++ else {
18958 ++ tty_buffer_flush_work(tty->port);
18959 ++ if (input_available_p(tty, 1))
18960 ++ mask |= POLLIN | POLLRDNORM;
18961 ++ }
18962 + if (tty->packet && tty->link->ctrl_status)
18963 + mask |= POLLPRI | POLLIN | POLLRDNORM;
18964 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
18965 ++ mask |= POLLHUP;
18966 + if (tty_hung_up_p(file))
18967 + mask |= POLLHUP;
18968 + if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
18969 +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
18970 +index 2348fa613707..6427a39bd360 100644
18971 +--- a/drivers/tty/pty.c
18972 ++++ b/drivers/tty/pty.c
18973 +@@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
18974 + if (!tty->link)
18975 + return;
18976 + set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
18977 +- tty_flip_buffer_push(tty->link->port);
18978 ++ wake_up_interruptible(&tty->link->read_wait);
18979 + wake_up_interruptible(&tty->link->write_wait);
18980 + if (tty->driver->subtype == PTY_TYPE_MASTER) {
18981 + set_bit(TTY_OTHER_CLOSED, &tty->flags);
18982 +@@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
18983 + goto out;
18984 +
18985 + clear_bit(TTY_IO_ERROR, &tty->flags);
18986 +- /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
18987 + clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
18988 +- clear_bit(TTY_OTHER_DONE, &tty->link->flags);
18989 + set_bit(TTY_THROTTLED, &tty->flags);
18990 + return 0;
18991 +
18992 +diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
18993 +index 88531a36b69c..ed489880e62b 100644
18994 +--- a/drivers/tty/serial/8250/8250_mid.c
18995 ++++ b/drivers/tty/serial/8250/8250_mid.c
18996 +@@ -14,6 +14,7 @@
18997 + #include <linux/pci.h>
18998 +
18999 + #include <linux/dma/hsu.h>
19000 ++#include <linux/8250_pci.h>
19001 +
19002 + #include "8250.h"
19003 +
19004 +@@ -24,6 +25,7 @@
19005 + #define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8
19006 +
19007 + /* Intel MID Specific registers */
19008 ++#define INTEL_MID_UART_DNV_FISR 0x08
19009 + #define INTEL_MID_UART_PS 0x30
19010 + #define INTEL_MID_UART_MUL 0x34
19011 + #define INTEL_MID_UART_DIV 0x38
19012 +@@ -31,6 +33,7 @@
19013 + struct mid8250;
19014 +
19015 + struct mid8250_board {
19016 ++ unsigned int flags;
19017 + unsigned long freq;
19018 + unsigned int base_baud;
19019 + int (*setup)(struct mid8250 *, struct uart_port *p);
19020 +@@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
19021 + static int dnv_handle_irq(struct uart_port *p)
19022 + {
19023 + struct mid8250 *mid = p->private_data;
19024 +- int ret;
19025 +-
19026 +- ret = hsu_dma_irq(&mid->dma_chip, 0);
19027 +- ret |= hsu_dma_irq(&mid->dma_chip, 1);
19028 +-
19029 +- /* For now, letting the HW generate separate interrupt for the UART */
19030 +- if (ret)
19031 +- return ret;
19032 +-
19033 +- return serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
19034 ++ unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
19035 ++ int ret = IRQ_NONE;
19036 ++
19037 ++ if (fisr & BIT(2))
19038 ++ ret |= hsu_dma_irq(&mid->dma_chip, 1);
19039 ++ if (fisr & BIT(1))
19040 ++ ret |= hsu_dma_irq(&mid->dma_chip, 0);
19041 ++ if (fisr & BIT(0))
19042 ++ ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
19043 ++ return ret;
19044 + }
19045 +
19046 + #define DNV_DMA_CHAN_OFFSET 0x80
19047 +@@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
19048 + {
19049 + struct hsu_dma_chip *chip = &mid->dma_chip;
19050 + struct pci_dev *pdev = to_pci_dev(p->dev);
19051 ++ unsigned int bar = FL_GET_BASE(mid->board->flags);
19052 + int ret;
19053 +
19054 + chip->dev = &pdev->dev;
19055 + chip->irq = pdev->irq;
19056 + chip->regs = p->membase;
19057 +- chip->length = pci_resource_len(pdev, 0);
19058 ++ chip->length = pci_resource_len(pdev, bar);
19059 + chip->offset = DNV_DMA_CHAN_OFFSET;
19060 +
19061 + /* Falling back to PIO mode if DMA probing fails */
19062 +@@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
19063 + {
19064 + struct uart_8250_port uart;
19065 + struct mid8250 *mid;
19066 ++ unsigned int bar;
19067 + int ret;
19068 +
19069 + ret = pcim_enable_device(pdev);
19070 +@@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
19071 + return -ENOMEM;
19072 +
19073 + mid->board = (struct mid8250_board *)id->driver_data;
19074 ++ bar = FL_GET_BASE(mid->board->flags);
19075 +
19076 + memset(&uart, 0, sizeof(struct uart_8250_port));
19077 +
19078 +@@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
19079 + uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
19080 + uart.port.set_termios = mid8250_set_termios;
19081 +
19082 +- uart.port.mapbase = pci_resource_start(pdev, 0);
19083 +- uart.port.membase = pcim_iomap(pdev, 0, 0);
19084 ++ uart.port.mapbase = pci_resource_start(pdev, bar);
19085 ++ uart.port.membase = pcim_iomap(pdev, bar, 0);
19086 + if (!uart.port.membase)
19087 + return -ENOMEM;
19088 +
19089 +@@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev)
19090 + }
19091 +
19092 + static const struct mid8250_board pnw_board = {
19093 ++ .flags = FL_BASE0,
19094 + .freq = 50000000,
19095 + .base_baud = 115200,
19096 + .setup = pnw_setup,
19097 + };
19098 +
19099 + static const struct mid8250_board tng_board = {
19100 ++ .flags = FL_BASE0,
19101 + .freq = 38400000,
19102 + .base_baud = 1843200,
19103 + .setup = tng_setup,
19104 + };
19105 +
19106 + static const struct mid8250_board dnv_board = {
19107 ++ .flags = FL_BASE1,
19108 + .freq = 133333333,
19109 + .base_baud = 115200,
19110 + .setup = dnv_setup,
19111 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
19112 +index 7cd6f9a90542..c1d4a8fa9be8 100644
19113 +--- a/drivers/tty/serial/8250/8250_pci.c
19114 ++++ b/drivers/tty/serial/8250/8250_pci.c
19115 +@@ -1401,6 +1401,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
19116 + unsigned long m, n;
19117 + u32 reg;
19118 +
19119 ++ /* Gracefully handle the B0 case: fall back to B9600 */
19120 ++ fuart = fuart ? fuart : 9600 * 16;
19121 ++
19122 + /* Get Fuart closer to Fref */
19123 + fuart *= rounddown_pow_of_two(fref / fuart);
19124 +
19125 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
19126 +index 8d262bce97e4..720b9465b12e 100644
19127 +--- a/drivers/tty/serial/8250/8250_port.c
19128 ++++ b/drivers/tty/serial/8250/8250_port.c
19129 +@@ -731,22 +731,16 @@ static int size_fifo(struct uart_8250_port *up)
19130 + */
19131 + static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
19132 + {
19133 +- unsigned char old_dll, old_dlm, old_lcr;
19134 +- unsigned int id;
19135 ++ unsigned char old_lcr;
19136 ++ unsigned int id, old_dl;
19137 +
19138 + old_lcr = serial_in(p, UART_LCR);
19139 + serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
19140 ++ old_dl = serial_dl_read(p);
19141 ++ serial_dl_write(p, 0);
19142 ++ id = serial_dl_read(p);
19143 ++ serial_dl_write(p, old_dl);
19144 +
19145 +- old_dll = serial_in(p, UART_DLL);
19146 +- old_dlm = serial_in(p, UART_DLM);
19147 +-
19148 +- serial_out(p, UART_DLL, 0);
19149 +- serial_out(p, UART_DLM, 0);
19150 +-
19151 +- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
19152 +-
19153 +- serial_out(p, UART_DLL, old_dll);
19154 +- serial_out(p, UART_DLM, old_dlm);
19155 + serial_out(p, UART_LCR, old_lcr);
19156 +
19157 + return id;
19158 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
19159 +index 1c0884d8ef32..172a8ccb0b63 100644
19160 +--- a/drivers/tty/serial/atmel_serial.c
19161 ++++ b/drivers/tty/serial/atmel_serial.c
19162 +@@ -273,6 +273,13 @@ static bool atmel_use_dma_rx(struct uart_port *port)
19163 + return atmel_port->use_dma_rx;
19164 + }
19165 +
19166 ++static bool atmel_use_fifo(struct uart_port *port)
19167 ++{
19168 ++ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
19169 ++
19170 ++ return atmel_port->fifo_size;
19171 ++}
19172 ++
19173 + static unsigned int atmel_get_lines_status(struct uart_port *port)
19174 + {
19175 + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
19176 +@@ -2082,7 +2089,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
19177 + mode |= ATMEL_US_USMODE_RS485;
19178 + } else if (termios->c_cflag & CRTSCTS) {
19179 + /* RS232 with hardware handshake (RTS/CTS) */
19180 +- mode |= ATMEL_US_USMODE_HWHS;
19181 ++ if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
19182 ++ dev_info(port->dev, "not enabling hardware flow control because DMA is used");
19183 ++ termios->c_cflag &= ~CRTSCTS;
19184 ++ } else {
19185 ++ mode |= ATMEL_US_USMODE_HWHS;
19186 ++ }
19187 + } else {
19188 + /* RS232 without hardware handshake */
19189 + mode |= ATMEL_US_USMODE_NORMAL;
19190 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
19191 +index d72cd736bdc6..8320173af846 100644
19192 +--- a/drivers/tty/serial/samsung.c
19193 ++++ b/drivers/tty/serial/samsung.c
19194 +@@ -1263,6 +1263,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
19195 + /* check to see if we need to change clock source */
19196 +
19197 + if (ourport->baudclk != clk) {
19198 ++ clk_prepare_enable(clk);
19199 ++
19200 + s3c24xx_serial_setsource(port, clk_sel);
19201 +
19202 + if (!IS_ERR(ourport->baudclk)) {
19203 +@@ -1270,8 +1272,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
19204 + ourport->baudclk = ERR_PTR(-EINVAL);
19205 + }
19206 +
19207 +- clk_prepare_enable(clk);
19208 +-
19209 + ourport->baudclk = clk;
19210 + ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
19211 + }
19212 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
19213 +index 3cd31e0d4bd9..fb31eecb708d 100644
19214 +--- a/drivers/tty/tty_buffer.c
19215 ++++ b/drivers/tty/tty_buffer.c
19216 +@@ -37,29 +37,6 @@
19217 +
19218 + #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
19219 +
19220 +-/*
19221 +- * If all tty flip buffers have been processed by flush_to_ldisc() or
19222 +- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
19223 +- * If so, wake the reader/poll to process
19224 +- */
19225 +-static inline void check_other_closed(struct tty_struct *tty)
19226 +-{
19227 +- unsigned long flags, old;
19228 +-
19229 +- /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
19230 +- for (flags = ACCESS_ONCE(tty->flags);
19231 +- test_bit(TTY_OTHER_CLOSED, &flags);
19232 +- ) {
19233 +- old = flags;
19234 +- __set_bit(TTY_OTHER_DONE, &flags);
19235 +- flags = cmpxchg(&tty->flags, old, flags);
19236 +- if (old == flags) {
19237 +- wake_up_interruptible(&tty->read_wait);
19238 +- break;
19239 +- }
19240 +- }
19241 +-}
19242 +-
19243 + /**
19244 + * tty_buffer_lock_exclusive - gain exclusive access to buffer
19245 + * tty_buffer_unlock_exclusive - release exclusive access
19246 +@@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
19247 + if (ld && ld->ops->flush_buffer)
19248 + ld->ops->flush_buffer(tty);
19249 +
19250 +- check_other_closed(tty);
19251 +-
19252 + atomic_dec(&buf->priority);
19253 + mutex_unlock(&buf->lock);
19254 + }
19255 +@@ -505,10 +480,8 @@ static void flush_to_ldisc(struct work_struct *work)
19256 + */
19257 + count = smp_load_acquire(&head->commit) - head->read;
19258 + if (!count) {
19259 +- if (next == NULL) {
19260 +- check_other_closed(tty);
19261 ++ if (next == NULL)
19262 + break;
19263 +- }
19264 + buf->head = next;
19265 + tty_buffer_free(port, head);
19266 + continue;
19267 +@@ -597,3 +570,8 @@ bool tty_buffer_cancel_work(struct tty_port *port)
19268 + {
19269 + return cancel_work_sync(&port->buf.work);
19270 + }
19271 ++
19272 ++void tty_buffer_flush_work(struct tty_port *port)
19273 ++{
19274 ++ flush_work(&port->buf.work);
19275 ++}
19276 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
19277 +index bd51bdd0a7bf..2b5382ea4842 100644
19278 +--- a/drivers/tty/vt/vt.c
19279 ++++ b/drivers/tty/vt/vt.c
19280 +@@ -3583,9 +3583,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
19281 + goto err;
19282 +
19283 + desc = csw->con_startup();
19284 +-
19285 +- if (!desc)
19286 ++ if (!desc) {
19287 ++ retval = -ENODEV;
19288 + goto err;
19289 ++ }
19290 +
19291 + retval = -EINVAL;
19292 +
19293 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
19294 +index fa4e23930614..d37fdcc3143c 100644
19295 +--- a/drivers/usb/class/cdc-acm.c
19296 ++++ b/drivers/usb/class/cdc-acm.c
19297 +@@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
19298 + if (quirks == NO_UNION_NORMAL) {
19299 + data_interface = usb_ifnum_to_if(usb_dev, 1);
19300 + control_interface = usb_ifnum_to_if(usb_dev, 0);
19301 ++ /* we would crash */
19302 ++ if (!data_interface || !control_interface)
19303 ++ return -ENODEV;
19304 + goto skip_normal_probe;
19305 + }
19306 +
19307 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
19308 +index 56593a9a8726..dadd1e8dfe09 100644
19309 +--- a/drivers/usb/core/driver.c
19310 ++++ b/drivers/usb/core/driver.c
19311 +@@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev)
19312 + struct usb_device *udev = interface_to_usbdev(intf);
19313 + const struct usb_device_id *id;
19314 + int error = -ENODEV;
19315 +- int lpm_disable_error;
19316 ++ int lpm_disable_error = -ENODEV;
19317 +
19318 + dev_dbg(dev, "%s\n", __func__);
19319 +
19320 +@@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev)
19321 + * setting during probe, that should also be fine. usb_set_interface()
19322 + * will attempt to disable LPM, and fail if it can't disable it.
19323 + */
19324 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
19325 +- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
19326 +- dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
19327 +- __func__, driver->name);
19328 +- error = lpm_disable_error;
19329 +- goto err;
19330 ++ if (driver->disable_hub_initiated_lpm) {
19331 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
19332 ++ if (lpm_disable_error) {
19333 ++ dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
19334 ++ __func__, driver->name);
19335 ++ error = lpm_disable_error;
19336 ++ goto err;
19337 ++ }
19338 + }
19339 +
19340 + /* Carry out a deferred switch to altsetting 0 */
19341 +@@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev)
19342 + struct usb_interface *intf = to_usb_interface(dev);
19343 + struct usb_host_endpoint *ep, **eps = NULL;
19344 + struct usb_device *udev;
19345 +- int i, j, error, r, lpm_disable_error;
19346 ++ int i, j, error, r;
19347 ++ int lpm_disable_error = -ENODEV;
19348 +
19349 + intf->condition = USB_INTERFACE_UNBINDING;
19350 +
19351 +@@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev)
19352 + udev = interface_to_usbdev(intf);
19353 + error = usb_autoresume_device(udev);
19354 +
19355 +- /* Hub-initiated LPM policy may change, so attempt to disable LPM until
19356 ++ /* If hub-initiated LPM policy may change, attempt to disable LPM until
19357 + * the driver is unbound. If LPM isn't disabled, that's fine because it
19358 + * wouldn't be enabled unless all the bound interfaces supported
19359 + * hub-initiated LPM.
19360 + */
19361 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
19362 ++ if (driver->disable_hub_initiated_lpm)
19363 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
19364 +
19365 + /*
19366 + * Terminate all URBs for this interface unless the driver
19367 +@@ -502,11 +506,15 @@ static int usb_unbind_interface(struct device *dev)
19368 + int usb_driver_claim_interface(struct usb_driver *driver,
19369 + struct usb_interface *iface, void *priv)
19370 + {
19371 +- struct device *dev = &iface->dev;
19372 ++ struct device *dev;
19373 + struct usb_device *udev;
19374 + int retval = 0;
19375 +- int lpm_disable_error;
19376 ++ int lpm_disable_error = -ENODEV;
19377 +
19378 ++ if (!iface)
19379 ++ return -ENODEV;
19380 ++
19381 ++ dev = &iface->dev;
19382 + if (dev->driver)
19383 + return -EBUSY;
19384 +
19385 +@@ -522,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
19386 +
19387 + iface->condition = USB_INTERFACE_BOUND;
19388 +
19389 +- /* Disable LPM until this driver is bound. */
19390 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
19391 +- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
19392 +- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
19393 +- __func__, driver->name);
19394 +- return -ENOMEM;
19395 ++ /* See the comment about disabling LPM in usb_probe_interface(). */
19396 ++ if (driver->disable_hub_initiated_lpm) {
19397 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
19398 ++ if (lpm_disable_error) {
19399 ++ dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
19400 ++ __func__, driver->name);
19401 ++ return -ENOMEM;
19402 ++ }
19403 + }
19404 +
19405 + /* Claimed interfaces are initially inactive (suspended) and
19406 +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
19407 +index 9eb1cff28bd4..b8b580e5ae6e 100644
19408 +--- a/drivers/usb/core/hcd-pci.c
19409 ++++ b/drivers/usb/core/hcd-pci.c
19410 +@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
19411 + if (companion->bus != pdev->bus ||
19412 + PCI_SLOT(companion->devfn) != slot)
19413 + continue;
19414 ++
19415 ++ /*
19416 ++ * Companion device should be either UHCI, OHCI or EHCI host
19417 ++ * controller, otherwise skip.
19418 ++ */
19419 ++ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
19420 ++ companion->class != CL_EHCI)
19421 ++ continue;
19422 ++
19423 + companion_hcd = pci_get_drvdata(companion);
19424 + if (!companion_hcd || !companion_hcd->self.root_hub)
19425 + continue;
19426 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
19427 +index 51b436918f78..84f65743f29a 100644
19428 +--- a/drivers/usb/core/hub.c
19429 ++++ b/drivers/usb/core/hub.c
19430 +@@ -4292,7 +4292,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
19431 + {
19432 + struct usb_device *hdev = hub->hdev;
19433 + struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
19434 +- int i, j, retval;
19435 ++ int retries, operations, retval, i;
19436 + unsigned delay = HUB_SHORT_RESET_TIME;
19437 + enum usb_device_speed oldspeed = udev->speed;
19438 + const char *speed;
19439 +@@ -4394,7 +4394,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
19440 + * first 8 bytes of the device descriptor to get the ep0 maxpacket
19441 + * value.
19442 + */
19443 +- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
19444 ++ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
19445 + bool did_new_scheme = false;
19446 +
19447 + if (use_new_scheme(udev, retry_counter)) {
19448 +@@ -4421,7 +4421,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
19449 + * 255 is for WUSB devices, we actually need to use
19450 + * 512 (WUSB1.0[4.8.1]).
19451 + */
19452 +- for (j = 0; j < 3; ++j) {
19453 ++ for (operations = 0; operations < 3; ++operations) {
19454 + buf->bMaxPacketSize0 = 0;
19455 + r = usb_control_msg(udev, usb_rcvaddr0pipe(),
19456 + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
19457 +@@ -4441,7 +4441,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
19458 + r = -EPROTO;
19459 + break;
19460 + }
19461 +- if (r == 0)
19462 ++ /*
19463 ++ * Some devices time out if they are powered on
19464 ++ * when already connected. They need a second
19465 ++ * reset. But only on the first attempt,
19466 ++ * lest we get into a time out/reset loop
19467 ++ */
19468 ++ if (r == 0 || (r == -ETIMEDOUT && retries == 0))
19469 + break;
19470 + }
19471 + udev->descriptor.bMaxPacketSize0 =
19472 +@@ -4473,7 +4479,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
19473 + * authorization will assign the final address.
19474 + */
19475 + if (udev->wusb == 0) {
19476 +- for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
19477 ++ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
19478 + retval = hub_set_address(udev, devnum);
19479 + if (retval >= 0)
19480 + break;
19481 +diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
19482 +index 14718a9ffcfb..460c855be0d0 100644
19483 +--- a/drivers/usb/core/port.c
19484 ++++ b/drivers/usb/core/port.c
19485 +@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
19486 +
19487 + return retval;
19488 + }
19489 +-
19490 +-static int usb_port_prepare(struct device *dev)
19491 +-{
19492 +- return 1;
19493 +-}
19494 + #endif
19495 +
19496 + static const struct dev_pm_ops usb_port_pm_ops = {
19497 + #ifdef CONFIG_PM
19498 + .runtime_suspend = usb_port_runtime_suspend,
19499 + .runtime_resume = usb_port_runtime_resume,
19500 +- .prepare = usb_port_prepare,
19501 + #endif
19502 + };
19503 +
19504 +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
19505 +index ebb29caa3fe4..77e4c9bc0ab1 100644
19506 +--- a/drivers/usb/core/usb.c
19507 ++++ b/drivers/usb/core/usb.c
19508 +@@ -311,13 +311,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
19509 +
19510 + static int usb_dev_prepare(struct device *dev)
19511 + {
19512 +- struct usb_device *udev = to_usb_device(dev);
19513 +-
19514 +- /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
19515 +- if (udev->do_remote_wakeup != device_may_wakeup(dev))
19516 +- return 0;
19517 +-
19518 +- return 1;
19519 ++ return 0; /* Implement eventually? */
19520 + }
19521 +
19522 + static void usb_dev_complete(struct device *dev)
19523 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
19524 +index cf43e9e18368..97ef75af9632 100644
19525 +--- a/drivers/usb/gadget/function/f_fs.c
19526 ++++ b/drivers/usb/gadget/function/f_fs.c
19527 +@@ -646,24 +646,23 @@ static void ffs_user_copy_worker(struct work_struct *work)
19528 + work);
19529 + int ret = io_data->req->status ? io_data->req->status :
19530 + io_data->req->actual;
19531 ++ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
19532 +
19533 + if (io_data->read && ret > 0) {
19534 + use_mm(io_data->mm);
19535 + ret = copy_to_iter(io_data->buf, ret, &io_data->data);
19536 +- if (iov_iter_count(&io_data->data))
19537 ++ if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
19538 + ret = -EFAULT;
19539 + unuse_mm(io_data->mm);
19540 + }
19541 +
19542 + io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
19543 +
19544 +- if (io_data->ffs->ffs_eventfd &&
19545 +- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
19546 ++ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
19547 + eventfd_signal(io_data->ffs->ffs_eventfd, 1);
19548 +
19549 + usb_ep_free_request(io_data->ep, io_data->req);
19550 +
19551 +- io_data->kiocb->private = NULL;
19552 + if (io_data->read)
19553 + kfree(io_data->to_free);
19554 + kfree(io_data->buf);
19555 +diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
19556 +index 223ccf89d226..a4f664062e0c 100644
19557 +--- a/drivers/usb/gadget/function/f_mass_storage.c
19558 ++++ b/drivers/usb/gadget/function/f_mass_storage.c
19559 +@@ -2977,25 +2977,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
19560 + }
19561 + EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
19562 +
19563 +-int fsg_common_run_thread(struct fsg_common *common)
19564 +-{
19565 +- common->state = FSG_STATE_IDLE;
19566 +- /* Tell the thread to start working */
19567 +- common->thread_task =
19568 +- kthread_create(fsg_main_thread, common, "file-storage");
19569 +- if (IS_ERR(common->thread_task)) {
19570 +- common->state = FSG_STATE_TERMINATED;
19571 +- return PTR_ERR(common->thread_task);
19572 +- }
19573 +-
19574 +- DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
19575 +-
19576 +- wake_up_process(common->thread_task);
19577 +-
19578 +- return 0;
19579 +-}
19580 +-EXPORT_SYMBOL_GPL(fsg_common_run_thread);
19581 +-
19582 + static void fsg_common_release(struct kref *ref)
19583 + {
19584 + struct fsg_common *common = container_of(ref, struct fsg_common, ref);
19585 +@@ -3005,6 +2986,7 @@ static void fsg_common_release(struct kref *ref)
19586 + if (common->state != FSG_STATE_TERMINATED) {
19587 + raise_exception(common, FSG_STATE_EXIT);
19588 + wait_for_completion(&common->thread_notifier);
19589 ++ common->thread_task = NULL;
19590 + }
19591 +
19592 + for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
19593 +@@ -3050,9 +3032,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
19594 + if (ret)
19595 + return ret;
19596 + fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
19597 +- ret = fsg_common_run_thread(fsg->common);
19598 +- if (ret)
19599 ++ }
19600 ++
19601 ++ if (!common->thread_task) {
19602 ++ common->state = FSG_STATE_IDLE;
19603 ++ common->thread_task =
19604 ++ kthread_create(fsg_main_thread, common, "file-storage");
19605 ++ if (IS_ERR(common->thread_task)) {
19606 ++ int ret = PTR_ERR(common->thread_task);
19607 ++ common->thread_task = NULL;
19608 ++ common->state = FSG_STATE_TERMINATED;
19609 + return ret;
19610 ++ }
19611 ++ DBG(common, "I/O thread pid: %d\n",
19612 ++ task_pid_nr(common->thread_task));
19613 ++ wake_up_process(common->thread_task);
19614 + }
19615 +
19616 + fsg->gadget = gadget;
19617 +diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
19618 +index 445df6775609..b6a9918eaefb 100644
19619 +--- a/drivers/usb/gadget/function/f_mass_storage.h
19620 ++++ b/drivers/usb/gadget/function/f_mass_storage.h
19621 +@@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
19622 + void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
19623 + const char *pn);
19624 +
19625 +-int fsg_common_run_thread(struct fsg_common *common);
19626 +-
19627 + void fsg_config_from_params(struct fsg_config *cfg,
19628 + const struct fsg_module_parameters *params,
19629 + unsigned int fsg_num_buffers);
19630 +diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
19631 +index c16089efc322..c39de65a448b 100644
19632 +--- a/drivers/usb/gadget/legacy/acm_ms.c
19633 ++++ b/drivers/usb/gadget/legacy/acm_ms.c
19634 +@@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
19635 + if (status < 0)
19636 + goto put_msg;
19637 +
19638 +- status = fsg_common_run_thread(opts->common);
19639 +- if (status)
19640 +- goto remove_acm;
19641 +-
19642 + status = usb_add_function(c, f_msg);
19643 + if (status)
19644 + goto remove_acm;
19645 +diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
19646 +index e61af53c7d2b..125974f32f50 100644
19647 +--- a/drivers/usb/gadget/legacy/mass_storage.c
19648 ++++ b/drivers/usb/gadget/legacy/mass_storage.c
19649 +@@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c)
19650 + if (IS_ERR(f_msg))
19651 + return PTR_ERR(f_msg);
19652 +
19653 +- ret = fsg_common_run_thread(opts->common);
19654 +- if (ret)
19655 +- goto put_func;
19656 +-
19657 + ret = usb_add_function(c, f_msg);
19658 + if (ret)
19659 + goto put_func;
19660 +diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
19661 +index 229d704a620b..a70a406580ea 100644
19662 +--- a/drivers/usb/gadget/legacy/multi.c
19663 ++++ b/drivers/usb/gadget/legacy/multi.c
19664 +@@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis;
19665 +
19666 + static int rndis_do_config(struct usb_configuration *c)
19667 + {
19668 +- struct fsg_opts *fsg_opts;
19669 + int ret;
19670 +
19671 + if (gadget_is_otg(c->cdev->gadget)) {
19672 +@@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c)
19673 + goto err_fsg;
19674 + }
19675 +
19676 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
19677 +- ret = fsg_common_run_thread(fsg_opts->common);
19678 +- if (ret)
19679 +- goto err_run;
19680 +-
19681 + ret = usb_add_function(c, f_msg_rndis);
19682 + if (ret)
19683 + goto err_run;
19684 +@@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi;
19685 +
19686 + static int cdc_do_config(struct usb_configuration *c)
19687 + {
19688 +- struct fsg_opts *fsg_opts;
19689 + int ret;
19690 +
19691 + if (gadget_is_otg(c->cdev->gadget)) {
19692 +@@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c)
19693 + goto err_fsg;
19694 + }
19695 +
19696 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
19697 +- ret = fsg_common_run_thread(fsg_opts->common);
19698 +- if (ret)
19699 +- goto err_run;
19700 +-
19701 + ret = usb_add_function(c, f_msg_multi);
19702 + if (ret)
19703 + goto err_run;
19704 +diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
19705 +index 09975046c694..b1e535f4022e 100644
19706 +--- a/drivers/usb/gadget/legacy/nokia.c
19707 ++++ b/drivers/usb/gadget/legacy/nokia.c
19708 +@@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c)
19709 + struct usb_function *f_ecm;
19710 + struct usb_function *f_obex2 = NULL;
19711 + struct usb_function *f_msg;
19712 +- struct fsg_opts *fsg_opts;
19713 + int status = 0;
19714 + int obex1_stat = -1;
19715 + int obex2_stat = -1;
19716 +@@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c)
19717 + goto err_ecm;
19718 + }
19719 +
19720 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
19721 +-
19722 +- status = fsg_common_run_thread(fsg_opts->common);
19723 +- if (status)
19724 +- goto err_msg;
19725 +-
19726 + status = usb_add_function(c, f_msg);
19727 + if (status)
19728 + goto err_msg;
19729 +diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
19730 +index b86a6f03592e..e272b3ba1d14 100644
19731 +--- a/drivers/usb/gadget/udc/udc-core.c
19732 ++++ b/drivers/usb/gadget/udc/udc-core.c
19733 +@@ -75,7 +75,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
19734 + mapped = dma_map_sg(dev, req->sg, req->num_sgs,
19735 + is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
19736 + if (mapped == 0) {
19737 +- dev_err(&gadget->dev, "failed to map SGs\n");
19738 ++ dev_err(dev, "failed to map SGs\n");
19739 + return -EFAULT;
19740 + }
19741 +
19742 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
19743 +index 5cd080e0a685..743d9a20e248 100644
19744 +--- a/drivers/usb/host/xhci-mem.c
19745 ++++ b/drivers/usb/host/xhci-mem.c
19746 +@@ -1873,6 +1873,12 @@ no_bw:
19747 + kfree(xhci->rh_bw);
19748 + kfree(xhci->ext_caps);
19749 +
19750 ++ xhci->usb2_ports = NULL;
19751 ++ xhci->usb3_ports = NULL;
19752 ++ xhci->port_array = NULL;
19753 ++ xhci->rh_bw = NULL;
19754 ++ xhci->ext_caps = NULL;
19755 ++
19756 + xhci->page_size = 0;
19757 + xhci->page_shift = 0;
19758 + xhci->bus_state[0].bus_suspended = 0;
19759 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
19760 +index f0640b7a1c42..48672fac7ff3 100644
19761 +--- a/drivers/usb/host/xhci-pci.c
19762 ++++ b/drivers/usb/host/xhci-pci.c
19763 +@@ -48,6 +48,7 @@
19764 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
19765 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
19766 + #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
19767 ++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
19768 +
19769 + static const char hcd_name[] = "xhci_hcd";
19770 +
19771 +@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
19772 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
19773 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
19774 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
19775 +- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
19776 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
19777 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
19778 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
19779 + }
19780 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
19781 +@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
19782 + struct xhci_hcd *xhci;
19783 +
19784 + xhci = hcd_to_xhci(pci_get_drvdata(dev));
19785 ++ xhci->xhc_state |= XHCI_STATE_REMOVING;
19786 + if (xhci->shared_hcd) {
19787 + usb_remove_hcd(xhci->shared_hcd);
19788 + usb_put_hcd(xhci->shared_hcd);
19789 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
19790 +index d39d6bf1d090..d4962208be30 100644
19791 +--- a/drivers/usb/host/xhci-plat.c
19792 ++++ b/drivers/usb/host/xhci-plat.c
19793 +@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
19794 +
19795 + static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
19796 + {
19797 ++ struct usb_hcd *hcd = xhci_to_hcd(xhci);
19798 ++
19799 + /*
19800 + * As of now platform drivers don't provide MSI support so we ensure
19801 + * here that the generic code does not try to make a pci_dev from our
19802 + * dev struct in order to setup MSI
19803 + */
19804 + xhci->quirks |= XHCI_PLAT;
19805 ++
19806 ++ /*
19807 ++ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
19808 ++ * to 1. However, these SoCs don't support 64-bit address memory
19809 ++ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
19810 ++ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
19811 ++ * xhci_gen_setup().
19812 ++ */
19813 ++ if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
19814 ++ xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
19815 ++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
19816 + }
19817 +
19818 + /* called during probe() after chip reset completes */
19819 +diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
19820 +index 5a2e2e3936c4..529c3c40f901 100644
19821 +--- a/drivers/usb/host/xhci-plat.h
19822 ++++ b/drivers/usb/host/xhci-plat.h
19823 +@@ -14,7 +14,7 @@
19824 + #include "xhci.h" /* for hcd_to_xhci() */
19825 +
19826 + enum xhci_plat_type {
19827 +- XHCI_PLAT_TYPE_MARVELL_ARMADA,
19828 ++ XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
19829 + XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
19830 + XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
19831 + };
19832 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
19833 +index 3915657e6078..a85a1c993d61 100644
19834 +--- a/drivers/usb/host/xhci-ring.c
19835 ++++ b/drivers/usb/host/xhci-ring.c
19836 +@@ -4014,7 +4014,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
19837 + int reserved_trbs = xhci->cmd_ring_reserved_trbs;
19838 + int ret;
19839 +
19840 +- if (xhci->xhc_state) {
19841 ++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
19842 ++ (xhci->xhc_state & XHCI_STATE_HALTED)) {
19843 + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
19844 + return -ESHUTDOWN;
19845 + }
19846 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
19847 +index 0c8087d3c313..8e713cca58ed 100644
19848 +--- a/drivers/usb/host/xhci.c
19849 ++++ b/drivers/usb/host/xhci.c
19850 +@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
19851 + "waited %u microseconds.\n",
19852 + XHCI_MAX_HALT_USEC);
19853 + if (!ret)
19854 +- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
18855 ++ /* clear state flags, including dying, halted or removing */
19856 ++ xhci->xhc_state = 0;
19857 +
19858 + return ret;
19859 + }
19860 +@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
19861 + /* Resume root hubs only when have pending events. */
19862 + status = readl(&xhci->op_regs->status);
19863 + if (status & STS_EINT) {
19864 +- usb_hcd_resume_root_hub(hcd);
19865 + usb_hcd_resume_root_hub(xhci->shared_hcd);
19866 ++ usb_hcd_resume_root_hub(hcd);
19867 + }
19868 + }
19869 +
19870 +@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
19871 +
19872 + /* Re-enable port polling. */
19873 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
19874 +- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
19875 +- usb_hcd_poll_rh_status(hcd);
19876 + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
19877 + usb_hcd_poll_rh_status(xhci->shared_hcd);
19878 ++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
19879 ++ usb_hcd_poll_rh_status(hcd);
19880 +
19881 + return retval;
19882 + }
19883 +@@ -2770,7 +2771,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
19884 + if (ret <= 0)
19885 + return ret;
19886 + xhci = hcd_to_xhci(hcd);
19887 +- if (xhci->xhc_state & XHCI_STATE_DYING)
19888 ++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
19889 ++ (xhci->xhc_state & XHCI_STATE_REMOVING))
19890 + return -ENODEV;
19891 +
19892 + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
19893 +@@ -3817,7 +3819,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
19894 +
19895 + mutex_lock(&xhci->mutex);
19896 +
19897 +- if (xhci->xhc_state) /* dying or halted */
19898 ++ if (xhci->xhc_state) /* dying, removing or halted */
19899 + goto out;
19900 +
19901 + if (!udev->slot_id) {
19902 +@@ -4944,6 +4946,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
19903 + return retval;
19904 + xhci_dbg(xhci, "Reset complete\n");
19905 +
19906 ++ /*
19907 ++ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
19908 ++ * of HCCPARAMS1 is set to 1. However, the xHCs don't actually
19909 ++ * support 64-bit address memory pointers. So, this driver clears the AC64
19910 ++ * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
19911 ++ * DMA_BIT_MASK(32)) in this xhci_gen_setup().
19912 ++ */
19913 ++ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
19914 ++ xhci->hcc_params &= ~BIT(0);
19915 ++
19916 + /* Set dma_mask and coherent_dma_mask to 64-bits,
19917 + * if xHC supports 64-bit addressing */
19918 + if (HCC_64BIT_ADDR(xhci->hcc_params) &&
19919 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
19920 +index cc651383ce5a..1cdea4a8c895 100644
19921 +--- a/drivers/usb/host/xhci.h
19922 ++++ b/drivers/usb/host/xhci.h
19923 +@@ -1596,6 +1596,7 @@ struct xhci_hcd {
19924 + */
19925 + #define XHCI_STATE_DYING (1 << 0)
19926 + #define XHCI_STATE_HALTED (1 << 1)
19927 ++#define XHCI_STATE_REMOVING (1 << 2)
19928 + /* Statistics */
19929 + int error_bitmask;
19930 + unsigned int quirks;
19931 +@@ -1632,6 +1633,7 @@ struct xhci_hcd {
19932 + #define XHCI_PME_STUCK_QUIRK (1 << 20)
19933 + #define XHCI_MTK_HOST (1 << 21)
19934 + #define XHCI_SSIC_PORT_UNUSED (1 << 22)
19935 ++#define XHCI_NO_64BIT_SUPPORT (1 << 23)
19936 + unsigned int num_active_eps;
19937 + unsigned int limit_active_eps;
19938 + /* There are two roothubs to keep track of bus suspend info for */
19939 +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
19940 +index c6bfd13f6c92..1950e87b4219 100644
19941 +--- a/drivers/usb/misc/iowarrior.c
19942 ++++ b/drivers/usb/misc/iowarrior.c
19943 +@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
19944 + iface_desc = interface->cur_altsetting;
19945 + dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
19946 +
19947 ++ if (iface_desc->desc.bNumEndpoints < 1) {
19948 ++ dev_err(&interface->dev, "Invalid number of endpoints\n");
19949 ++ retval = -EINVAL;
19950 ++ goto error;
19951 ++ }
19952 ++
19953 + /* set up the endpoint information */
19954 + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
19955 + endpoint = &iface_desc->endpoint[i].desc;
19956 +diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
19957 +index 92fdb6e9faff..c78ff95a43be 100644
19958 +--- a/drivers/usb/misc/usbtest.c
19959 ++++ b/drivers/usb/misc/usbtest.c
19960 +@@ -529,6 +529,7 @@ static struct scatterlist *
19961 + alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
19962 + {
19963 + struct scatterlist *sg;
19964 ++ unsigned int n_size = 0;
19965 + unsigned i;
19966 + unsigned size = max;
19967 + unsigned maxpacket =
19968 +@@ -561,7 +562,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
19969 + break;
19970 + case 1:
19971 + for (j = 0; j < size; j++)
19972 +- *buf++ = (u8) ((j % maxpacket) % 63);
19973 ++ *buf++ = (u8) (((j + n_size) % maxpacket) % 63);
19974 ++ n_size += size;
19975 + break;
19976 + }
19977 +
19978 +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
19979 +index c0f5c652d272..f1893e08e51a 100644
19980 +--- a/drivers/usb/renesas_usbhs/fifo.c
19981 ++++ b/drivers/usb/renesas_usbhs/fifo.c
19982 +@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
19983 + goto __usbhs_pkt_handler_end;
19984 + }
19985 +
19986 +- ret = func(pkt, &is_done);
19987 ++ if (likely(func))
19988 ++ ret = func(pkt, &is_done);
19989 +
19990 + if (is_done)
19991 + __usbhsf_pkt_del(pkt);
19992 +@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
19993 +
19994 + pkt->trans = len;
19995 +
19996 ++ usbhsf_tx_irq_ctrl(pipe, 0);
19997 + INIT_WORK(&pkt->work, xfer_work);
19998 + schedule_work(&pkt->work);
19999 +
20000 +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
20001 +index 657f9672ceba..251053551866 100644
20002 +--- a/drivers/usb/renesas_usbhs/mod_gadget.c
20003 ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
20004 +@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
20005 + struct usbhs_pipe *pipe = pkt->pipe;
20006 + struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
20007 + struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
20008 ++ unsigned long flags;
20009 +
20010 + ureq->req.actual = pkt->actual;
20011 +
20012 +- usbhsg_queue_pop(uep, ureq, 0);
20013 ++ usbhs_lock(priv, flags);
20014 ++ if (uep)
20015 ++ __usbhsg_queue_pop(uep, ureq, 0);
20016 ++ usbhs_unlock(priv, flags);
20017 + }
20018 +
20019 + static void usbhsg_queue_push(struct usbhsg_uep *uep,
20020 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
20021 +index 73a366de5102..a543cdc0f88f 100644
20022 +--- a/drivers/usb/serial/cp210x.c
20023 ++++ b/drivers/usb/serial/cp210x.c
20024 +@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
20025 + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
20026 + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
20027 + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
20028 ++ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
20029 + { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
20030 + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
20031 + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
20032 +@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
20033 + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
20034 + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
20035 + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
20036 ++ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
20037 + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
20038 + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
20039 + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
20040 +@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
20041 + { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
20042 + { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
20043 + { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
20044 ++ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
20045 ++ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
20046 + { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
20047 + { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
20048 + { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
20049 +@@ -165,6 +169,7 @@ static const struct usb_device_id id_table[] = {
20050 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
20051 + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
20052 + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
20053 ++ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
20054 + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
20055 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
20056 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
20057 +diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
20058 +index 01bf53392819..244acb1299a9 100644
20059 +--- a/drivers/usb/serial/cypress_m8.c
20060 ++++ b/drivers/usb/serial/cypress_m8.c
20061 +@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
20062 + struct usb_serial *serial = port->serial;
20063 + struct cypress_private *priv;
20064 +
20065 ++ if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
20066 ++ dev_err(&port->dev, "required endpoint is missing\n");
20067 ++ return -ENODEV;
20068 ++ }
20069 ++
20070 + priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
20071 + if (!priv)
20072 + return -ENOMEM;
20073 +@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
20074 + cypress_set_termios(tty, port, &priv->tmp_termios);
20075 +
20076 + /* setup the port and start reading from the device */
20077 +- if (!port->interrupt_in_urb) {
20078 +- dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
20079 +- __func__);
20080 +- return -1;
20081 +- }
20082 +-
20083 + usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
20084 + usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
20085 + port->interrupt_in_urb->transfer_buffer,
20086 +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
20087 +index 12b0e67473ba..3df7b7ec178e 100644
20088 +--- a/drivers/usb/serial/digi_acceleport.c
20089 ++++ b/drivers/usb/serial/digi_acceleport.c
20090 +@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
20091 +
20092 + static int digi_startup(struct usb_serial *serial)
20093 + {
20094 ++ struct device *dev = &serial->interface->dev;
20095 + struct digi_serial *serial_priv;
20096 + int ret;
20097 ++ int i;
20098 ++
20099 ++ /* check whether the device has the expected number of endpoints */
20100 ++ if (serial->num_port_pointers < serial->type->num_ports + 1) {
20101 ++ dev_err(dev, "OOB endpoints missing\n");
20102 ++ return -ENODEV;
20103 ++ }
20104 ++
20105 ++ for (i = 0; i < serial->type->num_ports + 1 ; i++) {
20106 ++ if (!serial->port[i]->read_urb) {
20107 ++ dev_err(dev, "bulk-in endpoint missing\n");
20108 ++ return -ENODEV;
20109 ++ }
20110 ++ if (!serial->port[i]->write_urb) {
20111 ++ dev_err(dev, "bulk-out endpoint missing\n");
20112 ++ return -ENODEV;
20113 ++ }
20114 ++ }
20115 +
20116 + serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
20117 + if (!serial_priv)
20118 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
20119 +index 8c660ae401d8..b61f12160d37 100644
20120 +--- a/drivers/usb/serial/ftdi_sio.c
20121 ++++ b/drivers/usb/serial/ftdi_sio.c
20122 +@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
20123 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
20124 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
20125 + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
20126 ++ /* ICP DAS I-756xU devices */
20127 ++ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
20128 ++ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
20129 ++ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
20130 + { } /* Terminating entry */
20131 + };
20132 +
20133 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
20134 +index a84df2513994..c5d6c1e73e8e 100644
20135 +--- a/drivers/usb/serial/ftdi_sio_ids.h
20136 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
20137 +@@ -872,6 +872,14 @@
20138 + #define NOVITUS_BONO_E_PID 0x6010
20139 +
20140 + /*
20141 ++ * ICPDAS I-756*U devices
20142 ++ */
20143 ++#define ICPDAS_VID 0x1b5c
20144 ++#define ICPDAS_I7560U_PID 0x0103
20145 ++#define ICPDAS_I7561U_PID 0x0104
20146 ++#define ICPDAS_I7563U_PID 0x0105
20147 ++
20148 ++/*
20149 + * RT Systems programming cables for various ham radios
20150 + */
20151 + #define RTSYSTEMS_VID 0x2100 /* Vendor ID */
20152 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
20153 +index f49327d20ee8..0a935b1e5475 100644
20154 +--- a/drivers/usb/serial/io_edgeport.c
20155 ++++ b/drivers/usb/serial/io_edgeport.c
20156 +@@ -2849,14 +2849,16 @@ static int edge_startup(struct usb_serial *serial)
20157 + /* not set up yet, so do it now */
20158 + edge_serial->interrupt_read_urb =
20159 + usb_alloc_urb(0, GFP_KERNEL);
20160 +- if (!edge_serial->interrupt_read_urb)
20161 +- return -ENOMEM;
20162 ++ if (!edge_serial->interrupt_read_urb) {
20163 ++ response = -ENOMEM;
20164 ++ break;
20165 ++ }
20166 +
20167 + edge_serial->interrupt_in_buffer =
20168 + kmalloc(buffer_size, GFP_KERNEL);
20169 + if (!edge_serial->interrupt_in_buffer) {
20170 +- usb_free_urb(edge_serial->interrupt_read_urb);
20171 +- return -ENOMEM;
20172 ++ response = -ENOMEM;
20173 ++ break;
20174 + }
20175 + edge_serial->interrupt_in_endpoint =
20176 + endpoint->bEndpointAddress;
20177 +@@ -2884,14 +2886,16 @@ static int edge_startup(struct usb_serial *serial)
20178 + /* not set up yet, so do it now */
20179 + edge_serial->read_urb =
20180 + usb_alloc_urb(0, GFP_KERNEL);
20181 +- if (!edge_serial->read_urb)
20182 +- return -ENOMEM;
20183 ++ if (!edge_serial->read_urb) {
20184 ++ response = -ENOMEM;
20185 ++ break;
20186 ++ }
20187 +
20188 + edge_serial->bulk_in_buffer =
20189 + kmalloc(buffer_size, GFP_KERNEL);
20190 + if (!edge_serial->bulk_in_buffer) {
20191 +- usb_free_urb(edge_serial->read_urb);
20192 +- return -ENOMEM;
20193 ++ response = -ENOMEM;
20194 ++ break;
20195 + }
20196 + edge_serial->bulk_in_endpoint =
20197 + endpoint->bEndpointAddress;
20198 +@@ -2917,9 +2921,22 @@ static int edge_startup(struct usb_serial *serial)
20199 + }
20200 + }
20201 +
20202 +- if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
20203 +- dev_err(ddev, "Error - the proper endpoints were not found!\n");
20204 +- return -ENODEV;
20205 ++ if (response || !interrupt_in_found || !bulk_in_found ||
20206 ++ !bulk_out_found) {
20207 ++ if (!response) {
20208 ++ dev_err(ddev, "expected endpoints not found\n");
20209 ++ response = -ENODEV;
20210 ++ }
20211 ++
20212 ++ usb_free_urb(edge_serial->interrupt_read_urb);
20213 ++ kfree(edge_serial->interrupt_in_buffer);
20214 ++
20215 ++ usb_free_urb(edge_serial->read_urb);
20216 ++ kfree(edge_serial->bulk_in_buffer);
20217 ++
20218 ++ kfree(edge_serial);
20219 ++
20220 ++ return response;
20221 + }
20222 +
20223 + /* start interrupt read for this edgeport this interrupt will
20224 +@@ -2942,16 +2959,9 @@ static void edge_disconnect(struct usb_serial *serial)
20225 + {
20226 + struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
20227 +
20228 +- /* stop reads and writes on all ports */
20229 +- /* free up our endpoint stuff */
20230 + if (edge_serial->is_epic) {
20231 + usb_kill_urb(edge_serial->interrupt_read_urb);
20232 +- usb_free_urb(edge_serial->interrupt_read_urb);
20233 +- kfree(edge_serial->interrupt_in_buffer);
20234 +-
20235 + usb_kill_urb(edge_serial->read_urb);
20236 +- usb_free_urb(edge_serial->read_urb);
20237 +- kfree(edge_serial->bulk_in_buffer);
20238 + }
20239 + }
20240 +
20241 +@@ -2964,6 +2974,16 @@ static void edge_release(struct usb_serial *serial)
20242 + {
20243 + struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
20244 +
20245 ++ if (edge_serial->is_epic) {
20246 ++ usb_kill_urb(edge_serial->interrupt_read_urb);
20247 ++ usb_free_urb(edge_serial->interrupt_read_urb);
20248 ++ kfree(edge_serial->interrupt_in_buffer);
20249 ++
20250 ++ usb_kill_urb(edge_serial->read_urb);
20251 ++ usb_free_urb(edge_serial->read_urb);
20252 ++ kfree(edge_serial->bulk_in_buffer);
20253 ++ }
20254 ++
20255 + kfree(edge_serial);
20256 + }
20257 +
20258 +diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
20259 +index e07b15ed5814..7faa901ee47f 100644
20260 +--- a/drivers/usb/serial/keyspan.c
20261 ++++ b/drivers/usb/serial/keyspan.c
20262 +@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
20263 +
20264 + s_priv = usb_get_serial_data(serial);
20265 +
20266 ++ /* Make sure to unlink the URBs submitted in attach. */
20267 ++ usb_kill_urb(s_priv->instat_urb);
20268 ++ usb_kill_urb(s_priv->indat_urb);
20269 ++
20270 + usb_free_urb(s_priv->instat_urb);
20271 + usb_free_urb(s_priv->indat_urb);
20272 + usb_free_urb(s_priv->glocont_urb);
20273 +diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
20274 +index fd707d6a10e2..89726f702202 100644
20275 +--- a/drivers/usb/serial/mct_u232.c
20276 ++++ b/drivers/usb/serial/mct_u232.c
20277 +@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
20278 +
20279 + static int mct_u232_port_probe(struct usb_serial_port *port)
20280 + {
20281 ++ struct usb_serial *serial = port->serial;
20282 + struct mct_u232_private *priv;
20283 +
20284 ++ /* check first to simplify error handling */
20285 ++ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
20286 ++ dev_err(&port->dev, "expected endpoint missing\n");
20287 ++ return -ENODEV;
20288 ++ }
20289 ++
20290 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
20291 + if (!priv)
20292 + return -ENOMEM;
20293 +
20294 + /* Use second interrupt-in endpoint for reading. */
20295 +- priv->read_urb = port->serial->port[1]->interrupt_in_urb;
20296 ++ priv->read_urb = serial->port[1]->interrupt_in_urb;
20297 + priv->read_urb->context = port;
20298 +
20299 + spin_lock_init(&priv->lock);
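The mct_u232 fix works by reordering: the endpoint sanity check allocates nothing, so doing it before kzalloc() leaves every failure path a bare return. A sketch of that fail-fast probe shape, again with hypothetical example_* names:

static int example_port_probe(struct usb_serial_port *port)
{
        struct usb_serial *serial = port->serial;
        struct example_priv *priv;

        /* Validate hardware assumptions first; nothing to unwind. */
        if (!serial->port[1] || !serial->port[1]->interrupt_in_urb)
                return -ENODEV;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->read_urb = serial->port[1]->interrupt_in_urb;
        usb_set_serial_port_data(port, priv);
        return 0;
}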
20300 +diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
20301 +index 31a8b47f1ac6..c6596cbcc4b6 100644
20302 +--- a/drivers/usb/serial/mxuport.c
20303 ++++ b/drivers/usb/serial/mxuport.c
20304 +@@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial)
20305 + return 0;
20306 + }
20307 +
20308 ++static void mxuport_release(struct usb_serial *serial)
20309 ++{
20310 ++ struct usb_serial_port *port0 = serial->port[0];
20311 ++ struct usb_serial_port *port1 = serial->port[1];
20312 ++
20313 ++ usb_serial_generic_close(port1);
20314 ++ usb_serial_generic_close(port0);
20315 ++}
20316 ++
20317 + static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
20318 + {
20319 + struct mxuport_port *mxport = usb_get_serial_port_data(port);
20320 +@@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = {
20321 + .probe = mxuport_probe,
20322 + .port_probe = mxuport_port_probe,
20323 + .attach = mxuport_attach,
20324 ++ .release = mxuport_release,
20325 + .calc_num_ports = mxuport_calc_num_ports,
20326 + .open = mxuport_open,
20327 + .close = mxuport_close,
20328 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
20329 +index 348e19834b83..d96d423d00e6 100644
20330 +--- a/drivers/usb/serial/option.c
20331 ++++ b/drivers/usb/serial/option.c
20332 +@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
20333 + #define HAIER_PRODUCT_CE81B 0x10f8
20334 + #define HAIER_PRODUCT_CE100 0x2009
20335 +
20336 +-/* Cinterion (formerly Siemens) products */
20337 +-#define SIEMENS_VENDOR_ID 0x0681
20338 +-#define CINTERION_VENDOR_ID 0x1e2d
20339 ++/* Gemalto's Cinterion products (formerly Siemens) */
20340 ++#define SIEMENS_VENDOR_ID 0x0681
20341 ++#define CINTERION_VENDOR_ID 0x1e2d
20342 ++#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
20343 + #define CINTERION_PRODUCT_HC25_MDM 0x0047
20344 +-#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
20345 ++#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
20346 + #define CINTERION_PRODUCT_HC28_MDM 0x004C
20347 +-#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
20348 + #define CINTERION_PRODUCT_EU3_E 0x0051
20349 + #define CINTERION_PRODUCT_EU3_P 0x0052
20350 + #define CINTERION_PRODUCT_PH8 0x0053
20351 + #define CINTERION_PRODUCT_AHXX 0x0055
20352 + #define CINTERION_PRODUCT_PLXX 0x0060
20353 ++#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
20354 ++#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
20355 ++#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
20356 ++#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
20357 +
20358 + /* Olivetti products */
20359 + #define OLIVETTI_VENDOR_ID 0x0b3c
20360 +@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
20361 + .reserved = BIT(1) | BIT(2) | BIT(3),
20362 + };
20363 +
20364 ++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
20365 ++ .reserved = BIT(4) | BIT(5),
20366 ++};
20367 ++
20368 + static const struct usb_device_id option_ids[] = {
20369 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
20370 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
20371 +@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
20372 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
20373 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
20374 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
20375 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
20376 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
20377 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
20378 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
20379 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
20380 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
20381 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
20382 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
20383 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
20384 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
20385 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
20386 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
20387 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
20388 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
20389 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
20390 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
20391 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
20392 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
20393 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
20394 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
20395 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
20396 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
20397 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
20398 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
20399 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
20400 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
20401 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
20402 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
20403 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
20404 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
20405 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
20406 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
20407 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
20408 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
20409 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
20410 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
20411 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
20412 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
20413 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
20414 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
20415 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
20416 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
20417 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
20418 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
20419 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
20420 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
20421 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
20422 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
20423 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
20424 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
20425 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
20426 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
20427 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
20428 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
20429 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
20430 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
20431 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
20432 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
20433 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
20434 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
20435 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
20436 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
20437 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
20438 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
20439 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
20440 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
20441 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
20442 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
20443 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
20444 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
20445 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
20446 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
20447 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
20448 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
20449 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
20450 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
20451 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
20452 +@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
20453 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
20454 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
20455 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
20456 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
20457 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
20458 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
20459 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
20460 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
20461 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
20462 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
20463 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
20464 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
20465 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
20466 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
20467 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
20468 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
20469 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
20470 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
20471 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
20472 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
20473 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
20474 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
20475 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
20476 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
20477 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
20478 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
20479 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
20480 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
20481 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
20482 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
20483 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
20484 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
20485 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
20486 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
20487 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
20488 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
20489 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
20490 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
20491 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
20492 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
20493 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
20494 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
20495 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
20496 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
20497 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
20498 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
20499 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
20500 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
20501 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
20502 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
20503 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
20504 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
20505 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
20506 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
20507 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
20508 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
20509 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
20510 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
20511 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
20512 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
20513 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
20514 +@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
20515 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
20516 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
20517 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
20518 +- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
20519 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
20520 ++ .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
20521 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
20522 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
20523 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
20524 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
20525 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
20526 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
20527 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
20528 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
20529 +@@ -1818,6 +1959,8 @@ static const struct usb_device_id option_ids[] = {
20530 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
20531 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
20532 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
20533 ++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
20534 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
20535 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
20536 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
20537 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
20538 +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
20539 +index 504f5bff79c0..b18974cbd995 100644
20540 +--- a/drivers/usb/serial/quatech2.c
20541 ++++ b/drivers/usb/serial/quatech2.c
20542 +@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
20543 +
20544 + serial_priv = usb_get_serial_data(serial);
20545 +
20546 ++ usb_kill_urb(serial_priv->read_urb);
20547 + usb_free_urb(serial_priv->read_urb);
20548 + kfree(serial_priv->read_buffer);
20549 + kfree(serial_priv);
20550 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
20551 +index 9ff9404f99d7..e4ade8d89eb5 100644
20552 +--- a/drivers/usb/storage/uas.c
20553 ++++ b/drivers/usb/storage/uas.c
20554 +@@ -2,7 +2,7 @@
20555 + * USB Attached SCSI
20556 + * Note that this is not the same as the USB Mass Storage driver
20557 + *
20558 +- * Copyright Hans de Goede <hdegoede@××××××.com> for Red Hat, Inc. 2013 - 2014
20559 ++ * Copyright Hans de Goede <hdegoede@××××××.com> for Red Hat, Inc. 2013 - 2016
20560 + * Copyright Matthew Wilcox for Intel Corp, 2010
20561 + * Copyright Sarah Sharp for Intel Corp, 2010
20562 + *
20563 +@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
20564 + return SUCCESS;
20565 + }
20566 +
20567 ++static int uas_target_alloc(struct scsi_target *starget)
20568 ++{
20569 ++ struct uas_dev_info *devinfo = (struct uas_dev_info *)
20570 ++ dev_to_shost(starget->dev.parent)->hostdata;
20571 ++
20572 ++ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
20573 ++ starget->no_report_luns = 1;
20574 ++
20575 ++ return 0;
20576 ++}
20577 ++
20578 + static int uas_slave_alloc(struct scsi_device *sdev)
20579 + {
20580 + struct uas_dev_info *devinfo =
20581 +@@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
20582 + if (devinfo->flags & US_FL_BROKEN_FUA)
20583 + sdev->broken_fua = 1;
20584 +
20585 +- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
20586 + return 0;
20587 + }
20588 +
20589 +@@ -808,11 +818,12 @@ static struct scsi_host_template uas_host_template = {
20590 + .module = THIS_MODULE,
20591 + .name = "uas",
20592 + .queuecommand = uas_queuecommand,
20593 ++ .target_alloc = uas_target_alloc,
20594 + .slave_alloc = uas_slave_alloc,
20595 + .slave_configure = uas_slave_configure,
20596 + .eh_abort_handler = uas_eh_abort_handler,
20597 + .eh_bus_reset_handler = uas_eh_bus_reset_handler,
20598 +- .can_queue = 65536, /* Is there a limit on the _host_ ? */
20599 ++ .can_queue = MAX_CMNDS,
20600 + .this_id = -1,
20601 + .sg_tablesize = SG_NONE,
20602 + .skip_settle_delay = 1,
20603 +@@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
20604 + if (result)
20605 + goto set_alt0;
20606 +
20607 ++ /*
20608 ++ * 1 tag is reserved for untagged commands +
20609 ++ * 1 tag to avoid off by one errors in some bridge firmwares
20610 ++ */
20611 ++ shost->can_queue = devinfo->qdepth - 2;
20612 ++
20613 + usb_set_intfdata(intf, shost);
20614 + result = scsi_add_host(shost, &intf->dev);
20615 + if (result)
20616 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
20617 +index ccc113e83d88..53341a77d89f 100644
20618 +--- a/drivers/usb/storage/unusual_uas.h
20619 ++++ b/drivers/usb/storage/unusual_uas.h
20620 +@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
20621 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
20622 + US_FL_NO_ATA_1X),
20623 +
20624 ++/* Reported-by: David Webb <djw@××××××.uk> */
20625 ++UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
20626 ++ "Seagate",
20627 ++ "Expansion Desk",
20628 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
20629 ++ US_FL_NO_REPORT_LUNS),
20630 ++
20631 + /* Reported-by: Hans de Goede <hdegoede@××××××.com> */
20632 + UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
20633 + "Seagate",
20634 +diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
20635 +index 43576ed31ccd..9de988a0f856 100644
20636 +--- a/drivers/usb/storage/usb.c
20637 ++++ b/drivers/usb/storage/usb.c
20638 +@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
20639 + US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
20640 + US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
20641 + US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
20642 +- US_FL_MAX_SECTORS_240);
20643 ++ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
20644 +
20645 + p = quirks;
20646 + while (*p) {
20647 +@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
20648 + case 'i':
20649 + f |= US_FL_IGNORE_DEVICE;
20650 + break;
20651 ++ case 'j':
20652 ++ f |= US_FL_NO_REPORT_LUNS;
20653 ++ break;
20654 + case 'l':
20655 + f |= US_FL_NOT_LOCKABLE;
20656 + break;
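usb_stor_adjust_quirks() maps each letter of the quirks= module parameter to a US_FL_* bit, and the hunk above wires the new 'j' letter to US_FL_NO_REPORT_LUNS. A self-contained sketch of that letter-to-flag parser style - the flag values here are illustrative, not the kernel's:

#include <stdio.h>

#define FL_IGNORE_DEVICE  (1UL << 0)    /* 'i' */
#define FL_NO_REPORT_LUNS (1UL << 1)    /* 'j' */
#define FL_NOT_LOCKABLE   (1UL << 2)    /* 'l' */

static unsigned long parse_quirks(const char *p)
{
        unsigned long f = 0;

        for (; *p; p++) {
                switch (*p) {
                case 'i':
                        f |= FL_IGNORE_DEVICE;
                        break;
                case 'j':
                        f |= FL_NO_REPORT_LUNS;
                        break;
                case 'l':
                        f |= FL_NOT_LOCKABLE;
                        break;
                default:        /* unknown letters are skipped */
                        break;
                }
        }
        return f;
}

int main(void)
{
        printf("%#lx\n", parse_quirks("ij"));   /* prints 0x3 */
        return 0;
}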
20657 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
20658 +index facaaf003f19..e40da7759a0e 100644
20659 +--- a/drivers/usb/usbip/usbip_common.c
20660 ++++ b/drivers/usb/usbip/usbip_common.c
20661 +@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
20662 + if (!(size > 0))
20663 + return 0;
20664 +
20665 ++ if (size > urb->transfer_buffer_length) {
20666 ++ /* should not happen, probably malicious packet */
20667 ++ if (ud->side == USBIP_STUB) {
20668 ++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
20669 ++ return 0;
20670 ++ } else {
20671 ++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
20672 ++ return -EPIPE;
20673 ++ }
20674 ++ }
20675 ++
20676 + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
20677 + if (ret != size) {
20678 + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
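The usbip hunk guards against a peer-controlled length: the size field arrives off the wire, so it must be checked against urb->transfer_buffer_length before any data is received into the buffer. The rule in a self-contained form (names are illustrative):

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Copy 'claimed' bytes into a buffer we own, refusing lengths
 * larger than the buffer - a remote peer chose 'claimed'. */
static int recv_xbuff(char *dst, size_t dst_len,
                      const char *src, size_t claimed)
{
        if (claimed > dst_len)
                return -EPIPE;  /* probably a malicious packet */
        memcpy(dst, src, claimed);
        return 0;
}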
20679 +diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
20680 +index 8ea45a5cd806..d889ef2048df 100644
20681 +--- a/drivers/video/fbdev/Kconfig
20682 ++++ b/drivers/video/fbdev/Kconfig
20683 +@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
20684 + select FB_SYS_IMAGEBLIT
20685 + select FB_SYS_FOPS
20686 + select FB_DEFERRED_IO
20687 +- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
20688 + select XEN_XENBUS_FRONTEND
20689 + default y
20690 + help
20691 +diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
20692 +index 9362424c2340..f9ef06d0cd48 100644
20693 +--- a/drivers/video/fbdev/amba-clcd.c
20694 ++++ b/drivers/video/fbdev/amba-clcd.c
20695 +@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
20696 + fb->off_ienb = CLCD_PL111_IENB;
20697 + fb->off_cntl = CLCD_PL111_CNTL;
20698 + } else {
20699 +-#ifdef CONFIG_ARCH_VERSATILE
20700 +- fb->off_ienb = CLCD_PL111_IENB;
20701 +- fb->off_cntl = CLCD_PL111_CNTL;
20702 +-#else
20703 +- fb->off_ienb = CLCD_PL110_IENB;
20704 +- fb->off_cntl = CLCD_PL110_CNTL;
20705 +-#endif
20706 ++ if (of_machine_is_compatible("arm,versatile-ab") ||
20707 ++ of_machine_is_compatible("arm,versatile-pb")) {
20708 ++ fb->off_ienb = CLCD_PL111_IENB;
20709 ++ fb->off_cntl = CLCD_PL111_CNTL;
20710 ++ } else {
20711 ++ fb->off_ienb = CLCD_PL110_IENB;
20712 ++ fb->off_cntl = CLCD_PL110_CNTL;
20713 ++ }
20714 + }
20715 +
20716 + fb->clk = clk_get(&fb->dev->dev, NULL);
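The amba-clcd hunk trades a compile-time #ifdef for a runtime devicetree check, so a single multi-platform kernel image selects the PL110 or PL111 register layout at boot. The general shape of that conversion, with hypothetical use_*() helpers standing in for the offset assignments:

/* Before: fixed at build time, wrong on every other board. */
#ifdef CONFIG_ARCH_VERSATILE
        use_pl111_layout();
#else
        use_pl110_layout();
#endif

/* After: decided at boot from the devicetree root node. */
if (of_machine_is_compatible("arm,versatile-ab") ||
    of_machine_is_compatible("arm,versatile-pb"))
        use_pl111_layout();
else
        use_pl110_layout();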
20717 +diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
20718 +index 6b2a06d09f2b..d8d583d32a37 100644
20719 +--- a/drivers/video/fbdev/da8xx-fb.c
20720 ++++ b/drivers/video/fbdev/da8xx-fb.c
20721 +@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
20722 + .lower_margin = 2,
20723 + .hsync_len = 0,
20724 + .vsync_len = 0,
20725 +- .sync = FB_SYNC_CLK_INVERT |
20726 +- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
20727 ++ .sync = FB_SYNC_CLK_INVERT,
20728 + },
20729 + /* Sharp LK043T1DG01 */
20730 + [1] = {
20731 +@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
20732 + .lower_margin = 2,
20733 + .hsync_len = 41,
20734 + .vsync_len = 10,
20735 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
20736 ++ .sync = 0,
20737 + .flag = 0,
20738 + },
20739 + [2] = {
20740 +@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
20741 + .lower_margin = 10,
20742 + .hsync_len = 10,
20743 + .vsync_len = 10,
20744 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
20745 ++ .sync = 0,
20746 + .flag = 0,
20747 + },
20748 + [3] = {
20749 +diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
20750 +index 7760fc1a2218..1f413a2f70b6 100644
20751 +--- a/drivers/virtio/virtio_pci_modern.c
20752 ++++ b/drivers/virtio/virtio_pci_modern.c
20753 +@@ -17,6 +17,7 @@
20754 + *
20755 + */
20756 +
20757 ++#include <linux/delay.h>
20758 + #define VIRTIO_PCI_NO_LEGACY
20759 + #include "virtio_pci_common.h"
20760 +
20761 +@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
20762 + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
20763 + /* 0 status means a reset. */
20764 + vp_iowrite8(0, &vp_dev->common->device_status);
20765 +- /* Flush out the status write, and flush in device writes,
20766 +- * including MSI-X interrupts, if any. */
20767 +- vp_ioread8(&vp_dev->common->device_status);
20768 ++ /* After writing 0 to device_status, the driver MUST wait for a read of
20769 ++ * device_status to return 0 before reinitializing the device.
20770 ++ * This will flush out the status write, and flush in device writes,
20771 ++ * including MSI-X interrupts, if any.
20772 ++ */
20773 ++ while (vp_ioread8(&vp_dev->common->device_status))
20774 ++ msleep(1);
20775 + /* Flush pending VQ/configuration callbacks. */
20776 + vp_synchronize_vectors(vdev);
20777 + }
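The virtio reset change polls device_status until it reads back zero, as the virtio 1.0 spec requires before reinitialization; that is why the hunk pulls in <linux/delay.h> for msleep(). The patch polls without bound; a hedged variant that gives up on a wedged device might look like this (read_status() and write_status() are hypothetical wrappers around the vp_ioread8()/vp_iowrite8() accessors):

#include <linux/delay.h>

static bool reset_with_timeout(unsigned int max_ms)
{
        unsigned int waited = 0;

        write_status(0);                /* writing 0 requests a reset */
        while (read_status() != 0) {    /* spec: wait for it to read 0 */
                if (waited++ >= max_ms)
                        return false;   /* device never came back */
                msleep(1);
        }
        return true;
}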
20778 +diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
20779 +index 71e78ef4b736..3a75f3b53452 100644
20780 +--- a/drivers/watchdog/rc32434_wdt.c
20781 ++++ b/drivers/watchdog/rc32434_wdt.c
20782 +@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
20783 + return -EINVAL;
20784 + /* Fall through */
20785 + case WDIOC_GETTIMEOUT:
20786 +- return copy_to_user(argp, &timeout, sizeof(int));
20787 ++ return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
20788 + default:
20789 + return -ENOTTY;
20790 + }
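The rc32434_wdt one-liner fixes a common ioctl bug: copy_to_user() returns the number of bytes left uncopied, not an errno, so returning its value directly leaks a positive count to userspace as if it were a result code. The convention the fix applies, as a kernel-context sketch:

case WDIOC_GETTIMEOUT:
        if (copy_to_user(argp, &timeout, sizeof(timeout)))
                return -EFAULT; /* nonzero means bytes were not copied */
        return 0;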
20791 +diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
20792 +index 6467b91f2245..028618c5eeba 100644
20793 +--- a/drivers/watchdog/sp5100_tco.c
20794 ++++ b/drivers/watchdog/sp5100_tco.c
20795 +@@ -73,6 +73,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
20796 + /*
20797 + * Some TCO specific functions
20798 + */
20799 ++
20800 ++static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
20801 ++{
20802 ++ return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
20803 ++ dev->revision < 0x40;
20804 ++}
20805 ++
20806 + static void tco_timer_start(void)
20807 + {
20808 + u32 val;
20809 +@@ -129,7 +136,7 @@ static void tco_timer_enable(void)
20810 + {
20811 + int val;
20812 +
20813 +- if (sp5100_tco_pci->revision >= 0x40) {
20814 ++ if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
20815 + /* For SB800 or later */
20816 + /* Set the Watchdog timer resolution to 1 sec */
20817 + outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
20818 +@@ -342,8 +349,7 @@ static unsigned char sp5100_tco_setupdevice(void)
20819 + /*
20820 + * Determine type of southbridge chipset.
20821 + */
20822 +- if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
20823 +- sp5100_tco_pci->revision < 0x40) {
20824 ++ if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
20825 + dev_name = SP5100_DEVNAME;
20826 + index_reg = SP5100_IO_PM_INDEX_REG;
20827 + data_reg = SP5100_IO_PM_DATA_REG;
20828 +@@ -388,8 +394,7 @@ static unsigned char sp5100_tco_setupdevice(void)
20829 + * Secondly, find the watchdog timer MMIO address
20830 + * from SBResource_MMIO register.
20831 + */
20832 +- if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
20833 +- sp5100_tco_pci->revision < 0x40) {
20834 ++ if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
20835 + /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
20836 + pci_read_config_dword(sp5100_tco_pci,
20837 + SP5100_SB_RESOURCE_MMIO_BASE, &val);
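The sp5100_tco hunks replace three copies of the same two-part chipset test with one named predicate, tco_has_sp5100_reg_layout(), so the condition reads as intent and has a single place to change. The refactor in miniature (OLD_DEVICE_ID and the use_*() calls are illustrative):

static bool has_old_reg_layout(struct pci_dev *dev)
{
        return dev->device == OLD_DEVICE_ID && dev->revision < 0x40;
}

        if (has_old_reg_layout(tco_pci))
                use_sp5100_registers();
        else
                use_sb800_registers();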
20838 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
20839 +index 12eab503efd1..364bc44610c1 100644
20840 +--- a/drivers/xen/balloon.c
20841 ++++ b/drivers/xen/balloon.c
20842 +@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
20843 + static void balloon_process(struct work_struct *work);
20844 + static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
20845 +
20846 ++static void release_memory_resource(struct resource *resource);
20847 ++
20848 + /* When ballooning out (allocating memory to return to Xen) we don't really
20849 + want the kernel to try too hard since that can trigger the oom killer. */
20850 + #define GFP_BALLOON \
20851 +@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
20852 + return NULL;
20853 + }
20854 +
20855 ++#ifdef CONFIG_SPARSEMEM
20856 ++ {
20857 ++ unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
20858 ++ unsigned long pfn = res->start >> PAGE_SHIFT;
20859 ++
20860 ++ if (pfn > limit) {
20861 ++ pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
20862 ++ pfn, limit);
20863 ++ release_memory_resource(res);
20864 ++ return NULL;
20865 ++ }
20866 ++ }
20867 ++#endif
20868 ++
20869 + return res;
20870 + }
20871 +
20872 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
20873 +index 524c22146429..44367783f07a 100644
20874 +--- a/drivers/xen/events/events_base.c
20875 ++++ b/drivers/xen/events/events_base.c
20876 +@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
20877 + struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
20878 + int rc = 0;
20879 +
20880 +- irq_move_irq(data);
20881 ++ if (!VALID_EVTCHN(evtchn))
20882 ++ return;
20883 +
20884 +- if (VALID_EVTCHN(evtchn))
20885 ++ if (unlikely(irqd_is_setaffinity_pending(data))) {
20886 ++ int masked = test_and_set_mask(evtchn);
20887 ++
20888 ++ clear_evtchn(evtchn);
20889 ++
20890 ++ irq_move_masked_irq(data);
20891 ++
20892 ++ if (!masked)
20893 ++ unmask_evtchn(evtchn);
20894 ++ } else
20895 + clear_evtchn(evtchn);
20896 +
20897 + if (pirq_needs_eoi(data->irq)) {
20898 +@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
20899 + {
20900 + int evtchn = evtchn_from_irq(data->irq);
20901 +
20902 +- irq_move_irq(data);
20903 ++ if (!VALID_EVTCHN(evtchn))
20904 ++ return;
20905 +
20906 +- if (VALID_EVTCHN(evtchn))
20907 ++ if (unlikely(irqd_is_setaffinity_pending(data))) {
20908 ++ int masked = test_and_set_mask(evtchn);
20909 ++
20910 ++ clear_evtchn(evtchn);
20911 ++
20912 ++ irq_move_masked_irq(data);
20913 ++
20914 ++ if (!masked)
20915 ++ unmask_evtchn(evtchn);
20916 ++ } else
20917 + clear_evtchn(evtchn);
20918 + }
20919 +
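Both eoi_pirq() and ack_dynirq() used to call irq_move_irq() and then clear the event channel, leaving a window where an event could fire mid-migration. The fix clears and migrates only while the channel is masked, then restores the previous mask state. The sequence, sketched with the same helpers the file already uses:

if (unlikely(irqd_is_setaffinity_pending(data))) {
        int masked = test_and_set_mask(evtchn); /* block new events */

        clear_evtchn(evtchn);           /* ack while nothing can fire */
        irq_move_masked_irq(data);      /* safe to rebind CPUs now */

        if (!masked)
                unmask_evtchn(evtchn);  /* only if we masked it */
} else {
        clear_evtchn(evtchn);
}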
20920 +diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
20921 +index 38272ad24551..f4edd6df3df2 100644
20922 +--- a/drivers/xen/evtchn.c
20923 ++++ b/drivers/xen/evtchn.c
20924 +@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
20925 + {
20926 + unsigned int new_size;
20927 + evtchn_port_t *new_ring, *old_ring;
20928 +- unsigned int p, c;
20929 +
20930 + /*
20931 + * Ensure the ring is large enough to capture all possible
20932 +@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
20933 + /*
20934 + * Copy the old ring contents to the new ring.
20935 + *
20936 +- * If the ring contents crosses the end of the current ring,
20937 +- * it needs to be copied in two chunks.
20938 ++ * To take care of wrapping, a full ring, and the new index
20939 ++ * pointing into the second half, simply copy the old contents
20940 ++ * twice.
20941 + *
20942 + * +---------+ +------------------+
20943 +- * |34567 12| -> | 1234567 |
20944 +- * +-----p-c-+ +------------------+
20945 ++ * |34567 12| -> |34567 1234567 12|
20946 ++ * +-----p-c-+ +-------c------p---+
20947 + */
20948 +- p = evtchn_ring_offset(u, u->ring_prod);
20949 +- c = evtchn_ring_offset(u, u->ring_cons);
20950 +- if (p < c) {
20951 +- memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
20952 +- memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
20953 +- } else
20954 +- memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
20955 ++ memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
20956 ++ memcpy(new_ring + u->ring_size, old_ring,
20957 ++ u->ring_size * sizeof(*u->ring));
20958 +
20959 + u->ring = new_ring;
20960 + u->ring_size = new_size;
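The evtchn resize relies on the ring being a power of two indexed by free-running counters masked with (size - 1): duplicating the old contents twice makes new[i] equal old[i mod old_size] for every i below the doubled size, so the existing cons/prod counters stay valid under the larger mask with no case analysis. A self-contained sketch (plain malloc() stands in for the kernel allocator):

#include <stdlib.h>
#include <string.h>

static unsigned int *ring_double(const unsigned int *old, size_t old_size)
{
        unsigned int *ring = malloc(2 * old_size * sizeof(*ring));

        if (!ring)
                return NULL;
        /* Two copies: new[i] == old[i % old_size] for all i, so any
         * counter masked with (2 * old_size - 1) finds its entry. */
        memcpy(ring, old, old_size * sizeof(*ring));
        memcpy(ring + old_size, old, old_size * sizeof(*ring));
        return ring;
}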
20961 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
20962 +index 769e0ff1b4ce..dea6486a7508 100644
20963 +--- a/fs/btrfs/ctree.c
20964 ++++ b/fs/btrfs/ctree.c
20965 +@@ -19,6 +19,7 @@
20966 + #include <linux/sched.h>
20967 + #include <linux/slab.h>
20968 + #include <linux/rbtree.h>
20969 ++#include <linux/vmalloc.h>
20970 + #include "ctree.h"
20971 + #include "disk-io.h"
20972 + #include "transaction.h"
20973 +@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
20974 + goto out;
20975 + }
20976 +
20977 +- tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
20978 ++ tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
20979 + if (!tmp_buf) {
20980 +- ret = -ENOMEM;
20981 +- goto out;
20982 ++ tmp_buf = vmalloc(left_root->nodesize);
20983 ++ if (!tmp_buf) {
20984 ++ ret = -ENOMEM;
20985 ++ goto out;
20986 ++ }
20987 + }
20988 +
20989 + left_path->search_commit_root = 1;
20990 +@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
20991 + out:
20992 + btrfs_free_path(left_path);
20993 + btrfs_free_path(right_path);
20994 +- kfree(tmp_buf);
20995 ++ kvfree(tmp_buf);
20996 + return ret;
20997 + }
20998 +
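btrfs_compare_trees() needs a buffer of one metadata node (nodesize, up to 64KiB), and a high-order kmalloc can fail on a fragmented machine even when memory is available, so the hunk tries kmalloc() quietly and falls back to vmalloc(), with kvfree() handling either origin at cleanup. The idiom as a kernel-context sketch:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_node_buf(size_t size)
{
        /* __GFP_NOWARN: a failure here is expected and handled. */
        void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!buf)
                buf = vmalloc(size);    /* contiguous only virtually */
        return buf;                     /* release with kvfree() */
}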
20999 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
21000 +index bfe4a337fb4d..6661ad8b4088 100644
21001 +--- a/fs/btrfs/ctree.h
21002 ++++ b/fs/btrfs/ctree.h
21003 +@@ -2252,7 +2252,7 @@ struct btrfs_ioctl_defrag_range_args {
21004 + #define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26)
21005 +
21006 + #define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
21007 +-#define BTRFS_DEFAULT_MAX_INLINE (8192)
21008 ++#define BTRFS_DEFAULT_MAX_INLINE (2048)
21009 +
21010 + #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
21011 + #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
21012 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
21013 +index cbb7dbfb3fff..218f51a5dbab 100644
21014 +--- a/fs/btrfs/dev-replace.c
21015 ++++ b/fs/btrfs/dev-replace.c
21016 +@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
21017 + dev_replace->cursor_right = 0;
21018 + dev_replace->is_valid = 1;
21019 + dev_replace->item_needs_writeback = 1;
21020 ++ atomic64_set(&dev_replace->num_write_errors, 0);
21021 ++ atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
21022 + args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
21023 + btrfs_dev_replace_unlock(dev_replace);
21024 +
21025 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
21026 +index 4545e2e2ad45..ae6e3e36fdf0 100644
21027 +--- a/fs/btrfs/disk-io.c
21028 ++++ b/fs/btrfs/disk-io.c
21029 +@@ -303,7 +303,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
21030 + err = map_private_extent_buffer(buf, offset, 32,
21031 + &kaddr, &map_start, &map_len);
21032 + if (err)
21033 +- return 1;
21034 ++ return err;
21035 + cur_len = min(len, map_len - (offset - map_start));
21036 + crc = btrfs_csum_data(kaddr + offset - map_start,
21037 + crc, cur_len);
21038 +@@ -313,7 +313,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
21039 + if (csum_size > sizeof(inline_result)) {
21040 + result = kzalloc(csum_size, GFP_NOFS);
21041 + if (!result)
21042 +- return 1;
21043 ++ return -ENOMEM;
21044 + } else {
21045 + result = (char *)&inline_result;
21046 + }
21047 +@@ -334,7 +334,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
21048 + val, found, btrfs_header_level(buf));
21049 + if (result != (char *)&inline_result)
21050 + kfree(result);
21051 +- return 1;
21052 ++ return -EUCLEAN;
21053 + }
21054 + } else {
21055 + write_extent_buffer(buf, result, 0, csum_size);
21056 +@@ -513,11 +513,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
21057 + eb = (struct extent_buffer *)page->private;
21058 + if (page != eb->pages[0])
21059 + return 0;
21060 ++
21061 + found_start = btrfs_header_bytenr(eb);
21062 +- if (WARN_ON(found_start != start || !PageUptodate(page)))
21063 +- return 0;
21064 +- csum_tree_block(fs_info, eb, 0);
21065 +- return 0;
21066 ++ /*
21067 ++ * Please do not consolidate these warnings into a single if.
21068 ++ * It is useful to know what went wrong.
21069 ++ */
21070 ++ if (WARN_ON(found_start != start))
21071 ++ return -EUCLEAN;
21072 ++ if (WARN_ON(!PageUptodate(page)))
21073 ++ return -EUCLEAN;
21074 ++
21075 ++ ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
21076 ++ btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
21077 ++
21078 ++ return csum_tree_block(fs_info, eb, 0);
21079 + }
21080 +
21081 + static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
21082 +@@ -660,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
21083 + eb, found_level);
21084 +
21085 + ret = csum_tree_block(root->fs_info, eb, 1);
21086 +- if (ret) {
21087 +- ret = -EIO;
21088 ++ if (ret)
21089 + goto err;
21090 +- }
21091 +
21092 + /*
21093 + * If this is a leaf block and it is corrupt, set the corrupt bit so
21094 +@@ -1830,7 +1838,7 @@ static int cleaner_kthread(void *arg)
21095 + */
21096 + btrfs_delete_unused_bgs(root->fs_info);
21097 + sleep:
21098 +- if (!try_to_freeze() && !again) {
21099 ++ if (!again) {
21100 + set_current_state(TASK_INTERRUPTIBLE);
21101 + if (!kthread_should_stop())
21102 + schedule();
21103 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
21104 +index 098bb8f690c9..5d956b869e03 100644
21105 +--- a/fs/btrfs/file.c
21106 ++++ b/fs/btrfs/file.c
21107 +@@ -1883,7 +1883,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
21108 + */
21109 + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
21110 + {
21111 +- struct dentry *dentry = file->f_path.dentry;
21112 ++ struct dentry *dentry = file_dentry(file);
21113 + struct inode *inode = d_inode(dentry);
21114 + struct btrfs_root *root = BTRFS_I(inode)->root;
21115 + struct btrfs_trans_handle *trans;
21116 +@@ -1996,10 +1996,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
21117 + */
21118 + smp_mb();
21119 + if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
21120 +- (BTRFS_I(inode)->last_trans <=
21121 +- root->fs_info->last_trans_committed &&
21122 +- (full_sync ||
21123 +- !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
21124 ++ (full_sync && BTRFS_I(inode)->last_trans <=
21125 ++ root->fs_info->last_trans_committed) ||
21126 ++ (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
21127 ++ BTRFS_I(inode)->last_trans
21128 ++ <= root->fs_info->last_trans_committed)) {
21129 + /*
21130 + * We'v had everything committed since the last time we were
21131 + * We've had everything committed since the last time we were
21132 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
21133 +index d96f5cf38a2d..f407e487c687 100644
21134 +--- a/fs/btrfs/inode.c
21135 ++++ b/fs/btrfs/inode.c
21136 +@@ -4211,11 +4211,20 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
21137 + {
21138 + int ret;
21139 +
21140 ++ /*
21141 ++ * This is only used to apply pressure to the enospc system, we don't
21142 ++ * intend to use this reservation at all.
21143 ++ */
21144 + bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
21145 ++ bytes_deleted *= root->nodesize;
21146 + ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
21147 + bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
21148 +- if (!ret)
21149 ++ if (!ret) {
21150 ++ trace_btrfs_space_reservation(root->fs_info, "transaction",
21151 ++ trans->transid,
21152 ++ bytes_deleted, 1);
21153 + trans->bytes_reserved += bytes_deleted;
21154 ++ }
21155 + return ret;
21156 +
21157 + }
21158 +@@ -7414,7 +7423,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
21159 + cached_state, GFP_NOFS);
21160 +
21161 + if (ordered) {
21162 +- btrfs_start_ordered_extent(inode, ordered, 1);
21163 ++ /*
21164 ++ * If we are doing a DIO read and the ordered extent we
21165 ++ * found is for a buffered write, we can not wait for it
21166 ++ * to complete and retry, because if we do so we can
21167 ++ * deadlock with concurrent buffered writes on page
21168 ++ * locks. This happens only if our DIO read covers more
21169 ++ * than one extent map, if at this point has already
21170 ++ * created an ordered extent for a previous extent map
21171 ++ * and locked its range in the inode's io tree, and a
21172 ++ * concurrent write against that previous extent map's
21173 ++ * range and this range started (we unlock the ranges
21174 ++ * in the io tree only when the bios complete and
21175 ++ * buffered writes always lock pages before attempting
21176 ++ * to lock range in the io tree).
21177 ++ */
21178 ++ if (writing ||
21179 ++ test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
21180 ++ btrfs_start_ordered_extent(inode, ordered, 1);
21181 ++ else
21182 ++ ret = -ENOTBLK;
21183 + btrfs_put_ordered_extent(ordered);
21184 + } else {
21185 + /*
21186 +@@ -7431,9 +7459,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
21187 + * that page.
21188 + */
21189 + ret = -ENOTBLK;
21190 +- break;
21191 + }
21192 +
21193 ++ if (ret)
21194 ++ break;
21195 ++
21196 + cond_resched();
21197 + }
21198 +
21199 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
21200 +index 48aee9846329..e3791f268489 100644
21201 +--- a/fs/btrfs/ioctl.c
21202 ++++ b/fs/btrfs/ioctl.c
21203 +@@ -59,6 +59,7 @@
21204 + #include "props.h"
21205 + #include "sysfs.h"
21206 + #include "qgroup.h"
21207 ++#include "tree-log.h"
21208 +
21209 + #ifdef CONFIG_64BIT
21210 + /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
21211 +@@ -1656,7 +1657,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
21212 +
21213 + src_inode = file_inode(src.file);
21214 + if (src_inode->i_sb != file_inode(file)->i_sb) {
21215 +- btrfs_info(BTRFS_I(src_inode)->root->fs_info,
21216 ++ btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
21217 + "Snapshot src from another FS");
21218 + ret = -EXDEV;
21219 + } else if (!inode_owner_or_capable(src_inode)) {
21220 +@@ -2097,8 +2098,6 @@ static noinline int search_ioctl(struct inode *inode,
21221 + key.offset = (u64)-1;
21222 + root = btrfs_read_fs_root_no_name(info, &key);
21223 + if (IS_ERR(root)) {
21224 +- btrfs_err(info, "could not find root %llu",
21225 +- sk->tree_id);
21226 + btrfs_free_path(path);
21227 + return -ENOENT;
21228 + }
21229 +@@ -2476,6 +2475,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
21230 + trans->block_rsv = &block_rsv;
21231 + trans->bytes_reserved = block_rsv.size;
21232 +
21233 ++ btrfs_record_snapshot_destroy(trans, dir);
21234 ++
21235 + ret = btrfs_unlink_subvol(trans, root, dir,
21236 + dest->root_key.objectid,
21237 + dentry->d_name.name,
21238 +@@ -3068,6 +3069,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
21239 + ret = extent_same_check_offsets(src, loff, &len, olen);
21240 + if (ret)
21241 + goto out_unlock;
21242 ++ ret = extent_same_check_offsets(src, dst_loff, &len, olen);
21243 ++ if (ret)
21244 ++ goto out_unlock;
21245 +
21246 + /*
21247 + * Single inode case wants the same checks, except we
21248 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
21249 +index 5279fdae7142..7173360eea7a 100644
21250 +--- a/fs/btrfs/qgroup.c
21251 ++++ b/fs/btrfs/qgroup.c
21252 +@@ -1842,8 +1842,10 @@ out:
21253 + }
21254 +
21255 + /*
21256 +- * copy the acounting information between qgroups. This is necessary when a
21257 +- * snapshot or a subvolume is created
21258 ++ * Copy the accounting information between qgroups. This is necessary
21259 ++ * when a snapshot or a subvolume is created. Throwing an error will
21260 ++ * cause a transaction abort so we take extra care here to only error
21261 ++ * when a readonly fs is a reasonable outcome.
21262 + */
21263 + int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
21264 + struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
21265 +@@ -1873,15 +1875,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
21266 + 2 * inherit->num_excl_copies;
21267 + for (i = 0; i < nums; ++i) {
21268 + srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
21269 +- if (!srcgroup) {
21270 +- ret = -EINVAL;
21271 +- goto out;
21272 +- }
21273 +
21274 +- if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
21275 +- ret = -EINVAL;
21276 +- goto out;
21277 +- }
21278 ++ /*
21279 ++ * Zero out invalid groups so we can ignore
21280 ++ * them later.
21281 ++ */
21282 ++ if (!srcgroup ||
21283 ++ ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
21284 ++ *i_qgroups = 0ULL;
21285 ++
21286 + ++i_qgroups;
21287 + }
21288 + }
21289 +@@ -1916,17 +1918,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
21290 + */
21291 + if (inherit) {
21292 + i_qgroups = (u64 *)(inherit + 1);
21293 +- for (i = 0; i < inherit->num_qgroups; ++i) {
21294 ++ for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
21295 ++ if (*i_qgroups == 0)
21296 ++ continue;
21297 + ret = add_qgroup_relation_item(trans, quota_root,
21298 + objectid, *i_qgroups);
21299 +- if (ret)
21300 ++ if (ret && ret != -EEXIST)
21301 + goto out;
21302 + ret = add_qgroup_relation_item(trans, quota_root,
21303 + *i_qgroups, objectid);
21304 +- if (ret)
21305 ++ if (ret && ret != -EEXIST)
21306 + goto out;
21307 +- ++i_qgroups;
21308 + }
21309 ++ ret = 0;
21310 + }
21311 +
21312 +
21313 +@@ -1987,17 +1991,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
21314 +
21315 + i_qgroups = (u64 *)(inherit + 1);
21316 + for (i = 0; i < inherit->num_qgroups; ++i) {
21317 +- ret = add_relation_rb(quota_root->fs_info, objectid,
21318 +- *i_qgroups);
21319 +- if (ret)
21320 +- goto unlock;
21321 ++ if (*i_qgroups) {
21322 ++ ret = add_relation_rb(quota_root->fs_info, objectid,
21323 ++ *i_qgroups);
21324 ++ if (ret)
21325 ++ goto unlock;
21326 ++ }
21327 + ++i_qgroups;
21328 + }
21329 +
21330 +- for (i = 0; i < inherit->num_ref_copies; ++i) {
21331 ++ for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
21332 + struct btrfs_qgroup *src;
21333 + struct btrfs_qgroup *dst;
21334 +
21335 ++ if (!i_qgroups[0] || !i_qgroups[1])
21336 ++ continue;
21337 ++
21338 + src = find_qgroup_rb(fs_info, i_qgroups[0]);
21339 + dst = find_qgroup_rb(fs_info, i_qgroups[1]);
21340 +
21341 +@@ -2008,12 +2017,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
21342 +
21343 + dst->rfer = src->rfer - level_size;
21344 + dst->rfer_cmpr = src->rfer_cmpr - level_size;
21345 +- i_qgroups += 2;
21346 + }
21347 +- for (i = 0; i < inherit->num_excl_copies; ++i) {
21348 ++ for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
21349 + struct btrfs_qgroup *src;
21350 + struct btrfs_qgroup *dst;
21351 +
21352 ++ if (!i_qgroups[0] || !i_qgroups[1])
21353 ++ continue;
21354 ++
21355 + src = find_qgroup_rb(fs_info, i_qgroups[0]);
21356 + dst = find_qgroup_rb(fs_info, i_qgroups[1]);
21357 +
21358 +@@ -2024,7 +2035,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
21359 +
21360 + dst->excl = src->excl + level_size;
21361 + dst->excl_cmpr = src->excl_cmpr + level_size;
21362 +- i_qgroups += 2;
21363 + }
21364 +
21365 + unlock:
21366 +diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
21367 +index 619f92963e27..49b3fb73ffbf 100644
21368 +--- a/fs/btrfs/reada.c
21369 ++++ b/fs/btrfs/reada.c
21370 +@@ -265,7 +265,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
21371 + spin_unlock(&fs_info->reada_lock);
21372 +
21373 + if (ret == 1) {
21374 +- if (logical >= zone->start && logical < zone->end)
21375 ++ if (logical >= zone->start && logical <= zone->end)
21376 + return zone;
21377 + spin_lock(&fs_info->reada_lock);
21378 + kref_put(&zone->refcnt, reada_zone_release);
21379 +@@ -679,7 +679,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
21380 + */
21381 + ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
21382 + dev->reada_next >> PAGE_CACHE_SHIFT, 1);
21383 +- if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
21384 ++ if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
21385 + ret = reada_pick_zone(dev);
21386 + if (!ret) {
21387 + spin_unlock(&fs_info->reada_lock);
21388 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
21389 +index 2bd0011450df..5c806f0d443d 100644
21390 +--- a/fs/btrfs/relocation.c
21391 ++++ b/fs/btrfs/relocation.c
21392 +@@ -1850,6 +1850,7 @@ again:
21393 + eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
21394 + if (IS_ERR(eb)) {
21395 + ret = PTR_ERR(eb);
21396 ++ break;
21397 + } else if (!extent_buffer_uptodate(eb)) {
21398 + ret = -EIO;
21399 + free_extent_buffer(eb);
21400 +diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
21401 +index 0e1e61a7ec23..d39f714dabeb 100644
21402 +--- a/fs/btrfs/tests/btrfs-tests.c
21403 ++++ b/fs/btrfs/tests/btrfs-tests.c
21404 +@@ -189,12 +189,6 @@ btrfs_alloc_dummy_block_group(unsigned long length)
21405 + kfree(cache);
21406 + return NULL;
21407 + }
21408 +- cache->fs_info = btrfs_alloc_dummy_fs_info();
21409 +- if (!cache->fs_info) {
21410 +- kfree(cache->free_space_ctl);
21411 +- kfree(cache);
21412 +- return NULL;
21413 +- }
21414 +
21415 + cache->key.objectid = 0;
21416 + cache->key.offset = length;
21417 +diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
21418 +index d05fe1ab4808..7cea4462acd5 100644
21419 +--- a/fs/btrfs/tests/free-space-tree-tests.c
21420 ++++ b/fs/btrfs/tests/free-space-tree-tests.c
21421 +@@ -485,6 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps)
21422 + cache->bitmap_low_thresh = 0;
21423 + cache->bitmap_high_thresh = (u32)-1;
21424 + cache->needs_free_space = 1;
21425 ++ cache->fs_info = root->fs_info;
21426 +
21427 + btrfs_init_dummy_trans(&trans);
21428 +
21429 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
21430 +index 978c3a810893..58ae0a2ce65c 100644
21431 +--- a/fs/btrfs/tree-log.c
21432 ++++ b/fs/btrfs/tree-log.c
21433 +@@ -4414,6 +4414,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
21434 + return ret;
21435 + }
21436 +
21437 ++/*
21438 ++ * When we are logging a new inode X, check if it doesn't have a reference that
21439 ++ * matches the reference from some other inode Y created in a past transaction
21440 ++ * and that was renamed in the current transaction. If we don't do this, then at
21441 ++ * log replay time we can lose inode Y (and all its files if it's a directory):
21442 ++ *
21443 ++ * mkdir /mnt/x
21444 ++ * echo "hello world" > /mnt/x/foobar
21445 ++ * sync
21446 ++ * mv /mnt/x /mnt/y
21447 ++ * mkdir /mnt/x # or touch /mnt/x
21448 ++ * xfs_io -c fsync /mnt/x
21449 ++ * <power fail>
21450 ++ * mount fs, trigger log replay
21451 ++ *
21452 ++ * After the log replay procedure, we would lose the first directory and all its
21453 ++ * files (file foobar).
21454 ++ * For the case where inode Y is not a directory we simply end up losing it:
21455 ++ *
21456 ++ * echo "123" > /mnt/foo
21457 ++ * sync
21458 ++ * mv /mnt/foo /mnt/bar
21459 ++ * echo "abc" > /mnt/foo
21460 ++ * xfs_io -c fsync /mnt/foo
21461 ++ * <power fail>
21462 ++ *
21463 ++ * We also need this for cases where a snapshot entry is replaced by some other
21464 ++ * entry (file or directory) otherwise we end up with an unreplayable log due to
21465 ++ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
21466 ++ * if it were a regular entry:
21467 ++ *
21468 ++ * mkdir /mnt/x
21469 ++ * btrfs subvolume snapshot /mnt /mnt/x/snap
21470 ++ * btrfs subvolume delete /mnt/x/snap
21471 ++ * rmdir /mnt/x
21472 ++ * mkdir /mnt/x
21473 ++ * fsync /mnt/x or fsync some new file inside it
21474 ++ * <power fail>
21475 ++ *
21476 ++ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
21477 ++ * the same transaction.
21478 ++ */
21479 ++static int btrfs_check_ref_name_override(struct extent_buffer *eb,
21480 ++ const int slot,
21481 ++ const struct btrfs_key *key,
21482 ++ struct inode *inode)
21483 ++{
21484 ++ int ret;
21485 ++ struct btrfs_path *search_path;
21486 ++ char *name = NULL;
21487 ++ u32 name_len = 0;
21488 ++ u32 item_size = btrfs_item_size_nr(eb, slot);
21489 ++ u32 cur_offset = 0;
21490 ++ unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
21491 ++
21492 ++ search_path = btrfs_alloc_path();
21493 ++ if (!search_path)
21494 ++ return -ENOMEM;
21495 ++ search_path->search_commit_root = 1;
21496 ++ search_path->skip_locking = 1;
21497 ++
21498 ++ while (cur_offset < item_size) {
21499 ++ u64 parent;
21500 ++ u32 this_name_len;
21501 ++ u32 this_len;
21502 ++ unsigned long name_ptr;
21503 ++ struct btrfs_dir_item *di;
21504 ++
21505 ++ if (key->type == BTRFS_INODE_REF_KEY) {
21506 ++ struct btrfs_inode_ref *iref;
21507 ++
21508 ++ iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
21509 ++ parent = key->offset;
21510 ++ this_name_len = btrfs_inode_ref_name_len(eb, iref);
21511 ++ name_ptr = (unsigned long)(iref + 1);
21512 ++ this_len = sizeof(*iref) + this_name_len;
21513 ++ } else {
21514 ++ struct btrfs_inode_extref *extref;
21515 ++
21516 ++ extref = (struct btrfs_inode_extref *)(ptr +
21517 ++ cur_offset);
21518 ++ parent = btrfs_inode_extref_parent(eb, extref);
21519 ++ this_name_len = btrfs_inode_extref_name_len(eb, extref);
21520 ++ name_ptr = (unsigned long)&extref->name;
21521 ++ this_len = sizeof(*extref) + this_name_len;
21522 ++ }
21523 ++
21524 ++ if (this_name_len > name_len) {
21525 ++ char *new_name;
21526 ++
21527 ++ new_name = krealloc(name, this_name_len, GFP_NOFS);
21528 ++ if (!new_name) {
21529 ++ ret = -ENOMEM;
21530 ++ goto out;
21531 ++ }
21532 ++ name_len = this_name_len;
21533 ++ name = new_name;
21534 ++ }
21535 ++
21536 ++ read_extent_buffer(eb, name, name_ptr, this_name_len);
21537 ++ di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
21538 ++ search_path, parent,
21539 ++ name, this_name_len, 0);
21540 ++ if (di && !IS_ERR(di)) {
21541 ++ ret = 1;
21542 ++ goto out;
21543 ++ } else if (IS_ERR(di)) {
21544 ++ ret = PTR_ERR(di);
21545 ++ goto out;
21546 ++ }
21547 ++ btrfs_release_path(search_path);
21548 ++
21549 ++ cur_offset += this_len;
21550 ++ }
21551 ++ ret = 0;
21552 ++out:
21553 ++ btrfs_free_path(search_path);
21554 ++ kfree(name);
21555 ++ return ret;
21556 ++}
21557 ++
21558 + /* log a single inode in the tree log.
21559 + * At least one parent directory for this inode must exist in the tree
21560 + * or be logged already.
21561 +@@ -4500,7 +4621,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
21562 +
21563 + mutex_lock(&BTRFS_I(inode)->log_mutex);
21564 +
21565 +- btrfs_get_logged_extents(inode, &logged_list, start, end);
21566 ++ /*
21567 ++ * Collect ordered extents only if we are logging data. This is to
21568 ++ * ensure a subsequent request to log this inode in LOG_INODE_ALL mode
21569 ++ * will process the ordered extents if they still exist at the time,
21570 ++ * because when we collect them we test and set the flag
21571 ++ * BTRFS_ORDERED_LOGGED to prevent multiple log requests from processing
21572 ++ * the same ordered extents. The consequence of the LOG_INODE_ALL mode
21573 ++ * not processing the ordered extents is that we end up logging the
21574 ++ * corresponding file extent items, based on the extent maps in the
21575 ++ * inode's extent_map_tree's modified_list, without logging the
21576 ++ * respective checksums (since they may still be attached only to the
21577 ++ * ordered extents and have not been inserted in the csum tree by
21578 ++ * btrfs_finish_ordered_io() yet).
21579 ++ */
21580 ++ if (inode_only == LOG_INODE_ALL)
21581 ++ btrfs_get_logged_extents(inode, &logged_list, start, end);
21582 +
21583 + /*
21584 + * a brute force approach to making sure we get the most uptodate
21585 +@@ -4586,6 +4722,22 @@ again:
21586 + if (min_key.type == BTRFS_INODE_ITEM_KEY)
21587 + need_log_inode_item = false;
21588 +
21589 ++ if ((min_key.type == BTRFS_INODE_REF_KEY ||
21590 ++ min_key.type == BTRFS_INODE_EXTREF_KEY) &&
21591 ++ BTRFS_I(inode)->generation == trans->transid) {
21592 ++ ret = btrfs_check_ref_name_override(path->nodes[0],
21593 ++ path->slots[0],
21594 ++ &min_key, inode);
21595 ++ if (ret < 0) {
21596 ++ err = ret;
21597 ++ goto out_unlock;
21598 ++ } else if (ret > 0) {
21599 ++ err = 1;
21600 ++ btrfs_set_log_full_commit(root->fs_info, trans);
21601 ++ goto out_unlock;
21602 ++ }
21603 ++ }
21604 ++
21605 + /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
21606 + if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
21607 + if (ins_nr == 0)
21608 +@@ -4772,6 +4924,42 @@ out_unlock:
21609 + }
21610 +
21611 + /*
21612 ++ * Check if we must fall back to a transaction commit when logging an inode.
21613 ++ * This must be called after logging the inode and is used only when fsyncing
21614 ++ * an inode requires logging some other inode - in which case we can't lock
21615 ++ * the i_mutex of each of those other inodes, as that
21616 ++ * can lead to deadlocks with concurrent fsync against other inodes (as we can
21617 ++ * log inodes up or down in the hierarchy) or rename operations for example. So
21618 ++ * we take the log_mutex of the inode after we have logged it and then check for
21619 ++ * its last_unlink_trans value - this is safe because any task setting
21620 ++ * last_unlink_trans must take the log_mutex and it must do this before it does
21621 ++ * the actual unlink operation, so if we do this check before a concurrent task
21622 ++ * sets last_unlink_trans it means we've logged a consistent version/state of
21623 ++ * all the inode items, otherwise we are not sure and must do a transaction
21624 ++ * commit (the concurrent task might have only updated last_unlink_trans before
21625 ++ * we logged the inode or it might have also done the unlink).
21626 ++ */
21627 ++static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
21628 ++ struct inode *inode)
21629 ++{
21630 ++ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
21631 ++ bool ret = false;
21632 ++
21633 ++ mutex_lock(&BTRFS_I(inode)->log_mutex);
21634 ++ if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
21635 ++ /*
21636 ++ * Make sure any commits to the log are forced to be full
21637 ++ * commits.
21638 ++ */
21639 ++ btrfs_set_log_full_commit(fs_info, trans);
21640 ++ ret = true;
21641 ++ }
21642 ++ mutex_unlock(&BTRFS_I(inode)->log_mutex);
21643 ++
21644 ++ return ret;
21645 ++}
21646 ++
21647 ++/*
21648 + * follow the dentry parent pointers up the chain and see if any
21649 + * of the directories in it require a full commit before they can
21650 + * be logged. Returns zero if nothing special needs to be done or 1 if
21651 +@@ -4784,7 +4972,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
21652 + u64 last_committed)
21653 + {
21654 + int ret = 0;
21655 +- struct btrfs_root *root;
21656 + struct dentry *old_parent = NULL;
21657 + struct inode *orig_inode = inode;
21658 +
21659 +@@ -4816,14 +5003,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
21660 + BTRFS_I(inode)->logged_trans = trans->transid;
21661 + smp_mb();
21662 +
21663 +- if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
21664 +- root = BTRFS_I(inode)->root;
21665 +-
21666 +- /*
21667 +- * make sure any commits to the log are forced
21668 +- * to be full commits
21669 +- */
21670 +- btrfs_set_log_full_commit(root->fs_info, trans);
21671 ++ if (btrfs_must_commit_transaction(trans, inode)) {
21672 + ret = 1;
21673 + break;
21674 + }
21675 +@@ -4982,6 +5162,9 @@ process_leaf:
21676 + btrfs_release_path(path);
21677 + ret = btrfs_log_inode(trans, root, di_inode,
21678 + log_mode, 0, LLONG_MAX, ctx);
21679 ++ if (!ret &&
21680 ++ btrfs_must_commit_transaction(trans, di_inode))
21681 ++ ret = 1;
21682 + iput(di_inode);
21683 + if (ret)
21684 + goto next_dir_inode;
21685 +@@ -5096,6 +5279,9 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
21686 +
21687 + ret = btrfs_log_inode(trans, root, dir_inode,
21688 + LOG_INODE_ALL, 0, LLONG_MAX, ctx);
21689 ++ if (!ret &&
21690 ++ btrfs_must_commit_transaction(trans, dir_inode))
21691 ++ ret = 1;
21692 + iput(dir_inode);
21693 + if (ret)
21694 + goto out;
21695 +@@ -5447,6 +5633,9 @@ error:
21696 + * They revolve around files that were unlinked from the directory, and
21697 + * this function updates the parent directory so that a full commit is
21698 + * properly done if it is fsync'd later after the unlinks are done.
21699 ++ *
21700 ++ * Must be called before the unlink operations (updates to the subvolume tree,
21701 ++ * inodes, etc) are done.
21702 + */
21703 + void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
21704 + struct inode *dir, struct inode *inode,
21705 +@@ -5462,8 +5651,11 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
21706 + * into the file. When the file is logged we check it and
21707 + * don't log the parents if the file is fully on disk.
21708 + */
21709 +- if (S_ISREG(inode->i_mode))
21710 ++ if (S_ISREG(inode->i_mode)) {
21711 ++ mutex_lock(&BTRFS_I(inode)->log_mutex);
21712 + BTRFS_I(inode)->last_unlink_trans = trans->transid;
21713 ++ mutex_unlock(&BTRFS_I(inode)->log_mutex);
21714 ++ }
21715 +
21716 + /*
21717 + * if this directory was already logged any new
21718 +@@ -5494,7 +5686,29 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
21719 + return;
21720 +
21721 + record:
21722 ++ mutex_lock(&BTRFS_I(dir)->log_mutex);
21723 + BTRFS_I(dir)->last_unlink_trans = trans->transid;
21724 ++ mutex_unlock(&BTRFS_I(dir)->log_mutex);
21725 ++}
21726 ++
21727 ++/*
21728 ++ * Make sure that if someone attempts to fsync the parent directory of a deleted
21729 ++ * snapshot, it ends up triggering a transaction commit. This is to guarantee
21730 ++ * that after replaying the log tree of the parent directory's root we will not
21731 ++ * see the snapshot anymore and at log replay time we will not see any log tree
21732 ++ * corresponding to the deleted snapshot's root, which could lead to replaying
21733 ++ * it after replaying the log tree of the parent directory (which would replay
21734 ++ * the snapshot delete operation).
21735 ++ *
21736 ++ * Must be called before the actual snapshot destroy operations (updates to the
21737 ++ * parent root and the tree of tree roots, etc) are done.
21738 ++ */
21739 ++void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
21740 ++ struct inode *dir)
21741 ++{
21742 ++ mutex_lock(&BTRFS_I(dir)->log_mutex);
21743 ++ BTRFS_I(dir)->last_unlink_trans = trans->transid;
21744 ++ mutex_unlock(&BTRFS_I(dir)->log_mutex);
21745 + }
21746 +
21747 + /*
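
The last_unlink_trans handling in the tree-log.c hunks above follows a
publish-then-check ordering: btrfs_record_unlink_dir() and
btrfs_record_snapshot_destroy() set last_unlink_trans under log_mutex before
the unlink/destroy work happens, and btrfs_must_commit_transaction() re-reads
it under the same mutex after logging. A minimal userspace analogy of that
pattern (names invented for the demo; this is not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long last_unlink_trans;      /* published before any unlink */
static unsigned long last_trans_committed;

/* The unlink side: publish the transaction id under the mutex first,
 * then go do the actual unlink work. */
static void record_unlink(unsigned long transid)
{
        pthread_mutex_lock(&log_mutex);
        last_unlink_trans = transid;
        pthread_mutex_unlock(&log_mutex);
        /* ... the real unlink happens after this point ... */
}

/* The logging side: after logging, re-check under the same mutex. If the
 * check runs before a concurrent record_unlink(), what was logged is a
 * consistent state; otherwise fall back to a full commit. */
static bool must_commit_transaction(void)
{
        bool ret;

        pthread_mutex_lock(&log_mutex);
        ret = last_unlink_trans > last_trans_committed;
        pthread_mutex_unlock(&log_mutex);
        return ret;
}

int main(void)
{
        last_trans_committed = 41;
        record_unlink(42);
        printf("fall back to full commit: %s\n",
               must_commit_transaction() ? "yes" : "no");
        return 0;
}

Because both sides serialize on the same mutex, the checker can observe a
stale last_unlink_trans only if the unlink itself has not happened yet,
which is exactly the window the comment above describes as safe.
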
21748 +diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
21749 +index 6916a781ea02..a9f1b75d080d 100644
21750 +--- a/fs/btrfs/tree-log.h
21751 ++++ b/fs/btrfs/tree-log.h
21752 +@@ -79,6 +79,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root);
21753 + void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
21754 + struct inode *dir, struct inode *inode,
21755 + int for_rename);
21756 ++void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
21757 ++ struct inode *dir);
21758 + int btrfs_log_new_name(struct btrfs_trans_handle *trans,
21759 + struct inode *inode, struct inode *old_dir,
21760 + struct dentry *parent);
21761 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
21762 +index 59727e32ed0f..af0ec2d5ad0e 100644
21763 +--- a/fs/cifs/sess.c
21764 ++++ b/fs/cifs/sess.c
21765 +@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
21766 + sec_blob->LmChallengeResponse.MaximumLength = 0;
21767 +
21768 + sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
21769 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
21770 +- if (rc) {
21771 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
21772 +- goto setup_ntlmv2_ret;
21773 ++ if (ses->user_name != NULL) {
21774 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
21775 ++ if (rc) {
21776 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
21777 ++ goto setup_ntlmv2_ret;
21778 ++ }
21779 ++ memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21780 ++ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21781 ++ tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
21782 ++
21783 ++ sec_blob->NtChallengeResponse.Length =
21784 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21785 ++ sec_blob->NtChallengeResponse.MaximumLength =
21786 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21787 ++ } else {
21788 ++ /*
21789 ++ * don't send an NT Response for anonymous access
21790 ++ */
21791 ++ sec_blob->NtChallengeResponse.Length = 0;
21792 ++ sec_blob->NtChallengeResponse.MaximumLength = 0;
21793 + }
21794 +- memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21795 +- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21796 +- tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
21797 +-
21798 +- sec_blob->NtChallengeResponse.Length =
21799 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21800 +- sec_blob->NtChallengeResponse.MaximumLength =
21801 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21802 +
21803 + if (ses->domainName == NULL) {
21804 + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
21805 +@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
21806 +
21807 + pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
21808 +
21809 +- /* no capabilities flags in old lanman negotiation */
21810 +- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
21811 +-
21812 +- /* Calculate hash with password and copy into bcc_ptr.
21813 +- * Encryption Key (stored as in cryptkey) gets used if the
21814 +- * security mode bit in Negottiate Protocol response states
21815 +- * to use challenge/response method (i.e. Password bit is 1).
21816 +- */
21817 +- rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
21818 +- ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
21819 +- true : false, lnm_session_key);
21820 +-
21821 +- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
21822 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
21823 ++ if (ses->user_name != NULL) {
21824 ++ /* no capabilities flags in old lanman negotiation */
21825 ++ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
21826 ++
21827 ++ /* Calculate hash with password and copy into bcc_ptr.
21828 ++ * Encryption Key (stored in cryptkey) gets used if the
21829 ++ * security mode bit in the Negotiate Protocol response states
21830 ++ * to use challenge/response method (i.e. Password bit is 1).
21831 ++ */
21832 ++ rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
21833 ++ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
21834 ++ true : false, lnm_session_key);
21835 ++
21836 ++ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
21837 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
21838 ++ } else {
21839 ++ pSMB->old_req.PasswordLength = 0;
21840 ++ }
21841 +
21842 + /*
21843 + * can not sign if LANMAN negotiated so no need
21844 +@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
21845 + capabilities = cifs_ssetup_hdr(ses, pSMB);
21846 +
21847 + pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
21848 +- pSMB->req_no_secext.CaseInsensitivePasswordLength =
21849 +- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
21850 +- pSMB->req_no_secext.CaseSensitivePasswordLength =
21851 +- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
21852 +-
21853 +- /* calculate ntlm response and session key */
21854 +- rc = setup_ntlm_response(ses, sess_data->nls_cp);
21855 +- if (rc) {
21856 +- cifs_dbg(VFS, "Error %d during NTLM authentication\n",
21857 +- rc);
21858 +- goto out;
21859 +- }
21860 ++ if (ses->user_name != NULL) {
21861 ++ pSMB->req_no_secext.CaseInsensitivePasswordLength =
21862 ++ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
21863 ++ pSMB->req_no_secext.CaseSensitivePasswordLength =
21864 ++ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
21865 ++
21866 ++ /* calculate ntlm response and session key */
21867 ++ rc = setup_ntlm_response(ses, sess_data->nls_cp);
21868 ++ if (rc) {
21869 ++ cifs_dbg(VFS, "Error %d during NTLM authentication\n",
21870 ++ rc);
21871 ++ goto out;
21872 ++ }
21873 +
21874 +- /* copy ntlm response */
21875 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21876 +- CIFS_AUTH_RESP_SIZE);
21877 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
21878 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21879 +- CIFS_AUTH_RESP_SIZE);
21880 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
21881 ++ /* copy ntlm response */
21882 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21883 ++ CIFS_AUTH_RESP_SIZE);
21884 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
21885 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21886 ++ CIFS_AUTH_RESP_SIZE);
21887 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
21888 ++ } else {
21889 ++ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
21890 ++ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
21891 ++ }
21892 +
21893 + if (ses->capabilities & CAP_UNICODE) {
21894 + /* unicode strings must be word aligned */
21895 +@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
21896 + /* LM2 password would be here if we supported it */
21897 + pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
21898 +
21899 +- /* calculate nlmv2 response and session key */
21900 +- rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
21901 +- if (rc) {
21902 +- cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
21903 +- goto out;
21904 +- }
21905 ++ if (ses->user_name != NULL) {
21906 ++ /* calculate ntlmv2 response and session key */
21907 ++ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
21908 ++ if (rc) {
21909 ++ cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
21910 ++ goto out;
21911 ++ }
21912 +
21913 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21914 +- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21915 +- bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
21916 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
21917 ++ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21918 ++ bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
21919 +
21920 +- /* set case sensitive password length after tilen may get
21921 +- * assigned, tilen is 0 otherwise.
21922 +- */
21923 +- pSMB->req_no_secext.CaseSensitivePasswordLength =
21924 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21925 ++ /* set case sensitive password length after tilen may get
21926 ++ * assigned, tilen is 0 otherwise.
21927 ++ */
21928 ++ pSMB->req_no_secext.CaseSensitivePasswordLength =
21929 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
21930 ++ } else {
21931 ++ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
21932 ++ }
21933 +
21934 + if (ses->capabilities & CAP_UNICODE) {
21935 + if (sess_data->iov[0].iov_len % 2) {
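
All of the sess.c hunks above apply the same rule: password-derived response
blobs are built only when ses->user_name is non-NULL, and anonymous sessions
send zero-length responses instead of hashing a NULL credential. A stubbed
sketch of the shape of that guard (helper names are invented stand-ins for
setup_ntlmv2_rsp() and friends):

#include <stdint.h>
#include <stdio.h>

struct demo_blob {
        uint16_t len;
        unsigned char buf[64];
};

/* Stand-in for the real response-hashing helpers. */
static int demo_compute_response(const char *user, struct demo_blob *out)
{
        int n = snprintf((char *)out->buf, sizeof(out->buf), "resp(%s)", user);

        out->len = (uint16_t)n;
        return 0;
}

static int demo_build_auth_blob(const char *user_name, struct demo_blob *resp)
{
        if (user_name != NULL)
                return demo_compute_response(user_name, resp);

        resp->len = 0;          /* anonymous: no NT response at all */
        return 0;
}

int main(void)
{
        struct demo_blob b;

        demo_build_auth_blob("alice", &b);
        printf("named session: response len=%u\n", b.len);
        demo_build_auth_blob(NULL, &b);
        printf("anonymous session: response len=%u\n", b.len);
        return 0;
}
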
21936 +diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
21937 +index bc0bb9c34f72..0ffa18094335 100644
21938 +--- a/fs/cifs/smb2glob.h
21939 ++++ b/fs/cifs/smb2glob.h
21940 +@@ -44,6 +44,7 @@
21941 + #define SMB2_OP_DELETE 7
21942 + #define SMB2_OP_HARDLINK 8
21943 + #define SMB2_OP_SET_EOF 9
21944 ++#define SMB2_OP_RMDIR 10
21945 +
21946 + /* Used when constructing chained read requests. */
21947 + #define CHAINED_REQUEST 1
21948 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
21949 +index 899bbc86f73e..4f0231e685a9 100644
21950 +--- a/fs/cifs/smb2inode.c
21951 ++++ b/fs/cifs/smb2inode.c
21952 +@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
21953 + * SMB2_open() call.
21954 + */
21955 + break;
21956 ++ case SMB2_OP_RMDIR:
21957 ++ tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
21958 ++ fid.volatile_fid);
21959 ++ break;
21960 + case SMB2_OP_RENAME:
21961 + tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
21962 + fid.volatile_fid, (__le16 *)data);
21963 +@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
21964 + struct cifs_sb_info *cifs_sb)
21965 + {
21966 + return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
21967 +- CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
21968 +- NULL, SMB2_OP_DELETE);
21969 ++ CREATE_NOT_FILE,
21970 ++ NULL, SMB2_OP_RMDIR);
21971 + }
21972 +
21973 + int
21974 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
21975 +index 42e1f440eb1e..8f38e33d365b 100644
21976 +--- a/fs/cifs/smb2pdu.c
21977 ++++ b/fs/cifs/smb2pdu.c
21978 +@@ -2575,6 +2575,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
21979 + }
21980 +
21981 + int
21982 ++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
21983 ++ u64 persistent_fid, u64 volatile_fid)
21984 ++{
21985 ++ __u8 delete_pending = 1;
21986 ++ void *data;
21987 ++ unsigned int size;
21988 ++
21989 ++ data = &delete_pending;
21990 ++ size = 1; /* sizeof __u8 */
21991 ++
21992 ++ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
21993 ++ current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
21994 ++ &size);
21995 ++}
21996 ++
21997 ++int
21998 + SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
21999 + u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
22000 + {
22001 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
22002 +index 4f07dc93608d..eb2cde2f64ba 100644
22003 +--- a/fs/cifs/smb2proto.h
22004 ++++ b/fs/cifs/smb2proto.h
22005 +@@ -141,6 +141,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
22006 + extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
22007 + u64 persistent_fid, u64 volatile_fid,
22008 + __le16 *target_file);
22009 ++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
22010 ++ u64 persistent_fid, u64 volatile_fid);
22011 + extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
22012 + u64 persistent_fid, u64 volatile_fid,
22013 + __le16 *target_file);
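
The smb2inode.c/smb2pdu.c hunks change rmdir from an open with
CREATE_DELETE_ON_CLOSE to an explicit set-info: open the directory with
DELETE access, set FILE_DISPOSITION_INFORMATION with delete_pending = 1,
then close, and the server removes the directory on last close. A stubbed
userspace sketch of that message sequence (the helpers below are invented
stand-ins, not the real client calls):

#include <stdint.h>
#include <stdio.h>

struct demo_fid { uint64_t persistent, volatile_; };

static int demo_open_dir(const char *path, struct demo_fid *fid)
{
        printf("SMB2 CREATE %s (DELETE access, CREATE_NOT_FILE)\n", path);
        fid->persistent = fid->volatile_ = 1;   /* pretend the server replied */
        return 0;
}

static int demo_set_delete_pending(const struct demo_fid *fid)
{
        /* SET_INFO: FILE_DISPOSITION_INFORMATION, delete_pending = 1 */
        printf("SMB2 SET_INFO fid=%llu delete_pending=1\n",
               (unsigned long long)fid->persistent);
        return 0;
}

static int demo_close(const struct demo_fid *fid)
{
        printf("SMB2 CLOSE fid=%llu\n", (unsigned long long)fid->persistent);
        return 0;       /* server removes the directory at last close */
}

int main(void)
{
        struct demo_fid fid;

        if (demo_open_dir("\\share\\dir", &fid))
                return 1;
        demo_set_delete_pending(&fid);
        return demo_close(&fid);
}
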
22014 +diff --git a/fs/coredump.c b/fs/coredump.c
22015 +index 9ea87e9fdccf..47c32c3bfa1d 100644
22016 +--- a/fs/coredump.c
22017 ++++ b/fs/coredump.c
22018 +@@ -32,6 +32,9 @@
22019 + #include <linux/pipe_fs_i.h>
22020 + #include <linux/oom.h>
22021 + #include <linux/compat.h>
22022 ++#include <linux/sched.h>
22023 ++#include <linux/fs.h>
22024 ++#include <linux/path.h>
22025 + #include <linux/timekeeping.h>
22026 +
22027 + #include <asm/uaccess.h>
22028 +@@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo)
22029 + }
22030 + } else {
22031 + struct inode *inode;
22032 ++ int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
22033 ++ O_LARGEFILE | O_EXCL;
22034 +
22035 + if (cprm.limit < binfmt->min_coredump)
22036 + goto fail_unlock;
22037 +@@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo)
22038 + * what matters is that at least one of the two processes
22039 + * writes its coredump successfully, not which one.
22040 + */
22041 +- cprm.file = filp_open(cn.corename,
22042 +- O_CREAT | 2 | O_NOFOLLOW |
22043 +- O_LARGEFILE | O_EXCL,
22044 +- 0600);
22045 ++ if (need_suid_safe) {
22046 ++ /*
22047 ++ * Using user namespaces, normal user tasks can change
22048 ++ * their current->fs->root to point to arbitrary
22049 ++ * directories. Since the intention of the "only dump
22050 ++ * with a fully qualified path" rule is to control where
22051 ++ * coredumps may be placed using root privileges,
22052 ++ * current->fs->root must not be used. Instead, use the
22053 ++ * root directory of init_task.
22054 ++ */
22055 ++ struct path root;
22056 ++
22057 ++ task_lock(&init_task);
22058 ++ get_fs_root(init_task.fs, &root);
22059 ++ task_unlock(&init_task);
22060 ++ cprm.file = file_open_root(root.dentry, root.mnt,
22061 ++ cn.corename, open_flags, 0600);
22062 ++ path_put(&root);
22063 ++ } else {
22064 ++ cprm.file = filp_open(cn.corename, open_flags, 0600);
22065 ++ }
22066 + if (IS_ERR(cprm.file))
22067 + goto fail_unlock;
22068 +
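
The coredump fix resolves the core file name against init_task's root rather
than the dumping task's (possibly attacker-controlled) root. The same idea in
ordinary userspace terms: open a trusted directory once, then create the file
relative to that fd, with O_EXCL and O_NOFOLLOW refusing pre-existing files
and symlinks. Paths here are illustrative:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* A directory we trust, opened once up front. */
        int rootfd = open("/tmp", O_PATH | O_DIRECTORY);
        if (rootfd < 0) {
                perror("open trusted dir");
                return 1;
        }

        /* Same flag set as the patch: refuse to follow symlinks and
         * refuse to reuse an existing file. */
        int fd = openat(rootfd, "core.demo",
                        O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | O_EXCL,
                        0600);
        if (fd < 0)
                perror("openat");   /* e.g. EEXIST, or ELOOP for a symlink */
        else
                close(fd);

        close(rootfd);
        return 0;
}
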
22069 +diff --git a/fs/dcache.c b/fs/dcache.c
22070 +index 2398f9f94337..7566b2689609 100644
22071 +--- a/fs/dcache.c
22072 ++++ b/fs/dcache.c
22073 +@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
22074 + DCACHE_OP_REVALIDATE |
22075 + DCACHE_OP_WEAK_REVALIDATE |
22076 + DCACHE_OP_DELETE |
22077 +- DCACHE_OP_SELECT_INODE));
22078 ++ DCACHE_OP_SELECT_INODE |
22079 ++ DCACHE_OP_REAL));
22080 + dentry->d_op = op;
22081 + if (!op)
22082 + return;
22083 +@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
22084 + dentry->d_flags |= DCACHE_OP_PRUNE;
22085 + if (op->d_select_inode)
22086 + dentry->d_flags |= DCACHE_OP_SELECT_INODE;
22087 ++ if (op->d_real)
22088 ++ dentry->d_flags |= DCACHE_OP_REAL;
22089 +
22090 + }
22091 + EXPORT_SYMBOL(d_set_d_op);
22092 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
22093 +index bece948b363d..8580831ed237 100644
22094 +--- a/fs/debugfs/inode.c
22095 ++++ b/fs/debugfs/inode.c
22096 +@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
22097 + if (unlikely(!inode))
22098 + return failed_creating(dentry);
22099 +
22100 +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
22101 ++ make_empty_dir_inode(inode);
22102 + inode->i_flags |= S_AUTOMOUNT;
22103 + inode->i_private = data;
22104 + dentry->d_fsdata = (void *)f;
22105 +diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
22106 +index 38f7562489bb..25634c353191 100644
22107 +--- a/fs/ext4/crypto.c
22108 ++++ b/fs/ext4/crypto.c
22109 +@@ -34,6 +34,7 @@
22110 + #include <linux/random.h>
22111 + #include <linux/scatterlist.h>
22112 + #include <linux/spinlock_types.h>
22113 ++#include <linux/namei.h>
22114 +
22115 + #include "ext4_extents.h"
22116 + #include "xattr.h"
22117 +@@ -475,13 +476,19 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
22118 + */
22119 + static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
22120 + {
22121 +- struct inode *dir = d_inode(dentry->d_parent);
22122 +- struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
22123 ++ struct dentry *dir;
22124 ++ struct ext4_crypt_info *ci;
22125 + int dir_has_key, cached_with_key;
22126 +
22127 +- if (!ext4_encrypted_inode(dir))
22128 +- return 0;
22129 ++ if (flags & LOOKUP_RCU)
22130 ++ return -ECHILD;
22131 +
22132 ++ dir = dget_parent(dentry);
22133 ++ if (!ext4_encrypted_inode(d_inode(dir))) {
22134 ++ dput(dir);
22135 ++ return 0;
22136 ++ }
22137 ++ ci = EXT4_I(d_inode(dir))->i_crypt_info;
22138 + if (ci && ci->ci_keyring_key &&
22139 + (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
22140 + (1 << KEY_FLAG_REVOKED) |
22141 +@@ -491,6 +498,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
22142 + /* this should eventually be an flag in d_flags */
22143 + cached_with_key = dentry->d_fsdata != NULL;
22144 + dir_has_key = (ci != NULL);
22145 ++ dput(dir);
22146 +
22147 + /*
22148 + * If the dentry was cached without the key, and it is a
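
The ext4_d_revalidate() change follows the standard ->d_revalidate contract:
in rcu-walk mode no blocking or reference-taking is allowed, so the method
returns -ECHILD and lets the VFS retry in ref-walk mode, where
dget_parent()/dput() pin the parent safely instead of dereferencing d_parent
raw. A toy userspace model of that control flow (types, refcounting and the
flag value are invented for illustration):

#include <errno.h>
#include <stdio.h>

#define DEMO_LOOKUP_RCU 0x1     /* invented flag value for the demo */

struct demo_dentry {
        struct demo_dentry *parent;
        int refcount;
};

/* Counted reference to the parent, like dget_parent(). */
static struct demo_dentry *demo_dget_parent(struct demo_dentry *d)
{
        d->parent->refcount++;
        return d->parent;
}

static void demo_dput(struct demo_dentry *d)
{
        d->refcount--;
}

static int demo_d_revalidate(struct demo_dentry *dentry, unsigned int flags)
{
        struct demo_dentry *dir;

        if (flags & DEMO_LOOKUP_RCU)
                return -ECHILD;         /* can't sleep or take refs here */

        dir = demo_dget_parent(dentry);
        /* ... inspect the parent's state (e.g. encryption keys) ... */
        demo_dput(dir);                 /* drop the ref on every exit path */
        return 1;                       /* dentry still valid */
}

int main(void)
{
        struct demo_dentry root = { NULL, 1 };
        struct demo_dentry child = { &root, 1 };

        printf("rcu-walk: %d\n", demo_d_revalidate(&child, DEMO_LOOKUP_RCU));
        printf("ref-walk: %d\n", demo_d_revalidate(&child, 0));
        return 0;
}
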
22149 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
22150 +index 157b458a69d4..b213449a5d1b 100644
22151 +--- a/fs/ext4/ext4.h
22152 ++++ b/fs/ext4/ext4.h
22153 +@@ -900,6 +900,29 @@ do { \
22154 + #include "extents_status.h"
22155 +
22156 + /*
22157 ++ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
22158 ++ *
22159 ++ * These are needed to avoid lockdep false positives when we need to
22160 ++ * allocate blocks to the quota inode during ext4_map_blocks(), while
22161 ++ * holding i_data_sem for a normal (non-quota) inode. Since we don't
22162 ++ * do quota tracking for the quota inode, this avoids deadlock (as
22163 ++ * well as infinite recursion, since it isn't turtles all the way
22164 ++ * down...)
22165 ++ *
22166 ++ * I_DATA_SEM_NORMAL - Used for most inodes
22167 ++ * I_DATA_SEM_OTHER - Used by move_extent.c for the second normal inode
22168 ++ * where the second inode has larger inode number
22169 ++ * than the first
22170 ++ * I_DATA_SEM_QUOTA - Used for quota inodes only
22171 ++ */
22172 ++enum {
22173 ++ I_DATA_SEM_NORMAL = 0,
22174 ++ I_DATA_SEM_OTHER,
22175 ++ I_DATA_SEM_QUOTA,
22176 ++};
22177 ++
22178 ++
22179 ++/*
22180 + * fourth extended file system inode data in memory
22181 + */
22182 + struct ext4_inode_info {
22183 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
22184 +index 4cd318f31cbe..38847f38b34a 100644
22185 +--- a/fs/ext4/file.c
22186 ++++ b/fs/ext4/file.c
22187 +@@ -335,7 +335,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
22188 + struct super_block *sb = inode->i_sb;
22189 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
22190 + struct vfsmount *mnt = filp->f_path.mnt;
22191 +- struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
22192 ++ struct dentry *dir;
22193 + struct path path;
22194 + char buf[64], *cp;
22195 + int ret;
22196 +@@ -379,14 +379,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
22197 + if (ext4_encryption_info(inode) == NULL)
22198 + return -ENOKEY;
22199 + }
22200 +- if (ext4_encrypted_inode(dir) &&
22201 +- !ext4_is_child_context_consistent_with_parent(dir, inode)) {
22202 ++
22203 ++ dir = dget_parent(file_dentry(filp));
22204 ++ if (ext4_encrypted_inode(d_inode(dir)) &&
22205 ++ !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
22206 + ext4_warning(inode->i_sb,
22207 + "Inconsistent encryption contexts: %lu/%lu\n",
22208 +- (unsigned long) dir->i_ino,
22209 ++ (unsigned long) d_inode(dir)->i_ino,
22210 + (unsigned long) inode->i_ino);
22211 ++ dput(dir);
22212 + return -EPERM;
22213 + }
22214 ++ dput(dir);
22215 + /*
22216 + * Set up the jbd2_inode if we are opening the inode for
22217 + * writing and the journal is present
22218 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
22219 +index aee960b1af34..e6218cbc8332 100644
22220 +--- a/fs/ext4/inode.c
22221 ++++ b/fs/ext4/inode.c
22222 +@@ -5261,6 +5261,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
22223 + might_sleep();
22224 + trace_ext4_mark_inode_dirty(inode, _RET_IP_);
22225 + err = ext4_reserve_inode_write(handle, inode, &iloc);
22226 ++ if (err)
22227 ++ return err;
22228 + if (ext4_handle_valid(handle) &&
22229 + EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
22230 + !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
22231 +@@ -5291,9 +5293,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
22232 + }
22233 + }
22234 + }
22235 +- if (!err)
22236 +- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
22237 +- return err;
22238 ++ return ext4_mark_iloc_dirty(handle, inode, &iloc);
22239 + }
22240 +
22241 + /*
22242 +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
22243 +index 4098acc701c3..796ff0eafd3c 100644
22244 +--- a/fs/ext4/move_extent.c
22245 ++++ b/fs/ext4/move_extent.c
22246 +@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
22247 + {
22248 + if (first < second) {
22249 + down_write(&EXT4_I(first)->i_data_sem);
22250 +- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
22251 ++ down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
22252 + } else {
22253 + down_write(&EXT4_I(second)->i_data_sem);
22254 +- down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
22255 ++ down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
22256 +
22257 + }
22258 + }
22259 +@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
22260 + return -EBUSY;
22261 + }
22262 +
22263 ++ if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
22264 ++ ext4_debug("ext4 move extent: The argument files should "
22265 ++ "not be quota files [ino:orig %lu, donor %lu]\n",
22266 ++ orig_inode->i_ino, donor_inode->i_ino);
22267 ++ return -EBUSY;
22268 ++ }
22269 ++
22270 + /* Ext4 move extent supports only extent based file */
22271 + if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
22272 + ext4_debug("ext4 move extent: orig file is not extents "
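
ext4_double_down_write_data_sem() encodes a classic deadlock-avoidance rule:
when two i_data_sem locks must be held at once, always take them in a fixed
order (here, the lower inode first), annotating the inner one with the
I_DATA_SEM_OTHER subclass so lockdep accepts the nesting. The lockdep
annotation is kernel-specific, but the ordering rule itself looks like this
in plain pthreads (illustrative sketch, ordering by lock address):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Always lock the lower-addressed lock first; two tasks locking the same
 * pair then agree on the order and cannot deadlock ABBA. */
static void double_write_lock(pthread_rwlock_t *a, pthread_rwlock_t *b)
{
        if ((uintptr_t)a > (uintptr_t)b) {
                pthread_rwlock_t *tmp = a;
                a = b;
                b = tmp;
        }
        pthread_rwlock_wrlock(a);
        pthread_rwlock_wrlock(b);
}

static void double_write_unlock(pthread_rwlock_t *a, pthread_rwlock_t *b)
{
        pthread_rwlock_unlock(a);
        pthread_rwlock_unlock(b);
}

int main(void)
{
        pthread_rwlock_t x = PTHREAD_RWLOCK_INITIALIZER;
        pthread_rwlock_t y = PTHREAD_RWLOCK_INITIALIZER;

        double_write_lock(&y, &x);      /* argument order doesn't matter */
        puts("both locks held");
        double_write_unlock(&x, &y);
        return 0;
}
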
22273 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
22274 +index 3ed01ec011d7..a76ca677fd1a 100644
22275 +--- a/fs/ext4/super.c
22276 ++++ b/fs/ext4/super.c
22277 +@@ -1324,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
22278 + return -1;
22279 + }
22280 + if (ext4_has_feature_quota(sb)) {
22281 +- ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
22282 +- "when QUOTA feature is enabled");
22283 +- return -1;
22284 ++ ext4_msg(sb, KERN_INFO, "Journaled quota options "
22285 ++ "ignored when QUOTA feature is enabled");
22286 ++ return 1;
22287 + }
22288 + qname = match_strdup(args);
22289 + if (!qname) {
22290 +@@ -1689,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
22291 + return -1;
22292 + }
22293 + if (ext4_has_feature_quota(sb)) {
22294 +- ext4_msg(sb, KERN_ERR,
22295 +- "Cannot set journaled quota options "
22296 ++ ext4_msg(sb, KERN_INFO,
22297 ++ "Quota format mount options ignored "
22298 + "when QUOTA feature is enabled");
22299 +- return -1;
22300 ++ return 1;
22301 + }
22302 + sbi->s_jquota_fmt = m->mount_opt;
22303 + #endif
22304 +@@ -1753,11 +1753,11 @@ static int parse_options(char *options, struct super_block *sb,
22305 + #ifdef CONFIG_QUOTA
22306 + if (ext4_has_feature_quota(sb) &&
22307 + (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
22308 +- ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
22309 +- "feature is enabled");
22310 +- return 0;
22311 +- }
22312 +- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
22313 ++ ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
22314 ++ "mount options ignored.");
22315 ++ clear_opt(sb, USRQUOTA);
22316 ++ clear_opt(sb, GRPQUOTA);
22317 ++ } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
22318 + if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
22319 + clear_opt(sb, USRQUOTA);
22320 +
22321 +@@ -5021,6 +5021,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
22322 + EXT4_SB(sb)->s_jquota_fmt, type);
22323 + }
22324 +
22325 ++static void lockdep_set_quota_inode(struct inode *inode, int subclass)
22326 ++{
22327 ++ struct ext4_inode_info *ei = EXT4_I(inode);
22328 ++
22329 ++ /* The first argument of lockdep_set_subclass has to be
22330 ++ * *exactly* the same as the argument to init_rwsem() --- in
22331 ++ * this case, in init_once() --- or lockdep gets unhappy
22332 ++ * because the name of the lock is set using the
22333 ++ * stringification of the argument to init_rwsem().
22334 ++ */
22335 ++ (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
22336 ++ lockdep_set_subclass(&ei->i_data_sem, subclass);
22337 ++}
22338 ++
22339 + /*
22340 + * Standard function to be called on quota_on
22341 + */
22342 +@@ -5060,8 +5074,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
22343 + if (err)
22344 + return err;
22345 + }
22346 +-
22347 +- return dquot_quota_on(sb, type, format_id, path);
22348 ++ lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
22349 ++ err = dquot_quota_on(sb, type, format_id, path);
22350 ++ if (err)
22351 ++ lockdep_set_quota_inode(path->dentry->d_inode,
22352 ++ I_DATA_SEM_NORMAL);
22353 ++ return err;
22354 + }
22355 +
22356 + static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
22357 +@@ -5088,8 +5106,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
22358 +
22359 + /* Don't account quota for quota files to avoid recursion */
22360 + qf_inode->i_flags |= S_NOQUOTA;
22361 ++ lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
22362 + err = dquot_enable(qf_inode, type, format_id, flags);
22363 + iput(qf_inode);
22364 ++ if (err)
22365 ++ lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
22366 +
22367 + return err;
22368 + }
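
Both quota hunks wrap the enable call in the same annotate-then-undo
pattern: tag the quota inode's i_data_sem with the I_DATA_SEM_QUOTA lockdep
subclass before dquot_quota_on()/dquot_enable(), and restore
I_DATA_SEM_NORMAL if the call fails, so a failed attempt leaves no stale
annotation. A stub-based sketch of that error-path symmetry (the helpers
model, rather than call, the kernel APIs):

#include <stdio.h>

enum { I_DATA_SEM_NORMAL, I_DATA_SEM_OTHER, I_DATA_SEM_QUOTA };

struct demo_inode {
        int subclass;   /* stands in for the rwsem's lockdep subclass */
};

static void demo_set_subclass(struct demo_inode *inode, int subclass)
{
        inode->subclass = subclass;
}

static int demo_dquot_enable(struct demo_inode *inode, int fail)
{
        (void)inode;
        return fail ? -1 : 0;
}

static int demo_quota_enable(struct demo_inode *qf_inode, int fail)
{
        int err;

        demo_set_subclass(qf_inode, I_DATA_SEM_QUOTA);
        err = demo_dquot_enable(qf_inode, fail);
        if (err)        /* undo the annotation on the error path */
                demo_set_subclass(qf_inode, I_DATA_SEM_NORMAL);
        return err;
}

int main(void)
{
        struct demo_inode qf = { I_DATA_SEM_NORMAL };
        int err;

        err = demo_quota_enable(&qf, 0);
        printf("ok:   err=%d subclass=%d\n", err, qf.subclass);

        qf.subclass = I_DATA_SEM_NORMAL;
        err = demo_quota_enable(&qf, 1);
        printf("fail: err=%d subclass=%d\n", err, qf.subclass);
        return 0;
}
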
22369 +diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c
22370 +index d4a96af513c2..596f02490f27 100644
22371 +--- a/fs/f2fs/crypto_policy.c
22372 ++++ b/fs/f2fs/crypto_policy.c
22373 +@@ -192,7 +192,8 @@ int f2fs_inherit_context(struct inode *parent, struct inode *child,
22374 + return res;
22375 +
22376 + ci = F2FS_I(parent)->i_crypt_info;
22377 +- BUG_ON(ci == NULL);
22378 ++ if (ci == NULL)
22379 ++ return -ENOKEY;
22380 +
22381 + ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
22382 +
22383 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
22384 +index 5c06db17e41f..44802599fa67 100644
22385 +--- a/fs/f2fs/data.c
22386 ++++ b/fs/f2fs/data.c
22387 +@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
22388 + f2fs_restore_and_release_control_page(&page);
22389 +
22390 + if (unlikely(bio->bi_error)) {
22391 +- set_page_dirty(page);
22392 + set_bit(AS_EIO, &page->mapping->flags);
22393 + f2fs_stop_checkpoint(sbi);
22394 + }
22395 +@@ -504,7 +503,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
22396 + struct dnode_of_data dn;
22397 + u64 start = F2FS_BYTES_TO_BLK(offset);
22398 + u64 len = F2FS_BYTES_TO_BLK(count);
22399 +- bool allocated;
22400 ++ bool allocated = false;
22401 + u64 end_offset;
22402 + int err = 0;
22403 +
22404 +@@ -546,7 +545,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
22405 + f2fs_put_dnode(&dn);
22406 + f2fs_unlock_op(sbi);
22407 +
22408 +- f2fs_balance_fs(sbi, dn.node_changed);
22409 ++ f2fs_balance_fs(sbi, allocated);
22410 + }
22411 + return err;
22412 +
22413 +@@ -556,7 +555,7 @@ sync_out:
22414 + f2fs_put_dnode(&dn);
22415 + out:
22416 + f2fs_unlock_op(sbi);
22417 +- f2fs_balance_fs(sbi, dn.node_changed);
22418 ++ f2fs_balance_fs(sbi, allocated);
22419 + return err;
22420 + }
22421 +
22422 +@@ -650,14 +649,14 @@ get_next:
22423 + if (dn.ofs_in_node >= end_offset) {
22424 + if (allocated)
22425 + sync_inode_page(&dn);
22426 +- allocated = false;
22427 + f2fs_put_dnode(&dn);
22428 +
22429 + if (create) {
22430 + f2fs_unlock_op(sbi);
22431 +- f2fs_balance_fs(sbi, dn.node_changed);
22432 ++ f2fs_balance_fs(sbi, allocated);
22433 + f2fs_lock_op(sbi);
22434 + }
22435 ++ allocated = false;
22436 +
22437 + set_new_dnode(&dn, inode, NULL, NULL, 0);
22438 + err = get_dnode_of_data(&dn, pgofs, mode);
22439 +@@ -715,7 +714,7 @@ put_out:
22440 + unlock_out:
22441 + if (create) {
22442 + f2fs_unlock_op(sbi);
22443 +- f2fs_balance_fs(sbi, dn.node_changed);
22444 ++ f2fs_balance_fs(sbi, allocated);
22445 + }
22446 + out:
22447 + trace_f2fs_map_blocks(inode, map, err);
22448 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
22449 +index faa7495e2d7e..30e6b6563494 100644
22450 +--- a/fs/f2fs/dir.c
22451 ++++ b/fs/f2fs/dir.c
22452 +@@ -892,11 +892,19 @@ out:
22453 + return err;
22454 + }
22455 +
22456 ++static int f2fs_dir_open(struct inode *inode, struct file *filp)
22457 ++{
22458 ++ if (f2fs_encrypted_inode(inode))
22459 ++ return f2fs_get_encryption_info(inode) ? -EACCES : 0;
22460 ++ return 0;
22461 ++}
22462 ++
22463 + const struct file_operations f2fs_dir_operations = {
22464 + .llseek = generic_file_llseek,
22465 + .read = generic_read_dir,
22466 + .iterate = f2fs_readdir,
22467 + .fsync = f2fs_sync_file,
22468 ++ .open = f2fs_dir_open,
22469 + .unlocked_ioctl = f2fs_ioctl,
22470 + #ifdef CONFIG_COMPAT
22471 + .compat_ioctl = f2fs_compat_ioctl,
22472 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
22473 +index ea272be62677..5a322bc00ac4 100644
22474 +--- a/fs/f2fs/file.c
22475 ++++ b/fs/f2fs/file.c
22476 +@@ -425,6 +425,8 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
22477 + err = f2fs_get_encryption_info(inode);
22478 + if (err)
22479 + return 0;
22480 ++ if (!f2fs_encrypted_inode(inode))
22481 ++ return -ENOKEY;
22482 + }
22483 +
22484 + /* we don't need to use inline_data strictly */
22485 +@@ -444,7 +446,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
22486 + if (!ret && f2fs_encrypted_inode(inode)) {
22487 + ret = f2fs_get_encryption_info(inode);
22488 + if (ret)
22489 +- ret = -EACCES;
22490 ++ return -EACCES;
22491 ++ if (!f2fs_encrypted_inode(inode))
22492 ++ return -ENOKEY;
22493 + }
22494 + return ret;
22495 + }
22496 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
22497 +index 6f944e5eb76e..7e9e38769660 100644
22498 +--- a/fs/f2fs/namei.c
22499 ++++ b/fs/f2fs/namei.c
22500 +@@ -980,12 +980,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
22501 + }
22502 + memcpy(cstr.name, sd->encrypted_path, cstr.len);
22503 +
22504 +- /* this is broken symlink case */
22505 +- if (unlikely(cstr.name[0] == 0)) {
22506 +- res = -ENOENT;
22507 +- goto errout;
22508 +- }
22509 +-
22510 + if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
22511 + max_size) {
22512 + /* Symlink data on the disk is corrupted */
22513 +@@ -1002,6 +996,12 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
22514 +
22515 + kfree(cstr.name);
22516 +
22517 ++ /* this is broken symlink case */
22518 ++ if (unlikely(pstr.name[0] == 0)) {
22519 ++ res = -ENOENT;
22520 ++ goto errout;
22521 ++ }
22522 ++
22523 + paddr = pstr.name;
22524 +
22525 + /* Null-terminate the name */
22526 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
22527 +index 6134832baaaf..013a62b2f8ca 100644
22528 +--- a/fs/f2fs/super.c
22529 ++++ b/fs/f2fs/super.c
22530 +@@ -926,9 +926,25 @@ static loff_t max_file_blocks(void)
22531 + return result;
22532 + }
22533 +
22534 ++static int __f2fs_commit_super(struct buffer_head *bh,
22535 ++ struct f2fs_super_block *super)
22536 ++{
22537 ++ lock_buffer(bh);
22538 ++ if (super)
22539 ++ memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
22540 ++ set_buffer_uptodate(bh);
22541 ++ set_buffer_dirty(bh);
22542 ++ unlock_buffer(bh);
22543 ++
22544 ++ /* it's a rare case, we can do FUA all the time */
22545 ++ return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
22546 ++}
22547 ++
22548 + static inline bool sanity_check_area_boundary(struct super_block *sb,
22549 +- struct f2fs_super_block *raw_super)
22550 ++ struct buffer_head *bh)
22551 + {
22552 ++ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
22553 ++ (bh->b_data + F2FS_SUPER_OFFSET);
22554 + u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
22555 + u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
22556 + u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
22557 +@@ -942,6 +958,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
22558 + u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
22559 + u32 segment_count = le32_to_cpu(raw_super->segment_count);
22560 + u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
22561 ++ u64 main_end_blkaddr = main_blkaddr +
22562 ++ (segment_count_main << log_blocks_per_seg);
22563 ++ u64 seg_end_blkaddr = segment0_blkaddr +
22564 ++ (segment_count << log_blocks_per_seg);
22565 +
22566 + if (segment0_blkaddr != cp_blkaddr) {
22567 + f2fs_msg(sb, KERN_INFO,
22568 +@@ -986,22 +1006,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
22569 + return true;
22570 + }
22571 +
22572 +- if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
22573 +- segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
22574 ++ if (main_end_blkaddr > seg_end_blkaddr) {
22575 + f2fs_msg(sb, KERN_INFO,
22576 +- "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
22577 ++ "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
22578 + main_blkaddr,
22579 +- segment0_blkaddr + (segment_count << log_blocks_per_seg),
22580 ++ segment0_blkaddr +
22581 ++ (segment_count << log_blocks_per_seg),
22582 + segment_count_main << log_blocks_per_seg);
22583 + return true;
22584 ++ } else if (main_end_blkaddr < seg_end_blkaddr) {
22585 ++ int err = 0;
22586 ++ char *res;
22587 ++
22588 ++ /* fix in-memory information all the time */
22589 ++ raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
22590 ++ segment0_blkaddr) >> log_blocks_per_seg);
22591 ++
22592 ++ if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
22593 ++ res = "internally";
22594 ++ } else {
22595 ++ err = __f2fs_commit_super(bh, NULL);
22596 ++ res = err ? "failed" : "done";
22597 ++ }
22598 ++ f2fs_msg(sb, KERN_INFO,
22599 ++ "Fix alignment : %s, start(%u) end(%u) block(%u)",
22600 ++ res, main_blkaddr,
22601 ++ segment0_blkaddr +
22602 ++ (segment_count << log_blocks_per_seg),
22603 ++ segment_count_main << log_blocks_per_seg);
22604 ++ if (err)
22605 ++ return true;
22606 + }
22607 +-
22608 + return false;
22609 + }
22610 +
22611 + static int sanity_check_raw_super(struct super_block *sb,
22612 +- struct f2fs_super_block *raw_super)
22613 ++ struct buffer_head *bh)
22614 + {
22615 ++ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
22616 ++ (bh->b_data + F2FS_SUPER_OFFSET);
22617 + unsigned int blocksize;
22618 +
22619 + if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
22620 +@@ -1068,7 +1111,7 @@ static int sanity_check_raw_super(struct super_block *sb,
22621 + }
22622 +
22623 + /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
22624 +- if (sanity_check_area_boundary(sb, raw_super))
22625 ++ if (sanity_check_area_boundary(sb, bh))
22626 + return 1;
22627 +
22628 + return 0;
22629 +@@ -1134,103 +1177,87 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
22630 +
22631 + /*
22632 + * Read f2fs raw super block.
22633 +- * Because we have two copies of super block, so read the first one at first,
22634 +- * if the first one is invalid, move to read the second one.
22635 ++ * Because we have two copies of the super block, read both of them
22636 ++ * to get the first valid one. If either of them is broken, we pass
22637 ++ * the recovery flag back to the caller.
22638 + */
22639 + static int read_raw_super_block(struct super_block *sb,
22640 + struct f2fs_super_block **raw_super,
22641 + int *valid_super_block, int *recovery)
22642 + {
22643 +- int block = 0;
22644 ++ int block;
22645 + struct buffer_head *bh;
22646 +- struct f2fs_super_block *super, *buf;
22647 ++ struct f2fs_super_block *super;
22648 + int err = 0;
22649 +
22650 + super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
22651 + if (!super)
22652 + return -ENOMEM;
22653 +-retry:
22654 +- bh = sb_bread(sb, block);
22655 +- if (!bh) {
22656 +- *recovery = 1;
22657 +- f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
22658 ++
22659 ++ for (block = 0; block < 2; block++) {
22660 ++ bh = sb_bread(sb, block);
22661 ++ if (!bh) {
22662 ++ f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
22663 + block + 1);
22664 +- err = -EIO;
22665 +- goto next;
22666 +- }
22667 ++ err = -EIO;
22668 ++ continue;
22669 ++ }
22670 +
22671 +- buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);
22672 ++ /* sanity checking of raw super */
22673 ++ if (sanity_check_raw_super(sb, bh)) {
22674 ++ f2fs_msg(sb, KERN_ERR,
22675 ++ "Can't find valid F2FS filesystem in %dth superblock",
22676 ++ block + 1);
22677 ++ err = -EINVAL;
22678 ++ brelse(bh);
22679 ++ continue;
22680 ++ }
22681 +
22682 +- /* sanity checking of raw super */
22683 +- if (sanity_check_raw_super(sb, buf)) {
22684 ++ if (!*raw_super) {
22685 ++ memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
22686 ++ sizeof(*super));
22687 ++ *valid_super_block = block;
22688 ++ *raw_super = super;
22689 ++ }
22690 + brelse(bh);
22691 +- *recovery = 1;
22692 +- f2fs_msg(sb, KERN_ERR,
22693 +- "Can't find valid F2FS filesystem in %dth superblock",
22694 +- block + 1);
22695 +- err = -EINVAL;
22696 +- goto next;
22697 + }
22698 +
22699 +- if (!*raw_super) {
22700 +- memcpy(super, buf, sizeof(*super));
22701 +- *valid_super_block = block;
22702 +- *raw_super = super;
22703 +- }
22704 +- brelse(bh);
22705 +-
22706 +-next:
22707 +- /* check the validity of the second superblock */
22708 +- if (block == 0) {
22709 +- block++;
22710 +- goto retry;
22711 +- }
22712 ++ /* Failed to read any one of the superblocks */
22713 ++ if (err < 0)
22714 ++ *recovery = 1;
22715 +
22716 + /* No valid superblock */
22717 +- if (!*raw_super) {
22718 ++ if (!*raw_super)
22719 + kfree(super);
22720 +- return err;
22721 +- }
22722 ++ else
22723 ++ err = 0;
22724 +
22725 +- return 0;
22726 ++ return err;
22727 + }
22728 +
22729 +-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
22730 ++int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
22731 + {
22732 +- struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
22733 + struct buffer_head *bh;
22734 + int err;
22735 +
22736 +- bh = sb_getblk(sbi->sb, block);
22737 ++ /* write back-up superblock first */
22738 ++ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
22739 + if (!bh)
22740 + return -EIO;
22741 +-
22742 +- lock_buffer(bh);
22743 +- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
22744 +- set_buffer_uptodate(bh);
22745 +- set_buffer_dirty(bh);
22746 +- unlock_buffer(bh);
22747 +-
22748 +- /* it's rare case, we can do fua all the time */
22749 +- err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
22750 ++ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
22751 + brelse(bh);
22752 +
22753 +- return err;
22754 +-}
22755 +-
22756 +-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
22757 +-{
22758 +- int err;
22759 +-
22760 +- /* write back-up superblock first */
22761 +- err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
22762 +-
22763 + /* if we are in recovery path, skip writing valid superblock */
22764 + if (recover || err)
22765 + return err;
22766 +
22767 + /* write current valid superblock */
22768 +- return __f2fs_commit_super(sbi, sbi->valid_super_block);
22769 ++ bh = sb_getblk(sbi->sb, sbi->valid_super_block);
22770 ++ if (!bh)
22771 ++ return -EIO;
22772 ++ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
22773 ++ brelse(bh);
22774 ++ return err;
22775 + }
22776 +
22777 + static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
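
read_raw_super_block() now reads both superblock copies in one loop, keeps
the first valid one, and raises the recovery flag if either copy failed, so
f2fs_commit_super() can rewrite the bad copy later. A compact userspace model
of that read-both/pick-first/flag-recovery logic (the on-disk layout and
magic value are invented for the demo):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_MAGIC 0xf2f51010u
#define SB_SLOT_SIZE 4096

struct demo_sb { uint32_t magic; uint32_t version; };

static int read_sb(int fd, int idx, struct demo_sb *sb)
{
        if (pread(fd, sb, sizeof(*sb), (off_t)idx * SB_SLOT_SIZE) !=
            (ssize_t)sizeof(*sb))
                return -1;
        return sb->magic == DEMO_MAGIC ? 0 : -1;        /* sanity check */
}

int main(void)
{
        struct demo_sb good = { DEMO_MAGIC, 1 }, bad = { 0, 0 }, sb = { 0, 0 };
        int fd = open("sb-demo.img", O_CREAT | O_RDWR | O_TRUNC, 0600);
        int valid = -1, recovery = 0;

        if (fd < 0)
                return 1;
        pwrite(fd, &bad, sizeof(bad), 0);                 /* broken primary */
        pwrite(fd, &good, sizeof(good), SB_SLOT_SIZE);    /* valid backup */

        for (int i = 0; i < 2; i++) {
                struct demo_sb tmp;

                if (read_sb(fd, i, &tmp)) {
                        recovery = 1;   /* remember that a copy is bad */
                        continue;
                }
                if (valid < 0) {        /* keep the first valid copy */
                        valid = i;
                        sb = tmp;
                }
        }
        if (valid >= 0)
                printf("using copy %d (version %u), recovery=%d\n",
                       valid, sb.version, recovery);
        close(fd);
        return valid < 0;
}
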
22778 +diff --git a/fs/fhandle.c b/fs/fhandle.c
22779 +index d59712dfa3e7..ca3c3dd01789 100644
22780 +--- a/fs/fhandle.c
22781 ++++ b/fs/fhandle.c
22782 +@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
22783 + path_put(&path);
22784 + return fd;
22785 + }
22786 +- file = file_open_root(path.dentry, path.mnt, "", open_flag);
22787 ++ file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
22788 + if (IS_ERR(file)) {
22789 + put_unused_fd(fd);
22790 + retval = PTR_ERR(file);
22791 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
22792 +index 5c46ed9f3e14..fee81e8768c9 100644
22793 +--- a/fs/fs-writeback.c
22794 ++++ b/fs/fs-writeback.c
22795 +@@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
22796 + wb_get(wb);
22797 + spin_unlock(&inode->i_lock);
22798 + spin_lock(&wb->list_lock);
22799 +- wb_put(wb); /* not gonna deref it anymore */
22800 +
22801 + /* i_wb may have changed inbetween, can't use inode_to_wb() */
22802 +- if (likely(wb == inode->i_wb))
22803 +- return wb; /* @inode already has ref */
22804 ++ if (likely(wb == inode->i_wb)) {
22805 ++ wb_put(wb); /* @inode already has ref */
22806 ++ return wb;
22807 ++ }
22808 +
22809 + spin_unlock(&wb->list_lock);
22810 ++ wb_put(wb);
22811 + cpu_relax();
22812 + spin_lock(&inode->i_lock);
22813 + }
22814 +@@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
22815 + * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
22816 + * and does more profound writeback list handling in writeback_sb_inodes().
22817 + */
22818 +-static int
22819 +-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
22820 +- struct writeback_control *wbc)
22821 ++static int writeback_single_inode(struct inode *inode,
22822 ++ struct writeback_control *wbc)
22823 + {
22824 ++ struct bdi_writeback *wb;
22825 + int ret = 0;
22826 +
22827 + spin_lock(&inode->i_lock);
22828 +@@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
22829 + ret = __writeback_single_inode(inode, wbc);
22830 +
22831 + wbc_detach_inode(wbc);
22832 +- spin_lock(&wb->list_lock);
22833 ++
22834 ++ wb = inode_to_wb_and_lock_list(inode);
22835 + spin_lock(&inode->i_lock);
22836 + /*
22837 + * If inode is clean, remove it from writeback lists. Otherwise don't
22838 +@@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb,
22839 +
22840 + while (!list_empty(&wb->b_io)) {
22841 + struct inode *inode = wb_inode(wb->b_io.prev);
22842 ++ struct bdi_writeback *tmp_wb;
22843 +
22844 + if (inode->i_sb != sb) {
22845 + if (work->sb) {
22846 +@@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb,
22847 + cond_resched();
22848 + }
22849 +
22850 +-
22851 +- spin_lock(&wb->list_lock);
22852 ++ /*
22853 ++ * Requeue @inode if still dirty. Be careful as @inode may
22854 ++ * have been switched to another wb in the meantime.
22855 ++ */
22856 ++ tmp_wb = inode_to_wb_and_lock_list(inode);
22857 + spin_lock(&inode->i_lock);
22858 + if (!(inode->i_state & I_DIRTY_ALL))
22859 + wrote++;
22860 +- requeue_inode(inode, wb, &wbc);
22861 ++ requeue_inode(inode, tmp_wb, &wbc);
22862 + inode_sync_complete(inode);
22863 + spin_unlock(&inode->i_lock);
22864 +
22865 ++ if (unlikely(tmp_wb != wb)) {
22866 ++ spin_unlock(&tmp_wb->list_lock);
22867 ++ spin_lock(&wb->list_lock);
22868 ++ }
22869 ++
22870 + /*
22871 + * bail out to wb_writeback() often enough to check
22872 + * background threshold and other termination conditions.
22873 +@@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
22874 + */
22875 + int write_inode_now(struct inode *inode, int sync)
22876 + {
22877 +- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
22878 + struct writeback_control wbc = {
22879 + .nr_to_write = LONG_MAX,
22880 + .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
22881 +@@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync)
22882 + wbc.nr_to_write = 0;
22883 +
22884 + might_sleep();
22885 +- return writeback_single_inode(inode, wb, &wbc);
22886 ++ return writeback_single_inode(inode, &wbc);
22887 + }
22888 + EXPORT_SYMBOL(write_inode_now);
22889 +
22890 +@@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now);
22891 + */
22892 + int sync_inode(struct inode *inode, struct writeback_control *wbc)
22893 + {
22894 +- return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
22895 ++ return writeback_single_inode(inode, wbc);
22896 + }
22897 + EXPORT_SYMBOL(sync_inode);
22898 +
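
The writeback hunks replace blind uses of the inode's wb with
inode_to_wb_and_lock_list(), a take-ref/lock/recheck loop: pin the wb the
inode currently points at, take its list_lock, then verify i_wb still names
the same wb; if it was switched in between, drop everything and retry. The
same optimistic pattern in plain pthreads (single-threaded demo, names
invented):

#include <pthread.h>
#include <stdio.h>

struct demo_wb {
        pthread_mutex_t list_lock;
        int refs;
};

static struct demo_wb wb_a = { PTHREAD_MUTEX_INITIALIZER, 1 };

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_wb *i_wb = &wb_a;    /* may be switched concurrently */

static struct demo_wb *demo_wb_and_lock_list(void)
{
        for (;;) {
                struct demo_wb *wb;

                pthread_mutex_lock(&i_lock);
                wb = i_wb;
                wb->refs++;                     /* pin the candidate */
                pthread_mutex_unlock(&i_lock);

                pthread_mutex_lock(&wb->list_lock);
                if (wb == i_wb) {
                        wb->refs--;             /* list_lock keeps it stable */
                        return wb;              /* locked and still current */
                }

                /* It was switched between dropping i_lock and taking
                 * list_lock: unlock, unpin, and try again. */
                pthread_mutex_unlock(&wb->list_lock);
                wb->refs--;
        }
}

int main(void)
{
        struct demo_wb *wb = demo_wb_and_lock_list();

        puts("holding the current wb's list_lock");
        pthread_mutex_unlock(&wb->list_lock);
        return 0;
}
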
22899 +diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
22900 +index 8e3ee1936c7e..c5b6b7165489 100644
22901 +--- a/fs/fuse/cuse.c
22902 ++++ b/fs/fuse/cuse.c
22903 +@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
22904 +
22905 + static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
22906 + {
22907 +- struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
22908 ++ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
22909 + loff_t pos = 0;
22910 +
22911 + return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
22912 +@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
22913 +
22914 + static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
22915 + {
22916 +- struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
22917 ++ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
22918 + loff_t pos = 0;
22919 + /*
22920 + * No locking or generic_write_checks(), the server is
22921 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
22922 +index b03d253ece15..416108b42412 100644
22923 +--- a/fs/fuse/file.c
22924 ++++ b/fs/fuse/file.c
22925 +@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
22926 + }
22927 + }
22928 +
22929 ++static void fuse_io_release(struct kref *kref)
22930 ++{
22931 ++ kfree(container_of(kref, struct fuse_io_priv, refcnt));
22932 ++}
22933 ++
22934 + static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
22935 + {
22936 + if (io->err)
22937 +@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
22938 + }
22939 +
22940 + io->iocb->ki_complete(io->iocb, res, 0);
22941 +- kfree(io);
22942 + }
22943 ++
22944 ++ kref_put(&io->refcnt, fuse_io_release);
22945 + }
22946 +
22947 + static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
22948 +@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
22949 + size_t num_bytes, struct fuse_io_priv *io)
22950 + {
22951 + spin_lock(&io->lock);
22952 ++ kref_get(&io->refcnt);
22953 + io->size += num_bytes;
22954 + io->reqs++;
22955 + spin_unlock(&io->lock);
22956 +@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
22957 +
22958 + static int fuse_do_readpage(struct file *file, struct page *page)
22959 + {
22960 +- struct fuse_io_priv io = { .async = 0, .file = file };
22961 ++ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
22962 + struct inode *inode = page->mapping->host;
22963 + struct fuse_conn *fc = get_fuse_conn(inode);
22964 + struct fuse_req *req;
22965 +@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
22966 + size_t res;
22967 + unsigned offset;
22968 + unsigned i;
22969 +- struct fuse_io_priv io = { .async = 0, .file = file };
22970 ++ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
22971 +
22972 + for (i = 0; i < req->num_pages; i++)
22973 + fuse_wait_on_page_writeback(inode, req->pages[i]->index);
22974 +@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
22975 +
22976 + static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
22977 + {
22978 +- struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
22979 ++ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
22980 + return __fuse_direct_read(&io, to, &iocb->ki_pos);
22981 + }
22982 +
22983 +@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
22984 + {
22985 + struct file *file = iocb->ki_filp;
22986 + struct inode *inode = file_inode(file);
22987 +- struct fuse_io_priv io = { .async = 0, .file = file };
22988 ++ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
22989 + ssize_t res;
22990 +
22991 + if (is_bad_inode(inode))
22992 +@@ -2843,6 +2850,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
22993 + loff_t i_size;
22994 + size_t count = iov_iter_count(iter);
22995 + struct fuse_io_priv *io;
22996 ++ bool is_sync = is_sync_kiocb(iocb);
22997 +
22998 + pos = offset;
22999 + inode = file->f_mapping->host;
23000 +@@ -2863,6 +2871,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
23001 + if (!io)
23002 + return -ENOMEM;
23003 + spin_lock_init(&io->lock);
23004 ++ kref_init(&io->refcnt);
23005 + io->reqs = 1;
23006 + io->bytes = -1;
23007 + io->size = 0;
23008 +@@ -2882,12 +2891,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
23009 + * to wait on real async I/O requests, so we must submit this request
23010 + * synchronously.
23011 + */
23012 +- if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
23013 ++ if (!is_sync && (offset + count > i_size) &&
23014 + iov_iter_rw(iter) == WRITE)
23015 + io->async = false;
23016 +
23017 +- if (io->async && is_sync_kiocb(iocb))
23018 ++ if (io->async && is_sync) {
23019 ++ /*
23020 ++ * Additional reference to keep io around after
23021 ++ * calling fuse_aio_complete()
23022 ++ */
23023 ++ kref_get(&io->refcnt);
23024 + io->done = &wait;
23025 ++ }
23026 +
23027 + if (iov_iter_rw(iter) == WRITE) {
23028 + ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
23029 +@@ -2900,14 +2915,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
23030 + fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
23031 +
23032 + /* we have a non-extending, async request, so return */
23033 +- if (!is_sync_kiocb(iocb))
23034 ++ if (!is_sync)
23035 + return -EIOCBQUEUED;
23036 +
23037 + wait_for_completion(&wait);
23038 + ret = fuse_get_res_by_io(io);
23039 + }
23040 +
23041 +- kfree(io);
23042 ++ kref_put(&io->refcnt, fuse_io_release);
23043 +
23044 + if (iov_iter_rw(iter) == WRITE) {
23045 + if (ret > 0)
23046 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
23047 +index ce394b5fe6b4..eddbe02c4028 100644
23048 +--- a/fs/fuse/fuse_i.h
23049 ++++ b/fs/fuse/fuse_i.h
23050 +@@ -22,6 +22,7 @@
23051 + #include <linux/rbtree.h>
23052 + #include <linux/poll.h>
23053 + #include <linux/workqueue.h>
23054 ++#include <linux/kref.h>
23055 +
23056 + /** Max number of pages that can be used in a single read request */
23057 + #define FUSE_MAX_PAGES_PER_REQ 32
23058 +@@ -243,6 +244,7 @@ struct fuse_args {
23059 +
23060 + /** The request IO state (for asynchronous processing) */
23061 + struct fuse_io_priv {
23062 ++ struct kref refcnt;
23063 + int async;
23064 + spinlock_t lock;
23065 + unsigned reqs;
23066 +@@ -256,6 +258,13 @@ struct fuse_io_priv {
23067 + struct completion *done;
23068 + };
23069 +
23070 ++#define FUSE_IO_PRIV_SYNC(f) \
23071 ++{ \
23072 ++ .refcnt = { ATOMIC_INIT(1) }, \
23073 ++ .async = 0, \
23074 ++ .file = f, \
23075 ++}
23076 ++
23077 + /**
23078 + * Request flags
23079 + *
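
The fuse hunks above convert struct fuse_io_priv from a bare kfree() at completion to reference counting, so the synchronous-I/O path can keep the object alive across fuse_aio_complete(). A minimal standalone sketch of that get/put pattern, with C11 atomics standing in for the kernel's kref (names here are illustrative, not the FUSE code):

#include <stdatomic.h>
#include <stdlib.h>

struct io_ctx {
	atomic_int refcnt;		/* starts at 1 for the submitter */
	int err;
};

static struct io_ctx *io_ctx_new(void)
{
	struct io_ctx *io = calloc(1, sizeof(*io));

	if (io)
		atomic_init(&io->refcnt, 1);	/* like kref_init() */
	return io;
}

static void io_ctx_get(struct io_ctx *io)
{
	atomic_fetch_add(&io->refcnt, 1);	/* one ref per in-flight request */
}

static void io_ctx_put(struct io_ctx *io)
{
	/* free only on the last put, as kref_put() with a release fn does */
	if (atomic_fetch_sub(&io->refcnt, 1) == 1)
		free(io);
}

int main(void)
{
	struct io_ctx *io = io_ctx_new();

	if (!io)
		return 1;
	io_ctx_get(io);		/* async request takes a reference */
	io_ctx_put(io);		/* request completion drops it */
	io_ctx_put(io);		/* submitter's final put frees the context */
	return 0;
}
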
23080 +diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
23081 +index 458cf463047b..82067ca22f2b 100644
23082 +--- a/fs/hpfs/super.c
23083 ++++ b/fs/hpfs/super.c
23084 +@@ -15,6 +15,7 @@
23085 + #include <linux/sched.h>
23086 + #include <linux/bitmap.h>
23087 + #include <linux/slab.h>
23088 ++#include <linux/seq_file.h>
23089 +
23090 + /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
23091 +
23092 +@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
23093 + int lowercase, eas, chk, errs, chkdsk, timeshift;
23094 + int o;
23095 + struct hpfs_sb_info *sbi = hpfs_sb(s);
23096 +- char *new_opts = kstrdup(data, GFP_KERNEL);
23097 +-
23098 +- if (!new_opts)
23099 +- return -ENOMEM;
23100 +
23101 + sync_filesystem(s);
23102 +
23103 +@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
23104 +
23105 + if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
23106 +
23107 +- replace_mount_options(s, new_opts);
23108 +-
23109 + hpfs_unlock(s);
23110 + return 0;
23111 +
23112 + out_err:
23113 + hpfs_unlock(s);
23114 +- kfree(new_opts);
23115 + return -EINVAL;
23116 + }
23117 +
23118 ++static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
23119 ++{
23120 ++ struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
23121 ++
23122 ++ seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
23123 ++ seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
23124 ++ seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
23125 ++ if (sbi->sb_lowercase)
23126 ++ seq_printf(seq, ",case=lower");
23127 ++ if (!sbi->sb_chk)
23128 ++ seq_printf(seq, ",check=none");
23129 ++ if (sbi->sb_chk == 2)
23130 ++ seq_printf(seq, ",check=strict");
23131 ++ if (!sbi->sb_err)
23132 ++ seq_printf(seq, ",errors=continue");
23133 ++ if (sbi->sb_err == 2)
23134 ++ seq_printf(seq, ",errors=panic");
23135 ++ if (!sbi->sb_chkdsk)
23136 ++ seq_printf(seq, ",chkdsk=no");
23137 ++ if (sbi->sb_chkdsk == 2)
23138 ++ seq_printf(seq, ",chkdsk=always");
23139 ++ if (!sbi->sb_eas)
23140 ++ seq_printf(seq, ",eas=no");
23141 ++ if (sbi->sb_eas == 1)
23142 ++ seq_printf(seq, ",eas=ro");
23143 ++ if (sbi->sb_timeshift)
23144 ++ seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
23145 ++ return 0;
23146 ++}
23147 ++
23148 + /* Super operations */
23149 +
23150 + static const struct super_operations hpfs_sops =
23151 +@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
23152 + .put_super = hpfs_put_super,
23153 + .statfs = hpfs_statfs,
23154 + .remount_fs = hpfs_remount_fs,
23155 +- .show_options = generic_show_options,
23156 ++ .show_options = hpfs_show_options,
23157 + };
23158 +
23159 + static int hpfs_fill_super(struct super_block *s, void *options, int silent)
23160 +@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
23161 +
23162 + int o;
23163 +
23164 +- save_mount_options(s, options);
23165 +-
23166 + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
23167 + if (!sbi) {
23168 + return -ENOMEM;
23169 +diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
23170 +index 5384ceb35b1c..98b3eb7d8eaf 100644
23171 +--- a/fs/isofs/rock.c
23172 ++++ b/fs/isofs/rock.c
23173 +@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
23174 + int retnamlen = 0;
23175 + int truncate = 0;
23176 + int ret = 0;
23177 ++ char *p;
23178 ++ int len;
23179 +
23180 + if (!ISOFS_SB(inode->i_sb)->s_rock)
23181 + return 0;
23182 +@@ -267,12 +269,17 @@ repeat:
23183 + rr->u.NM.flags);
23184 + break;
23185 + }
23186 +- if ((strlen(retname) + rr->len - 5) >= 254) {
23187 ++ len = rr->len - 5;
23188 ++ if (retnamlen + len >= 254) {
23189 + truncate = 1;
23190 + break;
23191 + }
23192 +- strncat(retname, rr->u.NM.name, rr->len - 5);
23193 +- retnamlen += rr->len - 5;
23194 ++ p = memchr(rr->u.NM.name, '\0', len);
23195 ++ if (unlikely(p))
23196 ++ len = p - rr->u.NM.name;
23197 ++ memcpy(retname + retnamlen, rr->u.NM.name, len);
23198 ++ retnamlen += len;
23199 ++ retname[retnamlen] = '\0';
23200 + break;
23201 + case SIG('R', 'E'):
23202 + kfree(rs.buffer);
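
The rock.c change replaces strncat() driven by an on-disk length with an explicit bounded copy. A standalone sketch of the same defensive pattern under illustrative limits (not the isofs code): never trust the length field, stop at an embedded NUL, and keep the destination NUL-terminated.

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 254

/* Append up to len bytes of src to dst (current length *dstlen),
 * truncating at an embedded NUL and refusing to overflow dst. */
static int append_name(char *dst, int *dstlen, const char *src, int len)
{
	const char *nul;

	if (*dstlen + len >= NAME_MAX_LEN)
		return -1;			/* would overflow: reject */
	nul = memchr(src, '\0', len);
	if (nul)
		len = nul - src;		/* ignore bytes after embedded NUL */
	memcpy(dst + *dstlen, src, len);
	*dstlen += len;
	dst[*dstlen] = '\0';
	return 0;
}

int main(void)
{
	char name[NAME_MAX_LEN + 1];
	int namelen = 0;

	append_name(name, &namelen, "foo\0junk", 8);	/* stops at the NUL */
	printf("%s (%d)\n", name, namelen);		/* prints: foo (3) */
	return 0;
}
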
23203 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
23204 +index 81e622681c82..624a57a9c4aa 100644
23205 +--- a/fs/jbd2/journal.c
23206 ++++ b/fs/jbd2/journal.c
23207 +@@ -1408,11 +1408,12 @@ out:
23208 + /**
23209 + * jbd2_mark_journal_empty() - Mark on disk journal as empty.
23210 + * @journal: The journal to update.
23211 ++ * @write_op: block layer write operation to use for the journal superblock
23212 + *
23213 + * Update a journal's dynamic superblock fields to show that journal is empty.
23214 + * Write updated superblock to disk waiting for IO to complete.
23215 + */
23216 +-static void jbd2_mark_journal_empty(journal_t *journal)
23217 ++static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
23218 + {
23219 + journal_superblock_t *sb = journal->j_superblock;
23220 +
23221 +@@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
23222 + sb->s_start = cpu_to_be32(0);
23223 + read_unlock(&journal->j_state_lock);
23224 +
23225 +- jbd2_write_superblock(journal, WRITE_FUA);
23226 ++ jbd2_write_superblock(journal, write_op);
23227 +
23228 + /* Log is no longer empty */
23229 + write_lock(&journal->j_state_lock);
23230 +@@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
23231 + if (journal->j_sb_buffer) {
23232 + if (!is_journal_aborted(journal)) {
23233 + mutex_lock(&journal->j_checkpoint_mutex);
23234 +- jbd2_mark_journal_empty(journal);
23235 ++
23236 ++ write_lock(&journal->j_state_lock);
23237 ++ journal->j_tail_sequence =
23238 ++ ++journal->j_transaction_sequence;
23239 ++ write_unlock(&journal->j_state_lock);
23240 ++
23241 ++ jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
23242 + mutex_unlock(&journal->j_checkpoint_mutex);
23243 + } else
23244 + err = -EIO;
23245 +@@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
23246 + * the magic code for a fully-recovered superblock. Any future
23247 + * commits of data to the journal will restore the current
23248 + * s_start value. */
23249 +- jbd2_mark_journal_empty(journal);
23250 ++ jbd2_mark_journal_empty(journal, WRITE_FUA);
23251 + mutex_unlock(&journal->j_checkpoint_mutex);
23252 + write_lock(&journal->j_state_lock);
23253 + J_ASSERT(!journal->j_running_transaction);
23254 +@@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
23255 + if (write) {
23256 + /* Lock to make assertions happy... */
23257 + mutex_lock(&journal->j_checkpoint_mutex);
23258 +- jbd2_mark_journal_empty(journal);
23259 ++ jbd2_mark_journal_empty(journal, WRITE_FUA);
23260 + mutex_unlock(&journal->j_checkpoint_mutex);
23261 + }
23262 +
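
The jbd2 hunks let the caller pick the write operation for the empty-journal superblock update: journal destruction now forces a cache flush before the FUA write, while the flush path keeps plain FUA. A small standalone sketch of that parameterization, with illustrative flag values:

#include <stdio.h>

#define WRITE_FUA	(1 << 0)
#define WRITE_FLUSH	(1 << 1)
#define WRITE_FLUSH_FUA	(WRITE_FLUSH | WRITE_FUA)

static void write_superblock(int write_op)
{
	if (write_op & WRITE_FLUSH)
		puts("flush device cache first");	/* orders prior data */
	puts("superblock write with FUA");
}

int main(void)
{
	write_superblock(WRITE_FUA);		/* journal flush path */
	write_superblock(WRITE_FLUSH_FUA);	/* journal destroy path */
	return 0;
}
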
23263 +diff --git a/fs/namei.c b/fs/namei.c
23264 +index 9c590e0f66e9..7824bfb89ada 100644
23265 +--- a/fs/namei.c
23266 ++++ b/fs/namei.c
23267 +@@ -2968,22 +2968,10 @@ no_open:
23268 + dentry = lookup_real(dir, dentry, nd->flags);
23269 + if (IS_ERR(dentry))
23270 + return PTR_ERR(dentry);
23271 +-
23272 +- if (create_error) {
23273 +- int open_flag = op->open_flag;
23274 +-
23275 +- error = create_error;
23276 +- if ((open_flag & O_EXCL)) {
23277 +- if (!dentry->d_inode)
23278 +- goto out;
23279 +- } else if (!dentry->d_inode) {
23280 +- goto out;
23281 +- } else if ((open_flag & O_TRUNC) &&
23282 +- d_is_reg(dentry)) {
23283 +- goto out;
23284 +- }
23285 +- /* will fail later, go on to get the right error */
23286 +- }
23287 ++ }
23288 ++ if (create_error && !dentry->d_inode) {
23289 ++ error = create_error;
23290 ++ goto out;
23291 + }
23292 + looked_up:
23293 + path->dentry = dentry;
23294 +@@ -4258,7 +4246,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
23295 + bool new_is_dir = false;
23296 + unsigned max_links = new_dir->i_sb->s_max_links;
23297 +
23298 +- if (source == target)
23299 ++ /*
23300 ++ * Check source == target.
23301 ++ * On overlayfs we need to look at the underlying inodes.
23302 ++ */
23303 ++ if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
23304 + return 0;
23305 +
23306 + error = may_delete(old_dir, old_dentry, is_dir);
23307 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
23308 +index 9cce67043f92..7ded17764754 100644
23309 +--- a/fs/nfs/dir.c
23310 ++++ b/fs/nfs/dir.c
23311 +@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
23312 + again:
23313 + timestamp = jiffies;
23314 + gencount = nfs_inc_attr_generation_counter();
23315 +- error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
23316 ++ error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
23317 + NFS_SERVER(inode)->dtsize, desc->plus);
23318 + if (error < 0) {
23319 + /* We requested READDIRPLUS, but the server doesn't grok it */
23320 +@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
23321 + count++;
23322 +
23323 + if (desc->plus != 0)
23324 +- nfs_prime_dcache(desc->file->f_path.dentry, entry);
23325 ++ nfs_prime_dcache(file_dentry(desc->file), entry);
23326 +
23327 + status = nfs_readdir_add_to_array(entry, page);
23328 + if (status != 0)
23329 +@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
23330 + */
23331 + static int nfs_readdir(struct file *file, struct dir_context *ctx)
23332 + {
23333 +- struct dentry *dentry = file->f_path.dentry;
23334 ++ struct dentry *dentry = file_dentry(file);
23335 + struct inode *inode = d_inode(dentry);
23336 + nfs_readdir_descriptor_t my_desc,
23337 + *desc = &my_desc;
23338 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
23339 +index 86faecf8f328..847b678af4f0 100644
23340 +--- a/fs/nfs/inode.c
23341 ++++ b/fs/nfs/inode.c
23342 +@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
23343 + {
23344 + struct nfs_open_context *ctx;
23345 +
23346 +- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
23347 ++ ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
23348 + if (IS_ERR(ctx))
23349 + return PTR_ERR(ctx);
23350 + nfs_file_set_open_context(filp, ctx);
23351 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
23352 +index 57ca1c8039c1..2a9ff14cfb3b 100644
23353 +--- a/fs/nfs/nfs4file.c
23354 ++++ b/fs/nfs/nfs4file.c
23355 +@@ -26,7 +26,7 @@ static int
23356 + nfs4_file_open(struct inode *inode, struct file *filp)
23357 + {
23358 + struct nfs_open_context *ctx;
23359 +- struct dentry *dentry = filp->f_path.dentry;
23360 ++ struct dentry *dentry = file_dentry(filp);
23361 + struct dentry *parent = NULL;
23362 + struct inode *dir;
23363 + unsigned openflags = filp->f_flags;
23364 +@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
23365 + parent = dget_parent(dentry);
23366 + dir = d_inode(parent);
23367 +
23368 +- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
23369 ++ ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
23370 + err = PTR_ERR(ctx);
23371 + if (IS_ERR(ctx))
23372 + goto out;
23373 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
23374 +index 4cba7865f496..f8082c7cde8b 100644
23375 +--- a/fs/nfsd/nfs4proc.c
23376 ++++ b/fs/nfsd/nfs4proc.c
23377 +@@ -878,6 +878,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
23378 + &exp, &dentry);
23379 + if (err)
23380 + return err;
23381 ++ fh_unlock(&cstate->current_fh);
23382 + if (d_really_is_negative(dentry)) {
23383 + exp_put(exp);
23384 + err = nfserr_noent;
23385 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
23386 +index d6ef0955a979..1600ec470ce7 100644
23387 +--- a/fs/nfsd/nfs4xdr.c
23388 ++++ b/fs/nfsd/nfs4xdr.c
23389 +@@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
23390 +
23391 + READ_BUF(4);
23392 + rename->rn_snamelen = be32_to_cpup(p++);
23393 +- READ_BUF(rename->rn_snamelen + 4);
23394 ++ READ_BUF(rename->rn_snamelen);
23395 + SAVEMEM(rename->rn_sname, rename->rn_snamelen);
23396 ++ READ_BUF(4);
23397 + rename->rn_tnamelen = be32_to_cpup(p++);
23398 + READ_BUF(rename->rn_tnamelen);
23399 + SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
23400 +@@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
23401 + READ_BUF(8);
23402 + setclientid->se_callback_prog = be32_to_cpup(p++);
23403 + setclientid->se_callback_netid_len = be32_to_cpup(p++);
23404 +-
23405 +- READ_BUF(setclientid->se_callback_netid_len + 4);
23406 ++ READ_BUF(setclientid->se_callback_netid_len);
23407 + SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
23408 ++ READ_BUF(4);
23409 + setclientid->se_callback_addr_len = be32_to_cpup(p++);
23410 +
23411 +- READ_BUF(setclientid->se_callback_addr_len + 4);
23412 ++ READ_BUF(setclientid->se_callback_addr_len);
23413 + SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
23414 ++ READ_BUF(4);
23415 + setclientid->se_callback_ident = be32_to_cpup(p++);
23416 +
23417 + DECODE_TAIL;
23418 +@@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
23419 +
23420 + READ_BUF(4);
23421 + argp->taglen = be32_to_cpup(p++);
23422 +- READ_BUF(argp->taglen + 8);
23423 ++ READ_BUF(argp->taglen);
23424 + SAVEMEM(argp->tag, argp->taglen);
23425 ++ READ_BUF(8);
23426 + argp->minorversion = be32_to_cpup(p++);
23427 + argp->opcnt = be32_to_cpup(p++);
23428 + max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
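
The pattern in the nfsd4 decode fixes above is to reserve and consume each variable-length field on its own, rather than checking READ_BUF(len + 4) where a huge client-supplied length can wrap the addition and admit an out-of-bounds read. A standalone sketch of that style of length validation (illustrative helpers, not the nfsd XDR macros):

#include <stdint.h>
#include <stdio.h>

struct buf { const uint8_t *p; size_t left; };

/* Reserve n bytes; fails cleanly instead of wrapping. */
static const uint8_t *read_buf(struct buf *b, size_t n)
{
	const uint8_t *r = b->p;

	if (n > b->left)
		return NULL;
	b->left -= n;
	b->p += n;
	return r;
}

int main(void)
{
	uint8_t wire[8] = { 0, 0, 0, 0xff };	/* bogus huge length */
	struct buf b = { wire, sizeof(wire) };
	const uint8_t *p = read_buf(&b, 4);
	uint32_t len = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];

	/* check the name bytes alone, then the next 4-byte field alone */
	if (!read_buf(&b, len) || !read_buf(&b, 4))
		puts("short packet rejected");
	return 0;
}
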
23429 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
23430 +index 0cdf497c91ef..2162434728c0 100644
23431 +--- a/fs/ocfs2/acl.c
23432 ++++ b/fs/ocfs2/acl.c
23433 +@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
23434 + brelse(di_bh);
23435 + return acl;
23436 + }
23437 ++
23438 ++int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
23439 ++{
23440 ++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
23441 ++ struct posix_acl *acl;
23442 ++ int ret;
23443 ++
23444 ++ if (S_ISLNK(inode->i_mode))
23445 ++ return -EOPNOTSUPP;
23446 ++
23447 ++ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
23448 ++ return 0;
23449 ++
23450 ++ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
23451 ++ if (IS_ERR(acl) || !acl)
23452 ++ return PTR_ERR(acl);
23453 ++ ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
23454 ++ if (ret)
23455 ++ return ret;
23456 ++ ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
23457 ++ acl, NULL, NULL);
23458 ++ posix_acl_release(acl);
23459 ++ return ret;
23460 ++}
23461 ++
23462 ++/*
23463 ++ * Initialize the ACLs of a new inode. If parent directory has default ACL,
23464 ++ * then clone to new inode. Called from ocfs2_mknod.
23465 ++ */
23466 ++int ocfs2_init_acl(handle_t *handle,
23467 ++ struct inode *inode,
23468 ++ struct inode *dir,
23469 ++ struct buffer_head *di_bh,
23470 ++ struct buffer_head *dir_bh,
23471 ++ struct ocfs2_alloc_context *meta_ac,
23472 ++ struct ocfs2_alloc_context *data_ac)
23473 ++{
23474 ++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
23475 ++ struct posix_acl *acl = NULL;
23476 ++ int ret = 0, ret2;
23477 ++ umode_t mode;
23478 ++
23479 ++ if (!S_ISLNK(inode->i_mode)) {
23480 ++ if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
23481 ++ acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
23482 ++ dir_bh);
23483 ++ if (IS_ERR(acl))
23484 ++ return PTR_ERR(acl);
23485 ++ }
23486 ++ if (!acl) {
23487 ++ mode = inode->i_mode & ~current_umask();
23488 ++ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
23489 ++ if (ret) {
23490 ++ mlog_errno(ret);
23491 ++ goto cleanup;
23492 ++ }
23493 ++ }
23494 ++ }
23495 ++ if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
23496 ++ if (S_ISDIR(inode->i_mode)) {
23497 ++ ret = ocfs2_set_acl(handle, inode, di_bh,
23498 ++ ACL_TYPE_DEFAULT, acl,
23499 ++ meta_ac, data_ac);
23500 ++ if (ret)
23501 ++ goto cleanup;
23502 ++ }
23503 ++ mode = inode->i_mode;
23504 ++ ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
23505 ++ if (ret < 0)
23506 ++ return ret;
23507 ++
23508 ++ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
23509 ++ if (ret2) {
23510 ++ mlog_errno(ret2);
23511 ++ ret = ret2;
23512 ++ goto cleanup;
23513 ++ }
23514 ++ if (ret > 0) {
23515 ++ ret = ocfs2_set_acl(handle, inode,
23516 ++ di_bh, ACL_TYPE_ACCESS,
23517 ++ acl, meta_ac, data_ac);
23518 ++ }
23519 ++ }
23520 ++cleanup:
23521 ++ posix_acl_release(acl);
23522 ++ return ret;
23523 ++}
23524 +diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
23525 +index 3fce68d08625..2783a75b3999 100644
23526 +--- a/fs/ocfs2/acl.h
23527 ++++ b/fs/ocfs2/acl.h
23528 +@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
23529 + struct posix_acl *acl,
23530 + struct ocfs2_alloc_context *meta_ac,
23531 + struct ocfs2_alloc_context *data_ac);
23532 ++extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
23533 ++extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
23534 ++ struct buffer_head *, struct buffer_head *,
23535 ++ struct ocfs2_alloc_context *,
23536 ++ struct ocfs2_alloc_context *);
23537 +
23538 + #endif /* OCFS2_ACL_H */
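
When the parent directory carries no default ACL, ocfs2_init_acl() above falls back to plain umask handling: clear the process umask bits from the requested mode. A two-line standalone illustration of that computation:

#include <stdio.h>

int main(void)
{
	unsigned int mode = 0666;	/* requested create mode */
	unsigned int umask = 0022;	/* process umask */

	/* no default ACL on the parent: just strip the umask bits */
	printf("%04o\n", mode & ~umask);	/* prints 0644 */
	return 0;
}
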
23539 +diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
23540 +index a76b9ea7722e..a2370e2c7295 100644
23541 +--- a/fs/ocfs2/cluster/heartbeat.c
23542 ++++ b/fs/ocfs2/cluster/heartbeat.c
23543 +@@ -1445,8 +1445,8 @@ static void o2hb_region_release(struct config_item *item)
23544 + debugfs_remove(reg->hr_debug_dir);
23545 + kfree(reg->hr_db_livenodes);
23546 + kfree(reg->hr_db_regnum);
23547 +- kfree(reg->hr_debug_elapsed_time);
23548 +- kfree(reg->hr_debug_pinned);
23549 ++ kfree(reg->hr_db_elapsed_time);
23550 ++ kfree(reg->hr_db_pinned);
23551 +
23552 + spin_lock(&o2hb_live_lock);
23553 + list_del(&reg->hr_all_item);
23554 +diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
23555 +index e36d63ff1783..f90931335c6b 100644
23556 +--- a/fs/ocfs2/dlm/dlmconvert.c
23557 ++++ b/fs/ocfs2/dlm/dlmconvert.c
23558 +@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
23559 + struct dlm_lock *lock, int flags, int type)
23560 + {
23561 + enum dlm_status status;
23562 ++ u8 old_owner = res->owner;
23563 +
23564 + mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
23565 + lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
23566 +@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
23567 + status = DLM_DENIED;
23568 + goto bail;
23569 + }
23570 ++
23571 ++ if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
23572 ++ mlog(0, "last convert request returned DLM_RECOVERING, but "
23573 ++ "owner has already queued and sent ast to me. res %.*s, "
23574 ++ "(cookie=%u:%llu, type=%d, conv=%d)\n",
23575 ++ res->lockname.len, res->lockname.name,
23576 ++ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
23577 ++ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
23578 ++ lock->ml.type, lock->ml.convert_type);
23579 ++ status = DLM_NORMAL;
23580 ++ goto bail;
23581 ++ }
23582 ++
23583 + res->state |= DLM_LOCK_RES_IN_PROGRESS;
23584 + /* move lock to local convert queue */
23585 + /* do not alter lock refcount. switching lists. */
23586 +@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
23587 + spin_lock(&res->spinlock);
23588 + res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
23589 + lock->convert_pending = 0;
23590 +- /* if it failed, move it back to granted queue */
23591 ++ /* if it failed, move it back to granted queue.
23592 ++ * if the master returned DLM_NORMAL and then went down before sending
23593 ++ * the ast, the lock may already have been moved to the granted queue;
23594 ++ * reset status to DLM_RECOVERING and retry the convert */
23595 + if (status != DLM_NORMAL) {
23596 + if (status != DLM_NOTQUEUED)
23597 + dlm_error(status);
23598 + dlm_revert_pending_convert(res, lock);
23599 ++ } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
23600 ++ (old_owner != res->owner)) {
23601 ++ mlog(0, "res %.*s is in recovering or has been recovered.\n",
23602 ++ res->lockname.len, res->lockname.name);
23603 ++ status = DLM_RECOVERING;
23604 + }
23605 + bail:
23606 + spin_unlock(&res->spinlock);
23607 +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
23608 +index b94a425f0175..23d0ab881f6e 100644
23609 +--- a/fs/ocfs2/dlm/dlmrecovery.c
23610 ++++ b/fs/ocfs2/dlm/dlmrecovery.c
23611 +@@ -2071,7 +2071,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
23612 + dlm_lock_get(lock);
23613 + if (lock->convert_pending) {
23614 + /* move converting lock back to granted */
23615 +- BUG_ON(i != DLM_CONVERTING_LIST);
23616 + mlog(0, "node died with convert pending "
23617 + "on %.*s. move back to granted list.\n",
23618 + res->lockname.len, res->lockname.name);
23619 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
23620 +index 7cb38fdca229..3d60fda1cb09 100644
23621 +--- a/fs/ocfs2/file.c
23622 ++++ b/fs/ocfs2/file.c
23623 +@@ -1268,20 +1268,20 @@ bail_unlock_rw:
23624 + if (size_change)
23625 + ocfs2_rw_unlock(inode, 1);
23626 + bail:
23627 +- brelse(bh);
23628 +
23629 + /* Release quota pointers in case we acquired them */
23630 + for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
23631 + dqput(transfer_to[qtype]);
23632 +
23633 + if (!status && attr->ia_valid & ATTR_MODE) {
23634 +- status = posix_acl_chmod(inode, inode->i_mode);
23635 ++ status = ocfs2_acl_chmod(inode, bh);
23636 + if (status < 0)
23637 + mlog_errno(status);
23638 + }
23639 + if (inode_locked)
23640 + ocfs2_inode_unlock(inode, 1);
23641 +
23642 ++ brelse(bh);
23643 + return status;
23644 + }
23645 +
23646 +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
23647 +index 6b3e87189a64..a8f1225e6d9b 100644
23648 +--- a/fs/ocfs2/namei.c
23649 ++++ b/fs/ocfs2/namei.c
23650 +@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
23651 + struct ocfs2_dir_lookup_result lookup = { NULL, };
23652 + sigset_t oldset;
23653 + int did_block_signals = 0;
23654 +- struct posix_acl *default_acl = NULL, *acl = NULL;
23655 + struct ocfs2_dentry_lock *dl = NULL;
23656 +
23657 + trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
23658 +@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
23659 + goto leave;
23660 + }
23661 +
23662 +- status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
23663 +- if (status) {
23664 +- mlog_errno(status);
23665 +- goto leave;
23666 +- }
23667 +-
23668 + handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
23669 + S_ISDIR(mode),
23670 + xattr_credits));
23671 +@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
23672 + inc_nlink(dir);
23673 + }
23674 +
23675 +- if (default_acl) {
23676 +- status = ocfs2_set_acl(handle, inode, new_fe_bh,
23677 +- ACL_TYPE_DEFAULT, default_acl,
23678 +- meta_ac, data_ac);
23679 +- }
23680 +- if (!status && acl) {
23681 +- status = ocfs2_set_acl(handle, inode, new_fe_bh,
23682 +- ACL_TYPE_ACCESS, acl,
23683 +- meta_ac, data_ac);
23684 +- }
23685 ++ status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
23686 ++ meta_ac, data_ac);
23687 +
23688 + if (status < 0) {
23689 + mlog_errno(status);
23690 +@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
23691 + d_instantiate(dentry, inode);
23692 + status = 0;
23693 + leave:
23694 +- if (default_acl)
23695 +- posix_acl_release(default_acl);
23696 +- if (acl)
23697 +- posix_acl_release(acl);
23698 + if (status < 0 && did_quota_inode)
23699 + dquot_free_inode(inode);
23700 + if (handle)
23701 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
23702 +index 3eff031aaf26..9919964c5b3b 100644
23703 +--- a/fs/ocfs2/refcounttree.c
23704 ++++ b/fs/ocfs2/refcounttree.c
23705 +@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
23706 + struct inode *inode = d_inode(old_dentry);
23707 + struct buffer_head *old_bh = NULL;
23708 + struct inode *new_orphan_inode = NULL;
23709 +- struct posix_acl *default_acl, *acl;
23710 +- umode_t mode;
23711 +
23712 + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
23713 + return -EOPNOTSUPP;
23714 +
23715 +- mode = inode->i_mode;
23716 +- error = posix_acl_create(dir, &mode, &default_acl, &acl);
23717 +- if (error) {
23718 +- mlog_errno(error);
23719 +- return error;
23720 +- }
23721 +
23722 +- error = ocfs2_create_inode_in_orphan(dir, mode,
23723 ++ error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
23724 + &new_orphan_inode);
23725 + if (error) {
23726 + mlog_errno(error);
23727 +@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
23728 + /* If the security isn't preserved, we need to re-initialize them. */
23729 + if (!preserve) {
23730 + error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
23731 +- &new_dentry->d_name,
23732 +- default_acl, acl);
23733 ++ &new_dentry->d_name);
23734 + if (error)
23735 + mlog_errno(error);
23736 + }
23737 + out:
23738 +- if (default_acl)
23739 +- posix_acl_release(default_acl);
23740 +- if (acl)
23741 +- posix_acl_release(acl);
23742 + if (!error) {
23743 + error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
23744 + new_dentry);
23745 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
23746 +index 7d3d979f57d9..f19b7381a998 100644
23747 +--- a/fs/ocfs2/xattr.c
23748 ++++ b/fs/ocfs2/xattr.c
23749 +@@ -7216,12 +7216,10 @@ out:
23750 + */
23751 + int ocfs2_init_security_and_acl(struct inode *dir,
23752 + struct inode *inode,
23753 +- const struct qstr *qstr,
23754 +- struct posix_acl *default_acl,
23755 +- struct posix_acl *acl)
23756 ++ const struct qstr *qstr)
23757 + {
23758 +- struct buffer_head *dir_bh = NULL;
23759 + int ret = 0;
23760 ++ struct buffer_head *dir_bh = NULL;
23761 +
23762 + ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
23763 + if (ret) {
23764 +@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
23765 + mlog_errno(ret);
23766 + goto leave;
23767 + }
23768 +-
23769 +- if (!ret && default_acl)
23770 +- ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
23771 +- if (!ret && acl)
23772 +- ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
23773 ++ ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
23774 ++ if (ret)
23775 ++ mlog_errno(ret);
23776 +
23777 + ocfs2_inode_unlock(dir, 0);
23778 + brelse(dir_bh);
23779 +diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
23780 +index f10d5b93c366..1633cc15ea1f 100644
23781 +--- a/fs/ocfs2/xattr.h
23782 ++++ b/fs/ocfs2/xattr.h
23783 +@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
23784 + bool preserve_security);
23785 + int ocfs2_init_security_and_acl(struct inode *dir,
23786 + struct inode *inode,
23787 +- const struct qstr *qstr,
23788 +- struct posix_acl *default_acl,
23789 +- struct posix_acl *acl);
23790 ++ const struct qstr *qstr);
23791 + #endif /* OCFS2_XATTR_H */
23792 +diff --git a/fs/open.c b/fs/open.c
23793 +index 55bdc75e2172..081d3d6df74b 100644
23794 +--- a/fs/open.c
23795 ++++ b/fs/open.c
23796 +@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
23797 + int vfs_open(const struct path *path, struct file *file,
23798 + const struct cred *cred)
23799 + {
23800 +- struct dentry *dentry = path->dentry;
23801 +- struct inode *inode = dentry->d_inode;
23802 ++ struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
23803 +
23804 +- file->f_path = *path;
23805 +- if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
23806 +- inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
23807 +- if (IS_ERR(inode))
23808 +- return PTR_ERR(inode);
23809 +- }
23810 ++ if (IS_ERR(inode))
23811 ++ return PTR_ERR(inode);
23812 +
23813 ++ file->f_path = *path;
23814 + return do_dentry_open(file, inode, NULL, cred);
23815 + }
23816 +
23817 +@@ -992,14 +988,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
23818 + EXPORT_SYMBOL(filp_open);
23819 +
23820 + struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
23821 +- const char *filename, int flags)
23822 ++ const char *filename, int flags, umode_t mode)
23823 + {
23824 + struct open_flags op;
23825 +- int err = build_open_flags(flags, 0, &op);
23826 ++ int err = build_open_flags(flags, mode, &op);
23827 + if (err)
23828 + return ERR_PTR(err);
23829 +- if (flags & O_CREAT)
23830 +- return ERR_PTR(-EINVAL);
23831 + return do_file_open_root(dentry, mnt, filename, &op);
23832 + }
23833 + EXPORT_SYMBOL(file_open_root);
23834 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
23835 +index 619ad4b016d2..4399ea804447 100644
23836 +--- a/fs/overlayfs/super.c
23837 ++++ b/fs/overlayfs/super.c
23838 +@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
23839 + }
23840 + }
23841 +
23842 ++static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
23843 ++{
23844 ++ struct dentry *real;
23845 ++
23846 ++ if (d_is_dir(dentry)) {
23847 ++ if (!inode || inode == d_inode(dentry))
23848 ++ return dentry;
23849 ++ goto bug;
23850 ++ }
23851 ++
23852 ++ real = ovl_dentry_upper(dentry);
23853 ++ if (real && (!inode || inode == d_inode(real)))
23854 ++ return real;
23855 ++
23856 ++ real = ovl_dentry_lower(dentry);
23857 ++ if (!real)
23858 ++ goto bug;
23859 ++
23860 ++ if (!inode || inode == d_inode(real))
23861 ++ return real;
23862 ++
23863 ++ /* Handle recursion */
23864 ++ if (real->d_flags & DCACHE_OP_REAL)
23865 ++ return real->d_op->d_real(real, inode);
23866 ++
23867 ++bug:
23868 ++ WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
23869 ++ inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
23870 ++ return dentry;
23871 ++}
23872 ++
23873 + static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
23874 + {
23875 + struct ovl_entry *oe = dentry->d_fsdata;
23876 +@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
23877 + static const struct dentry_operations ovl_dentry_operations = {
23878 + .d_release = ovl_dentry_release,
23879 + .d_select_inode = ovl_d_select_inode,
23880 ++ .d_real = ovl_d_real,
23881 + };
23882 +
23883 + static const struct dentry_operations ovl_reval_dentry_operations = {
23884 + .d_release = ovl_dentry_release,
23885 + .d_select_inode = ovl_d_select_inode,
23886 ++ .d_real = ovl_d_real,
23887 + .d_revalidate = ovl_dentry_revalidate,
23888 + .d_weak_revalidate = ovl_dentry_weak_revalidate,
23889 + };
23890 +diff --git a/fs/pnode.c b/fs/pnode.c
23891 +index c524fdddc7fb..99899705b105 100644
23892 +--- a/fs/pnode.c
23893 ++++ b/fs/pnode.c
23894 +@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
23895 +
23896 + /* all accesses are serialized by namespace_sem */
23897 + static struct user_namespace *user_ns;
23898 +-static struct mount *last_dest, *last_source, *dest_master;
23899 ++static struct mount *last_dest, *first_source, *last_source, *dest_master;
23900 + static struct mountpoint *mp;
23901 + static struct hlist_head *list;
23902 +
23903 +@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
23904 + type = CL_MAKE_SHARED;
23905 + } else {
23906 + struct mount *n, *p;
23907 ++ bool done;
23908 + for (n = m; ; n = p) {
23909 + p = n->mnt_master;
23910 +- if (p == dest_master || IS_MNT_MARKED(p)) {
23911 +- while (last_dest->mnt_master != p) {
23912 +- last_source = last_source->mnt_master;
23913 +- last_dest = last_source->mnt_parent;
23914 +- }
23915 +- if (!peers(n, last_dest)) {
23916 +- last_source = last_source->mnt_master;
23917 +- last_dest = last_source->mnt_parent;
23918 +- }
23919 ++ if (p == dest_master || IS_MNT_MARKED(p))
23920 + break;
23921 +- }
23922 + }
23923 ++ do {
23924 ++ struct mount *parent = last_source->mnt_parent;
23925 ++ if (last_source == first_source)
23926 ++ break;
23927 ++ done = parent->mnt_master == p;
23928 ++ if (done && peers(n, parent))
23929 ++ break;
23930 ++ last_source = last_source->mnt_master;
23931 ++ } while (!done);
23932 ++
23933 + type = CL_SLAVE;
23934 + /* beginning of peer group among the slaves? */
23935 + if (IS_MNT_SHARED(m))
23936 +@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
23937 + */
23938 + user_ns = current->nsproxy->mnt_ns->user_ns;
23939 + last_dest = dest_mnt;
23940 ++ first_source = source_mnt;
23941 + last_source = source_mnt;
23942 + mp = dest_mp;
23943 + list = tree_list;
23944 +diff --git a/fs/proc/base.c b/fs/proc/base.c
23945 +index 4f764c2ac1a5..45f2162e55b2 100644
23946 +--- a/fs/proc/base.c
23947 ++++ b/fs/proc/base.c
23948 +@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
23949 + struct mm_struct *mm = file->private_data;
23950 + unsigned long env_start, env_end;
23951 +
23952 +- if (!mm)
23953 ++ /* Ensure the process spawned far enough to have an environment. */
23954 ++ if (!mm || !mm->env_end)
23955 + return 0;
23956 +
23957 + page = (char *)__get_free_page(GFP_TEMPORARY);
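
The environ_read() guard treats a process that has not yet populated mm->env_end as having an empty environment instead of reading unset bounds. A standalone sketch of the same check, with an illustrative struct standing in for mm_struct:

#include <stdio.h>

struct mm { unsigned long env_start, env_end; };

static long environ_len(const struct mm *mm)
{
	/* process has not spawned far enough to have an environment yet */
	if (!mm || !mm->env_end)
		return 0;
	return (long)(mm->env_end - mm->env_start);
}

int main(void)
{
	struct mm early = { 0, 0 }, ready = { 100, 132 };

	printf("%ld %ld\n", environ_len(&early), environ_len(&ready));
	return 0;
}
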
23958 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
23959 +index fa95ab2d3674..9d2f3e0a6360 100644
23960 +--- a/fs/proc/task_mmu.c
23961 ++++ b/fs/proc/task_mmu.c
23962 +@@ -1504,6 +1504,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
23963 + return page;
23964 + }
23965 +
23966 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
23967 ++static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
23968 ++ struct vm_area_struct *vma,
23969 ++ unsigned long addr)
23970 ++{
23971 ++ struct page *page;
23972 ++ int nid;
23973 ++
23974 ++ if (!pmd_present(pmd))
23975 ++ return NULL;
23976 ++
23977 ++ page = vm_normal_page_pmd(vma, addr, pmd);
23978 ++ if (!page)
23979 ++ return NULL;
23980 ++
23981 ++ if (PageReserved(page))
23982 ++ return NULL;
23983 ++
23984 ++ nid = page_to_nid(page);
23985 ++ if (!node_isset(nid, node_states[N_MEMORY]))
23986 ++ return NULL;
23987 ++
23988 ++ return page;
23989 ++}
23990 ++#endif
23991 ++
23992 + static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
23993 + unsigned long end, struct mm_walk *walk)
23994 + {
23995 +@@ -1513,14 +1539,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
23996 + pte_t *orig_pte;
23997 + pte_t *pte;
23998 +
23999 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
24000 + ptl = pmd_trans_huge_lock(pmd, vma);
24001 + if (ptl) {
24002 +- pte_t huge_pte = *(pte_t *)pmd;
24003 + struct page *page;
24004 +
24005 +- page = can_gather_numa_stats(huge_pte, vma, addr);
24006 ++ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
24007 + if (page)
24008 +- gather_stats(page, md, pte_dirty(huge_pte),
24009 ++ gather_stats(page, md, pmd_dirty(*pmd),
24010 + HPAGE_PMD_SIZE/PAGE_SIZE);
24011 + spin_unlock(ptl);
24012 + return 0;
24013 +@@ -1528,6 +1554,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
24014 +
24015 + if (pmd_trans_unstable(pmd))
24016 + return 0;
24017 ++#endif
24018 + orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
24019 + do {
24020 + struct page *page = can_gather_numa_stats(*pte, vma, addr);
24021 +diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
24022 +index 2256e7e23e67..3f1190d18991 100644
24023 +--- a/fs/proc_namespace.c
24024 ++++ b/fs/proc_namespace.c
24025 +@@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
24026 + if (sb->s_op->show_devname) {
24027 + seq_puts(m, "device ");
24028 + err = sb->s_op->show_devname(m, mnt_path.dentry);
24029 ++ if (err)
24030 ++ goto out;
24031 + } else {
24032 + if (r->mnt_devname) {
24033 + seq_puts(m, "device ");
24034 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
24035 +index 3c3b81bb6dfe..850d17fa0aa3 100644
24036 +--- a/fs/quota/dquot.c
24037 ++++ b/fs/quota/dquot.c
24038 +@@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
24039 + static int __dquot_initialize(struct inode *inode, int type)
24040 + {
24041 + int cnt, init_needed = 0;
24042 +- struct dquot **dquots, *got[MAXQUOTAS];
24043 ++ struct dquot **dquots, *got[MAXQUOTAS] = {};
24044 + struct super_block *sb = inode->i_sb;
24045 + qsize_t rsv;
24046 + int ret = 0;
24047 +@@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
24048 + int rc;
24049 + struct dquot *dquot;
24050 +
24051 +- got[cnt] = NULL;
24052 + if (type != -1 && cnt != type)
24053 + continue;
24054 + /*
24055 +diff --git a/fs/splice.c b/fs/splice.c
24056 +index 82bc0d64fc38..19e0b103d253 100644
24057 +--- a/fs/splice.c
24058 ++++ b/fs/splice.c
24059 +@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
24060 + unsigned int spd_pages = spd->nr_pages;
24061 + int ret, do_wakeup, page_nr;
24062 +
24063 ++ if (!spd_pages)
24064 ++ return 0;
24065 ++
24066 + ret = 0;
24067 + do_wakeup = 0;
24068 + page_nr = 0;
24069 +diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
24070 +index 0ef7c2ed3f8a..4fa14820e2e2 100644
24071 +--- a/fs/xfs/xfs_attr_list.c
24072 ++++ b/fs/xfs/xfs_attr_list.c
24073 +@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
24074 + sbp->namelen,
24075 + sbp->valuelen,
24076 + &sbp->name[sbp->namelen]);
24077 +- if (error)
24078 ++ if (error) {
24079 ++ kmem_free(sbuf);
24080 + return error;
24081 ++ }
24082 + if (context->seen_enough)
24083 + break;
24084 + cursor->offset++;
24085 +@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
24086 + args.rmtblkcnt = xfs_attr3_rmt_blocks(
24087 + args.dp->i_mount, valuelen);
24088 + retval = xfs_attr_rmtval_get(&args);
24089 +- if (retval)
24090 +- return retval;
24091 +- retval = context->put_listent(context,
24092 +- entry->flags,
24093 +- name_rmt->name,
24094 +- (int)name_rmt->namelen,
24095 +- valuelen,
24096 +- args.value);
24097 ++ if (!retval)
24098 ++ retval = context->put_listent(context,
24099 ++ entry->flags,
24100 ++ name_rmt->name,
24101 ++ (int)name_rmt->namelen,
24102 ++ valuelen,
24103 ++ args.value);
24104 + kmem_free(args.value);
24105 + } else {
24106 + retval = context->put_listent(context,
24107 +diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
24108 +index c30266e94806..8ef0ccbf8167 100644
24109 +--- a/include/asm-generic/bitops/lock.h
24110 ++++ b/include/asm-generic/bitops/lock.h
24111 +@@ -29,16 +29,16 @@ do { \
24112 + * @nr: the bit to set
24113 + * @addr: the address to start counting from
24114 + *
24115 +- * This operation is like clear_bit_unlock, however it is not atomic.
24116 +- * It does provide release barrier semantics so it can be used to unlock
24117 +- * a bit lock, however it would only be used if no other CPU can modify
24118 +- * any bits in the memory until the lock is released (a good example is
24119 +- * if the bit lock itself protects access to the other bits in the word).
24120 ++ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
24121 ++ * the bits in the word are protected by this lock some archs can use weaker
24122 ++ * ops to safely unlock.
24123 ++ *
24124 ++ * See for example x86's implementation.
24125 + */
24126 + #define __clear_bit_unlock(nr, addr) \
24127 + do { \
24128 +- smp_mb(); \
24129 +- __clear_bit(nr, addr); \
24130 ++ smp_mb__before_atomic(); \
24131 ++ clear_bit(nr, addr); \
24132 + } while (0)
24133 +
24134 + #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
24135 +diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
24136 +index e56272c919b5..bf2d34c9d804 100644
24137 +--- a/include/asm-generic/futex.h
24138 ++++ b/include/asm-generic/futex.h
24139 +@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
24140 + u32 val;
24141 +
24142 + preempt_disable();
24143 +- if (unlikely(get_user(val, uaddr) != 0))
24144 ++ if (unlikely(get_user(val, uaddr) != 0)) {
24145 ++ preempt_enable();
24146 + return -EFAULT;
24147 ++ }
24148 +
24149 +- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
24150 ++ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
24151 ++ preempt_enable();
24152 + return -EFAULT;
24153 ++ }
24154 +
24155 + *uval = val;
24156 + preempt_enable();
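
The futex fix pairs every early error return after preempt_disable() with a matching preempt_enable(). The usual kernel idiom for keeping such pairs balanced is a single exit path via goto; a standalone sketch with userspace stand-ins for the preempt calls:

#include <stdio.h>

static void preempt_disable(void) { puts("disable"); }
static void preempt_enable(void)  { puts("enable"); }

static int op(int fail)
{
	int ret = 0;

	preempt_disable();
	if (fail) {
		ret = -1;
		goto out;	/* error path still runs preempt_enable() */
	}
	/* ... work under disabled preemption ... */
out:
	preempt_enable();
	return ret;
}

int main(void)
{
	return op(1) == -1 ? 0 : 1;
}
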
24157 +diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
24158 +index 39e1cb201b8e..332da3ad8eb5 100644
24159 +--- a/include/asm-generic/qspinlock.h
24160 ++++ b/include/asm-generic/qspinlock.h
24161 +@@ -28,7 +28,30 @@
24162 + */
24163 + static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
24164 + {
24165 +- return atomic_read(&lock->val);
24166 ++ /*
24167 ++ * queued_spin_lock_slowpath() can ACQUIRE the lock before
24168 ++ * issuing the unordered store that sets _Q_LOCKED_VAL.
24169 ++ *
24170 ++ * See both smp_cond_acquire() sites for more detail.
24171 ++ *
24172 ++ * This however means that in code like:
24173 ++ *
24174 ++ * spin_lock(A) spin_lock(B)
24175 ++ * spin_unlock_wait(B) spin_is_locked(A)
24176 ++ * do_something() do_something()
24177 ++ *
24178 ++ * Both CPUs can end up running do_something() because the store
24179 ++ * setting _Q_LOCKED_VAL will pass through the loads in
24180 ++ * spin_unlock_wait() and/or spin_is_locked().
24181 ++ *
24182 ++ * Avoid this by issuing a full memory barrier between the spin_lock()
24183 ++ * and the loads in spin_unlock_wait() and spin_is_locked().
24184 ++ *
24185 ++ * Note that regular mutual exclusion doesn't care about this
24186 ++ * delayed store.
24187 ++ */
24188 ++ smp_mb();
24189 ++ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
24190 + }
24191 +
24192 + /**
24193 +@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
24194 + */
24195 + static inline void queued_spin_unlock_wait(struct qspinlock *lock)
24196 + {
24197 ++ /* See queued_spin_is_locked() */
24198 ++ smp_mb();
24199 + while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
24200 + cpu_relax();
24201 + }
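
The qspinlock change inserts a full barrier so the store that sets the locked bit cannot pass the loads in spin_unlock_wait() and spin_is_locked() on another lock. A rough standalone sketch of that shape using C11 atomics; this only mirrors where the fence goes, it is not the qspinlock algorithm:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lock_a, lock_b;

static void lock(atomic_int *l)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak(l, &expected, 1))
		expected = 0;
}

static bool is_locked(atomic_int *l)
{
	/* full barrier first, mirroring the smp_mb() the patch adds */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load(l) & 1;
}

int main(void)
{
	lock(&lock_a);
	return is_locked(&lock_b);	/* load ordered after the lock of A */
}
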
24202 +diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
24203 +index 3d1a3af5cf59..a2508a8f9a9c 100644
24204 +--- a/include/asm-generic/siginfo.h
24205 ++++ b/include/asm-generic/siginfo.h
24206 +@@ -17,21 +17,6 @@
24207 + struct siginfo;
24208 + void do_schedule_next_timer(struct siginfo *info);
24209 +
24210 +-#ifndef HAVE_ARCH_COPY_SIGINFO
24211 +-
24212 +-#include <linux/string.h>
24213 +-
24214 +-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
24215 +-{
24216 +- if (from->si_code < 0)
24217 +- memcpy(to, from, sizeof(*to));
24218 +- else
24219 +- /* _sigchld is currently the largest known union member */
24220 +- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
24221 +-}
24222 +-
24223 +-#endif
24224 +-
24225 + extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
24226 +
24227 + #endif
24228 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
24229 +index 461a0558bca4..cebecff536a3 100644
24230 +--- a/include/drm/drm_cache.h
24231 ++++ b/include/drm/drm_cache.h
24232 +@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
24233 + {
24234 + #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
24235 + return false;
24236 ++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
24237 ++ return false;
24238 + #else
24239 + return true;
24240 + #endif
24241 +diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
24242 +index 42cf2d991bf4..4ea7e55f20b0 100644
24243 +--- a/include/keys/trusted-type.h
24244 ++++ b/include/keys/trusted-type.h
24245 +@@ -38,7 +38,7 @@ struct trusted_key_options {
24246 + unsigned char pcrinfo[MAX_PCRINFO_SIZE];
24247 + int pcrlock;
24248 + uint32_t hash;
24249 +- uint32_t digest_len;
24250 ++ uint32_t policydigest_len;
24251 + unsigned char policydigest[MAX_DIGEST_SIZE];
24252 + uint32_t policyhandle;
24253 + };
24254 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
24255 +index 83d1926c61e4..67bc2da5d233 100644
24256 +--- a/include/linux/bpf.h
24257 ++++ b/include/linux/bpf.h
24258 +@@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
24259 + void bpf_register_map_type(struct bpf_map_type_list *tl);
24260 +
24261 + struct bpf_prog *bpf_prog_get(u32 ufd);
24262 ++struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
24263 + void bpf_prog_put(struct bpf_prog *prog);
24264 + void bpf_prog_put_rcu(struct bpf_prog *prog);
24265 +
24266 + struct bpf_map *bpf_map_get_with_uref(u32 ufd);
24267 + struct bpf_map *__bpf_map_get(struct fd f);
24268 +-void bpf_map_inc(struct bpf_map *map, bool uref);
24269 ++struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
24270 + void bpf_map_put_with_uref(struct bpf_map *map);
24271 + void bpf_map_put(struct bpf_map *map);
24272 +
24273 +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
24274 +index 735f9f8c4e43..5261751f6bd4 100644
24275 +--- a/include/linux/can/dev.h
24276 ++++ b/include/linux/can/dev.h
24277 +@@ -40,8 +40,11 @@ struct can_priv {
24278 + struct can_clock clock;
24279 +
24280 + enum can_state state;
24281 +- u32 ctrlmode;
24282 +- u32 ctrlmode_supported;
24283 ++
24284 ++ /* CAN controller features - see include/uapi/linux/can/netlink.h */
24285 ++ u32 ctrlmode; /* current options setting */
24286 ++ u32 ctrlmode_supported; /* options that can be modified by netlink */
24287 ++ u32 ctrlmode_static; /* static enabled options for driver/hardware */
24288 +
24289 + int restart_ms;
24290 + struct timer_list restart_timer;
24291 +@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
24292 + return skb->len == CANFD_MTU;
24293 + }
24294 +
24295 ++/* helper to define static CAN controller features at device creation time */
24296 ++static inline void can_set_static_ctrlmode(struct net_device *dev,
24297 ++ u32 static_mode)
24298 ++{
24299 ++ struct can_priv *priv = netdev_priv(dev);
24300 ++
24301 ++ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
24302 ++ priv->ctrlmode = static_mode;
24303 ++ priv->ctrlmode_static = static_mode;
24304 ++
24305 ++ /* override MTU which was set by default in can_setup()? */
24306 ++ if (static_mode & CAN_CTRLMODE_FD)
24307 ++ dev->mtu = CANFD_MTU;
24308 ++}
24309 ++
24310 + /* get data length from can_dlc with sanitized can_dlc */
24311 + u8 can_dlc2len(u8 can_dlc);
24312 +
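
ctrlmode_static above records option bits fixed by the driver or hardware at device-creation time, so later netlink requests cannot flip them. A standalone sketch of that mask check under illustrative bit values (not the CAN netlink code):

#include <stdint.h>
#include <stdio.h>

struct dev_priv {
	uint32_t ctrlmode;		/* current options */
	uint32_t ctrlmode_static;	/* bits fixed at creation */
};

static int set_ctrlmode(struct dev_priv *p, uint32_t mask, uint32_t flags)
{
	if (mask & p->ctrlmode_static)
		return -1;		/* refuse to touch static bits */
	p->ctrlmode = (p->ctrlmode & ~mask) | (flags & mask);
	return 0;
}

int main(void)
{
	struct dev_priv p = { .ctrlmode = 0x20, .ctrlmode_static = 0x20 };

	printf("%d\n", set_ctrlmode(&p, 0x20, 0));	/* -1: bit is static */
	printf("%d\n", set_ctrlmode(&p, 0x01, 1));	/* 0: free bit */
	return 0;
}
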
24313 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
24314 +index 789471dba6fb..7fc7cb7872e3 100644
24315 +--- a/include/linux/cgroup-defs.h
24316 ++++ b/include/linux/cgroup-defs.h
24317 +@@ -210,6 +210,9 @@ struct css_set {
24318 + /* all css_task_iters currently walking this cset */
24319 + struct list_head task_iters;
24320 +
24321 ++ /* dead and being drained, ignore for migration */
24322 ++ bool dead;
24323 ++
24324 + /* For RCU-protected deletion */
24325 + struct rcu_head rcu_head;
24326 + };
24327 +@@ -439,6 +442,7 @@ struct cgroup_subsys {
24328 + int (*can_attach)(struct cgroup_taskset *tset);
24329 + void (*cancel_attach)(struct cgroup_taskset *tset);
24330 + void (*attach)(struct cgroup_taskset *tset);
24331 ++ void (*post_attach)(void);
24332 + int (*can_fork)(struct task_struct *task);
24333 + void (*cancel_fork)(struct task_struct *task);
24334 + void (*fork)(struct task_struct *task);
24335 +diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
24336 +index 1143e38555a4..408a60dca353 100644
24337 +--- a/include/linux/clk-provider.h
24338 ++++ b/include/linux/clk-provider.h
24339 +@@ -385,6 +385,7 @@ struct clk_divider {
24340 + #define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
24341 +
24342 + extern const struct clk_ops clk_divider_ops;
24343 ++extern const struct clk_ops clk_divider_ro_ops;
24344 +
24345 + unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
24346 + unsigned int val, const struct clk_div_table *table,
24347 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
24348 +index 22ab246feed3..eeae401a2412 100644
24349 +--- a/include/linux/compiler-gcc.h
24350 ++++ b/include/linux/compiler-gcc.h
24351 +@@ -199,7 +199,7 @@
24352 + #define unreachable() __builtin_unreachable()
24353 +
24354 + /* Mark a function definition as prohibited from being cloned. */
24355 +-#define __noclone __attribute__((__noclone__))
24356 ++#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
24357 +
24358 + #endif /* GCC_VERSION >= 40500 */
24359 +
24360 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
24361 +index fea160ee5803..85a868ccb493 100644
24362 +--- a/include/linux/cpuset.h
24363 ++++ b/include/linux/cpuset.h
24364 +@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
24365 + task_unlock(current);
24366 + }
24367 +
24368 +-extern void cpuset_post_attach_flush(void);
24369 +-
24370 + #else /* !CONFIG_CPUSETS */
24371 +
24372 + static inline bool cpusets_enabled(void) { return false; }
24373 +@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
24374 + return false;
24375 + }
24376 +
24377 +-static inline void cpuset_post_attach_flush(void)
24378 +-{
24379 +-}
24380 +-
24381 + #endif /* !CONFIG_CPUSETS */
24382 +
24383 + #endif /* _LINUX_CPUSET_H */
24384 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
24385 +index c4b5f4b3f8f8..96c1a2da92d7 100644
24386 +--- a/include/linux/dcache.h
24387 ++++ b/include/linux/dcache.h
24388 +@@ -161,6 +161,7 @@ struct dentry_operations {
24389 + struct vfsmount *(*d_automount)(struct path *);
24390 + int (*d_manage)(struct dentry *, bool);
24391 + struct inode *(*d_select_inode)(struct dentry *, unsigned);
24392 ++ struct dentry *(*d_real)(struct dentry *, struct inode *);
24393 + } ____cacheline_aligned;
24394 +
24395 + /*
24396 +@@ -227,6 +228,7 @@ struct dentry_operations {
24397 + #define DCACHE_MAY_FREE 0x00800000
24398 + #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
24399 + #define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
24400 ++#define DCACHE_OP_REAL 0x08000000
24401 +
24402 + extern seqlock_t rename_lock;
24403 +
24404 +@@ -582,4 +584,24 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
24405 + return upper;
24406 + }
24407 +
24408 ++static inline struct dentry *d_real(struct dentry *dentry)
24409 ++{
24410 ++ if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
24411 ++ return dentry->d_op->d_real(dentry, NULL);
24412 ++ else
24413 ++ return dentry;
24414 ++}
24415 ++
24416 ++static inline struct inode *vfs_select_inode(struct dentry *dentry,
24417 ++ unsigned open_flags)
24418 ++{
24419 ++ struct inode *inode = d_inode(dentry);
24420 ++
24421 ++ if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
24422 ++ inode = dentry->d_op->d_select_inode(dentry, open_flags);
24423 ++
24424 ++ return inode;
24425 ++}
24426 ++
24427 ++
24428 + #endif /* __LINUX_DCACHE_H */
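
d_real() and vfs_select_inode() above share one dispatch shape: stacked filesystems such as overlayfs supply an op that returns the underlying object, and everything else returns itself. A standalone sketch of that pattern with illustrative types (not the VFS structures):

#include <stdio.h>

struct obj;
struct obj_ops { struct obj *(*real)(struct obj *); };

struct obj {
	const struct obj_ops *ops;
	struct obj *lower;		/* set only for stacked objects */
	const char *name;
};

static struct obj *stacked_real(struct obj *o) { return o->lower; }
static const struct obj_ops stacked_ops = { .real = stacked_real };

static struct obj *obj_real(struct obj *o)
{
	/* dispatch through the op when present, as d_real() does */
	return (o->ops && o->ops->real) ? o->ops->real(o) : o;
}

int main(void)
{
	struct obj lower = { .name = "lower" };
	struct obj upper = { .ops = &stacked_ops, .lower = &lower,
			     .name = "overlay" };

	printf("%s\n", obj_real(&upper)->name);	/* prints: lower */
	return 0;
}
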
24429 +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
24430 +index ec1c61c87d89..899ab9f8549e 100644
24431 +--- a/include/linux/device-mapper.h
24432 ++++ b/include/linux/device-mapper.h
24433 +@@ -124,6 +124,8 @@ struct dm_dev {
24434 + char name[16];
24435 + };
24436 +
24437 ++dev_t dm_get_dev_t(const char *path);
24438 ++
24439 + /*
24440 + * Constructors should call these functions to ensure destination devices
24441 + * are opened/closed correctly.
24442 +diff --git a/include/linux/filter.h b/include/linux/filter.h
24443 +index 43aa1f8855c7..a51a5361695f 100644
24444 +--- a/include/linux/filter.h
24445 ++++ b/include/linux/filter.h
24446 +@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
24447 + void bpf_prog_destroy(struct bpf_prog *fp);
24448 +
24449 + int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
24450 ++int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
24451 ++ bool locked);
24452 + int sk_attach_bpf(u32 ufd, struct sock *sk);
24453 + int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
24454 + int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
24455 + int sk_detach_filter(struct sock *sk);
24456 ++int __sk_detach_filter(struct sock *sk, bool locked);
24457 ++
24458 + int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
24459 + unsigned int len);
24460 +
24461 +diff --git a/include/linux/fs.h b/include/linux/fs.h
24462 +index ae681002100a..83c77b093ce2 100644
24463 +--- a/include/linux/fs.h
24464 ++++ b/include/linux/fs.h
24465 +@@ -1234,6 +1234,16 @@ static inline struct inode *file_inode(const struct file *f)
24466 + return f->f_inode;
24467 + }
24468 +
24469 ++static inline struct dentry *file_dentry(const struct file *file)
24470 ++{
24471 ++ struct dentry *dentry = file->f_path.dentry;
24472 ++
24473 ++ if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
24474 ++ return dentry->d_op->d_real(dentry, file_inode(file));
24475 ++ else
24476 ++ return dentry;
24477 ++}
24478 ++
24479 + static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
24480 + {
24481 + return locks_lock_inode_wait(file_inode(filp), fl);
24482 +@@ -2259,7 +2269,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
24483 + extern struct file *file_open_name(struct filename *, int, umode_t);
24484 + extern struct file *filp_open(const char *, int, umode_t);
24485 + extern struct file *file_open_root(struct dentry *, struct vfsmount *,
24486 +- const char *, int);
24487 ++ const char *, int, umode_t);
24488 + extern struct file * dentry_open(const struct path *, int, const struct cred *);
24489 + extern int filp_close(struct file *, fl_owner_t id);
24490 +
24491 +diff --git a/include/linux/hash.h b/include/linux/hash.h
24492 +index 1afde47e1528..79c52fa81cac 100644
24493 +--- a/include/linux/hash.h
24494 ++++ b/include/linux/hash.h
24495 +@@ -32,12 +32,28 @@
24496 + #error Wordsize not 32 or 64
24497 + #endif
24498 +
24499 ++/*
24500 ++ * The above primes are actively bad for hashing, since they are
24501 ++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
24502 ++ * real problems. Besides, the "prime" part is pointless for the
24503 ++ * multiplicative hash.
24504 ++ *
24505 ++ * Although a random odd number will do, it turns out that the golden
24506 ++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
24507 ++ * properties.
24508 ++ *
24509 ++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
24510 ++ * (See Knuth vol 3, section 6.4, exercise 9.)
24511 ++ */
24512 ++#define GOLDEN_RATIO_32 0x61C88647
24513 ++#define GOLDEN_RATIO_64 0x61C8864680B583EBull
24514 ++
24515 + static __always_inline u64 hash_64(u64 val, unsigned int bits)
24516 + {
24517 + u64 hash = val;
24518 +
24519 +-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
24520 +- hash = hash * GOLDEN_RATIO_PRIME_64;
24521 ++#if BITS_PER_LONG == 64
24522 ++ hash = hash * GOLDEN_RATIO_64;
24523 + #else
24524 + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
24525 + u64 n = hash;
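As a standalone illustration (not part of the patch), the scheme the
comment above describes is a plain multiplicative hash: multiply by an
odd constant derived from the golden ratio, then keep the topmost bits
of the product, which are the best mixed. A minimal userspace sketch,
reusing the 64-bit constant from this hunk (bits must be 1..64):

	#include <stdint.h>

	#define GOLDEN_RATIO_64 0x61C8864680B583EBull

	/* Multiply, then take the high `bits` bits of the product. */
	static inline uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
	{
		return (val * GOLDEN_RATIO_64) >> (64 - bits);
	}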
24526 +diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
24527 +index a338a688ee4a..dcb89e3515db 100644
24528 +--- a/include/linux/if_bridge.h
24529 ++++ b/include/linux/if_bridge.h
24530 +@@ -46,10 +46,6 @@ struct br_ip_list {
24531 + #define BR_LEARNING_SYNC BIT(9)
24532 + #define BR_PROXYARP_WIFI BIT(10)
24533 +
24534 +-/* values as per ieee8021QBridgeFdbAgingTime */
24535 +-#define BR_MIN_AGEING_TIME (10 * HZ)
24536 +-#define BR_MAX_AGEING_TIME (1000000 * HZ)
24537 +-
24538 + #define BR_DEFAULT_AGEING_TIME (300 * HZ)
24539 +
24540 + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
24541 +diff --git a/include/linux/kernel.h b/include/linux/kernel.h
24542 +index f31638c6e873..95452f72349a 100644
24543 +--- a/include/linux/kernel.h
24544 ++++ b/include/linux/kernel.h
24545 +@@ -635,7 +635,7 @@ do { \
24546 +
24547 + #define do_trace_printk(fmt, args...) \
24548 + do { \
24549 +- static const char *trace_printk_fmt \
24550 ++ static const char *trace_printk_fmt __used \
24551 + __attribute__((section("__trace_printk_fmt"))) = \
24552 + __builtin_constant_p(fmt) ? fmt : NULL; \
24553 + \
24554 +@@ -679,7 +679,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
24555 + */
24556 +
24557 + #define trace_puts(str) ({ \
24558 +- static const char *trace_printk_fmt \
24559 ++ static const char *trace_printk_fmt __used \
24560 + __attribute__((section("__trace_printk_fmt"))) = \
24561 + __builtin_constant_p(str) ? str : NULL; \
24562 + \
24563 +@@ -701,7 +701,7 @@ extern void trace_dump_stack(int skip);
24564 + #define ftrace_vprintk(fmt, vargs) \
24565 + do { \
24566 + if (__builtin_constant_p(fmt)) { \
24567 +- static const char *trace_printk_fmt \
24568 ++ static const char *trace_printk_fmt __used \
24569 + __attribute__((section("__trace_printk_fmt"))) = \
24570 + __builtin_constant_p(fmt) ? fmt : NULL; \
24571 + \
24572 +diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
24573 +index b288965e8101..2c14eeca46f0 100644
24574 +--- a/include/linux/mfd/samsung/s2mps11.h
24575 ++++ b/include/linux/mfd/samsung/s2mps11.h
24576 +@@ -173,10 +173,12 @@ enum s2mps11_regulators {
24577 +
24578 + #define S2MPS11_LDO_VSEL_MASK 0x3F
24579 + #define S2MPS11_BUCK_VSEL_MASK 0xFF
24580 ++#define S2MPS11_BUCK9_VSEL_MASK 0x1F
24581 + #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
24582 + #define S2MPS11_ENABLE_SHIFT 0x06
24583 + #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
24584 + #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
24585 ++#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
24586 + #define S2MPS11_RAMP_DELAY 25000 /* uV/us */
24587 +
24588 + #define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
24589 +diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
24590 +index 987764afa65c..f8b83792939b 100644
24591 +--- a/include/linux/mlx5/device.h
24592 ++++ b/include/linux/mlx5/device.h
24593 +@@ -363,6 +363,17 @@ enum {
24594 + MLX5_CAP_OFF_CMDIF_CSUM = 46,
24595 + };
24596 +
24597 ++enum {
24598 ++ /*
24599 ++ * Max wqe size for rdma read is 512 bytes, so this
24600 ++ * limits our max_sge_rd as the wqe needs to fit:
24601 ++ * - ctrl segment (16 bytes)
24602 ++ * - rdma segment (16 bytes)
24603 ++ * - scatter elements (16 bytes each)
24604 ++ */
24605 ++ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
24606 ++};
24607 ++
24608 + struct mlx5_inbox_hdr {
24609 + __be16 opcode;
24610 + u8 rsvd[4];
24611 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
24612 +index 1e3006dcf35d..46dd88e7071b 100644
24613 +--- a/include/linux/mlx5/driver.h
24614 ++++ b/include/linux/mlx5/driver.h
24615 +@@ -813,9 +813,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
24616 + int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
24617 + enum mlx5_port_status *status);
24618 +
24619 +-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
24620 +-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
24621 +-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
24622 ++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
24623 ++void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
24624 ++void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
24625 + u8 port);
24626 +
24627 + int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
24628 +diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
24629 +index 123771003e68..a3f3c71124d3 100644
24630 +--- a/include/linux/mlx5/vport.h
24631 ++++ b/include/linux/mlx5/vport.h
24632 +@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
24633 + u16 vport, u8 *addr);
24634 + int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
24635 + u16 vport, u8 *addr);
24636 ++int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
24637 ++int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
24638 + int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
24639 + u64 *system_image_guid);
24640 + int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
24641 +diff --git a/include/linux/mm.h b/include/linux/mm.h
24642 +index 516e14944339..e4e59f9d82f1 100644
24643 +--- a/include/linux/mm.h
24644 ++++ b/include/linux/mm.h
24645 +@@ -456,11 +456,20 @@ static inline int page_mapcount(struct page *page)
24646 +
24647 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
24648 + int total_mapcount(struct page *page);
24649 ++int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
24650 + #else
24651 + static inline int total_mapcount(struct page *page)
24652 + {
24653 + return page_mapcount(page);
24654 + }
24655 ++static inline int page_trans_huge_mapcount(struct page *page,
24656 ++ int *total_mapcount)
24657 ++{
24658 ++ int mapcount = page_mapcount(page);
24659 ++ if (total_mapcount)
24660 ++ *total_mapcount = mapcount;
24661 ++ return mapcount;
24662 ++}
24663 + #endif
24664 +
24665 + static inline int page_count(struct page *page)
24666 +@@ -1010,6 +1019,8 @@ static inline bool page_mapped(struct page *page)
24667 + page = compound_head(page);
24668 + if (atomic_read(compound_mapcount_ptr(page)) >= 0)
24669 + return true;
24670 ++ if (PageHuge(page))
24671 ++ return false;
24672 + for (i = 0; i < hpage_nr_pages(page); i++) {
24673 + if (atomic_read(&page[i]._mapcount) >= 0)
24674 + return true;
24675 +@@ -1117,6 +1128,8 @@ struct zap_details {
24676 +
24677 + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
24678 + pte_t pte);
24679 ++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
24680 ++ pmd_t pmd);
24681 +
24682 + int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
24683 + unsigned long size);
24684 +diff --git a/include/linux/net.h b/include/linux/net.h
24685 +index 0b4ac7da583a..25ef630f1bd6 100644
24686 +--- a/include/linux/net.h
24687 ++++ b/include/linux/net.h
24688 +@@ -245,7 +245,15 @@ do { \
24689 + net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
24690 + #define net_info_ratelimited(fmt, ...) \
24691 + net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
24692 +-#if defined(DEBUG)
24693 ++#if defined(CONFIG_DYNAMIC_DEBUG)
24694 ++#define net_dbg_ratelimited(fmt, ...) \
24695 ++do { \
24696 ++ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
24697 ++ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
24698 ++ net_ratelimit()) \
24699 ++ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
24700 ++} while (0)
24701 ++#elif defined(DEBUG)
24702 + #define net_dbg_ratelimited(fmt, ...) \
24703 + net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
24704 + #else
24705 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
24706 +index 5440b7b705eb..6d1d8f4f759b 100644
24707 +--- a/include/linux/netdevice.h
24708 ++++ b/include/linux/netdevice.h
24709 +@@ -267,6 +267,7 @@ struct header_ops {
24710 + void (*cache_update)(struct hh_cache *hh,
24711 + const struct net_device *dev,
24712 + const unsigned char *haddr);
24713 ++ bool (*validate)(const char *ll_header, unsigned int len);
24714 + };
24715 +
24716 + /* These flag bits are private to the generic network queueing
24717 +@@ -1420,8 +1421,7 @@ enum netdev_priv_flags {
24718 + * @dma: DMA channel
24719 + * @mtu: Interface MTU value
24720 + * @type: Interface hardware type
24721 +- * @hard_header_len: Hardware header length, which means that this is the
24722 +- * minimum size of a packet.
24723 ++ * @hard_header_len: Maximum hardware header length.
24724 + *
24725 + * @needed_headroom: Extra headroom the hardware may need, but not in all
24726 + * cases can this be guaranteed
24727 +@@ -2627,6 +2627,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
24728 + return dev->header_ops->parse(skb, haddr);
24729 + }
24730 +
24731 ++/* ll_header must have at least hard_header_len allocated */
24732 ++static inline bool dev_validate_header(const struct net_device *dev,
24733 ++ char *ll_header, int len)
24734 ++{
24735 ++ if (likely(len >= dev->hard_header_len))
24736 ++ return true;
24737 ++
24738 ++ if (capable(CAP_SYS_RAWIO)) {
24739 ++ memset(ll_header + len, 0, dev->hard_header_len - len);
24740 ++ return true;
24741 ++ }
24742 ++
24743 ++ if (dev->header_ops && dev->header_ops->validate)
24744 ++ return dev->header_ops->validate(ll_header, len);
24745 ++
24746 ++ return false;
24747 ++}
24748 ++
24749 + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
24750 + int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
24751 + static inline int unregister_gifconf(unsigned int family)
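A hedged sketch of the contract dev_validate_header() establishes (the
real consumer is the packet-socket transmit path; the function name
below is hypothetical): a user-supplied link-layer header shorter than
hard_header_len is either zero-padded for CAP_SYS_RAWIO callers,
accepted by the device's ->validate() hook, or rejected.

	static int xmit_user_frame(struct net_device *dev, char *ll_header,
				   int len)
	{
		/* Short headers are padded, validated, or refused. */
		if (!dev_validate_header(dev, ll_header, len))
			return -EINVAL;
		/* ... copy the frame into an skb and dev_queue_xmit() ... */
		return 0;
	}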
24752 +diff --git a/include/linux/pci.h b/include/linux/pci.h
24753 +index 27716254dcc5..60042ab5d7bd 100644
24754 +--- a/include/linux/pci.h
24755 ++++ b/include/linux/pci.h
24756 +@@ -359,6 +359,7 @@ struct pci_dev {
24757 + unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
24758 + unsigned int irq_managed:1;
24759 + unsigned int has_secondary_link:1;
24760 ++ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
24761 + pci_dev_flags_t dev_flags;
24762 + atomic_t enable_cnt; /* pci_enable_device has been called */
24763 +
24764 +diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
24765 +index 2a330ec9e2af..d1397c8ed94e 100644
24766 +--- a/include/linux/platform_data/mmp_dma.h
24767 ++++ b/include/linux/platform_data/mmp_dma.h
24768 +@@ -14,6 +14,7 @@
24769 +
24770 + struct mmp_dma_platdata {
24771 + int dma_channels;
24772 ++ int nb_requestors;
24773 + };
24774 +
24775 + #endif /* MMP_DMA_H */
24776 +diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
24777 +index 1c33dd7da4a7..4ae95f7e8597 100644
24778 +--- a/include/linux/rculist_nulls.h
24779 ++++ b/include/linux/rculist_nulls.h
24780 +@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
24781 + if (!is_a_nulls(first))
24782 + first->pprev = &n->next;
24783 + }
24784 ++
24785 ++/**
24786 ++ * hlist_nulls_add_tail_rcu
24787 ++ * @n: the element to add to the hash list.
24788 ++ * @h: the list to add to.
24789 ++ *
24790 ++ * Description:
24791 ++ * Adds the specified element to the end of the specified hlist_nulls,
24792 ++ * while permitting racing traversals. NOTE: tail insertion requires
24793 ++ * list traversal.
24794 ++ *
24795 ++ * The caller must take whatever precautions are necessary
24796 ++ * (such as holding appropriate locks) to avoid racing
24797 ++ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
24798 ++ * or hlist_nulls_del_rcu(), running on this same list.
24799 ++ * However, it is perfectly legal to run concurrently with
24800 ++ * the _rcu list-traversal primitives, such as
24801 ++ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
24802 ++ * problems on Alpha CPUs. Regardless of the type of CPU, the
24803 ++ * list-traversal primitive must be guarded by rcu_read_lock().
24804 ++ */
24805 ++static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
24806 ++ struct hlist_nulls_head *h)
24807 ++{
24808 ++ struct hlist_nulls_node *i, *last = NULL;
24809 ++
24810 ++ for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
24811 ++ i = hlist_nulls_next_rcu(i))
24812 ++ last = i;
24813 ++
24814 ++ if (last) {
24815 ++ n->next = last->next;
24816 ++ n->pprev = &last->next;
24817 ++ rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
24818 ++ } else {
24819 ++ hlist_nulls_add_head_rcu(n, h);
24820 ++ }
24821 ++}
24822 ++
24823 + /**
24824 + * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
24825 + * @tpos: the type * to use as a loop cursor.
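A userspace model (an assumption-laden sketch, with RCU publication
reduced to a plain store) of why the kernel-doc above warns that tail
insertion requires list traversal: a head-only list keeps no tail
pointer, so the last node must be found by walking before the new node
can be linked in.

	struct node { struct node *next; int val; };

	static void add_tail(struct node **head, struct node *n)
	{
		struct node *i, *last = NULL;

		for (i = *head; i; i = i->next)	/* O(n) walk to the tail */
			last = i;

		n->next = NULL;
		if (last)
			last->next = n;	/* kernel: rcu_assign_pointer() */
		else
			*head = n;	/* empty list: behave like add_head */
	}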
24826 +diff --git a/include/linux/signal.h b/include/linux/signal.h
24827 +index 92557bbce7e7..d80259afb9e5 100644
24828 +--- a/include/linux/signal.h
24829 ++++ b/include/linux/signal.h
24830 +@@ -28,6 +28,21 @@ struct sigpending {
24831 + sigset_t signal;
24832 + };
24833 +
24834 ++#ifndef HAVE_ARCH_COPY_SIGINFO
24835 ++
24836 ++#include <linux/string.h>
24837 ++
24838 ++static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
24839 ++{
24840 ++ if (from->si_code < 0)
24841 ++ memcpy(to, from, sizeof(*to));
24842 ++ else
24843 ++ /* _sigchld is currently the largest known union member */
24844 ++ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
24845 ++}
24846 ++
24847 ++#endif
24848 ++
24849 + /*
24850 + * Define some primitives to manipulate sigset_t.
24851 + */
24852 +diff --git a/include/linux/swap.h b/include/linux/swap.h
24853 +index d18b65c53dbb..b974a2106dd7 100644
24854 +--- a/include/linux/swap.h
24855 ++++ b/include/linux/swap.h
24856 +@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
24857 + extern int page_swapcount(struct page *);
24858 + extern int swp_swapcount(swp_entry_t entry);
24859 + extern struct swap_info_struct *page_swap_info(struct page *);
24860 +-extern int reuse_swap_page(struct page *);
24861 ++extern bool reuse_swap_page(struct page *, int *);
24862 + extern int try_to_free_swap(struct page *);
24863 + struct backing_dev_info;
24864 +
24865 +@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
24866 + return 0;
24867 + }
24868 +
24869 +-#define reuse_swap_page(page) \
24870 +- (!PageTransCompound(page) && page_mapcount(page) == 1)
24871 ++#define reuse_swap_page(page, total_mapcount) \
24872 ++ (page_trans_huge_mapcount(page, total_mapcount) == 1)
24873 +
24874 + static inline int try_to_free_swap(struct page *page)
24875 + {
24876 +@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
24877 + #ifdef CONFIG_MEMCG
24878 + static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
24879 + {
24880 ++ /* Cgroup2 doesn't have per-cgroup swappiness */
24881 ++ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
24882 ++ return vm_swappiness;
24883 ++
24884 + /* root ? */
24885 + if (mem_cgroup_disabled() || !memcg->css.parent)
24886 + return vm_swappiness;
24887 +diff --git a/include/linux/thermal.h b/include/linux/thermal.h
24888 +index e13a1ace50e9..4a849f19e6c9 100644
24889 +--- a/include/linux/thermal.h
24890 ++++ b/include/linux/thermal.h
24891 +@@ -156,6 +156,7 @@ struct thermal_attr {
24892 + * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
24893 + * @devdata: private pointer for device private data
24894 + * @trips: number of trip points the thermal zone supports
24895 ++ * @trips_disabled: bitmap for disabled trips
24896 + * @passive_delay: number of milliseconds to wait between polls when
24897 + * performing passive cooling.
24898 + * @polling_delay: number of milliseconds to wait between polls when
24899 +@@ -191,6 +192,7 @@ struct thermal_zone_device {
24900 + struct thermal_attr *trip_hyst_attrs;
24901 + void *devdata;
24902 + int trips;
24903 ++ unsigned long trips_disabled; /* bitmap for disabled trips */
24904 + int passive_delay;
24905 + int polling_delay;
24906 + int temperature;
24907 +diff --git a/include/linux/tty.h b/include/linux/tty.h
24908 +index d9fb4b043f56..e5b996d887ce 100644
24909 +--- a/include/linux/tty.h
24910 ++++ b/include/linux/tty.h
24911 +@@ -338,7 +338,6 @@ struct tty_file_private {
24912 + #define TTY_EXCLUSIVE 3 /* Exclusive open mode */
24913 + #define TTY_DEBUG 4 /* Debugging */
24914 + #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
24915 +-#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
24916 + #define TTY_LDISC_OPEN 11 /* Line discipline is open */
24917 + #define TTY_PTY_LOCK 16 /* pty private */
24918 + #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
24919 +@@ -464,6 +463,7 @@ extern void tty_buffer_init(struct tty_port *port);
24920 + extern void tty_buffer_set_lock_subclass(struct tty_port *port);
24921 + extern bool tty_buffer_restart_work(struct tty_port *port);
24922 + extern bool tty_buffer_cancel_work(struct tty_port *port);
24923 ++extern void tty_buffer_flush_work(struct tty_port *port);
24924 + extern speed_t tty_termios_baud_rate(struct ktermios *termios);
24925 + extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
24926 + extern void tty_termios_encode_baud_rate(struct ktermios *termios,
24927 +@@ -589,7 +589,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
24928 + count = ld->ops->receive_buf2(ld->tty, p, f, count);
24929 + else {
24930 + count = min_t(int, count, ld->tty->receive_room);
24931 +- if (count)
24932 ++ if (count && ld->ops->receive_buf)
24933 + ld->ops->receive_buf(ld->tty, p, f, count);
24934 + }
24935 + return count;
24936 +diff --git a/include/linux/usb.h b/include/linux/usb.h
24937 +index 89533ba38691..f3dbc217ff41 100644
24938 +--- a/include/linux/usb.h
24939 ++++ b/include/linux/usb.h
24940 +@@ -1066,7 +1066,7 @@ struct usbdrv_wrap {
24941 + * for interfaces bound to this driver.
24942 + * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
24943 + * endpoints before calling the driver's disconnect method.
24944 +- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
24945 ++ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
24946 + * to initiate lower power link state transitions when an idle timeout
24947 + * occurs. Device-initiated USB 3.0 link PM will still be allowed.
24948 + *
24949 +diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
24950 +index 7f5f78bd15ad..245f57dbbb61 100644
24951 +--- a/include/linux/usb_usual.h
24952 ++++ b/include/linux/usb_usual.h
24953 +@@ -79,6 +79,8 @@
24954 + /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
24955 + US_FLAG(MAX_SECTORS_240, 0x08000000) \
24956 + /* Sets max_sectors to 240 */ \
24957 ++ US_FLAG(NO_REPORT_LUNS, 0x10000000) \
24958 ++ /* Cannot handle REPORT_LUNS */ \
24959 +
24960 + #define US_FLAG(name, value) US_FL_##name = value ,
24961 + enum { US_DO_ALL_FLAGS };
24962 +diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
24963 +index 8a0f55b6c2ba..88e3ab496e8f 100644
24964 +--- a/include/media/videobuf2-core.h
24965 ++++ b/include/media/videobuf2-core.h
24966 +@@ -375,6 +375,9 @@ struct vb2_ops {
24967 + /**
24968 + * struct vb2_ops - driver-specific callbacks
24969 + *
24970 ++ * @verify_planes_array: Verify that a given user space structure contains
24971 ++ * enough planes for the buffer. This is called
24972 ++ * for each dequeued buffer.
24973 + * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
24974 + * For V4L2 this is a struct v4l2_buffer.
24975 + * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
24976 +@@ -384,6 +387,7 @@ struct vb2_ops {
24977 + * the vb2_buffer struct.
24978 + */
24979 + struct vb2_buf_ops {
24980 ++ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
24981 + void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
24982 + int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
24983 + struct vb2_plane *planes);
24984 +@@ -400,6 +404,9 @@ struct vb2_buf_ops {
24985 + * @fileio_read_once: report EOF after reading the first buffer
24986 + * @fileio_write_immediately: queue buffer after each write() call
24987 + * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
24988 ++ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
24989 ++ * has not been called. This is a vb1 idiom that has been adopted
24990 ++ * also by vb2.
24991 + * @lock: pointer to a mutex that protects the vb2_queue struct. The
24992 + * driver can set this to a mutex to let the v4l2 core serialize
24993 + * the queuing ioctls. If the driver wants to handle locking
24994 +@@ -463,6 +470,7 @@ struct vb2_queue {
24995 + unsigned fileio_read_once:1;
24996 + unsigned fileio_write_immediately:1;
24997 + unsigned allow_zero_bytesused:1;
24998 ++ unsigned quirk_poll_must_check_waiting_for_buffers:1;
24999 +
25000 + struct mutex *lock;
25001 + void *owner;
25002 +diff --git a/include/net/bonding.h b/include/net/bonding.h
25003 +index ee6c52053aa3..791800ddd6d9 100644
25004 +--- a/include/net/bonding.h
25005 ++++ b/include/net/bonding.h
25006 +@@ -215,6 +215,7 @@ struct bonding {
25007 + * ALB mode (6) - to sync the use and modifications of its hash table
25008 + */
25009 + spinlock_t mode_lock;
25010 ++ spinlock_t stats_lock;
25011 + u8 send_peer_notif;
25012 + u8 igmp_retrans;
25013 + #ifdef CONFIG_PROC_FS
25014 +diff --git a/include/net/codel.h b/include/net/codel.h
25015 +index 267e70210061..d168aca115cc 100644
25016 +--- a/include/net/codel.h
25017 ++++ b/include/net/codel.h
25018 +@@ -162,12 +162,14 @@ struct codel_vars {
25019 + * struct codel_stats - contains codel shared variables and stats
25020 + * @maxpacket: largest packet we've seen so far
25021 + * @drop_count: temp count of dropped packets in dequeue()
25022 ++ * @drop_len: bytes of dropped packets in dequeue()
25023 + * ecn_mark: number of packets we ECN marked instead of dropping
25024 + * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
25025 + */
25026 + struct codel_stats {
25027 + u32 maxpacket;
25028 + u32 drop_count;
25029 ++ u32 drop_len;
25030 + u32 ecn_mark;
25031 + u32 ce_mark;
25032 + };
25033 +@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
25034 + vars->rec_inv_sqrt);
25035 + goto end;
25036 + }
25037 ++ stats->drop_len += qdisc_pkt_len(skb);
25038 + qdisc_drop(skb, sch);
25039 + stats->drop_count++;
25040 + skb = dequeue_func(vars, sch);
25041 +@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
25042 + if (params->ecn && INET_ECN_set_ce(skb)) {
25043 + stats->ecn_mark++;
25044 + } else {
25045 ++ stats->drop_len += qdisc_pkt_len(skb);
25046 + qdisc_drop(skb, sch);
25047 + stats->drop_count++;
25048 +
25049 +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
25050 +index 0816c872b689..a6cc576fd467 100644
25051 +--- a/include/net/ip_vs.h
25052 ++++ b/include/net/ip_vs.h
25053 +@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
25054 + }
25055 + #endif /* CONFIG_IP_VS_NFCT */
25056 +
25057 ++/* Really using conntrack? */
25058 ++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
25059 ++ struct sk_buff *skb)
25060 ++{
25061 ++#ifdef CONFIG_IP_VS_NFCT
25062 ++ enum ip_conntrack_info ctinfo;
25063 ++ struct nf_conn *ct;
25064 ++
25065 ++ if (!(cp->flags & IP_VS_CONN_F_NFCT))
25066 ++ return false;
25067 ++ ct = nf_ct_get(skb, &ctinfo);
25068 ++ if (ct && !nf_ct_is_untracked(ct))
25069 ++ return true;
25070 ++#endif
25071 ++ return false;
25072 ++}
25073 ++
25074 + static inline int
25075 + ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
25076 + {
25077 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
25078 +index 636a362a0e03..e5bba897d206 100644
25079 +--- a/include/net/sch_generic.h
25080 ++++ b/include/net/sch_generic.h
25081 +@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
25082 + struct Qdisc *qdisc);
25083 + void qdisc_reset(struct Qdisc *qdisc);
25084 + void qdisc_destroy(struct Qdisc *qdisc);
25085 +-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
25086 ++void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
25087 ++ unsigned int len);
25088 + struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
25089 + const struct Qdisc_ops *ops);
25090 + struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
25091 +@@ -707,6 +708,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
25092 + sch->qstats.backlog = 0;
25093 + }
25094 +
25095 ++static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
25096 ++ struct Qdisc **pold)
25097 ++{
25098 ++ struct Qdisc *old;
25099 ++
25100 ++ sch_tree_lock(sch);
25101 ++ old = *pold;
25102 ++ *pold = new;
25103 ++ if (old != NULL) {
25104 ++ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
25105 ++ qdisc_reset(old);
25106 ++ }
25107 ++ sch_tree_unlock(sch);
25108 ++
25109 ++ return old;
25110 ++}
25111 ++
25112 + static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
25113 + struct sk_buff_head *list)
25114 + {
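A sketch of the pattern qdisc_replace() enables in a classful qdisc's
->graft() op (struct example_sched_data is hypothetical; sch_tbf and
similar qdiscs follow this shape): the helper swaps in the new child
and, via qdisc_tree_reduce_backlog(), removes the old child's qlen and
backlog from every ancestor in one place.

	static int example_graft(struct Qdisc *sch, unsigned long arg,
				 struct Qdisc *new, struct Qdisc **old)
	{
		struct example_sched_data *q = qdisc_priv(sch);

		if (new == NULL)
			new = &noop_qdisc;

		*old = qdisc_replace(sch, new, &q->qdisc);
		return 0;
	}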
25115 +diff --git a/include/net/sock.h b/include/net/sock.h
25116 +index f5ea148853e2..3c688ca3456d 100644
25117 +--- a/include/net/sock.h
25118 ++++ b/include/net/sock.h
25119 +@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
25120 +
25121 + static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
25122 + {
25123 +- hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
25124 ++ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
25125 ++ sk->sk_family == AF_INET6)
25126 ++ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
25127 ++ else
25128 ++ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
25129 + }
25130 +
25131 + static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
25132 +diff --git a/include/rdma/ib.h b/include/rdma/ib.h
25133 +index cf8f9e700e48..a6b93706b0fc 100644
25134 +--- a/include/rdma/ib.h
25135 ++++ b/include/rdma/ib.h
25136 +@@ -34,6 +34,7 @@
25137 + #define _RDMA_IB_H
25138 +
25139 + #include <linux/types.h>
25140 ++#include <linux/sched.h>
25141 +
25142 + struct ib_addr {
25143 + union {
25144 +@@ -86,4 +87,19 @@ struct sockaddr_ib {
25145 + __u64 sib_scope_id;
25146 + };
25147 +
25148 ++/*
25149 ++ * The IB interfaces that use write() as bi-directional ioctl() are
25150 ++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
25151 ++ * calls from various contexts with elevated privileges. That includes the
25152 ++ * traditional suid executable error message writes, but also various kernel
25153 ++ * interfaces that can write to file descriptors.
25154 ++ *
25155 ++ * This function provides protection for the legacy API by restricting the
25156 ++ * calling context.
25157 ++ */
25158 ++static inline bool ib_safe_file_access(struct file *filp)
25159 ++{
25160 ++ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
25161 ++}
25162 ++
25163 + #endif /* _RDMA_IB_H */
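A hedged sketch of how a legacy write()-as-ioctl handler is expected
to use ib_safe_file_access() (the handler name is illustrative; the
real users are the uverbs/ucm/ucma write paths):

	static ssize_t legacy_ib_write(struct file *filp,
				       const char __user *buf,
				       size_t count, loff_t *pos)
	{
		/* Refuse writes arriving from splice, suid error paths,
		 * or any context whose creds differ from the opener's. */
		if (!ib_safe_file_access(filp))
			return -EACCES;

		/* ... parse and execute the command from buf ... */
		return count;
	}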
25164 +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
25165 +index f63a16760ae9..a5d31f794cac 100644
25166 +--- a/include/scsi/scsi_device.h
25167 ++++ b/include/scsi/scsi_device.h
25168 +@@ -240,6 +240,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
25169 + enum scsi_target_state {
25170 + STARGET_CREATED = 1,
25171 + STARGET_RUNNING,
25172 ++ STARGET_REMOVE,
25173 + STARGET_DEL,
25174 + };
25175 +
25176 +@@ -513,6 +514,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
25177 + return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
25178 + }
25179 +
25180 ++/**
25181 ++ * scsi_device_supports_vpd - test if a device supports VPD pages
25182 ++ * @sdev: the &struct scsi_device to test
25183 ++ *
25184 ++ * If the 'try_vpd_pages' flag is set it takes precedence.
25185 ++ * Otherwise we will assume VPD pages are supported if the
25186 ++ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
25187 ++ */
25188 ++static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
25189 ++{
25190 ++ /* Attempt VPD inquiry if the device blacklist explicitly calls
25191 ++ * for it.
25192 ++ */
25193 ++ if (sdev->try_vpd_pages)
25194 ++ return 1;
25195 ++ /*
25196 ++ * Although VPD inquiries can go to SCSI-2 type devices,
25197 ++ * some USB ones crash on receiving them, and the pages
25198 ++ * we currently ask for are for SPC-3 and beyond
25199 ++ */
25200 ++ if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
25201 ++ return 1;
25202 ++ return 0;
25203 ++}
25204 ++
25205 + #define MODULE_ALIAS_SCSI_DEVICE(type) \
25206 + MODULE_ALIAS("scsi:t-" __stringify(type) "*")
25207 + #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
25208 +diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
25209 +index fa341fcb5829..f5842bcd9c94 100644
25210 +--- a/include/sound/hda_i915.h
25211 ++++ b/include/sound/hda_i915.h
25212 +@@ -9,7 +9,7 @@
25213 + #ifdef CONFIG_SND_HDA_I915
25214 + int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
25215 + int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
25216 +-int snd_hdac_get_display_clk(struct hdac_bus *bus);
25217 ++void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
25218 + int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
25219 + int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
25220 + bool *audio_enabled, char *buffer, int max_bytes);
25221 +@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
25222 + {
25223 + return 0;
25224 + }
25225 +-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
25226 ++static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
25227 + {
25228 +- return 0;
25229 + }
25230 + static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
25231 + int rate)
25232 +diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
25233 +index c21c38ce7450..93e63c56f48f 100644
25234 +--- a/include/sound/hdaudio.h
25235 ++++ b/include/sound/hdaudio.h
25236 +@@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec);
25237 + int snd_hdac_power_down(struct hdac_device *codec);
25238 + int snd_hdac_power_up_pm(struct hdac_device *codec);
25239 + int snd_hdac_power_down_pm(struct hdac_device *codec);
25240 ++int snd_hdac_keep_power_up(struct hdac_device *codec);
25241 + #else
25242 + static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
25243 + static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
25244 + static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
25245 + static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
25246 ++static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
25247 + #endif
25248 +
25249 + /*
25250 +diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
25251 +index 9cf2394f0bcf..752f5dc040a5 100644
25252 +--- a/include/uapi/linux/if.h
25253 ++++ b/include/uapi/linux/if.h
25254 +@@ -19,14 +19,20 @@
25255 + #ifndef _LINUX_IF_H
25256 + #define _LINUX_IF_H
25257 +
25258 ++#include <linux/libc-compat.h> /* for compatibility with glibc */
25259 + #include <linux/types.h> /* for "__kernel_caddr_t" et al */
25260 + #include <linux/socket.h> /* for "struct sockaddr" et al */
25261 + #include <linux/compiler.h> /* for "__user" et al */
25262 +
25263 ++#if __UAPI_DEF_IF_IFNAMSIZ
25264 + #define IFNAMSIZ 16
25265 ++#endif /* __UAPI_DEF_IF_IFNAMSIZ */
25266 + #define IFALIASZ 256
25267 + #include <linux/hdlc/ioctl.h>
25268 +
25269 ++/* For glibc compatibility. An empty enum does not compile. */
25270 ++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
25271 ++ __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
25272 + /**
25273 + * enum net_device_flags - &struct net_device flags
25274 + *
25275 +@@ -68,6 +74,8 @@
25276 + * @IFF_ECHO: echo sent packets. Volatile.
25277 + */
25278 + enum net_device_flags {
25279 ++/* for compatibility with glibc net/if.h */
25280 ++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
25281 + IFF_UP = 1<<0, /* sysfs */
25282 + IFF_BROADCAST = 1<<1, /* volatile */
25283 + IFF_DEBUG = 1<<2, /* sysfs */
25284 +@@ -84,11 +92,17 @@ enum net_device_flags {
25285 + IFF_PORTSEL = 1<<13, /* sysfs */
25286 + IFF_AUTOMEDIA = 1<<14, /* sysfs */
25287 + IFF_DYNAMIC = 1<<15, /* sysfs */
25288 ++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
25289 ++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
25290 + IFF_LOWER_UP = 1<<16, /* volatile */
25291 + IFF_DORMANT = 1<<17, /* volatile */
25292 + IFF_ECHO = 1<<18, /* volatile */
25293 ++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
25294 + };
25295 ++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
25296 +
25297 ++/* for compatibility with glibc net/if.h */
25298 ++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
25299 + #define IFF_UP IFF_UP
25300 + #define IFF_BROADCAST IFF_BROADCAST
25301 + #define IFF_DEBUG IFF_DEBUG
25302 +@@ -105,9 +119,13 @@ enum net_device_flags {
25303 + #define IFF_PORTSEL IFF_PORTSEL
25304 + #define IFF_AUTOMEDIA IFF_AUTOMEDIA
25305 + #define IFF_DYNAMIC IFF_DYNAMIC
25306 ++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
25307 ++
25308 ++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
25309 + #define IFF_LOWER_UP IFF_LOWER_UP
25310 + #define IFF_DORMANT IFF_DORMANT
25311 + #define IFF_ECHO IFF_ECHO
25312 ++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
25313 +
25314 + #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
25315 + IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
25316 +@@ -166,6 +184,8 @@ enum {
25317 + * being very small might be worth keeping for clean configuration.
25318 + */
25319 +
25320 ++/* for compatibility with glibc net/if.h */
25321 ++#if __UAPI_DEF_IF_IFMAP
25322 + struct ifmap {
25323 + unsigned long mem_start;
25324 + unsigned long mem_end;
25325 +@@ -175,6 +195,7 @@ struct ifmap {
25326 + unsigned char port;
25327 + /* 3 bytes spare */
25328 + };
25329 ++#endif /* __UAPI_DEF_IF_IFMAP */
25330 +
25331 + struct if_settings {
25332 + unsigned int type; /* Type of physical device or protocol */
25333 +@@ -200,6 +221,8 @@ struct if_settings {
25334 + * remainder may be interface specific.
25335 + */
25336 +
25337 ++/* for compatibility with glibc net/if.h */
25338 ++#if __UAPI_DEF_IF_IFREQ
25339 + struct ifreq {
25340 + #define IFHWADDRLEN 6
25341 + union
25342 +@@ -223,6 +246,7 @@ struct ifreq {
25343 + struct if_settings ifru_settings;
25344 + } ifr_ifru;
25345 + };
25346 ++#endif /* __UAPI_DEF_IF_IFREQ */
25347 +
25348 + #define ifr_name ifr_ifrn.ifrn_name /* interface name */
25349 + #define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
25350 +@@ -249,6 +273,8 @@ struct ifreq {
25351 + * must know all networks accessible).
25352 + */
25353 +
25354 ++/* for compatibility with glibc net/if.h */
25355 ++#if __UAPI_DEF_IF_IFCONF
25356 + struct ifconf {
25357 + int ifc_len; /* size of buffer */
25358 + union {
25359 +@@ -256,6 +282,8 @@ struct ifconf {
25360 + struct ifreq __user *ifcu_req;
25361 + } ifc_ifcu;
25362 + };
25363 ++#endif /* __UAPI_DEF_IF_IFCONF */
25364 ++
25365 + #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
25366 + #define ifc_req ifc_ifcu.ifcu_req /* array of structures */
25367 +
25368 +diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
25369 +index 7d024ceb075d..d5e38c73377c 100644
25370 +--- a/include/uapi/linux/libc-compat.h
25371 ++++ b/include/uapi/linux/libc-compat.h
25372 +@@ -51,6 +51,40 @@
25373 + /* We have included glibc headers... */
25374 + #if defined(__GLIBC__)
25375 +
25376 ++/* Coordinate with glibc net/if.h header. */
25377 ++#if defined(_NET_IF_H)
25378 ++
25379 ++/* GLIBC headers included first so don't define anything
25380 ++ * that would already be defined. */
25381 ++
25382 ++#define __UAPI_DEF_IF_IFCONF 0
25383 ++#define __UAPI_DEF_IF_IFMAP 0
25384 ++#define __UAPI_DEF_IF_IFNAMSIZ 0
25385 ++#define __UAPI_DEF_IF_IFREQ 0
25386 ++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
25387 ++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
25388 ++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
25389 ++#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
25390 ++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
25391 ++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
25392 ++
25393 ++#else /* _NET_IF_H */
25394 ++
25395 ++/* Linux headers included first, and we must define everything
25396 ++ * we need. The expectation is that glibc will check the
25397 ++ * __UAPI_DEF_* defines and adjust appropriately. */
25398 ++
25399 ++#define __UAPI_DEF_IF_IFCONF 1
25400 ++#define __UAPI_DEF_IF_IFMAP 1
25401 ++#define __UAPI_DEF_IF_IFNAMSIZ 1
25402 ++#define __UAPI_DEF_IF_IFREQ 1
25403 ++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
25404 ++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
25405 ++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
25406 ++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
25407 ++
25408 ++#endif /* _NET_IF_H */
25409 ++
25410 + /* Coordinate with glibc netinet/in.h header. */
25411 + #if defined(_NETINET_IN_H)
25412 +
25413 +@@ -117,6 +151,16 @@
25414 + * that we need. */
25415 + #else /* !defined(__GLIBC__) */
25416 +
25417 ++/* Definitions for if.h */
25418 ++#define __UAPI_DEF_IF_IFCONF 1
25419 ++#define __UAPI_DEF_IF_IFMAP 1
25420 ++#define __UAPI_DEF_IF_IFNAMSIZ 1
25421 ++#define __UAPI_DEF_IF_IFREQ 1
25422 ++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
25423 ++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
25424 ++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
25425 ++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
25426 ++
25427 + /* Definitions for in.h */
25428 + #define __UAPI_DEF_IN_ADDR 1
25429 + #define __UAPI_DEF_IN_IPPROTO 1
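A userspace check of what these guards fix, assuming glibc headers of
this era: including glibc's net/if.h before linux/if.h now compiles,
because the UAPI header suppresses its duplicate definitions. (The
reverse order additionally relies on glibc honoring the __UAPI_DEF_*
macros, per the comment above.)

	#include <net/if.h>	/* glibc definitions win ... */
	#include <linux/if.h>	/* ... UAPI defines only what is left */

	int main(void)
	{
		struct ifreq req;	/* exactly one definition in scope */

		(void)req;
		return IFNAMSIZ == 16 ? 0 : 1;
	}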
25430 +diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
25431 +index c039f1d68a09..086168e18ca8 100644
25432 +--- a/include/uapi/linux/v4l2-dv-timings.h
25433 ++++ b/include/uapi/linux/v4l2-dv-timings.h
25434 +@@ -183,7 +183,8 @@
25435 +
25436 + #define V4L2_DV_BT_CEA_3840X2160P24 { \
25437 + .type = V4L2_DV_BT_656_1120, \
25438 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25439 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
25440 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25441 + 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
25442 + V4L2_DV_BT_STD_CEA861, \
25443 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
25444 +@@ -191,14 +192,16 @@
25445 +
25446 + #define V4L2_DV_BT_CEA_3840X2160P25 { \
25447 + .type = V4L2_DV_BT_656_1120, \
25448 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25449 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
25450 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25451 + 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
25452 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
25453 + }
25454 +
25455 + #define V4L2_DV_BT_CEA_3840X2160P30 { \
25456 + .type = V4L2_DV_BT_656_1120, \
25457 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25458 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
25459 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25460 + 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
25461 + V4L2_DV_BT_STD_CEA861, \
25462 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
25463 +@@ -206,14 +209,16 @@
25464 +
25465 + #define V4L2_DV_BT_CEA_3840X2160P50 { \
25466 + .type = V4L2_DV_BT_656_1120, \
25467 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25468 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
25469 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25470 + 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
25471 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
25472 + }
25473 +
25474 + #define V4L2_DV_BT_CEA_3840X2160P60 { \
25475 + .type = V4L2_DV_BT_656_1120, \
25476 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25477 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
25478 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25479 + 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
25480 + V4L2_DV_BT_STD_CEA861, \
25481 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
25482 +@@ -221,7 +226,8 @@
25483 +
25484 + #define V4L2_DV_BT_CEA_4096X2160P24 { \
25485 + .type = V4L2_DV_BT_656_1120, \
25486 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25487 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
25488 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25489 + 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
25490 + V4L2_DV_BT_STD_CEA861, \
25491 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
25492 +@@ -229,14 +235,16 @@
25493 +
25494 + #define V4L2_DV_BT_CEA_4096X2160P25 { \
25495 + .type = V4L2_DV_BT_656_1120, \
25496 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25497 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
25498 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25499 + 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
25500 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
25501 + }
25502 +
25503 + #define V4L2_DV_BT_CEA_4096X2160P30 { \
25504 + .type = V4L2_DV_BT_656_1120, \
25505 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25506 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
25507 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25508 + 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
25509 + V4L2_DV_BT_STD_CEA861, \
25510 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
25511 +@@ -244,14 +252,16 @@
25512 +
25513 + #define V4L2_DV_BT_CEA_4096X2160P50 { \
25514 + .type = V4L2_DV_BT_656_1120, \
25515 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25516 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
25517 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25518 + 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
25519 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
25520 + }
25521 +
25522 + #define V4L2_DV_BT_CEA_4096X2160P60 { \
25523 + .type = V4L2_DV_BT_656_1120, \
25524 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
25525 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
25526 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
25527 + 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
25528 + V4L2_DV_BT_STD_CEA861, \
25529 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
25530 +diff --git a/include/xen/page.h b/include/xen/page.h
25531 +index 96294ac93755..9dc46cb8a0fd 100644
25532 +--- a/include/xen/page.h
25533 ++++ b/include/xen/page.h
25534 +@@ -15,9 +15,9 @@
25535 + */
25536 +
25537 + #define xen_pfn_to_page(xen_pfn) \
25538 +- ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
25539 ++ (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
25540 + #define page_to_xen_pfn(page) \
25541 +- (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
25542 ++ ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
25543 +
25544 + #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
25545 +
25546 +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
25547 +index 4504ca66118d..50da680c479f 100644
25548 +--- a/kernel/bpf/helpers.c
25549 ++++ b/kernel/bpf/helpers.c
25550 +@@ -166,7 +166,7 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
25551 + if (!task)
25552 + return -EINVAL;
25553 +
25554 +- memcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
25555 ++ strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
25556 + return 0;
25557 + }
25558 +
25559 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
25560 +index f2ece3c174a5..8f94ca1860cf 100644
25561 +--- a/kernel/bpf/inode.c
25562 ++++ b/kernel/bpf/inode.c
25563 +@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
25564 + {
25565 + switch (type) {
25566 + case BPF_TYPE_PROG:
25567 +- atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
25568 ++ raw = bpf_prog_inc(raw);
25569 + break;
25570 + case BPF_TYPE_MAP:
25571 +- bpf_map_inc(raw, true);
25572 ++ raw = bpf_map_inc(raw, true);
25573 + break;
25574 + default:
25575 + WARN_ON_ONCE(1);
25576 +@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
25577 + goto out;
25578 +
25579 + raw = bpf_any_get(inode->i_private, *type);
25580 +- touch_atime(&path);
25581 ++ if (!IS_ERR(raw))
25582 ++ touch_atime(&path);
25583 +
25584 + path_put(&path);
25585 + return raw;
25586 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
25587 +index 637397059f76..aa5f39772ac4 100644
25588 +--- a/kernel/bpf/syscall.c
25589 ++++ b/kernel/bpf/syscall.c
25590 +@@ -201,11 +201,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
25591 + return f.file->private_data;
25592 + }
25593 +
25594 +-void bpf_map_inc(struct bpf_map *map, bool uref)
25595 ++/* prog's and map's refcnt limit */
25596 ++#define BPF_MAX_REFCNT 32768
25597 ++
25598 ++struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
25599 + {
25600 +- atomic_inc(&map->refcnt);
25601 ++ if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
25602 ++ atomic_dec(&map->refcnt);
25603 ++ return ERR_PTR(-EBUSY);
25604 ++ }
25605 + if (uref)
25606 + atomic_inc(&map->usercnt);
25607 ++ return map;
25608 + }
25609 +
25610 + struct bpf_map *bpf_map_get_with_uref(u32 ufd)
25611 +@@ -217,7 +224,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
25612 + if (IS_ERR(map))
25613 + return map;
25614 +
25615 +- bpf_map_inc(map, true);
25616 ++ map = bpf_map_inc(map, true);
25617 + fdput(f);
25618 +
25619 + return map;
25620 +@@ -600,6 +607,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
25621 + return f.file->private_data;
25622 + }
25623 +
25624 ++struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
25625 ++{
25626 ++ if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
25627 ++ atomic_dec(&prog->aux->refcnt);
25628 ++ return ERR_PTR(-EBUSY);
25629 ++ }
25630 ++ return prog;
25631 ++}
25632 ++
25633 + /* called by sockets/tracing/seccomp before attaching program to an event
25634 + * pairs with bpf_prog_put()
25635 + */
25636 +@@ -612,7 +628,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
25637 + if (IS_ERR(prog))
25638 + return prog;
25639 +
25640 +- atomic_inc(&prog->aux->refcnt);
25641 ++ prog = bpf_prog_inc(prog);
25642 + fdput(f);
25643 +
25644 + return prog;
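The same capped-refcount idiom is applied to both maps and programs
above; a generic sketch of the pattern (names are illustrative, not
kernel API): fail the take with -EBUSY rather than letting a malicious
open()/clone() loop walk the counter toward overflow.

	#define MAX_REFCNT 32768

	static void *obj_get(atomic_t *refcnt, void *obj)
	{
		if (atomic_inc_return(refcnt) > MAX_REFCNT) {
			atomic_dec(refcnt);	/* undo the speculative take */
			return ERR_PTR(-EBUSY);
		}
		return obj;
	}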
25645 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
25646 +index 2e7f7ab739e4..2cbfba78d3db 100644
25647 +--- a/kernel/bpf/verifier.c
25648 ++++ b/kernel/bpf/verifier.c
25649 +@@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
25650 + [CONST_IMM] = "imm",
25651 + };
25652 +
25653 +-static const struct {
25654 +- int map_type;
25655 +- int func_id;
25656 +-} func_limit[] = {
25657 +- {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
25658 +- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
25659 +- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
25660 +-};
25661 +-
25662 + static void print_verifier_state(struct verifier_env *env)
25663 + {
25664 + enum bpf_reg_type t;
25665 +@@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
25666 +
25667 + static int check_map_func_compatibility(struct bpf_map *map, int func_id)
25668 + {
25669 +- bool bool_map, bool_func;
25670 +- int i;
25671 +-
25672 + if (!map)
25673 + return 0;
25674 +
25675 +- for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
25676 +- bool_map = (map->map_type == func_limit[i].map_type);
25677 +- bool_func = (func_id == func_limit[i].func_id);
25678 +- /* only when map & func pair match it can continue.
25679 +- * don't allow any other map type to be passed into
25680 +- * the special func;
25681 +- */
25682 +- if (bool_func && bool_map != bool_func)
25683 +- return -EINVAL;
25684 ++ /* We need a two way check, first is from map perspective ... */
25685 ++ switch (map->map_type) {
25686 ++ case BPF_MAP_TYPE_PROG_ARRAY:
25687 ++ if (func_id != BPF_FUNC_tail_call)
25688 ++ goto error;
25689 ++ break;
25690 ++ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
25691 ++ if (func_id != BPF_FUNC_perf_event_read &&
25692 ++ func_id != BPF_FUNC_perf_event_output)
25693 ++ goto error;
25694 ++ break;
25695 ++ default:
25696 ++ break;
25697 ++ }
25698 ++
25699 ++ /* ... and second from the function itself. */
25700 ++ switch (func_id) {
25701 ++ case BPF_FUNC_tail_call:
25702 ++ if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
25703 ++ goto error;
25704 ++ break;
25705 ++ case BPF_FUNC_perf_event_read:
25706 ++ case BPF_FUNC_perf_event_output:
25707 ++ if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
25708 ++ goto error;
25709 ++ break;
25710 ++ default:
25711 ++ break;
25712 + }
25713 +
25714 + return 0;
25715 ++error:
25716 ++ verbose("cannot pass map_type %d into func %d\n",
25717 ++ map->map_type, func_id);
25718 ++ return -EINVAL;
25719 + }
25720 +
25721 + static int check_call(struct verifier_env *env, int func_id)
25722 +@@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
25723 + }
25724 +
25725 + if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
25726 ++ BPF_SIZE(insn->code) == BPF_DW ||
25727 + (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
25728 + verbose("BPF_LD_ABS uses reserved fields\n");
25729 + return -EINVAL;
25730 +@@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
25731 + if (IS_ERR(map)) {
25732 + verbose("fd %d is not pointing to valid bpf_map\n",
25733 + insn->imm);
25734 +- fdput(f);
25735 + return PTR_ERR(map);
25736 + }
25737 +
25738 +@@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
25739 + return -E2BIG;
25740 + }
25741 +
25742 +- /* remember this map */
25743 +- env->used_maps[env->used_map_cnt++] = map;
25744 +-
25745 + /* hold the map. If the program is rejected by verifier,
25746 + * the map will be released by release_maps() or it
25747 + * will be used by the valid program until it's unloaded
25748 + * and all maps are released in free_bpf_prog_info()
25749 + */
25750 +- bpf_map_inc(map, false);
25751 ++ map = bpf_map_inc(map, false);
25752 ++ if (IS_ERR(map)) {
25753 ++ fdput(f);
25754 ++ return PTR_ERR(map);
25755 ++ }
25756 ++ env->used_maps[env->used_map_cnt++] = map;
25757 ++
25758 + fdput(f);
25759 + next_insn:
25760 + insn++;
25761 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
25762 +index d27904c193da..355cd5f2b416 100644
25763 +--- a/kernel/cgroup.c
25764 ++++ b/kernel/cgroup.c
25765 +@@ -2474,6 +2474,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
25766 + lockdep_assert_held(&cgroup_mutex);
25767 + lockdep_assert_held(&css_set_lock);
25768 +
25769 ++ /*
25770 ++ * If ->dead, @src_cset is associated with one or more dead cgroups
25771 ++ * and doesn't contain any migratable tasks. Ignore it early so
25772 ++ * that the rest of migration path doesn't get confused by it.
25773 ++ */
25774 ++ if (src_cset->dead)
25775 ++ return;
25776 ++
25777 + src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
25778 +
25779 + if (!list_empty(&src_cset->mg_preload_node))
25780 +@@ -2689,9 +2697,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
25781 + size_t nbytes, loff_t off, bool threadgroup)
25782 + {
25783 + struct task_struct *tsk;
25784 ++ struct cgroup_subsys *ss;
25785 + struct cgroup *cgrp;
25786 + pid_t pid;
25787 +- int ret;
25788 ++ int ssid, ret;
25789 +
25790 + if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
25791 + return -EINVAL;
25792 +@@ -2739,8 +2748,10 @@ out_unlock_rcu:
25793 + rcu_read_unlock();
25794 + out_unlock_threadgroup:
25795 + percpu_up_write(&cgroup_threadgroup_rwsem);
25796 ++ for_each_subsys(ss, ssid)
25797 ++ if (ss->post_attach)
25798 ++ ss->post_attach();
25799 + cgroup_kn_unlock(of->kn);
25800 +- cpuset_post_attach_flush();
25801 + return ret ?: nbytes;
25802 + }
25803 +
25804 +@@ -5114,6 +5125,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
25805 + __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
25806 + {
25807 + struct cgroup_subsys_state *css;
25808 ++ struct cgrp_cset_link *link;
25809 + int ssid;
25810 +
25811 + lockdep_assert_held(&cgroup_mutex);
25812 +@@ -5134,11 +5146,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
25813 + return -EBUSY;
25814 +
25815 + /*
25816 +- * Mark @cgrp dead. This prevents further task migration and child
25817 +- * creation by disabling cgroup_lock_live_group().
25818 ++ * Mark @cgrp and the associated csets dead. The former prevents
25819 ++ * further task migration and child creation by disabling
25820 ++ * cgroup_lock_live_group(). The latter makes the csets ignored by
25821 ++ * the migration path.
25822 + */
25823 + cgrp->self.flags &= ~CSS_ONLINE;
25824 +
25825 ++ spin_lock_bh(&css_set_lock);
25826 ++ list_for_each_entry(link, &cgrp->cset_links, cset_link)
25827 ++ link->cset->dead = true;
25828 ++ spin_unlock_bh(&css_set_lock);
25829 ++
25830 + /* initiate massacre of all css's */
25831 + for_each_css(css, ssid, cgrp)
25832 + kill_css(css);
25833 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
25834 +index 41989ab4db57..df16d0c9349f 100644
25835 +--- a/kernel/cpuset.c
25836 ++++ b/kernel/cpuset.c
25837 +@@ -58,7 +58,6 @@
25838 + #include <asm/uaccess.h>
25839 + #include <linux/atomic.h>
25840 + #include <linux/mutex.h>
25841 +-#include <linux/workqueue.h>
25842 + #include <linux/cgroup.h>
25843 + #include <linux/wait.h>
25844 +
25845 +@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
25846 + }
25847 + }
25848 +
25849 +-void cpuset_post_attach_flush(void)
25850 ++static void cpuset_post_attach(void)
25851 + {
25852 + flush_workqueue(cpuset_migrate_mm_wq);
25853 + }
25854 +@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
25855 + .can_attach = cpuset_can_attach,
25856 + .cancel_attach = cpuset_cancel_attach,
25857 + .attach = cpuset_attach,
25858 ++ .post_attach = cpuset_post_attach,
25859 + .bind = cpuset_bind,
25860 + .legacy_cftypes = files,
25861 + .early_init = 1,
25862 +diff --git a/kernel/events/core.c b/kernel/events/core.c
25863 +index 614614821f00..477fb6b8ca20 100644
25864 +--- a/kernel/events/core.c
25865 ++++ b/kernel/events/core.c
25866 +@@ -1090,6 +1090,7 @@ static void put_ctx(struct perf_event_context *ctx)
25867 + * function.
25868 + *
25869 + * Lock order:
25870 ++ * cred_guard_mutex
25871 + * task_struct::perf_event_mutex
25872 + * perf_event_context::mutex
25873 + * perf_event::child_mutex;
25874 +@@ -2402,14 +2403,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
25875 + cpuctx->task_ctx = NULL;
25876 + }
25877 +
25878 +- is_active ^= ctx->is_active; /* changed bits */
25879 +-
25880 ++ /*
25881 ++ * Always update time if it was set; not only when it changes.
25882 ++ * Otherwise we can 'forget' to update time for any but the last
25883 ++ * context we sched out. For example:
25884 ++ *
25885 ++ * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
25886 ++ * ctx_sched_out(.event_type = EVENT_PINNED)
25887 ++ *
25888 ++ * would only update time for the pinned events.
25889 ++ */
25890 + if (is_active & EVENT_TIME) {
25891 + /* update (and stop) ctx time */
25892 + update_context_time(ctx);
25893 + update_cgrp_time_from_cpuctx(cpuctx);
25894 + }
25895 +
25896 ++ is_active ^= ctx->is_active; /* changed bits */
25897 ++
25898 + if (!ctx->nr_active || !(is_active & EVENT_ALL))
25899 + return;
25900 +
25901 +@@ -3405,7 +3416,6 @@ static struct task_struct *
25902 + find_lively_task_by_vpid(pid_t vpid)
25903 + {
25904 + struct task_struct *task;
25905 +- int err;
25906 +
25907 + rcu_read_lock();
25908 + if (!vpid)
25909 +@@ -3419,16 +3429,7 @@ find_lively_task_by_vpid(pid_t vpid)
25910 + if (!task)
25911 + return ERR_PTR(-ESRCH);
25912 +
25913 +- /* Reuse ptrace permission checks for now. */
25914 +- err = -EACCES;
25915 +- if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
25916 +- goto errout;
25917 +-
25918 + return task;
25919 +-errout:
25920 +- put_task_struct(task);
25921 +- return ERR_PTR(err);
25922 +-
25923 + }
25924 +
25925 + /*
25926 +@@ -8001,6 +8002,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
25927 + }
25928 + }
25929 +
25930 ++ /* symmetric to unaccount_event() in _free_event() */
25931 ++ account_event(event);
25932 ++
25933 + return event;
25934 +
25935 + err_per_task:
25936 +@@ -8347,6 +8351,24 @@ SYSCALL_DEFINE5(perf_event_open,
25937 +
25938 + get_online_cpus();
25939 +
25940 ++ if (task) {
25941 ++ err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
25942 ++ if (err)
25943 ++ goto err_cpus;
25944 ++
25945 ++ /*
25946 ++ * Reuse ptrace permission checks for now.
25947 ++ *
25948 ++ * We must hold cred_guard_mutex across this and any potential
25949 ++ * perf_install_in_context() call for this new event to
25950 ++ * serialize against exec() altering our credentials (and the
25951 ++ * perf_event_exit_task() that could imply).
25952 ++ */
25953 ++ err = -EACCES;
25954 ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
25955 ++ goto err_cred;
25956 ++ }
25957 ++
25958 + if (flags & PERF_FLAG_PID_CGROUP)
25959 + cgroup_fd = pid;
25960 +
25961 +@@ -8354,7 +8376,7 @@ SYSCALL_DEFINE5(perf_event_open,
25962 + NULL, NULL, cgroup_fd);
25963 + if (IS_ERR(event)) {
25964 + err = PTR_ERR(event);
25965 +- goto err_cpus;
25966 ++ goto err_cred;
25967 + }
25968 +
25969 + if (is_sampling_event(event)) {
25970 +@@ -8364,8 +8386,6 @@ SYSCALL_DEFINE5(perf_event_open,
25971 + }
25972 + }
25973 +
25974 +- account_event(event);
25975 +-
25976 + /*
25977 + * Special case software events and allow them to be part of
25978 + * any hardware group.
25979 +@@ -8415,11 +8435,6 @@ SYSCALL_DEFINE5(perf_event_open,
25980 + goto err_context;
25981 + }
25982 +
25983 +- if (task) {
25984 +- put_task_struct(task);
25985 +- task = NULL;
25986 +- }
25987 +-
25988 + /*
25989 + * Look up the group leader (we will attach this event to it):
25990 + */
25991 +@@ -8478,6 +8493,7 @@ SYSCALL_DEFINE5(perf_event_open,
25992 + f_flags);
25993 + if (IS_ERR(event_file)) {
25994 + err = PTR_ERR(event_file);
25995 ++ event_file = NULL;
25996 + goto err_context;
25997 + }
25998 +
25999 +@@ -8516,6 +8532,11 @@ SYSCALL_DEFINE5(perf_event_open,
26000 +
26001 + WARN_ON_ONCE(ctx->parent_ctx);
26002 +
26003 ++ /*
26004 ++ * This is the point of no return; we cannot fail hereafter. This is
26005 ++ * where we start modifying current state.
26006 ++ */
26007 ++
26008 + if (move_group) {
26009 + /*
26010 + * See perf_event_ctx_lock() for comments on the details
26011 +@@ -8587,6 +8608,11 @@ SYSCALL_DEFINE5(perf_event_open,
26012 + mutex_unlock(&gctx->mutex);
26013 + mutex_unlock(&ctx->mutex);
26014 +
26015 ++ if (task) {
26016 ++ mutex_unlock(&task->signal->cred_guard_mutex);
26017 ++ put_task_struct(task);
26018 ++ }
26019 ++
26020 + put_online_cpus();
26021 +
26022 + mutex_lock(&current->perf_event_mutex);
26023 +@@ -8619,6 +8645,9 @@ err_alloc:
26024 + */
26025 + if (!event_file)
26026 + free_event(event);
26027 ++err_cred:
26028 ++ if (task)
26029 ++ mutex_unlock(&task->signal->cred_guard_mutex);
26030 + err_cpus:
26031 + put_online_cpus();
26032 + err_task:
26033 +@@ -8662,8 +8691,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
26034 + /* Mark owner so we could distinguish it from user events. */
26035 + event->owner = TASK_TOMBSTONE;
26036 +
26037 +- account_event(event);
26038 +-
26039 + ctx = find_get_context(event->pmu, task, event);
26040 + if (IS_ERR(ctx)) {
26041 + err = PTR_ERR(ctx);
26042 +@@ -8905,6 +8932,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
26043 +
26044 + /*
26045 + * When a child task exits, feed back event values to parent events.
26046 ++ *
26047 ++ * Can be called with cred_guard_mutex held when called from
26048 ++ * install_exec_creds().
26049 + */
26050 + void perf_event_exit_task(struct task_struct *child)
26051 + {
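
The kernel/events/core.c hunks above move the ptrace permission check under
cred_guard_mutex and keep the mutex held across perf_install_in_context().
A minimal userspace sketch (not part of the patch) of that
acquire/check/install/release shape, using a pthread mutex; may_access() and
install_event() are hypothetical stand-ins for ptrace_may_access() and the
event installation:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct task {
	pthread_mutex_t cred_guard_mutex;
	bool accessible;	/* models the ptrace_may_access() verdict */
};

static bool may_access(struct task *t) { return t->accessible; }
static int install_event(struct task *t) { (void)t; return 0; }

static int open_event_for(struct task *t)
{
	int err;

	if (pthread_mutex_lock(&t->cred_guard_mutex))
		return -EINTR;	/* models mutex_lock_interruptible() failing */

	err = -EACCES;
	if (!may_access(t))
		goto out_unlock;	/* credential check done under the mutex */

	/* still under the mutex: a concurrent exec() cannot swap
	 * credentials between the check and the install */
	err = install_event(t);

out_unlock:
	pthread_mutex_unlock(&t->cred_guard_mutex);
	return err;
}
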
26052 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
26053 +index 1faad2cfdb9e..287201a5d12f 100644
26054 +--- a/kernel/events/ring_buffer.c
26055 ++++ b/kernel/events/ring_buffer.c
26056 +@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
26057 + bool truncated)
26058 + {
26059 + struct ring_buffer *rb = handle->rb;
26060 ++ bool wakeup = truncated;
26061 + unsigned long aux_head;
26062 + u64 flags = 0;
26063 +
26064 +@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
26065 + aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
26066 +
26067 + if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
26068 +- perf_output_wakeup(handle);
26069 ++ wakeup = true;
26070 + local_add(rb->aux_watermark, &rb->aux_wakeup);
26071 + }
26072 ++
26073 ++ if (wakeup) {
26074 ++ if (truncated)
26075 ++ handle->event->pending_disable = 1;
26076 ++ perf_output_wakeup(handle);
26077 ++ }
26078 ++
26079 + handle->event = NULL;
26080 +
26081 + local_set(&rb->aux_nest, 0);
26082 +diff --git a/kernel/futex.c b/kernel/futex.c
26083 +index 5d6ce6413ef1..11b502159f3a 100644
26084 +--- a/kernel/futex.c
26085 ++++ b/kernel/futex.c
26086 +@@ -1212,10 +1212,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
26087 + if (unlikely(should_fail_futex(true)))
26088 + ret = -EFAULT;
26089 +
26090 +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
26091 ++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
26092 + ret = -EFAULT;
26093 +- else if (curval != uval)
26094 +- ret = -EINVAL;
26095 ++ } else if (curval != uval) {
26096 ++ /*
26097 ++ * If an unconditional UNLOCK_PI operation (user space did not
26098 ++ * try the TID->0 transition) raced with a waiter setting the
26099 ++ * FUTEX_WAITERS flag between get_user() and locking the hash
26100 ++ * bucket lock, retry the operation.
26101 ++ */
26102 ++ if ((FUTEX_TID_MASK & curval) == uval)
26103 ++ ret = -EAGAIN;
26104 ++ else
26105 ++ ret = -EINVAL;
26106 ++ }
26107 + if (ret) {
26108 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
26109 + return ret;
26110 +@@ -1442,8 +1452,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
26111 + if (likely(&hb1->chain != &hb2->chain)) {
26112 + plist_del(&q->list, &hb1->chain);
26113 + hb_waiters_dec(hb1);
26114 +- plist_add(&q->list, &hb2->chain);
26115 + hb_waiters_inc(hb2);
26116 ++ plist_add(&q->list, &hb2->chain);
26117 + q->lock_ptr = &hb2->lock;
26118 + }
26119 + get_futex_key_refs(key2);
26120 +@@ -2536,6 +2546,15 @@ retry:
26121 + if (ret == -EFAULT)
26122 + goto pi_faulted;
26123 + /*
26124 ++ * An unconditional UNLOCK_PI op raced against a waiter
26125 ++ * setting the FUTEX_WAITERS bit. Try again.
26126 ++ */
26127 ++ if (ret == -EAGAIN) {
26128 ++ spin_unlock(&hb->lock);
26129 ++ put_futex_key(&key);
26130 ++ goto retry;
26131 ++ }
26132 ++ /*
26133 + * wake_futex_pi has detected invalid state. Tell user
26134 + * space.
26135 + */
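
The -EAGAIN path added above distinguishes a benign race (a waiter set
FUTEX_WAITERS between the user-space read and taking the hash-bucket lock)
from real state corruption by comparing only the TID bits. A hedged,
self-contained restatement of that classification; the constants match
uapi/linux/futex.h, the helper name is illustrative:

#include <errno.h>
#include <stdint.h>

#define FUTEX_WAITERS	0x80000000
#define FUTEX_TID_MASK	0x3fffffff

/* expected: owner TID read before the hash-bucket lock was taken;
 * seen: futex word observed by the failed cmpxchg. */
static int classify_unlock_pi_race(uint32_t expected, uint32_t seen)
{
	if ((seen & FUTEX_TID_MASK) == expected)
		return -EAGAIN;	/* only FUTEX_WAITERS appeared: retry */
	return -EINVAL;		/* owner TID changed: genuine inconsistency */
}
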
26136 +diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
26137 +index 5b9102a47ea5..c835270f0c2f 100644
26138 +--- a/kernel/locking/mcs_spinlock.h
26139 ++++ b/kernel/locking/mcs_spinlock.h
26140 +@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
26141 + node->locked = 0;
26142 + node->next = NULL;
26143 +
26144 +- prev = xchg_acquire(lock, node);
26145 ++ /*
26146 ++ * We rely on the full barrier with global transitivity implied by the
26147 ++ * below xchg() to order the initialization stores above against any
26148 ++ * observation of @node. And to provide the ACQUIRE ordering associated
26149 ++ * with a LOCK primitive.
26150 ++ */
26151 ++ prev = xchg(lock, node);
26152 + if (likely(prev == NULL)) {
26153 + /*
26154 + * Lock acquired, don't need to set node->locked to 1. Threads
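
The xchg_acquire() -> xchg() change relies on the full barrier to publish the
node initialization. A C11 sketch of the same reasoning, under the assumption
that a seq_cst exchange models the kernel's full-barrier xchg(); an
acquire-only exchange would let the two plain stores sink below the
publication point:

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *next;
	int locked;
};

static struct mcs_node *mcs_publish(_Atomic(struct mcs_node *) *lock,
				    struct mcs_node *node)
{
	node->locked = 0;
	node->next = NULL;
	/* full-barrier exchange: the stores above are ordered before any
	 * CPU can observe @node through *lock */
	return atomic_exchange_explicit(lock, node, memory_order_seq_cst);
}
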
26155 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
26156 +index b7342a24f559..b7dd5718836e 100644
26157 +--- a/kernel/power/hibernate.c
26158 ++++ b/kernel/power/hibernate.c
26159 +@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
26160 + pm_message_t msg;
26161 + int error;
26162 +
26163 ++ pm_suspend_clear_flags();
26164 + error = platform_begin(platform_mode);
26165 + if (error)
26166 + goto Close;
26167 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
26168 +index 41f6b2215aa8..1c1d2a00ad95 100644
26169 +--- a/kernel/sched/core.c
26170 ++++ b/kernel/sched/core.c
26171 +@@ -5630,6 +5630,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
26172 +
26173 + case CPU_UP_PREPARE:
26174 + rq->calc_load_update = calc_load_update;
26175 ++ account_reset_rq(rq);
26176 + break;
26177 +
26178 + case CPU_ONLINE:
26179 +@@ -7801,7 +7802,7 @@ void set_curr_task(int cpu, struct task_struct *p)
26180 + /* task_group_lock serializes the addition/removal of task groups */
26181 + static DEFINE_SPINLOCK(task_group_lock);
26182 +
26183 +-static void free_sched_group(struct task_group *tg)
26184 ++static void sched_free_group(struct task_group *tg)
26185 + {
26186 + free_fair_sched_group(tg);
26187 + free_rt_sched_group(tg);
26188 +@@ -7827,7 +7828,7 @@ struct task_group *sched_create_group(struct task_group *parent)
26189 + return tg;
26190 +
26191 + err:
26192 +- free_sched_group(tg);
26193 ++ sched_free_group(tg);
26194 + return ERR_PTR(-ENOMEM);
26195 + }
26196 +
26197 +@@ -7847,17 +7848,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
26198 + }
26199 +
26200 + /* rcu callback to free various structures associated with a task group */
26201 +-static void free_sched_group_rcu(struct rcu_head *rhp)
26202 ++static void sched_free_group_rcu(struct rcu_head *rhp)
26203 + {
26204 + /* now it should be safe to free those cfs_rqs */
26205 +- free_sched_group(container_of(rhp, struct task_group, rcu));
26206 ++ sched_free_group(container_of(rhp, struct task_group, rcu));
26207 + }
26208 +
26209 +-/* Destroy runqueue etc associated with a task group */
26210 + void sched_destroy_group(struct task_group *tg)
26211 + {
26212 + /* wait for possible concurrent references to cfs_rqs complete */
26213 +- call_rcu(&tg->rcu, free_sched_group_rcu);
26214 ++ call_rcu(&tg->rcu, sched_free_group_rcu);
26215 + }
26216 +
26217 + void sched_offline_group(struct task_group *tg)
26218 +@@ -8318,31 +8318,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
26219 + if (IS_ERR(tg))
26220 + return ERR_PTR(-ENOMEM);
26221 +
26222 ++ sched_online_group(tg, parent);
26223 ++
26224 + return &tg->css;
26225 + }
26226 +
26227 +-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
26228 ++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
26229 + {
26230 + struct task_group *tg = css_tg(css);
26231 +- struct task_group *parent = css_tg(css->parent);
26232 +
26233 +- if (parent)
26234 +- sched_online_group(tg, parent);
26235 +- return 0;
26236 ++ sched_offline_group(tg);
26237 + }
26238 +
26239 + static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
26240 + {
26241 + struct task_group *tg = css_tg(css);
26242 +
26243 +- sched_destroy_group(tg);
26244 +-}
26245 +-
26246 +-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
26247 +-{
26248 +- struct task_group *tg = css_tg(css);
26249 +-
26250 +- sched_offline_group(tg);
26251 ++ /*
26252 ++ * Relies on the RCU grace period between css_released() and this.
26253 ++ */
26254 ++ sched_free_group(tg);
26255 + }
26256 +
26257 + static void cpu_cgroup_fork(struct task_struct *task)
26258 +@@ -8702,9 +8697,8 @@ static struct cftype cpu_files[] = {
26259 +
26260 + struct cgroup_subsys cpu_cgrp_subsys = {
26261 + .css_alloc = cpu_cgroup_css_alloc,
26262 ++ .css_released = cpu_cgroup_css_released,
26263 + .css_free = cpu_cgroup_css_free,
26264 +- .css_online = cpu_cgroup_css_online,
26265 +- .css_offline = cpu_cgroup_css_offline,
26266 + .fork = cpu_cgroup_fork,
26267 + .can_attach = cpu_cgroup_can_attach,
26268 + .attach = cpu_cgroup_attach,
26269 +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
26270 +index b2ab2ffb1adc..ab2b5fb9821d 100644
26271 +--- a/kernel/sched/cputime.c
26272 ++++ b/kernel/sched/cputime.c
26273 +@@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void)
26274 + #ifdef CONFIG_PARAVIRT
26275 + if (static_key_false(&paravirt_steal_enabled)) {
26276 + u64 steal;
26277 +- cputime_t steal_ct;
26278 ++ unsigned long steal_jiffies;
26279 +
26280 + steal = paravirt_steal_clock(smp_processor_id());
26281 + steal -= this_rq()->prev_steal_time;
26282 +
26283 + /*
26284 +- * cputime_t may be less precise than nsecs (eg: if it's
26285 +- * based on jiffies). Lets cast the result to cputime
26286 ++ * steal is in nsecs but our caller is expecting steal
26287 ++ * time in jiffies. Lets cast the result to jiffies
26288 + * granularity and account the rest on the next rounds.
26289 + */
26290 +- steal_ct = nsecs_to_cputime(steal);
26291 +- this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
26292 ++ steal_jiffies = nsecs_to_jiffies(steal);
26293 ++ this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
26294 +
26295 +- account_steal_time(steal_ct);
26296 +- return steal_ct;
26297 ++ account_steal_time(jiffies_to_cputime(steal_jiffies));
26298 ++ return steal_jiffies;
26299 + }
26300 + #endif
26301 + return false;
26302 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
26303 +index 56b7d4b83947..adff850e5d42 100644
26304 +--- a/kernel/sched/fair.c
26305 ++++ b/kernel/sched/fair.c
26306 +@@ -4459,9 +4459,17 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
26307 +
26308 + /* scale is effectively 1 << i now, and >> i divides by scale */
26309 +
26310 +- old_load = this_rq->cpu_load[i] - tickless_load;
26311 ++ old_load = this_rq->cpu_load[i];
26312 + old_load = decay_load_missed(old_load, pending_updates - 1, i);
26313 +- old_load += tickless_load;
26314 ++ if (tickless_load) {
26315 ++ old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
26316 ++ /*
26317 ++ * old_load can never be a negative value because a
26318 ++ * decayed tickless_load cannot be greater than the
26319 ++ * original tickless_load.
26320 ++ */
26321 ++ old_load += tickless_load;
26322 ++ }
26323 + new_load = this_load;
26324 + /*
26325 + * Round up the averaging division if load is increasing. This
26326 +diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
26327 +index ef7159012cf3..b0b93fd33af9 100644
26328 +--- a/kernel/sched/loadavg.c
26329 ++++ b/kernel/sched/loadavg.c
26330 +@@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq)
26331 + static unsigned long
26332 + calc_load(unsigned long load, unsigned long exp, unsigned long active)
26333 + {
26334 +- load *= exp;
26335 +- load += active * (FIXED_1 - exp);
26336 +- load += 1UL << (FSHIFT - 1);
26337 +- return load >> FSHIFT;
26338 ++ unsigned long newload;
26339 ++
26340 ++ newload = load * exp + active * (FIXED_1 - exp);
26341 ++ if (active >= load)
26342 ++ newload += FIXED_1-1;
26343 ++
26344 ++ return newload / FIXED_1;
26345 + }
26346 +
26347 + #ifdef CONFIG_NO_HZ_COMMON
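
The rounding change matters at the bottom of the decay curve. With the
kernel's FSHIFT == 11 and one-minute decay factor EXP_1 == 1884, the old
round-to-nearest form kept a load of 1 pinned at 1 on an idle system
((1*1884 + 1024) >> 11 == 1), while the new form truncates on the way down.
A small standalone check, assuming those constants:

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884	/* 1/exp(5sec/1min) in fixed point, as in the kernel */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload;

	newload = load * exp + active * (FIXED_1 - exp);
	if (active >= load)
		newload += FIXED_1 - 1;	/* round up only while rising */

	return newload / FIXED_1;	/* truncate while falling */
}

int main(void)
{
	/* one idle tick from load == 1 now decays to 0 */
	printf("%lu\n", calc_load(1, EXP_1, 0));
	return 0;
}
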
26348 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
26349 +index 10f16374df7f..ff87d887ff62 100644
26350 +--- a/kernel/sched/sched.h
26351 ++++ b/kernel/sched/sched.h
26352 +@@ -1738,3 +1738,16 @@ static inline u64 irq_time_read(int cpu)
26353 + }
26354 + #endif /* CONFIG_64BIT */
26355 + #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
26356 ++
26357 ++static inline void account_reset_rq(struct rq *rq)
26358 ++{
26359 ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
26360 ++ rq->prev_irq_time = 0;
26361 ++#endif
26362 ++#ifdef CONFIG_PARAVIRT
26363 ++ rq->prev_steal_time = 0;
26364 ++#endif
26365 ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
26366 ++ rq->prev_steal_time_rq = 0;
26367 ++#endif
26368 ++}
26369 +diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
26370 +index 7e7746a42a62..10a1d7dc9313 100644
26371 +--- a/kernel/sysctl_binary.c
26372 ++++ b/kernel/sysctl_binary.c
26373 +@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
26374 + }
26375 +
26376 + mnt = task_active_pid_ns(current)->proc_mnt;
26377 +- file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
26378 ++ file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
26379 + result = PTR_ERR(file);
26380 + if (IS_ERR(file))
26381 + goto out_putname;
26382 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
26383 +index 95181e36891a..9c143739b8d7 100644
26384 +--- a/kernel/trace/ring_buffer.c
26385 ++++ b/kernel/trace/ring_buffer.c
26386 +@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
26387 + raw_spinlock_t reader_lock; /* serialize readers */
26388 + arch_spinlock_t lock;
26389 + struct lock_class_key lock_key;
26390 +- unsigned int nr_pages;
26391 ++ unsigned long nr_pages;
26392 + unsigned int current_context;
26393 + struct list_head *pages;
26394 + struct buffer_page *head_page; /* read from head */
26395 +@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
26396 + u64 write_stamp;
26397 + u64 read_stamp;
26398 + /* ring buffer pages to update, > 0 to add, < 0 to remove */
26399 +- int nr_pages_to_update;
26400 ++ long nr_pages_to_update;
26401 + struct list_head new_pages; /* new pages to add */
26402 + struct work_struct update_pages_work;
26403 + struct completion update_done;
26404 +@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
26405 + return 0;
26406 + }
26407 +
26408 +-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
26409 ++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
26410 + {
26411 +- int i;
26412 + struct buffer_page *bpage, *tmp;
26413 ++ long i;
26414 +
26415 + for (i = 0; i < nr_pages; i++) {
26416 + struct page *page;
26417 +@@ -1168,7 +1168,7 @@ free_pages:
26418 + }
26419 +
26420 + static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
26421 +- unsigned nr_pages)
26422 ++ unsigned long nr_pages)
26423 + {
26424 + LIST_HEAD(pages);
26425 +
26426 +@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
26427 + }
26428 +
26429 + static struct ring_buffer_per_cpu *
26430 +-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
26431 ++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
26432 + {
26433 + struct ring_buffer_per_cpu *cpu_buffer;
26434 + struct buffer_page *bpage;
26435 +@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
26436 + struct lock_class_key *key)
26437 + {
26438 + struct ring_buffer *buffer;
26439 ++ long nr_pages;
26440 + int bsize;
26441 +- int cpu, nr_pages;
26442 ++ int cpu;
26443 +
26444 + /* keep it in its own cache line */
26445 + buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
26446 +@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
26447 + }
26448 +
26449 + static int
26450 +-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
26451 ++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
26452 + {
26453 + struct list_head *tail_page, *to_remove, *next_page;
26454 + struct buffer_page *to_remove_page, *tmp_iter_page;
26455 + struct buffer_page *last_page, *first_page;
26456 +- unsigned int nr_removed;
26457 ++ unsigned long nr_removed;
26458 + unsigned long head_bit;
26459 + int page_entries;
26460 +
26461 +@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
26462 + int cpu_id)
26463 + {
26464 + struct ring_buffer_per_cpu *cpu_buffer;
26465 +- unsigned nr_pages;
26466 ++ unsigned long nr_pages;
26467 + int cpu, err = 0;
26468 +
26469 + /*
26470 +@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
26471 + !cpumask_test_cpu(cpu_id, buffer->cpumask))
26472 + return size;
26473 +
26474 +- size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
26475 +- size *= BUF_PAGE_SIZE;
26476 ++ nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
26477 +
26478 + /* we need a minimum of two pages */
26479 +- if (size < BUF_PAGE_SIZE * 2)
26480 +- size = BUF_PAGE_SIZE * 2;
26481 ++ if (nr_pages < 2)
26482 ++ nr_pages = 2;
26483 +
26484 +- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
26485 ++ size = nr_pages * BUF_PAGE_SIZE;
26486 +
26487 + /*
26488 + * Don't succeed if resizing is disabled, as a reader might be
26489 +@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
26490 + struct ring_buffer *buffer =
26491 + container_of(self, struct ring_buffer, cpu_notify);
26492 + long cpu = (long)hcpu;
26493 +- int cpu_i, nr_pages_same;
26494 +- unsigned int nr_pages;
26495 ++ long nr_pages_same;
26496 ++ int cpu_i;
26497 ++ unsigned long nr_pages;
26498 +
26499 + switch (action) {
26500 + case CPU_UP_PREPARE:
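
The int -> long widening in the ring-buffer hunks guards the page-count
arithmetic against truncation on 64-bit systems, where a large resize request
yields a page count above INT_MAX. An illustrative check; BUF_PAGE_SIZE is
approximated here, the real value being PAGE_SIZE minus the page header:

#include <limits.h>
#include <stdio.h>

#define BUF_PAGE_SIZE	4080UL	/* illustrative: PAGE_SIZE minus header */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = 9UL << 40;	/* ~9 TB buffer request */
	unsigned long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/* ~2.4e9 pages: does not fit in a signed 32-bit counter */
	printf("nr_pages=%lu (INT_MAX=%d)\n", nr_pages, INT_MAX);
	return 0;
}
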
26501 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
26502 +index d9293402ee68..8305cbb2d5a2 100644
26503 +--- a/kernel/trace/trace.c
26504 ++++ b/kernel/trace/trace.c
26505 +@@ -4949,7 +4949,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
26506 +
26507 + spd.nr_pages = i;
26508 +
26509 +- ret = splice_to_pipe(pipe, &spd);
26510 ++ if (i)
26511 ++ ret = splice_to_pipe(pipe, &spd);
26512 ++ else
26513 ++ ret = 0;
26514 + out:
26515 + splice_shrink_spd(&spd);
26516 + return ret;
26517 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
26518 +index 05ddc0820771..6f965864cc02 100644
26519 +--- a/kernel/trace/trace_events.c
26520 ++++ b/kernel/trace/trace_events.c
26521 +@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
26522 + trace_create_file("filter", 0644, file->dir, file,
26523 + &ftrace_event_filter_fops);
26524 +
26525 +- trace_create_file("trigger", 0644, file->dir, file,
26526 +- &event_trigger_fops);
26527 ++ /*
26528 ++ * Only event directories that can be enabled should have
26529 ++ * triggers.
26530 ++ */
26531 ++ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
26532 ++ trace_create_file("trigger", 0644, file->dir, file,
26533 ++ &event_trigger_fops);
26534 +
26535 + trace_create_file("format", 0444, file->dir, call,
26536 + &ftrace_event_format_fops);
26537 +diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
26538 +index e4e56589ec1d..be3222b7d72e 100644
26539 +--- a/kernel/trace/trace_irqsoff.c
26540 ++++ b/kernel/trace/trace_irqsoff.c
26541 +@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
26542 + return 0;
26543 +
26544 + local_save_flags(*flags);
26545 +- /* slight chance to get a false positive on tracing_cpu */
26546 +- if (!irqs_disabled_flags(*flags))
26547 ++ /*
26548 ++ * Slight chance to get a false positive on tracing_cpu,
26549 ++ * although I'm starting to think there isn't a chance.
26550 ++ * Leave this for now just to be paranoid.
26551 ++ */
26552 ++ if (!irqs_disabled_flags(*flags) && !preempt_count())
26553 + return 0;
26554 +
26555 + *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
26556 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
26557 +index 060df67dbdd1..f96f0383f6c6 100644
26558 +--- a/kernel/trace/trace_printk.c
26559 ++++ b/kernel/trace/trace_printk.c
26560 +@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
26561 + const char *str = *fmt;
26562 + int i;
26563 +
26564 ++ if (!*fmt)
26565 ++ return 0;
26566 ++
26567 + seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
26568 +
26569 + /*
26570 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
26571 +index b3ace6ebbba3..9acb29f280ec 100644
26572 +--- a/kernel/watchdog.c
26573 ++++ b/kernel/watchdog.c
26574 +@@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
26575 + * both lockup detectors are disabled if proc_watchdog_update()
26576 + * returns an error.
26577 + */
26578 ++ if (old == new)
26579 ++ goto out;
26580 ++
26581 + err = proc_watchdog_update();
26582 + }
26583 + out:
26584 +@@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
26585 + int proc_watchdog_thresh(struct ctl_table *table, int write,
26586 + void __user *buffer, size_t *lenp, loff_t *ppos)
26587 + {
26588 +- int err, old;
26589 ++ int err, old, new;
26590 +
26591 + get_online_cpus();
26592 + mutex_lock(&watchdog_proc_mutex);
26593 +@@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
26594 + /*
26595 + * Update the sample period. Restore on failure.
26596 + */
26597 ++ new = ACCESS_ONCE(watchdog_thresh);
26598 ++ if (old == new)
26599 ++ goto out;
26600 ++
26601 + set_sample_period();
26602 + err = proc_watchdog_update();
26603 + if (err) {
26604 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
26605 +index 7ff5dc7d2ac5..f48c80e4ba75 100644
26606 +--- a/kernel/workqueue.c
26607 ++++ b/kernel/workqueue.c
26608 +@@ -667,6 +667,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
26609 + */
26610 + smp_wmb();
26611 + set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
26612 ++ /*
26613 ++ * The following mb guarantees that previous clear of a PENDING bit
26614 ++ * will not be reordered with any speculative LOADS or STORES from
26615 ++ * work->current_func, which is executed afterwards. This possible
26616 ++ * reordering can lead to a missed execution on an attempt to queue
26617 ++ * the same @work. E.g. consider this case:
26618 ++ *
26619 ++ * CPU#0 CPU#1
26620 ++ * ---------------------------- --------------------------------
26621 ++ *
26622 ++ * 1 STORE event_indicated
26623 ++ * 2 queue_work_on() {
26624 ++ * 3 test_and_set_bit(PENDING)
26625 ++ * 4 } set_..._and_clear_pending() {
26626 ++ * 5 set_work_data() # clear bit
26627 ++ * 6 smp_mb()
26628 ++ * 7 work->current_func() {
26629 ++ * 8 LOAD event_indicated
26630 ++ * }
26631 ++ *
26632 ++ * Without an explicit full barrier speculative LOAD on line 8 can
26633 ++ * be executed before CPU#0 does STORE on line 1. If that happens,
26634 ++ * CPU#0 observes the PENDING bit is still set and new execution of
26635 ++ * a @work is not queued in a hope, that CPU#1 will eventually
26636 ++ * finish the queued @work. Meanwhile CPU#1 does not see
26637 ++ * event_indicated is set, because speculative LOAD was executed
26638 ++ * before actual STORE.
26639 ++ */
26640 ++ smp_mb();
26641 + }
26642 +
26643 + static void clear_work_data(struct work_struct *work)
26644 +@@ -4527,6 +4556,17 @@ static void rebind_workers(struct worker_pool *pool)
26645 + pool->attrs->cpumask) < 0);
26646 +
26647 + spin_lock_irq(&pool->lock);
26648 ++
26649 ++ /*
26650 ++ * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
26651 ++ * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
26652 ++ * being reworked and this can go away in time.
26653 ++ */
26654 ++ if (!(pool->flags & POOL_DISASSOCIATED)) {
26655 ++ spin_unlock_irq(&pool->lock);
26656 ++ return;
26657 ++ }
26658 ++
26659 + pool->flags &= ~POOL_DISASSOCIATED;
26660 +
26661 + for_each_pool_worker(worker, pool) {
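
The ASCII timeline in the workqueue comment compresses to a classic
store/load reordering hazard. A C11 model of the two CPUs, where the seq_cst
fence stands in for the added smp_mb(); without it, the relaxed load of
event_indicated may be satisfied before the PENDING clear becomes visible:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pending;		/* models the work's PENDING bit */
static atomic_bool event_indicated;

/* CPU#0: producer retrying queue_work_on() */
static bool try_queue(void)
{
	atomic_store_explicit(&event_indicated, true,
			      memory_order_relaxed);		/* line 1 */
	/* full barrier, like test_and_set_bit() */
	return !atomic_exchange(&pending, true);		/* line 3 */
}

/* CPU#1: worker about to run work->current_func() */
static bool finish_and_check(void)
{
	atomic_store_explicit(&pending, false,
			      memory_order_relaxed);		/* line 5 */
	atomic_thread_fence(memory_order_seq_cst);		/* line 6: smp_mb() */
	return atomic_load_explicit(&event_indicated,
				    memory_order_relaxed);	/* line 8 */
}
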
26662 +diff --git a/lib/assoc_array.c b/lib/assoc_array.c
26663 +index 03dd576e6773..59fd7c0b119c 100644
26664 +--- a/lib/assoc_array.c
26665 ++++ b/lib/assoc_array.c
26666 +@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
26667 + free_slot = i;
26668 + continue;
26669 + }
26670 +- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
26671 ++ if (assoc_array_ptr_is_leaf(ptr) &&
26672 ++ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
26673 ++ index_key)) {
26674 + pr_devel("replace in slot %d\n", i);
26675 + edit->leaf_p = &node->slots[i];
26676 + edit->dead_leaf = node->slots[i];
26677 +diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
26678 +index abcecdc2d0f2..0710a62ad2f6 100644
26679 +--- a/lib/lz4/lz4defs.h
26680 ++++ b/lib/lz4/lz4defs.h
26681 +@@ -11,8 +11,7 @@
26682 + /*
26683 + * Detects 64 bits mode
26684 + */
26685 +-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
26686 +- || defined(__ppc64__) || defined(__LP64__))
26687 ++#if defined(CONFIG_64BIT)
26688 + #define LZ4_ARCH64 1
26689 + #else
26690 + #define LZ4_ARCH64 0
26691 +@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
26692 +
26693 + #define PUT4(s, d) (A32(d) = A32(s))
26694 + #define PUT8(s, d) (A64(d) = A64(s))
26695 ++
26696 ++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
26697 ++ (d = s - A16(p))
26698 ++
26699 + #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
26700 + do { \
26701 + A16(p) = v; \
26702 +@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
26703 + #define PUT8(s, d) \
26704 + put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
26705 +
26706 +-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
26707 +- do { \
26708 +- put_unaligned(v, (u16 *)(p)); \
26709 +- p += 2; \
26710 ++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
26711 ++ (d = s - get_unaligned_le16(p))
26712 ++
26713 ++#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
26714 ++ do { \
26715 ++ put_unaligned_le16(v, (u16 *)(p)); \
26716 ++ p += 2; \
26717 + } while (0)
26718 + #endif
26719 +
26720 +@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
26721 +
26722 + #endif
26723 +
26724 +-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
26725 +- (d = s - get_unaligned_le16(p))
26726 +-
26727 + #define LZ4_WILDCOPY(s, d, e) \
26728 + do { \
26729 + LZ4_COPYPACKET(s, d); \
26730 +diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
26731 +index ec533a6c77b5..eb15e7dc7b65 100644
26732 +--- a/lib/mpi/mpicoder.c
26733 ++++ b/lib/mpi/mpicoder.c
26734 +@@ -128,6 +128,23 @@ leave:
26735 + }
26736 + EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
26737 +
26738 ++static int count_lzeros(MPI a)
26739 ++{
26740 ++ mpi_limb_t alimb;
26741 ++ int i, lzeros = 0;
26742 ++
26743 ++ for (i = a->nlimbs - 1; i >= 0; i--) {
26744 ++ alimb = a->d[i];
26745 ++ if (alimb == 0) {
26746 ++ lzeros += sizeof(mpi_limb_t);
26747 ++ } else {
26748 ++ lzeros += count_leading_zeros(alimb) / 8;
26749 ++ break;
26750 ++ }
26751 ++ }
26752 ++ return lzeros;
26753 ++}
26754 ++
26755 + /**
26756 + * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
26757 + *
26758 +@@ -148,7 +165,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
26759 + uint8_t *p;
26760 + mpi_limb_t alimb;
26761 + unsigned int n = mpi_get_size(a);
26762 +- int i, lzeros = 0;
26763 ++ int i, lzeros;
26764 +
26765 + if (!buf || !nbytes)
26766 + return -EINVAL;
26767 +@@ -156,14 +173,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
26768 + if (sign)
26769 + *sign = a->sign;
26770 +
26771 +- p = (void *)&a->d[a->nlimbs] - 1;
26772 +-
26773 +- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
26774 +- if (!*p)
26775 +- lzeros++;
26776 +- else
26777 +- break;
26778 +- }
26779 ++ lzeros = count_lzeros(a);
26780 +
26781 + if (buf_len < n - lzeros) {
26782 + *nbytes = n - lzeros;
26783 +@@ -351,7 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
26784 + u8 *p, *p2;
26785 + mpi_limb_t alimb, alimb2;
26786 + unsigned int n = mpi_get_size(a);
26787 +- int i, x, y = 0, lzeros = 0, buf_len;
26788 ++ int i, x, y = 0, lzeros, buf_len;
26789 +
26790 + if (!nbytes)
26791 + return -EINVAL;
26792 +@@ -359,14 +369,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
26793 + if (sign)
26794 + *sign = a->sign;
26795 +
26796 +- p = (void *)&a->d[a->nlimbs] - 1;
26797 +-
26798 +- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
26799 +- if (!*p)
26800 +- lzeros++;
26801 +- else
26802 +- break;
26803 +- }
26804 ++ lzeros = count_lzeros(a);
26805 +
26806 + if (*nbytes < n - lzeros) {
26807 + *nbytes = n - lzeros;
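
count_lzeros() counts leading zero bytes limb-by-limb instead of walking raw
memory as the old loop did, which baked in a byte-order assumption. A
userspace restatement with a worked check; __builtin_clzl is a stand-in for
the kernel's count_leading_zeros() helper:

#include <stdio.h>

typedef unsigned long limb_t;	/* models mpi_limb_t on 64-bit */

static int count_lzeros(const limb_t *d, int nlimbs)
{
	int i, lzeros = 0;

	for (i = nlimbs - 1; i >= 0; i--) {
		if (d[i] == 0) {
			lzeros += sizeof(limb_t);
			continue;
		}
		lzeros += __builtin_clzl(d[i]) / 8;
		break;
	}
	return lzeros;
}

int main(void)
{
	limb_t d[2] = { 0x1234, 0 };	/* 16-byte MPI, value fits in 2 bytes */

	printf("%d\n", count_lzeros(d, 2));	/* 8 + 6 = 14 on 64-bit */
	return 0;
}
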
26808 +diff --git a/mm/compaction.c b/mm/compaction.c
26809 +index 585de54dbe8c..29fb26970fba 100644
26810 +--- a/mm/compaction.c
26811 ++++ b/mm/compaction.c
26812 +@@ -880,16 +880,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
26813 + pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
26814 + ISOLATE_UNEVICTABLE);
26815 +
26816 +- /*
26817 +- * In case of fatal failure, release everything that might
26818 +- * have been isolated in the previous iteration, and signal
26819 +- * the failure back to caller.
26820 +- */
26821 +- if (!pfn) {
26822 +- putback_movable_pages(&cc->migratepages);
26823 +- cc->nr_migratepages = 0;
26824 ++ if (!pfn)
26825 + break;
26826 +- }
26827 +
26828 + if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
26829 + break;
26830 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
26831 +index e10a4fee88d2..a82a87b3f9c6 100644
26832 +--- a/mm/huge_memory.c
26833 ++++ b/mm/huge_memory.c
26834 +@@ -1257,15 +1257,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
26835 + VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
26836 + /*
26837 + * We can only reuse the page if nobody else maps the huge page or it's
26838 +- * part. We can do it by checking page_mapcount() on each sub-page, but
26839 +- * it's expensive.
26840 +- * The cheaper way is to check page_count() to be equal 1: every
26841 +- * mapcount takes a page reference, so this way we can
26842 +- * guarantee, that the PMD is the only mapping.
26843 +- * This can give false negative if somebody pinned the page, but that's
26844 +- * fine.
26845 ++ * part.
26846 + */
26847 +- if (page_mapcount(page) == 1 && page_count(page) == 1) {
26848 ++ if (page_trans_huge_mapcount(page, NULL) == 1) {
26849 + pmd_t entry;
26850 + entry = pmd_mkyoung(orig_pmd);
26851 + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
26852 +@@ -1919,10 +1913,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
26853 + * page fault if needed.
26854 + */
26855 + return 0;
26856 +- if (vma->vm_ops)
26857 ++ if (vma->vm_ops || (vm_flags & VM_NO_THP))
26858 + /* khugepaged not yet working on file or special mappings */
26859 + return 0;
26860 +- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
26861 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
26862 + hend = vma->vm_end & HPAGE_PMD_MASK;
26863 + if (hstart < hend)
26864 +@@ -2039,7 +2032,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
26865 + if (pte_write(pteval)) {
26866 + writable = true;
26867 + } else {
26868 +- if (PageSwapCache(page) && !reuse_swap_page(page)) {
26869 ++ if (PageSwapCache(page) &&
26870 ++ !reuse_swap_page(page, NULL)) {
26871 + unlock_page(page);
26872 + result = SCAN_SWAP_CACHE_PAGE;
26873 + goto out;
26874 +@@ -2310,8 +2304,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
26875 + return false;
26876 + if (is_vma_temporary_stack(vma))
26877 + return false;
26878 +- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
26879 +- return true;
26880 ++ return !(vma->vm_flags & VM_NO_THP);
26881 + }
26882 +
26883 + static void collapse_huge_page(struct mm_struct *mm,
26884 +@@ -3340,6 +3333,64 @@ int total_mapcount(struct page *page)
26885 + }
26886 +
26887 + /*
26888 ++ * This calculates accurately how many mappings a transparent hugepage
26889 ++ * has (unlike page_mapcount() which isn't fully accurate). This full
26890 ++ * accuracy is primarily needed to know if copy-on-write faults can
26891 ++ * reuse the page and change the mapping to read-write instead of
26892 ++ * copying them. At the same time this returns the total_mapcount too.
26893 ++ *
26894 ++ * The function returns the highest mapcount any one of the subpages
26895 ++ * has. If the return value is one, even if different processes are
26896 ++ * mapping different subpages of the transparent hugepage, they can
26897 ++ * all reuse it, because each process is reusing a different subpage.
26898 ++ *
26899 ++ * The total_mapcount is instead counting all virtual mappings of the
26900 ++ * subpages. If the total_mapcount is equal to "one", it tells the
26901 ++ * caller all mappings belong to the same "mm" and in turn the
26902 ++ * anon_vma of the transparent hugepage can become the vma->anon_vma
26903 ++ * local one as no other process may be mapping any of the subpages.
26904 ++ *
26905 ++ * It would be more accurate to replace page_mapcount() with
26906 ++ * page_trans_huge_mapcount(); however, we only use
26907 ++ * page_trans_huge_mapcount() in the copy-on-write faults where we
26908 ++ * need full accuracy to avoid breaking page pinning, because
26909 ++ * page_trans_huge_mapcount() is slower than page_mapcount().
26910 ++ */
26911 ++int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
26912 ++{
26913 ++ int i, ret, _total_mapcount, mapcount;
26914 ++
26915 ++ /* hugetlbfs shouldn't call it */
26916 ++ VM_BUG_ON_PAGE(PageHuge(page), page);
26917 ++
26918 ++ if (likely(!PageTransCompound(page))) {
26919 ++ mapcount = atomic_read(&page->_mapcount) + 1;
26920 ++ if (total_mapcount)
26921 ++ *total_mapcount = mapcount;
26922 ++ return mapcount;
26923 ++ }
26924 ++
26925 ++ page = compound_head(page);
26926 ++
26927 ++ _total_mapcount = ret = 0;
26928 ++ for (i = 0; i < HPAGE_PMD_NR; i++) {
26929 ++ mapcount = atomic_read(&page[i]._mapcount) + 1;
26930 ++ ret = max(ret, mapcount);
26931 ++ _total_mapcount += mapcount;
26932 ++ }
26933 ++ if (PageDoubleMap(page)) {
26934 ++ ret -= 1;
26935 ++ _total_mapcount -= HPAGE_PMD_NR;
26936 ++ }
26937 ++ mapcount = compound_mapcount(page);
26938 ++ ret += mapcount;
26939 ++ _total_mapcount += mapcount;
26940 ++ if (total_mapcount)
26941 ++ *total_mapcount = _total_mapcount;
26942 ++ return ret;
26943 ++}
26944 ++
26945 ++/*
26946 + * This function splits huge page into normal pages. @page can point to any
26947 + * subpage of huge page to split. Split doesn't change the position of @page.
26948 + *
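
The page_trans_huge_mapcount() comment above distinguishes the per-subpage
maximum from the total. A toy model of that distinction: two processes
PTE-mapping disjoint subpages of one THP give max 1 (each may reuse its own
subpage on CoW) but total 2 (the mappings do not belong to one mm).
HPAGE_PMD_NR is assumed to be 512 (x86-64, 2 MB THP):

#include <stdio.h>

#define HPAGE_PMD_NR 512

int main(void)
{
	int mapcount[HPAGE_PMD_NR] = { 0 };
	int i, max = 0, total = 0;

	mapcount[0] = 1;	/* process A maps subpage 0 */
	mapcount[1] = 1;	/* process B maps subpage 1 */

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		if (mapcount[i] > max)
			max = mapcount[i];
		total += mapcount[i];
	}
	printf("max=%d total=%d\n", max, total);	/* max=1 total=2 */
	return 0;
}
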
26949 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
26950 +index d06cae2de783..a65ad1d59232 100644
26951 +--- a/mm/memcontrol.c
26952 ++++ b/mm/memcontrol.c
26953 +@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
26954 + /* "mc" and its members are protected by cgroup_mutex */
26955 + static struct move_charge_struct {
26956 + spinlock_t lock; /* for from, to */
26957 ++ struct mm_struct *mm;
26958 + struct mem_cgroup *from;
26959 + struct mem_cgroup *to;
26960 + unsigned long flags;
26961 +@@ -1262,7 +1263,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
26962 + return limit;
26963 + }
26964 +
26965 +-static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
26966 ++static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
26967 + int order)
26968 + {
26969 + struct oom_control oc = {
26970 +@@ -1340,6 +1341,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
26971 + }
26972 + unlock:
26973 + mutex_unlock(&oom_lock);
26974 ++ return chosen;
26975 + }
26976 +
26977 + #if MAX_NUMNODES > 1
26978 +@@ -4729,6 +4731,8 @@ static void __mem_cgroup_clear_mc(void)
26979 +
26980 + static void mem_cgroup_clear_mc(void)
26981 + {
26982 ++ struct mm_struct *mm = mc.mm;
26983 ++
26984 + /*
26985 + * we must clear moving_task before waking up waiters at the end of
26986 + * task migration.
26987 +@@ -4738,7 +4742,10 @@ static void mem_cgroup_clear_mc(void)
26988 + spin_lock(&mc.lock);
26989 + mc.from = NULL;
26990 + mc.to = NULL;
26991 ++ mc.mm = NULL;
26992 + spin_unlock(&mc.lock);
26993 ++
26994 ++ mmput(mm);
26995 + }
26996 +
26997 + static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
26998 +@@ -4795,6 +4802,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
26999 + VM_BUG_ON(mc.moved_swap);
27000 +
27001 + spin_lock(&mc.lock);
27002 ++ mc.mm = mm;
27003 + mc.from = from;
27004 + mc.to = memcg;
27005 + mc.flags = move_flags;
27006 +@@ -4804,8 +4812,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
27007 + ret = mem_cgroup_precharge_mc(mm);
27008 + if (ret)
27009 + mem_cgroup_clear_mc();
27010 ++ } else {
27011 ++ mmput(mm);
27012 + }
27013 +- mmput(mm);
27014 + return ret;
27015 + }
27016 +
27017 +@@ -4914,11 +4923,11 @@ put: /* get_mctgt_type() gets the page */
27018 + return ret;
27019 + }
27020 +
27021 +-static void mem_cgroup_move_charge(struct mm_struct *mm)
27022 ++static void mem_cgroup_move_charge(void)
27023 + {
27024 + struct mm_walk mem_cgroup_move_charge_walk = {
27025 + .pmd_entry = mem_cgroup_move_charge_pte_range,
27026 +- .mm = mm,
27027 ++ .mm = mc.mm,
27028 + };
27029 +
27030 + lru_add_drain_all();
27031 +@@ -4930,7 +4939,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
27032 + atomic_inc(&mc.from->moving_account);
27033 + synchronize_rcu();
27034 + retry:
27035 +- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
27036 ++ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
27037 + /*
27038 + * Someone who is holding the mmap_sem might be waiting in
27039 + * waitq. So we cancel all extra charges, wake up all waiters,
27040 +@@ -4947,23 +4956,16 @@ retry:
27041 + * additional charge, the page walk just aborts.
27042 + */
27043 + walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
27044 +- up_read(&mm->mmap_sem);
27045 ++ up_read(&mc.mm->mmap_sem);
27046 + atomic_dec(&mc.from->moving_account);
27047 + }
27048 +
27049 +-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
27050 ++static void mem_cgroup_move_task(void)
27051 + {
27052 +- struct cgroup_subsys_state *css;
27053 +- struct task_struct *p = cgroup_taskset_first(tset, &css);
27054 +- struct mm_struct *mm = get_task_mm(p);
27055 +-
27056 +- if (mm) {
27057 +- if (mc.to)
27058 +- mem_cgroup_move_charge(mm);
27059 +- mmput(mm);
27060 +- }
27061 +- if (mc.to)
27062 ++ if (mc.to) {
27063 ++ mem_cgroup_move_charge();
27064 + mem_cgroup_clear_mc();
27065 ++ }
27066 + }
27067 + #else /* !CONFIG_MMU */
27068 + static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
27069 +@@ -4973,7 +4975,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
27070 + static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
27071 + {
27072 + }
27073 +-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
27074 ++static void mem_cgroup_move_task(void)
27075 + {
27076 + }
27077 + #endif
27078 +@@ -5051,6 +5053,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
27079 + char *buf, size_t nbytes, loff_t off)
27080 + {
27081 + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
27082 ++ unsigned long nr_pages;
27083 + unsigned long high;
27084 + int err;
27085 +
27086 +@@ -5061,6 +5064,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
27087 +
27088 + memcg->high = high;
27089 +
27090 ++ nr_pages = page_counter_read(&memcg->memory);
27091 ++ if (nr_pages > high)
27092 ++ try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
27093 ++ GFP_KERNEL, true);
27094 ++
27095 + memcg_wb_domain_size_changed(memcg);
27096 + return nbytes;
27097 + }
27098 +@@ -5082,6 +5090,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
27099 + char *buf, size_t nbytes, loff_t off)
27100 + {
27101 + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
27102 ++ unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
27103 ++ bool drained = false;
27104 + unsigned long max;
27105 + int err;
27106 +
27107 +@@ -5090,9 +5100,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
27108 + if (err)
27109 + return err;
27110 +
27111 +- err = mem_cgroup_resize_limit(memcg, max);
27112 +- if (err)
27113 +- return err;
27114 ++ xchg(&memcg->memory.limit, max);
27115 ++
27116 ++ for (;;) {
27117 ++ unsigned long nr_pages = page_counter_read(&memcg->memory);
27118 ++
27119 ++ if (nr_pages <= max)
27120 ++ break;
27121 ++
27122 ++ if (signal_pending(current)) {
27123 ++ err = -EINTR;
27124 ++ break;
27125 ++ }
27126 ++
27127 ++ if (!drained) {
27128 ++ drain_all_stock(memcg);
27129 ++ drained = true;
27130 ++ continue;
27131 ++ }
27132 ++
27133 ++ if (nr_reclaims) {
27134 ++ if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
27135 ++ GFP_KERNEL, true))
27136 ++ nr_reclaims--;
27137 ++ continue;
27138 ++ }
27139 ++
27140 ++ mem_cgroup_events(memcg, MEMCG_OOM, 1);
27141 ++ if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
27142 ++ break;
27143 ++ }
27144 +
27145 + memcg_wb_domain_size_changed(memcg);
27146 + return nbytes;
27147 +@@ -5210,7 +5247,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
27148 + .css_reset = mem_cgroup_css_reset,
27149 + .can_attach = mem_cgroup_can_attach,
27150 + .cancel_attach = mem_cgroup_cancel_attach,
27151 +- .attach = mem_cgroup_move_task,
27152 ++ .post_attach = mem_cgroup_move_task,
27153 + .bind = mem_cgroup_bind,
27154 + .dfl_cftypes = memory_files,
27155 + .legacy_cftypes = mem_cgroup_legacy_files,
27156 +diff --git a/mm/memory.c b/mm/memory.c
27157 +index 8132787ae4d5..c1aa0e4b4096 100644
27158 +--- a/mm/memory.c
27159 ++++ b/mm/memory.c
27160 +@@ -792,6 +792,46 @@ out:
27161 + return pfn_to_page(pfn);
27162 + }
27163 +
27164 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
27165 ++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
27166 ++ pmd_t pmd)
27167 ++{
27168 ++ unsigned long pfn = pmd_pfn(pmd);
27169 ++
27170 ++ /*
27171 ++ * There is no pmd_special() but there may be special pmds, e.g.
27172 ++ * in a direct-access (dax) mapping, so let's just replicate the
27173 ++ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
27174 ++ */
27175 ++ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
27176 ++ if (vma->vm_flags & VM_MIXEDMAP) {
27177 ++ if (!pfn_valid(pfn))
27178 ++ return NULL;
27179 ++ goto out;
27180 ++ } else {
27181 ++ unsigned long off;
27182 ++ off = (addr - vma->vm_start) >> PAGE_SHIFT;
27183 ++ if (pfn == vma->vm_pgoff + off)
27184 ++ return NULL;
27185 ++ if (!is_cow_mapping(vma->vm_flags))
27186 ++ return NULL;
27187 ++ }
27188 ++ }
27189 ++
27190 ++ if (is_zero_pfn(pfn))
27191 ++ return NULL;
27192 ++ if (unlikely(pfn > highest_memmap_pfn))
27193 ++ return NULL;
27194 ++
27195 ++ /*
27196 ++ * NOTE! We still have PageReserved() pages in the page tables.
27197 ++ * eg. VDSO mappings can cause them to exist.
27198 ++ */
27199 ++out:
27200 ++ return pfn_to_page(pfn);
27201 ++}
27202 ++#endif
27203 ++
27204 + /*
27205 + * copy one vm_area from one task to the other. Assumes the page tables
27206 + * already present in the new task to be cleared in the whole range
27207 +@@ -2317,6 +2357,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
27208 + * not dirty accountable.
27209 + */
27210 + if (PageAnon(old_page) && !PageKsm(old_page)) {
27211 ++ int total_mapcount;
27212 + if (!trylock_page(old_page)) {
27213 + page_cache_get(old_page);
27214 + pte_unmap_unlock(page_table, ptl);
27215 +@@ -2331,13 +2372,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
27216 + }
27217 + page_cache_release(old_page);
27218 + }
27219 +- if (reuse_swap_page(old_page)) {
27220 +- /*
27221 +- * The page is all ours. Move it to our anon_vma so
27222 +- * the rmap code will not search our parent or siblings.
27223 +- * Protected against the rmap code by the page lock.
27224 +- */
27225 +- page_move_anon_rmap(old_page, vma, address);
27226 ++ if (reuse_swap_page(old_page, &total_mapcount)) {
27227 ++ if (total_mapcount == 1) {
27228 ++ /*
27229 ++ * The page is all ours. Move it to
27230 ++ * our anon_vma so the rmap code will
27231 ++ * not search our parent or siblings.
27232 ++ * Protected against the rmap code by
27233 ++ * the page lock.
27234 ++ */
27235 ++ page_move_anon_rmap(compound_head(old_page),
27236 ++ vma, address);
27237 ++ }
27238 + unlock_page(old_page);
27239 + return wp_page_reuse(mm, vma, address, page_table, ptl,
27240 + orig_pte, old_page, 0, 0);
27241 +@@ -2562,7 +2608,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
27242 + inc_mm_counter_fast(mm, MM_ANONPAGES);
27243 + dec_mm_counter_fast(mm, MM_SWAPENTS);
27244 + pte = mk_pte(page, vma->vm_page_prot);
27245 +- if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
27246 ++ if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
27247 + pte = maybe_mkwrite(pte_mkdirty(pte), vma);
27248 + flags &= ~FAULT_FLAG_WRITE;
27249 + ret |= VM_FAULT_WRITE;
27250 +diff --git a/mm/migrate.c b/mm/migrate.c
27251 +index 3ad0fea5c438..625741faa068 100644
27252 +--- a/mm/migrate.c
27253 ++++ b/mm/migrate.c
27254 +@@ -967,7 +967,13 @@ out:
27255 + dec_zone_page_state(page, NR_ISOLATED_ANON +
27256 + page_is_file_cache(page));
27257 + /* Soft-offlined page shouldn't go through lru cache list */
27258 +- if (reason == MR_MEMORY_FAILURE) {
27259 ++ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
27260 ++ /*
27261 ++ * With this release, we free the successfully migrated
27262 ++ * page and intentionally set PG_HWPoison on the just-freed
27263 ++ * page. Although it's rather weird, it's how the
27264 ++ * HWPoison flag works at the moment.
27265 ++ */
27266 + put_page(page);
27267 + if (!test_set_page_hwpoison(page))
27268 + num_poisoned_pages_inc();
27269 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
27270 +index 6fe7d15bd1f7..62bbf350ddf7 100644
27271 +--- a/mm/page-writeback.c
27272 ++++ b/mm/page-writeback.c
27273 +@@ -1909,7 +1909,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
27274 + if (gdtc->dirty > gdtc->bg_thresh)
27275 + return true;
27276 +
27277 +- if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
27278 ++ if (wb_stat(wb, WB_RECLAIMABLE) >
27279 ++ wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
27280 + return true;
27281 +
27282 + if (mdtc) {
27283 +@@ -1923,7 +1924,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
27284 + if (mdtc->dirty > mdtc->bg_thresh)
27285 + return true;
27286 +
27287 +- if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
27288 ++ if (wb_stat(wb, WB_RECLAIMABLE) >
27289 ++ wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
27290 + return true;
27291 + }
27292 +
27293 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
27294 +index 838ca8bb64f7..629ce645cffd 100644
27295 +--- a/mm/page_alloc.c
27296 ++++ b/mm/page_alloc.c
27297 +@@ -660,34 +660,28 @@ static inline void __free_one_page(struct page *page,
27298 + unsigned long combined_idx;
27299 + unsigned long uninitialized_var(buddy_idx);
27300 + struct page *buddy;
27301 +- unsigned int max_order = MAX_ORDER;
27302 ++ unsigned int max_order;
27303 ++
27304 ++ max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
27305 +
27306 + VM_BUG_ON(!zone_is_initialized(zone));
27307 + VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
27308 +
27309 + VM_BUG_ON(migratetype == -1);
27310 +- if (is_migrate_isolate(migratetype)) {
27311 +- /*
27312 +- * We restrict max order of merging to prevent merge
27313 +- * between freepages on isolate pageblock and normal
27314 +- * pageblock. Without this, pageblock isolation
27315 +- * could cause incorrect freepage accounting.
27316 +- */
27317 +- max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
27318 +- } else {
27319 ++ if (likely(!is_migrate_isolate(migratetype)))
27320 + __mod_zone_freepage_state(zone, 1 << order, migratetype);
27321 +- }
27322 +
27323 +- page_idx = pfn & ((1 << max_order) - 1);
27324 ++ page_idx = pfn & ((1 << MAX_ORDER) - 1);
27325 +
27326 + VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
27327 + VM_BUG_ON_PAGE(bad_range(zone, page), page);
27328 +
27329 ++continue_merging:
27330 + while (order < max_order - 1) {
27331 + buddy_idx = __find_buddy_index(page_idx, order);
27332 + buddy = page + (buddy_idx - page_idx);
27333 + if (!page_is_buddy(page, buddy, order))
27334 +- break;
27335 ++ goto done_merging;
27336 + /*
27337 + * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
27338 + * merge with it and move up one order.
27339 +@@ -704,6 +698,32 @@ static inline void __free_one_page(struct page *page,
27340 + page_idx = combined_idx;
27341 + order++;
27342 + }
27343 ++ if (max_order < MAX_ORDER) {
27344 ++ /* If we are here, it means order is >= pageblock_order.
27345 ++ * We want to prevent merge between freepages on isolate
27346 ++ * pageblock and normal pageblock. Without this, pageblock
27347 ++ * isolation could cause incorrect freepage or CMA accounting.
27348 ++ *
27349 ++ * We don't want to hit this code for the more frequent
27350 ++ * low-order merging.
27351 ++ */
27352 ++ if (unlikely(has_isolate_pageblock(zone))) {
27353 ++ int buddy_mt;
27354 ++
27355 ++ buddy_idx = __find_buddy_index(page_idx, order);
27356 ++ buddy = page + (buddy_idx - page_idx);
27357 ++ buddy_mt = get_pageblock_migratetype(buddy);
27358 ++
27359 ++ if (migratetype != buddy_mt
27360 ++ && (is_migrate_isolate(migratetype) ||
27361 ++ is_migrate_isolate(buddy_mt)))
27362 ++ goto done_merging;
27363 ++ }
27364 ++ max_order++;
27365 ++ goto continue_merging;
27366 ++ }
27367 ++
27368 ++done_merging:
27369 + set_page_order(page, order);
27370 +
27371 + /*
27372 +@@ -6194,7 +6214,7 @@ int __meminit init_per_zone_wmark_min(void)
27373 + setup_per_zone_inactive_ratio();
27374 + return 0;
27375 + }
27376 +-module_init(init_per_zone_wmark_min)
27377 ++core_initcall(init_per_zone_wmark_min)
27378 +
27379 + /*
27380 + * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
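
The continue_merging/done_merging flow above only pays for the isolation
check once the order reaches pageblock_order. The buddy lookup it leans on is
plain bit arithmetic; a hedged sketch of the XOR form the buddy allocator
uses for __find_buddy_index():

#include <stdio.h>

/* buddies of order N differ only in bit N of the page index */
static unsigned long find_buddy_index(unsigned long page_idx,
				      unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	/* page 8's order-3 buddy is page 0, and vice versa */
	printf("%lu %lu\n", find_buddy_index(8, 3), find_buddy_index(0, 3));
	return 0;
}
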
27381 +diff --git a/mm/page_isolation.c b/mm/page_isolation.c
27382 +index 92c4c36501e7..31555b689eb9 100644
27383 +--- a/mm/page_isolation.c
27384 ++++ b/mm/page_isolation.c
27385 +@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
27386 + * now as a simple work-around, we use the next node for destination.
27387 + */
27388 + if (PageHuge(page)) {
27389 +- nodemask_t src = nodemask_of_node(page_to_nid(page));
27390 +- nodemask_t dst;
27391 +- nodes_complement(dst, src);
27392 ++ int node = next_online_node(page_to_nid(page));
27393 ++ if (node == MAX_NUMNODES)
27394 ++ node = first_online_node;
27395 + return alloc_huge_page_node(page_hstate(compound_head(page)),
27396 +- next_node(page_to_nid(page), dst));
27397 ++ node);
27398 + }
27399 +
27400 + if (PageHighMem(page))
27401 +diff --git a/mm/slub.c b/mm/slub.c
27402 +index d8fbd4a6ed59..2a722e141958 100644
27403 +--- a/mm/slub.c
27404 ++++ b/mm/slub.c
27405 +@@ -2815,6 +2815,7 @@ struct detached_freelist {
27406 + void *tail;
27407 + void *freelist;
27408 + int cnt;
27409 ++ struct kmem_cache *s;
27410 + };
27411 +
27412 + /*
27413 +@@ -2829,8 +2830,9 @@ struct detached_freelist {
27414 + * synchronization primitive. Look ahead in the array is limited due
27415 + * to performance reasons.
27416 + */
27417 +-static int build_detached_freelist(struct kmem_cache *s, size_t size,
27418 +- void **p, struct detached_freelist *df)
27419 ++static inline
27420 ++int build_detached_freelist(struct kmem_cache *s, size_t size,
27421 ++ void **p, struct detached_freelist *df)
27422 + {
27423 + size_t first_skipped_index = 0;
27424 + int lookahead = 3;
27425 +@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
27426 + if (!object)
27427 + return 0;
27428 +
27429 ++ /* Support for memcg, compiler can optimize this out */
27430 ++ df->s = cache_from_obj(s, object);
27431 ++
27432 + /* Start new detached freelist */
27433 +- set_freepointer(s, object, NULL);
27434 ++ set_freepointer(df->s, object, NULL);
27435 + df->page = virt_to_head_page(object);
27436 + df->tail = object;
27437 + df->freelist = object;
27438 +@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
27439 + /* df->page is always set at this point */
27440 + if (df->page == virt_to_head_page(object)) {
27441 + /* Opportunity build freelist */
27442 +- set_freepointer(s, object, df->freelist);
27443 ++ set_freepointer(df->s, object, df->freelist);
27444 + df->freelist = object;
27445 + df->cnt++;
27446 + p[size] = NULL; /* mark object processed */
27447 +@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
27448 + return first_skipped_index;
27449 + }
27450 +
27451 +-
27452 + /* Note that interrupts must be enabled when calling this function. */
27453 +-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
27454 ++void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
27455 + {
27456 + if (WARN_ON(!size))
27457 + return;
27458 +
27459 + do {
27460 + struct detached_freelist df;
27461 +- struct kmem_cache *s;
27462 +-
27463 +- /* Support for memcg */
27464 +- s = cache_from_obj(orig_s, p[size - 1]);
27465 +
27466 + size = build_detached_freelist(s, size, p, &df);
27467 + if (unlikely(!df.page))
27468 + continue;
27469 +
27470 +- slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
27471 ++ slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
27472 + } while (likely(size));
27473 + }
27474 + EXPORT_SYMBOL(kmem_cache_free_bulk);
27475 +diff --git a/mm/swapfile.c b/mm/swapfile.c
27476 +index d2c37365e2d6..954fd8f72b79 100644
27477 +--- a/mm/swapfile.c
27478 ++++ b/mm/swapfile.c
27479 +@@ -916,18 +916,19 @@ out:
27480 + * to it. And as a side-effect, free up its swap: because the old content
27481 + * on disk will never be read, and seeking back there to write new content
27482 + * later would only waste time away from clustering.
27483 ++ *
27484 ++ * NOTE: total_mapcount should not be relied upon by the caller if
27485 ++ * reuse_swap_page() returns false, but it may always be overwritten
27486 ++ * (see the other implementation for CONFIG_SWAP=n).
27487 + */
27488 +-int reuse_swap_page(struct page *page)
27489 ++bool reuse_swap_page(struct page *page, int *total_mapcount)
27490 + {
27491 + int count;
27492 +
27493 + VM_BUG_ON_PAGE(!PageLocked(page), page);
27494 + if (unlikely(PageKsm(page)))
27495 +- return 0;
27496 +- /* The page is part of THP and cannot be reused */
27497 +- if (PageTransCompound(page))
27498 +- return 0;
27499 +- count = page_mapcount(page);
27500 ++ return false;
27501 ++ count = page_trans_huge_mapcount(page, total_mapcount);
27502 + if (count <= 1 && PageSwapCache(page)) {
27503 + count += page_swapcount(page);
27504 + if (count == 1 && !PageWriteback(page)) {
27505 +diff --git a/mm/vmscan.c b/mm/vmscan.c
27506 +index 71b1c29948db..c712b016e0ab 100644
27507 +--- a/mm/vmscan.c
27508 ++++ b/mm/vmscan.c
27509 +@@ -2561,7 +2561,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
27510 + sc->gfp_mask |= __GFP_HIGHMEM;
27511 +
27512 + for_each_zone_zonelist_nodemask(zone, z, zonelist,
27513 +- requested_highidx, sc->nodemask) {
27514 ++ gfp_zone(sc->gfp_mask), sc->nodemask) {
27515 + enum zone_type classzone_idx;
27516 +
27517 + if (!populated_zone(zone))
27518 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
27519 +index 2d7c4c11fc63..336149ffd07d 100644
27520 +--- a/mm/zsmalloc.c
27521 ++++ b/mm/zsmalloc.c
27522 +@@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
27523 + static unsigned long zs_can_compact(struct size_class *class)
27524 + {
27525 + unsigned long obj_wasted;
27526 ++ unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
27527 ++ unsigned long obj_used = zs_stat_get(class, OBJ_USED);
27528 +
27529 +- obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
27530 +- zs_stat_get(class, OBJ_USED);
27531 ++ if (obj_allocated <= obj_used)
27532 ++ return 0;
27533 +
27534 ++ obj_wasted = obj_allocated - obj_used;
27535 + obj_wasted /= get_maxobj_per_zspage(class->size,
27536 + class->pages_per_zspage);
27537 +
27538 +diff --git a/mm/zswap.c b/mm/zswap.c
27539 +index bf14508afd64..340261946fda 100644
27540 +--- a/mm/zswap.c
27541 ++++ b/mm/zswap.c
27542 +@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
27543 + static LIST_HEAD(zswap_pools);
27544 + /* protects zswap_pools list modification */
27545 + static DEFINE_SPINLOCK(zswap_pools_lock);
27546 ++/* pool counter to provide unique names to zpool */
27547 ++static atomic_t zswap_pools_count = ATOMIC_INIT(0);
27548 +
27549 + /* used by param callback function */
27550 + static bool zswap_init_started;
27551 +@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
27552 + static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
27553 + {
27554 + struct zswap_pool *pool;
27555 ++ char name[38]; /* 'zswap' + 32 char (max) num + \0 */
27556 + gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
27557 +
27558 + pool = kzalloc(sizeof(*pool), GFP_KERNEL);
27559 +@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
27560 + return NULL;
27561 + }
27562 +
27563 +- pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
27564 ++ /* unique name for each pool specifically required by zsmalloc */
27565 ++ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
27566 ++
27567 ++ pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
27568 + if (!pool->zpool) {
27569 + pr_err("%s zpool not available\n", type);
27570 + goto error;
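The snprintf() above exists because zsmalloc requires every pool to have a unique name; a global counter provides one. A self-contained C11 sketch of the same scheme, with stdatomic standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint pool_count;

    /* Mirrors atomic_inc_return(): fetch-and-add returns the old
     * value, so add one to get the post-increment result. */
    static void pool_name(char *buf, size_t len)
    {
        snprintf(buf, len, "zswap%x",
                 atomic_fetch_add(&pool_count, 1) + 1);
    }

    int main(void)
    {
        char a[38], b[38];          /* sized as in the hunk's comment */

        pool_name(a, sizeof(a));
        pool_name(b, sizeof(b));
        printf("%s %s\n", a, b);    /* prints: zswap1 zswap2 */
        return 0;
    }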
27571 +diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
27572 +index b563a3f5f2a8..2fa3be965101 100644
27573 +--- a/net/ax25/ax25_ip.c
27574 ++++ b/net/ax25/ax25_ip.c
27575 +@@ -228,8 +228,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
27576 + }
27577 + #endif
27578 +
27579 ++static bool ax25_validate_header(const char *header, unsigned int len)
27580 ++{
27581 ++ ax25_digi digi;
27582 ++
27583 ++ if (!len)
27584 ++ return false;
27585 ++
27586 ++ if (header[0])
27587 ++ return true;
27588 ++
27589 ++ return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
27590 ++ NULL);
27591 ++}
27592 ++
27593 + const struct header_ops ax25_header_ops = {
27594 + .create = ax25_hard_header,
27595 ++ .validate = ax25_validate_header,
27596 + };
27597 +
27598 + EXPORT_SYMBOL(ax25_header_ops);
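The new .validate hook gives the core a way to reject hand-crafted headers (for instance from AF_PACKET sockets) before a driver ever sees them. A hedged sketch of the callback's shape, with parse_addrs() as a hypothetical stand-in for ax25_addr_parse():

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical parser; the real check is ax25_addr_parse(). */
    static bool parse_addrs(const char *p, size_t len)
    {
        return len >= 14;   /* assumption: two 7-byte AX.25 addresses */
    }

    static bool validate_header(const char *header, size_t len)
    {
        if (!len)
            return false;           /* empty header: reject */
        if (header[0])
            return true;            /* first byte set: nothing further
                                       to parse */
        return parse_addrs(header + 1, len - 1);
    }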
27599 +diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
27600 +index a49c705fb86b..5f19133c5530 100644
27601 +--- a/net/batman-adv/distributed-arp-table.c
27602 ++++ b/net/batman-adv/distributed-arp-table.c
27603 +@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
27604 + * be sent to
27605 + * @bat_priv: the bat priv with all the soft interface information
27606 + * @ip_dst: ipv4 to look up in the DHT
27607 ++ * @vid: VLAN identifier
27608 + *
27609 + * An originator O is selected if and only if its DHT_ID value is one of three
27610 + * closest values (from the LEFT, with wrap around if needed) then the hash
27611 +@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
27612 + * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
27613 + */
27614 + static struct batadv_dat_candidate *
27615 +-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
27616 ++batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
27617 ++ unsigned short vid)
27618 + {
27619 + int select;
27620 + batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
27621 +@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
27622 + return NULL;
27623 +
27624 + dat.ip = ip_dst;
27625 +- dat.vid = 0;
27626 ++ dat.vid = vid;
27627 + ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
27628 + BATADV_DAT_ADDR_MAX);
27629 +
27630 +@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
27631 + * @bat_priv: the bat priv with all the soft interface information
27632 + * @skb: payload to send
27633 + * @ip: the DHT key
27634 ++ * @vid: VLAN identifier
27635 + * @packet_subtype: unicast4addr packet subtype to use
27636 + *
27637 + * This function copies the skb with pskb_copy() and is sent as unicast packet
27638 +@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
27639 + */
27640 + static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
27641 + struct sk_buff *skb, __be32 ip,
27642 +- int packet_subtype)
27643 ++ unsigned short vid, int packet_subtype)
27644 + {
27645 + int i;
27646 + bool ret = false;
27647 +@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
27648 + struct sk_buff *tmp_skb;
27649 + struct batadv_dat_candidate *cand;
27650 +
27651 +- cand = batadv_dat_select_candidates(bat_priv, ip);
27652 ++ cand = batadv_dat_select_candidates(bat_priv, ip, vid);
27653 + if (!cand)
27654 + goto out;
27655 +
27656 +@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
27657 + ret = true;
27658 + } else {
27659 + /* Send the request to the DHT */
27660 +- ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
27661 ++ ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
27662 + BATADV_P_DAT_DHT_GET);
27663 + }
27664 + out:
27665 +@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
27666 + /* Send the ARP reply to the candidates for both the IP addresses that
27667 + * the node obtained from the ARP reply
27668 + */
27669 +- batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
27670 +- batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
27671 ++ batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
27672 ++ batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
27673 + }
27674 +
27675 + /**
27676 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
27677 +index e4f2646d9246..43d15d6c4b6a 100644
27678 +--- a/net/batman-adv/routing.c
27679 ++++ b/net/batman-adv/routing.c
27680 +@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
27681 + neigh_node = NULL;
27682 +
27683 + spin_lock_bh(&orig_node->neigh_list_lock);
27684 ++ /* curr_router used earlier may not be the current orig_ifinfo->router
27685 ++ * anymore because it was dereferenced outside of the neigh_list_lock
27686 ++ * protected region. After the new best neighbor has replace the current
27687 ++ * best neighbor the reference counter needs to decrease. Consequently,
27688 ++ * the code needs to ensure the curr_router variable contains a pointer
27689 ++ * to the replaced best neighbor.
27690 ++ */
27691 ++ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
27692 ++
27693 + rcu_assign_pointer(orig_ifinfo->router, neigh_node);
27694 + spin_unlock_bh(&orig_node->neigh_list_lock);
27695 + batadv_orig_ifinfo_free_ref(orig_ifinfo);
27696 +diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
27697 +index 782fa33ec296..45bfdefa15a5 100644
27698 +--- a/net/batman-adv/send.c
27699 ++++ b/net/batman-adv/send.c
27700 +@@ -629,6 +629,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
27701 +
27702 + if (pending) {
27703 + hlist_del(&forw_packet->list);
27704 ++ if (!forw_packet->own)
27705 ++ atomic_inc(&bat_priv->bcast_queue_left);
27706 ++
27707 + batadv_forw_packet_free(forw_packet);
27708 + }
27709 + }
27710 +@@ -656,6 +659,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
27711 +
27712 + if (pending) {
27713 + hlist_del(&forw_packet->list);
27714 ++ if (!forw_packet->own)
27715 ++ atomic_inc(&bat_priv->batman_queue_left);
27716 ++
27717 + batadv_forw_packet_free(forw_packet);
27718 + }
27719 + }
27720 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
27721 +index ac4d08de5df4..720f1a5b81ac 100644
27722 +--- a/net/batman-adv/soft-interface.c
27723 ++++ b/net/batman-adv/soft-interface.c
27724 +@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
27725 + */
27726 + nf_reset(skb);
27727 +
27728 ++ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
27729 ++ goto dropped;
27730 ++
27731 + vid = batadv_get_vid(skb, 0);
27732 + ethhdr = eth_hdr(skb);
27733 +
27734 + switch (ntohs(ethhdr->h_proto)) {
27735 + case ETH_P_8021Q:
27736 ++ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
27737 ++ goto dropped;
27738 ++
27739 + vhdr = (struct vlan_ethhdr *)skb->data;
27740 +
27741 + if (vhdr->h_vlan_encapsulated_proto != ethertype)
27742 +@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
27743 + }
27744 +
27745 + /* skb->dev & skb->pkt_type are set here */
27746 +- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
27747 +- goto dropped;
27748 + skb->protocol = eth_type_trans(skb, soft_iface);
27749 +
27750 + /* should not be necessary anymore as we use skb_pull_rcsum()
27751 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
27752 +index 5a5089cb6570..1363b8ffd89c 100644
27753 +--- a/net/bluetooth/mgmt.c
27754 ++++ b/net/bluetooth/mgmt.c
27755 +@@ -5979,6 +5979,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
27756 + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
27757 + MGMT_STATUS_INVALID_PARAMS);
27758 +
27759 ++ if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
27760 ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
27761 ++ MGMT_STATUS_INVALID_PARAMS);
27762 ++
27763 + flags = __le32_to_cpu(cp->flags);
27764 + timeout = __le16_to_cpu(cp->timeout);
27765 + duration = __le16_to_cpu(cp->duration);
27766 +diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
27767 +index 263b4de4de57..60a3dbfca8a1 100644
27768 +--- a/net/bridge/br_ioctl.c
27769 ++++ b/net/bridge/br_ioctl.c
27770 +@@ -21,18 +21,19 @@
27771 + #include <asm/uaccess.h>
27772 + #include "br_private.h"
27773 +
27774 +-/* called with RTNL */
27775 + static int get_bridge_ifindices(struct net *net, int *indices, int num)
27776 + {
27777 + struct net_device *dev;
27778 + int i = 0;
27779 +
27780 +- for_each_netdev(net, dev) {
27781 ++ rcu_read_lock();
27782 ++ for_each_netdev_rcu(net, dev) {
27783 + if (i >= num)
27784 + break;
27785 + if (dev->priv_flags & IFF_EBRIDGE)
27786 + indices[i++] = dev->ifindex;
27787 + }
27788 ++ rcu_read_unlock();
27789 +
27790 + return i;
27791 + }
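By switching to for_each_netdev_rcu() under rcu_read_lock(), the hunk above removes the function's dependence on the caller holding RTNL. A loose userspace analogue of the pattern, using a read-side lock around a bounded copy-out:

    #include <pthread.h>

    struct dev { int ifindex; int is_bridge; struct dev *next; };

    static pthread_rwlock_t devlock = PTHREAD_RWLOCK_INITIALIZER;
    static struct dev *devlist;

    /* Walk the shared list under a read lock (standing in for
     * rcu_read_lock()) and copy out at most num matching indices. */
    static int get_bridge_ifindices(int *indices, int num)
    {
        struct dev *d;
        int i = 0;

        pthread_rwlock_rdlock(&devlock);
        for (d = devlist; d && i < num; d = d->next)
            if (d->is_bridge)
                indices[i++] = d->ifindex;
        pthread_rwlock_unlock(&devlock);
        return i;
    }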
27792 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
27793 +index 03661d97463c..ea9893743a0f 100644
27794 +--- a/net/bridge/br_multicast.c
27795 ++++ b/net/bridge/br_multicast.c
27796 +@@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
27797 + struct br_ip saddr;
27798 + unsigned long max_delay;
27799 + unsigned long now = jiffies;
27800 ++ unsigned int offset = skb_transport_offset(skb);
27801 + __be32 group;
27802 + int err = 0;
27803 +
27804 +@@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
27805 +
27806 + group = ih->group;
27807 +
27808 +- if (skb->len == sizeof(*ih)) {
27809 ++ if (skb->len == offset + sizeof(*ih)) {
27810 + max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
27811 +
27812 + if (!max_delay) {
27813 + max_delay = 10 * HZ;
27814 + group = 0;
27815 + }
27816 +- } else if (skb->len >= sizeof(*ih3)) {
27817 ++ } else if (skb->len >= offset + sizeof(*ih3)) {
27818 + ih3 = igmpv3_query_hdr(skb);
27819 + if (ih3->nsrcs)
27820 + goto out;
27821 +@@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
27822 + struct br_ip saddr;
27823 + unsigned long max_delay;
27824 + unsigned long now = jiffies;
27825 ++ unsigned int offset = skb_transport_offset(skb);
27826 + const struct in6_addr *group = NULL;
27827 + bool is_general_query;
27828 + int err = 0;
27829 +@@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
27830 + (port && port->state == BR_STATE_DISABLED))
27831 + goto out;
27832 +
27833 +- if (skb->len == sizeof(*mld)) {
27834 +- if (!pskb_may_pull(skb, sizeof(*mld))) {
27835 ++ if (skb->len == offset + sizeof(*mld)) {
27836 ++ if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
27837 + err = -EINVAL;
27838 + goto out;
27839 + }
27840 +@@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
27841 + if (max_delay)
27842 + group = &mld->mld_mca;
27843 + } else {
27844 +- if (!pskb_may_pull(skb, sizeof(*mld2q))) {
27845 ++ if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
27846 + err = -EINVAL;
27847 + goto out;
27848 + }
27849 +diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
27850 +index b3cca126b103..e2670c5baafd 100644
27851 +--- a/net/bridge/br_stp.c
27852 ++++ b/net/bridge/br_stp.c
27853 +@@ -568,6 +568,14 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
27854 +
27855 + }
27856 +
27857 ++/* Set the time interval that dynamic forwarding entries live.
27858 ++ * For a pure software bridge, allow values outside the 802.1
27859 ++ * standard specification for special cases:
27860 ++ * 0 - entry never ages (all permanent)
27861 ++ * 1 - entry disappears (no persistence)
27862 ++ *
27863 ++ * Offloaded switch entries may be more restrictive.
27864 ++ */
27865 + int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
27866 + {
27867 + struct switchdev_attr attr = {
27868 +@@ -579,11 +587,8 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
27869 + unsigned long t = clock_t_to_jiffies(ageing_time);
27870 + int err;
27871 +
27872 +- if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
27873 +- return -ERANGE;
27874 +-
27875 + err = switchdev_port_attr_set(br->dev, &attr);
27876 +- if (err)
27877 ++ if (err && err != -EOPNOTSUPP)
27878 + return err;
27879 +
27880 + br->ageing_time = t;
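Besides documenting the 0 and 1 special cases, the hunk treats -EOPNOTSUPP from the optional switchdev offload as success: absent hardware must not break the software bridge. A small sketch of that error-handling pattern, with try_offload() standing in for switchdev_port_attr_set():

    #include <errno.h>

    static int try_offload(unsigned long val)
    {
        (void)val;
        return -EOPNOTSUPP;         /* stand-in: no hardware present */
    }

    static int set_ageing_time(unsigned long *ageing_time, unsigned long t)
    {
        int err = try_offload(t);

        if (err && err != -EOPNOTSUPP)
            return err;             /* a real offload failure aborts */

        *ageing_time = t;           /* software path always succeeds */
        return 0;
    }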
27881 +diff --git a/net/core/filter.c b/net/core/filter.c
27882 +index bba502f7cd57..fb2951c3532d 100644
27883 +--- a/net/core/filter.c
27884 ++++ b/net/core/filter.c
27885 +@@ -1147,7 +1147,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
27886 + }
27887 + EXPORT_SYMBOL_GPL(bpf_prog_destroy);
27888 +
27889 +-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
27890 ++static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
27891 ++ bool locked)
27892 + {
27893 + struct sk_filter *fp, *old_fp;
27894 +
27895 +@@ -1163,10 +1164,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
27896 + return -ENOMEM;
27897 + }
27898 +
27899 +- old_fp = rcu_dereference_protected(sk->sk_filter,
27900 +- sock_owned_by_user(sk));
27901 ++ old_fp = rcu_dereference_protected(sk->sk_filter, locked);
27902 + rcu_assign_pointer(sk->sk_filter, fp);
27903 +-
27904 + if (old_fp)
27905 + sk_filter_uncharge(sk, old_fp);
27906 +
27907 +@@ -1245,7 +1244,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
27908 + * occurs or there is insufficient memory for the filter a negative
27909 + * errno code is returned. On success the return is zero.
27910 + */
27911 +-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
27912 ++int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
27913 ++ bool locked)
27914 + {
27915 + struct bpf_prog *prog = __get_filter(fprog, sk);
27916 + int err;
27917 +@@ -1253,7 +1253,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
27918 + if (IS_ERR(prog))
27919 + return PTR_ERR(prog);
27920 +
27921 +- err = __sk_attach_prog(prog, sk);
27922 ++ err = __sk_attach_prog(prog, sk, locked);
27923 + if (err < 0) {
27924 + __bpf_prog_release(prog);
27925 + return err;
27926 +@@ -1261,7 +1261,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
27927 +
27928 + return 0;
27929 + }
27930 +-EXPORT_SYMBOL_GPL(sk_attach_filter);
27931 ++EXPORT_SYMBOL_GPL(__sk_attach_filter);
27932 ++
27933 ++int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
27934 ++{
27935 ++ return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
27936 ++}
27937 +
27938 + int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
27939 + {
27940 +@@ -1307,7 +1312,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
27941 + if (IS_ERR(prog))
27942 + return PTR_ERR(prog);
27943 +
27944 +- err = __sk_attach_prog(prog, sk);
27945 ++ err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
27946 + if (err < 0) {
27947 + bpf_prog_put(prog);
27948 + return err;
27949 +@@ -2105,7 +2110,7 @@ static int __init register_sk_filter_ops(void)
27950 + }
27951 + late_initcall(register_sk_filter_ops);
27952 +
27953 +-int sk_detach_filter(struct sock *sk)
27954 ++int __sk_detach_filter(struct sock *sk, bool locked)
27955 + {
27956 + int ret = -ENOENT;
27957 + struct sk_filter *filter;
27958 +@@ -2113,8 +2118,7 @@ int sk_detach_filter(struct sock *sk)
27959 + if (sock_flag(sk, SOCK_FILTER_LOCKED))
27960 + return -EPERM;
27961 +
27962 +- filter = rcu_dereference_protected(sk->sk_filter,
27963 +- sock_owned_by_user(sk));
27964 ++ filter = rcu_dereference_protected(sk->sk_filter, locked);
27965 + if (filter) {
27966 + RCU_INIT_POINTER(sk->sk_filter, NULL);
27967 + sk_filter_uncharge(sk, filter);
27968 +@@ -2123,7 +2127,12 @@ int sk_detach_filter(struct sock *sk)
27969 +
27970 + return ret;
27971 + }
27972 +-EXPORT_SYMBOL_GPL(sk_detach_filter);
27973 ++EXPORT_SYMBOL_GPL(__sk_detach_filter);
27974 ++
27975 ++int sk_detach_filter(struct sock *sk)
27976 ++{
27977 ++ return __sk_detach_filter(sk, sock_owned_by_user(sk));
27978 ++}
27979 +
27980 + int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
27981 + unsigned int len)
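The refactor above splits each entry point into a public wrapper that computes the lock state itself via sock_owned_by_user() and a double-underscore worker taking an explicit locked flag, so callers already holding the lock can say so. A rough userspace sketch of the wrapper/worker shape; note that in the kernel the flag only feeds the rcu_dereference_protected() lockdep check rather than conditionally taking the lock as here:

    #include <pthread.h>

    struct sock { pthread_mutex_t lock; void *filter; };

    static int __attach(struct sock *sk, void *prog, int locked)
    {
        if (!locked)
            pthread_mutex_lock(&sk->lock);
        sk->filter = prog;
        if (!locked)
            pthread_mutex_unlock(&sk->lock);
        return 0;
    }

    /* Public entry point: assumes the caller does not hold the lock. */
    static int attach(struct sock *sk, void *prog)
    {
        return __attach(sk, prog, 0);
    }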
27982 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
27983 +index 8261d95dd846..482c3717a45e 100644
27984 +--- a/net/core/rtnetlink.c
27985 ++++ b/net/core/rtnetlink.c
27986 +@@ -905,6 +905,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
27987 + + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
27988 + + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
27989 + + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
27990 ++ + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
27991 + + nla_total_size(1); /* IFLA_PROTO_DOWN */
27992 +
27993 + }
27994 +@@ -1175,14 +1176,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
27995 +
27996 + static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
27997 + {
27998 +- struct rtnl_link_ifmap map = {
27999 +- .mem_start = dev->mem_start,
28000 +- .mem_end = dev->mem_end,
28001 +- .base_addr = dev->base_addr,
28002 +- .irq = dev->irq,
28003 +- .dma = dev->dma,
28004 +- .port = dev->if_port,
28005 +- };
28006 ++ struct rtnl_link_ifmap map;
28007 ++
28008 ++ memset(&map, 0, sizeof(map));
28009 ++ map.mem_start = dev->mem_start;
28010 ++ map.mem_end = dev->mem_end;
28011 ++ map.base_addr = dev->base_addr;
28012 ++ map.irq = dev->irq;
28013 ++ map.dma = dev->dma;
28014 ++ map.port = dev->if_port;
28015 ++
28016 + if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
28017 + return -EMSGSIZE;
28018 +
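Replacing the designated initializer with memset() plus assignments is an infoleak fix: an initializer zeroes the named members but leaves the struct's padding bytes unspecified, and the whole object is then copied into a netlink message visible to userspace. Illustrative sketch; the struct layout here is invented (the real one is struct rtnl_link_ifmap):

    #include <string.h>

    struct map {
        unsigned long long mem_start;
        unsigned char port;         /* 7 padding bytes follow on LP64 */
    };

    void fill(struct map *m)
    {
        memset(m, 0, sizeof(*m));   /* clears the padding too */
        m->mem_start = 0xdeadbeefULL;
        m->port = 1;
        /* safe to copy sizeof(*m) bytes to an untrusted reader now */
    }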
28019 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
28020 +index 8616d1147c93..9835d9a8a7a4 100644
28021 +--- a/net/core/skbuff.c
28022 ++++ b/net/core/skbuff.c
28023 +@@ -4427,15 +4427,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
28024 + __skb_push(skb, offset);
28025 + err = __vlan_insert_tag(skb, skb->vlan_proto,
28026 + skb_vlan_tag_get(skb));
28027 +- if (err)
28028 ++ if (err) {
28029 ++ __skb_pull(skb, offset);
28030 + return err;
28031 ++ }
28032 ++
28033 + skb->protocol = skb->vlan_proto;
28034 + skb->mac_len += VLAN_HLEN;
28035 +- __skb_pull(skb, offset);
28036 +
28037 +- if (skb->ip_summed == CHECKSUM_COMPLETE)
28038 +- skb->csum = csum_add(skb->csum, csum_partial(skb->data
28039 +- + (2 * ETH_ALEN), VLAN_HLEN, 0));
28040 ++ skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
28041 ++ __skb_pull(skb, offset);
28042 + }
28043 + __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
28044 + return 0;
28045 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
28046 +index 902d606324a0..8be8f27bfacc 100644
28047 +--- a/net/dccp/ipv4.c
28048 ++++ b/net/dccp/ipv4.c
28049 +@@ -204,8 +204,6 @@ void dccp_req_err(struct sock *sk, u64 seq)
28050 + * ICMPs are not backlogged, hence we cannot get an established
28051 + * socket here.
28052 + */
28053 +- WARN_ON(req->sk);
28054 +-
28055 + if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
28056 + NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
28057 + } else {
28058 +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
28059 +index 607a14f20d88..b1dc096d22f8 100644
28060 +--- a/net/decnet/dn_route.c
28061 ++++ b/net/decnet/dn_route.c
28062 +@@ -1034,10 +1034,13 @@ source_ok:
28063 + if (!fld.daddr) {
28064 + fld.daddr = fld.saddr;
28065 +
28066 +- err = -EADDRNOTAVAIL;
28067 + if (dev_out)
28068 + dev_put(dev_out);
28069 ++ err = -EINVAL;
28070 + dev_out = init_net.loopback_dev;
28071 ++ if (!dev_out->dn_ptr)
28072 ++ goto out;
28073 ++ err = -EADDRNOTAVAIL;
28074 + dev_hold(dev_out);
28075 + if (!fld.daddr) {
28076 + fld.daddr =
28077 +@@ -1110,6 +1113,8 @@ source_ok:
28078 + if (dev_out == NULL)
28079 + goto out;
28080 + dn_db = rcu_dereference_raw(dev_out->dn_ptr);
28081 ++ if (!dn_db)
28082 ++ goto e_inval;
28083 + /* Possible improvement - check all devices for local addr */
28084 + if (dn_dev_islocal(dev_out, fld.daddr)) {
28085 + dev_put(dev_out);
28086 +@@ -1151,6 +1156,8 @@ select_source:
28087 + dev_put(dev_out);
28088 + dev_out = init_net.loopback_dev;
28089 + dev_hold(dev_out);
28090 ++ if (!dev_out->dn_ptr)
28091 ++ goto e_inval;
28092 + fld.flowidn_oif = dev_out->ifindex;
28093 + if (res.fi)
28094 + dn_fib_info_put(res.fi);
28095 +diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
28096 +index fa4daba8db55..d8fb47fcad05 100644
28097 +--- a/net/dsa/dsa.c
28098 ++++ b/net/dsa/dsa.c
28099 +@@ -935,6 +935,14 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
28100 + {
28101 + int i;
28102 +
28103 ++ dst->master_netdev->dsa_ptr = NULL;
28104 ++
28105 ++ /* If we used a tagging format that doesn't have an ethertype
28106 ++ * field, make sure that all packets from this point get sent
28107 ++ * without the tag and go through the regular receive path.
28108 ++ */
28109 ++ wmb();
28110 ++
28111 + for (i = 0; i < dst->pd->nr_chips; i++) {
28112 + struct dsa_switch *ds = dst->ds[i];
28113 +
28114 +@@ -988,14 +996,6 @@ static int dsa_suspend(struct device *d)
28115 + struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
28116 + int i, ret = 0;
28117 +
28118 +- dst->master_netdev->dsa_ptr = NULL;
28119 +-
28120 +- /* If we used a tagging format that doesn't have an ethertype
28121 +- * field, make sure that all packets from this point get sent
28122 +- * without the tag and go through the regular receive path.
28123 +- */
28124 +- wmb();
28125 +-
28126 + for (i = 0; i < dst->pd->nr_chips; i++) {
28127 + struct dsa_switch *ds = dst->ds[i];
28128 +
28129 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
28130 +index f6303b17546b..0212591b0077 100644
28131 +--- a/net/ipv4/devinet.c
28132 ++++ b/net/ipv4/devinet.c
28133 +@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
28134 +
28135 + ASSERT_RTNL();
28136 +
28137 ++ if (in_dev->dead)
28138 ++ goto no_promotions;
28139 ++
28140 + /* 1. Deleting primary ifaddr forces deletion all secondaries
28141 + * unless alias promotion is set
28142 + **/
28143 +@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
28144 + fib_del_ifaddr(ifa, ifa1);
28145 + }
28146 +
28147 ++no_promotions:
28148 + /* 2. Unlink it */
28149 +
28150 + *ifap = ifa1->ifa_next;
28151 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
28152 +index 473447593060..63566ec54794 100644
28153 +--- a/net/ipv4/fib_frontend.c
28154 ++++ b/net/ipv4/fib_frontend.c
28155 +@@ -280,7 +280,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
28156 + struct in_device *in_dev;
28157 + struct fib_result res;
28158 + struct rtable *rt;
28159 +- struct flowi4 fl4;
28160 + struct net *net;
28161 + int scope;
28162 +
28163 +@@ -296,14 +295,13 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
28164 +
28165 + scope = RT_SCOPE_UNIVERSE;
28166 + if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
28167 +- fl4.flowi4_oif = 0;
28168 +- fl4.flowi4_iif = LOOPBACK_IFINDEX;
28169 +- fl4.daddr = ip_hdr(skb)->saddr;
28170 +- fl4.saddr = 0;
28171 +- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
28172 +- fl4.flowi4_scope = scope;
28173 +- fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
28174 +- fl4.flowi4_tun_key.tun_id = 0;
28175 ++ struct flowi4 fl4 = {
28176 ++ .flowi4_iif = LOOPBACK_IFINDEX,
28177 ++ .daddr = ip_hdr(skb)->saddr,
28178 ++ .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
28179 ++ .flowi4_scope = scope,
28180 ++ .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
28181 ++ };
28182 + if (!fib_lookup(net, &fl4, &res, 0))
28183 + return FIB_RES_PREFSRC(net, res);
28184 + } else {
28185 +@@ -906,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
28186 + if (ifa->ifa_flags & IFA_F_SECONDARY) {
28187 + prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
28188 + if (!prim) {
28189 +- pr_warn("%s: bug: prim == NULL\n", __func__);
28190 ++ /* if the device has been deleted, we don't perform
28191 ++ * address promotion
28192 ++ */
28193 ++ if (!in_dev->dead)
28194 ++ pr_warn("%s: bug: prim == NULL\n", __func__);
28195 + return;
28196 + }
28197 + if (iprim && iprim != prim) {
28198 +@@ -922,6 +924,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
28199 + subnet = 1;
28200 + }
28201 +
28202 ++ if (in_dev->dead)
28203 ++ goto no_promotions;
28204 ++
28205 + /* Deletion is more complicated than add.
28206 + * We should take care of not to delete too much :-)
28207 + *
28208 +@@ -997,6 +1002,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
28209 + }
28210 + }
28211 +
28212 ++no_promotions:
28213 + if (!(ok & BRD_OK))
28214 + fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
28215 + if (subnet && ifa->ifa_prefixlen < 31) {
28216 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
28217 +index d97268e8ff10..2b68418c7198 100644
28218 +--- a/net/ipv4/fib_semantics.c
28219 ++++ b/net/ipv4/fib_semantics.c
28220 +@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
28221 + val = 65535 - 40;
28222 + if (type == RTAX_MTU && val > 65535 - 15)
28223 + val = 65535 - 15;
28224 ++ if (type == RTAX_HOPLIMIT && val > 255)
28225 ++ val = 255;
28226 + if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
28227 + return -EINVAL;
28228 + fi->fib_metrics[type - 1] = val;
28229 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
28230 +index 41ba68de46d8..d0c1e7766214 100644
28231 +--- a/net/ipv4/ip_gre.c
28232 ++++ b/net/ipv4/ip_gre.c
28233 +@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
28234 + return flags;
28235 + }
28236 +
28237 ++/* Fills in tpi and returns header length to be pulled. */
28238 + static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
28239 + bool *csum_err)
28240 + {
28241 +@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
28242 + return -EINVAL;
28243 + }
28244 + }
28245 +- return iptunnel_pull_header(skb, hdr_len, tpi->proto);
28246 ++ return hdr_len;
28247 + }
28248 +
28249 + static void ipgre_err(struct sk_buff *skb, u32 info,
28250 +@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
28251 + struct tnl_ptk_info tpi;
28252 + bool csum_err = false;
28253 +
28254 +- if (parse_gre_header(skb, &tpi, &csum_err)) {
28255 ++ if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
28256 + if (!csum_err) /* ignore csum errors. */
28257 + return;
28258 + }
28259 +@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
28260 + {
28261 + struct tnl_ptk_info tpi;
28262 + bool csum_err = false;
28263 ++ int hdr_len;
28264 +
28265 + #ifdef CONFIG_NET_IPGRE_BROADCAST
28266 + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
28267 +@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
28268 + }
28269 + #endif
28270 +
28271 +- if (parse_gre_header(skb, &tpi, &csum_err) < 0)
28272 ++ hdr_len = parse_gre_header(skb, &tpi, &csum_err);
28273 ++ if (hdr_len < 0)
28274 ++ goto drop;
28275 ++ if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
28276 + goto drop;
28277 +
28278 + if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
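parse_gre_header() now only reports the header length (or a negative error) and the caller decides when to pull; that lets gre_err(), which must not consume the packet, share the parser with gre_rcv(). A generic parse/consume split in plain C, with the length computation invented for illustration:

    #include <stddef.h>

    struct buf { const unsigned char *data; size_t len; };

    /* Parse: report how long the header is, touch nothing. */
    static int parse_hdr(const struct buf *b)
    {
        if (b->len < 4)
            return -1;                      /* malformed */
        return 4 + (b->data[0] & 0x3);      /* invented length rule */
    }

    /* Consume: only now advance past the header. */
    static int consume(struct buf *b, int hdr_len)
    {
        if (hdr_len < 0 || (size_t)hdr_len > b->len)
            return -1;
        b->data += hdr_len;
        b->len -= hdr_len;
        return 0;
    }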
28279 +diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
28280 +index c6eb42100e9a..ea91058b5f6f 100644
28281 +--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
28282 ++++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
28283 +@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
28284 + unsigned long event,
28285 + void *ptr)
28286 + {
28287 +- struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
28288 ++ struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
28289 + struct netdev_notifier_info info;
28290 +
28291 +- netdev_notifier_info_init(&info, dev);
28292 ++ /* The masq_dev_notifier will catch the case of the device going
28293 ++ * down. So if the inetdev is dead and being destroyed we have
28294 ++ * no work to do. Otherwise this is an individual address removal
28295 ++ * and we have to perform the flush.
28296 ++ */
28297 ++ if (idev->dead)
28298 ++ return NOTIFY_DONE;
28299 ++
28300 ++ netdev_notifier_info_init(&info, idev->dev);
28301 + return masq_device_event(this, event, &info);
28302 + }
28303 +
28304 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
28305 +index 02c62299d717..b050cf980a57 100644
28306 +--- a/net/ipv4/route.c
28307 ++++ b/net/ipv4/route.c
28308 +@@ -2045,6 +2045,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
28309 + */
28310 + if (fi && res->prefixlen < 4)
28311 + fi = NULL;
28312 ++ } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
28313 ++ (orig_oif != dev_out->ifindex)) {
28314 ++ /* For local routes that require a particular output interface
28315 ++ * we do not want to cache the result. Caching the result
28316 ++ * causes incorrect behaviour when there are multiple source
28317 ++ * addresses on the interface: if the intended recipient is
28318 ++ * waiting on that interface for the packet, it won't receive it
28319 ++ * because the packet will be delivered on
28320 ++ * the loopback interface and the IP_PKTINFO ipi_ifindex will
28321 ++ * be set to the loopback interface as well.
28322 ++ */
28323 ++ fi = NULL;
28324 + }
28325 +
28326 + fnhe = NULL;
28327 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
28328 +index 487ac67059e2..a7b1a905580b 100644
28329 +--- a/net/ipv4/tcp_ipv4.c
28330 ++++ b/net/ipv4/tcp_ipv4.c
28331 +@@ -319,8 +319,6 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
28332 + /* ICMPs are not backlogged, hence we cannot get
28333 + * an established socket here.
28334 + */
28335 +- WARN_ON(req->sk);
28336 +-
28337 + if (seq != tcp_rsk(req)->snt_isn) {
28338 + NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
28339 + } else if (abort) {
28340 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
28341 +index fda379cd600d..b6876f2f4cf2 100644
28342 +--- a/net/ipv4/tcp_output.c
28343 ++++ b/net/ipv4/tcp_output.c
28344 +@@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
28345 + */
28346 + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
28347 + skb_headroom(skb) >= 0xFFFF)) {
28348 +- struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
28349 +- GFP_ATOMIC);
28350 ++ struct sk_buff *nskb;
28351 ++
28352 ++ skb_mstamp_get(&skb->skb_mstamp);
28353 ++ nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
28354 + err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
28355 + -ENOBUFS;
28356 + } else {
28357 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
28358 +index 95d2f198017e..56218ff388c7 100644
28359 +--- a/net/ipv4/udp.c
28360 ++++ b/net/ipv4/udp.c
28361 +@@ -339,8 +339,13 @@ found:
28362 +
28363 + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
28364 + spin_lock(&hslot2->lock);
28365 +- hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
28366 +- &hslot2->head);
28367 ++ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
28368 ++ sk->sk_family == AF_INET6)
28369 ++ hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
28370 ++ &hslot2->head);
28371 ++ else
28372 ++ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
28373 ++ &hslot2->head);
28374 + hslot2->count++;
28375 + spin_unlock(&hslot2->lock);
28376 + }
28377 +@@ -2082,10 +2087,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
28378 + if (!in_dev)
28379 + return;
28380 +
28381 +- ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
28382 +- iph->protocol);
28383 +- if (!ours)
28384 +- return;
28385 ++ /* we are supposed to accept bcast packets */
28386 ++ if (skb->pkt_type == PACKET_MULTICAST) {
28387 ++ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
28388 ++ iph->protocol);
28389 ++ if (!ours)
28390 ++ return;
28391 ++ }
28392 ++
28393 + sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
28394 + uh->source, iph->saddr, dif);
28395 + } else if (skb->pkt_type == PACKET_HOST) {
28396 +diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
28397 +index 2ae3c4fd8aab..41f18de5dcc2 100644
28398 +--- a/net/ipv6/ila/ila_lwt.c
28399 ++++ b/net/ipv6/ila/ila_lwt.c
28400 +@@ -120,8 +120,7 @@ nla_put_failure:
28401 +
28402 + static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
28403 + {
28404 +- /* No encapsulation overhead */
28405 +- return 0;
28406 ++ return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
28407 + }
28408 +
28409 + static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
28410 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
28411 +index a163102f1803..2a6606c935cc 100644
28412 +--- a/net/ipv6/ip6_output.c
28413 ++++ b/net/ipv6/ip6_output.c
28414 +@@ -1091,8 +1091,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
28415 + int getfrag(void *from, char *to, int offset, int len,
28416 + int odd, struct sk_buff *skb),
28417 + void *from, int length, int hh_len, int fragheaderlen,
28418 +- int transhdrlen, int mtu, unsigned int flags,
28419 +- const struct flowi6 *fl6)
28420 ++ int exthdrlen, int transhdrlen, int mtu,
28421 ++ unsigned int flags, const struct flowi6 *fl6)
28422 +
28423 + {
28424 + struct sk_buff *skb;
28425 +@@ -1117,7 +1117,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
28426 + skb_put(skb, fragheaderlen + transhdrlen);
28427 +
28428 + /* initialize network header pointer */
28429 +- skb_reset_network_header(skb);
28430 ++ skb_set_network_header(skb, exthdrlen);
28431 +
28432 + /* initialize protocol header pointer */
28433 + skb->transport_header = skb->network_header + fragheaderlen;
28434 +@@ -1359,7 +1359,7 @@ emsgsize:
28435 + (rt->dst.dev->features & NETIF_F_UFO) &&
28436 + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
28437 + err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
28438 +- hh_len, fragheaderlen,
28439 ++ hh_len, fragheaderlen, exthdrlen,
28440 + transhdrlen, mtu, flags, fl6);
28441 + if (err)
28442 + goto error;
28443 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
28444 +index 6c5dfec7a377..3991b21e24ad 100644
28445 +--- a/net/ipv6/ip6_tunnel.c
28446 ++++ b/net/ipv6/ip6_tunnel.c
28447 +@@ -343,12 +343,12 @@ static int ip6_tnl_create2(struct net_device *dev)
28448 +
28449 + t = netdev_priv(dev);
28450 +
28451 ++ dev->rtnl_link_ops = &ip6_link_ops;
28452 + err = register_netdevice(dev);
28453 + if (err < 0)
28454 + goto out;
28455 +
28456 + strcpy(t->parms.name, dev->name);
28457 +- dev->rtnl_link_ops = &ip6_link_ops;
28458 +
28459 + dev_hold(dev);
28460 + ip6_tnl_link(ip6n, t);
28461 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
28462 +index 18f3498a6c80..e2ea31175ef9 100644
28463 +--- a/net/ipv6/reassembly.c
28464 ++++ b/net/ipv6/reassembly.c
28465 +@@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
28466 + IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
28467 +
28468 + /* Yes, and fold redundant checksum back. 8) */
28469 +- if (head->ip_summed == CHECKSUM_COMPLETE)
28470 +- head->csum = csum_partial(skb_network_header(head),
28471 +- skb_network_header_len(head),
28472 +- head->csum);
28473 ++ skb_postpush_rcsum(head, skb_network_header(head),
28474 ++ skb_network_header_len(head));
28475 +
28476 + rcu_read_lock();
28477 + IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
28478 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
28479 +index ed446639219c..18e29e2f8877 100644
28480 +--- a/net/ipv6/route.c
28481 ++++ b/net/ipv6/route.c
28482 +@@ -1737,6 +1737,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
28483 + } else {
28484 + val = nla_get_u32(nla);
28485 + }
28486 ++ if (type == RTAX_HOPLIMIT && val > 255)
28487 ++ val = 255;
28488 + if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
28489 + goto err;
28490 +
28491 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
28492 +index 422dd014aa2c..6794120f53b8 100644
28493 +--- a/net/ipv6/udp.c
28494 ++++ b/net/ipv6/udp.c
28495 +@@ -883,8 +883,8 @@ start_lookup:
28496 + flush_stack(stack, count, skb, count - 1);
28497 + } else {
28498 + if (!inner_flushed)
28499 +- UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
28500 +- proto == IPPROTO_UDPLITE);
28501 ++ UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
28502 ++ proto == IPPROTO_UDPLITE);
28503 + consume_skb(skb);
28504 + }
28505 + return 0;
28506 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
28507 +index ec22078b0914..42de4ccd159f 100644
28508 +--- a/net/l2tp/l2tp_ip.c
28509 ++++ b/net/l2tp/l2tp_ip.c
28510 +@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
28511 + struct l2tp_tunnel *tunnel = NULL;
28512 + int length;
28513 +
28514 +- /* Point to L2TP header */
28515 +- optr = ptr = skb->data;
28516 +-
28517 + if (!pskb_may_pull(skb, 4))
28518 + goto discard;
28519 +
28520 ++ /* Point to L2TP header */
28521 ++ optr = ptr = skb->data;
28522 + session_id = ntohl(*((__be32 *) ptr));
28523 + ptr += 4;
28524 +
28525 +@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
28526 + if (!pskb_may_pull(skb, length))
28527 + goto discard;
28528 +
28529 ++ /* Point to L2TP header */
28530 ++ optr = ptr = skb->data;
28531 ++ ptr += 4;
28532 + pr_debug("%s: ip recv\n", tunnel->name);
28533 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
28534 + }
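The reordering above matters because pskb_may_pull() may reallocate the skb head, leaving a previously cached skb->data pointer dangling; the fix re-derives optr/ptr after every pull (the l2tp_ip6.c hunk below gets the same treatment). The same hazard exists in userspace with realloc():

    #include <stdlib.h>
    #include <string.h>

    /* Grow a buffer and zero the new tail.  Caching a cursor into
     * buf before realloc() (like caching skb->data before
     * pskb_may_pull()) would dangle if the block moved; always
     * re-derive from the new base. */
    char *grow_zeroed(char *buf, size_t oldlen, size_t newlen)
    {
        char *nbuf = realloc(buf, newlen);

        if (!nbuf)
            return NULL;
        memset(nbuf + oldlen, 0, newlen - oldlen);
        return nbuf;
    }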
28535 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
28536 +index a2c8747d2936..9ee4ddb6b397 100644
28537 +--- a/net/l2tp/l2tp_ip6.c
28538 ++++ b/net/l2tp/l2tp_ip6.c
28539 +@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
28540 + struct l2tp_tunnel *tunnel = NULL;
28541 + int length;
28542 +
28543 +- /* Point to L2TP header */
28544 +- optr = ptr = skb->data;
28545 +-
28546 + if (!pskb_may_pull(skb, 4))
28547 + goto discard;
28548 +
28549 ++ /* Point to L2TP header */
28550 ++ optr = ptr = skb->data;
28551 + session_id = ntohl(*((__be32 *) ptr));
28552 + ptr += 4;
28553 +
28554 +@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
28555 + if (!pskb_may_pull(skb, length))
28556 + goto discard;
28557 +
28558 ++ /* Point to L2TP header */
28559 ++ optr = ptr = skb->data;
28560 ++ ptr += 4;
28561 + pr_debug("%s: ip recv\n", tunnel->name);
28562 + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
28563 + }
28564 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
28565 +index 8dab4e569571..bb8edb9ef506 100644
28566 +--- a/net/llc/af_llc.c
28567 ++++ b/net/llc/af_llc.c
28568 +@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
28569 + if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
28570 + struct llc_pktinfo info;
28571 +
28572 ++ memset(&info, 0, sizeof(info));
28573 + info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
28574 + llc_pdu_decode_dsap(skb, &info.lpi_sap);
28575 + llc_pdu_decode_da(skb, info.lpi_mac);
28576 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
28577 +index 978d3bc31df7..1b33d89906e1 100644
28578 +--- a/net/mac80211/ibss.c
28579 ++++ b/net/mac80211/ibss.c
28580 +@@ -7,6 +7,7 @@
28581 + * Copyright 2007, Michael Wu <flamingice@××××××××.net>
28582 + * Copyright 2009, Johannes Berg <johannes@××××××××××××.net>
28583 + * Copyright 2013-2014 Intel Mobile Communications GmbH
28584 ++ * Copyright(c) 2016 Intel Deutschland GmbH
28585 + *
28586 + * This program is free software; you can redistribute it and/or modify
28587 + * it under the terms of the GNU General Public License version 2 as
28588 +@@ -1485,14 +1486,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
28589 +
28590 + sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
28591 +
28592 +- num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
28593 +- &ifibss->chandef,
28594 +- channels,
28595 +- ARRAY_SIZE(channels));
28596 + scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
28597 +- ieee80211_request_ibss_scan(sdata, ifibss->ssid,
28598 +- ifibss->ssid_len, channels, num,
28599 +- scan_width);
28600 ++
28601 ++ if (ifibss->fixed_channel) {
28602 ++ num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
28603 ++ &ifibss->chandef,
28604 ++ channels,
28605 ++ ARRAY_SIZE(channels));
28606 ++ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
28607 ++ ifibss->ssid_len, channels,
28608 ++ num, scan_width);
28609 ++ } else {
28610 ++ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
28611 ++ ifibss->ssid_len, NULL,
28612 ++ 0, scan_width);
28613 ++ }
28614 + } else {
28615 + int interval = IEEE80211_SCAN_INTERVAL;
28616 +
28617 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
28618 +index c9e325d2e120..bcb0a1b64556 100644
28619 +--- a/net/mac80211/iface.c
28620 ++++ b/net/mac80211/iface.c
28621 +@@ -977,7 +977,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
28622 + if (sdata->vif.txq) {
28623 + struct txq_info *txqi = to_txq_info(sdata->vif.txq);
28624 +
28625 ++ spin_lock_bh(&txqi->queue.lock);
28626 + ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
28627 ++ spin_unlock_bh(&txqi->queue.lock);
28628 ++
28629 + atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
28630 + }
28631 +
28632 +@@ -1747,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
28633 +
28634 + ret = dev_alloc_name(ndev, ndev->name);
28635 + if (ret < 0) {
28636 +- free_netdev(ndev);
28637 ++ ieee80211_if_free(ndev);
28638 + return ret;
28639 + }
28640 +
28641 +@@ -1833,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
28642 +
28643 + ret = register_netdevice(ndev);
28644 + if (ret) {
28645 +- free_netdev(ndev);
28646 ++ ieee80211_if_free(ndev);
28647 + return ret;
28648 + }
28649 + }
28650 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
28651 +index 60d093f40f1d..261df74eaf38 100644
28652 +--- a/net/mac80211/rx.c
28653 ++++ b/net/mac80211/rx.c
28654 +@@ -2249,7 +2249,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
28655 + struct ieee80211_local *local = rx->local;
28656 + struct ieee80211_sub_if_data *sdata = rx->sdata;
28657 + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
28658 +- u16 q, hdrlen;
28659 ++ u16 ac, q, hdrlen;
28660 +
28661 + hdr = (struct ieee80211_hdr *) skb->data;
28662 + hdrlen = ieee80211_hdrlen(hdr->frame_control);
28663 +@@ -2318,7 +2318,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
28664 + ether_addr_equal(sdata->vif.addr, hdr->addr3))
28665 + return RX_CONTINUE;
28666 +
28667 +- q = ieee80211_select_queue_80211(sdata, skb, hdr);
28668 ++ ac = ieee80211_select_queue_80211(sdata, skb, hdr);
28669 ++ q = sdata->vif.hw_queue[ac];
28670 + if (ieee80211_queue_stopped(&local->hw, q)) {
28671 + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
28672 + return RX_DROP_MONITOR;
28673 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
28674 +index a4a4f89d3ba0..23ed038cf7f9 100644
28675 +--- a/net/mac80211/sta_info.c
28676 ++++ b/net/mac80211/sta_info.c
28677 +@@ -257,11 +257,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
28678 + }
28679 +
28680 + /* Caller must hold local->sta_mtx */
28681 +-static void sta_info_hash_add(struct ieee80211_local *local,
28682 +- struct sta_info *sta)
28683 ++static int sta_info_hash_add(struct ieee80211_local *local,
28684 ++ struct sta_info *sta)
28685 + {
28686 +- rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
28687 +- sta_rht_params);
28688 ++ return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
28689 ++ sta_rht_params);
28690 + }
28691 +
28692 + static void sta_deliver_ps_frames(struct work_struct *wk)
28693 +@@ -498,11 +498,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
28694 + {
28695 + struct ieee80211_local *local = sta->local;
28696 + struct ieee80211_sub_if_data *sdata = sta->sdata;
28697 +- struct station_info sinfo;
28698 ++ struct station_info *sinfo;
28699 + int err = 0;
28700 +
28701 + lockdep_assert_held(&local->sta_mtx);
28702 +
28703 ++ sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
28704 ++ if (!sinfo) {
28705 ++ err = -ENOMEM;
28706 ++ goto out_err;
28707 ++ }
28708 ++
28709 + /* check if STA exists already */
28710 + if (sta_info_get_bss(sdata, sta->sta.addr)) {
28711 + err = -EEXIST;
28712 +@@ -517,7 +523,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
28713 + set_sta_flag(sta, WLAN_STA_BLOCK_BA);
28714 +
28715 + /* make the station visible */
28716 +- sta_info_hash_add(local, sta);
28717 ++ err = sta_info_hash_add(local, sta);
28718 ++ if (err)
28719 ++ goto out_drop_sta;
28720 +
28721 + list_add_tail_rcu(&sta->list, &local->sta_list);
28722 +
28723 +@@ -534,10 +542,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
28724 + ieee80211_sta_debugfs_add(sta);
28725 + rate_control_add_sta_debugfs(sta);
28726 +
28727 +- memset(&sinfo, 0, sizeof(sinfo));
28728 +- sinfo.filled = 0;
28729 +- sinfo.generation = local->sta_generation;
28730 +- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
28731 ++ sinfo->generation = local->sta_generation;
28732 ++ cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
28733 ++ kfree(sinfo);
28734 +
28735 + sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
28736 +
28737 +@@ -552,6 +559,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
28738 + out_remove:
28739 + sta_info_hash_del(local, sta);
28740 + list_del_rcu(&sta->list);
28741 ++ out_drop_sta:
28742 + local->num_sta--;
28743 + synchronize_net();
28744 + __cleanup_single_sta(sta);
28745 +@@ -898,7 +906,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
28746 + {
28747 + struct ieee80211_local *local = sta->local;
28748 + struct ieee80211_sub_if_data *sdata = sta->sdata;
28749 +- struct station_info sinfo = {};
28750 ++ struct station_info *sinfo;
28751 + int ret;
28752 +
28753 + /*
28754 +@@ -936,8 +944,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
28755 +
28756 + sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
28757 +
28758 +- sta_set_sinfo(sta, &sinfo);
28759 +- cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
28760 ++ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
28761 ++ if (sinfo)
28762 ++ sta_set_sinfo(sta, sinfo);
28763 ++ cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
28764 ++ kfree(sinfo);
28765 +
28766 + rate_control_remove_sta_debugfs(sta);
28767 + ieee80211_sta_debugfs_remove(sta);
28768 +diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
28769 +index b18c5ed42d95..0b80a7140cc4 100644
28770 +--- a/net/mpls/af_mpls.c
28771 ++++ b/net/mpls/af_mpls.c
28772 +@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
28773 + if (!dev)
28774 + return ERR_PTR(-ENODEV);
28775 +
28776 ++ if (IS_ERR(dev))
28777 ++ return dev;
28778 ++
28779 + /* The caller is holding rtnl anyways, so release the dev reference */
28780 + dev_put(dev);
28781 +
28782 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
28783 +index f57b4dcdb233..4da560005b0e 100644
28784 +--- a/net/netfilter/ipvs/ip_vs_core.c
28785 ++++ b/net/netfilter/ipvs/ip_vs_core.c
28786 +@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
28787 + cp = pp->conn_in_get(ipvs, af, skb, &iph);
28788 +
28789 + conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
28790 +- if (conn_reuse_mode && !iph.fragoffs &&
28791 +- is_new_conn(skb, &iph) && cp &&
28792 +- ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
28793 +- unlikely(!atomic_read(&cp->dest->weight))) ||
28794 +- unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
28795 +- if (!atomic_read(&cp->n_control))
28796 +- ip_vs_conn_expire_now(cp);
28797 +- __ip_vs_conn_put(cp);
28798 +- cp = NULL;
28799 ++ if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
28800 ++ bool uses_ct = false, resched = false;
28801 ++
28802 ++ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
28803 ++ unlikely(!atomic_read(&cp->dest->weight))) {
28804 ++ resched = true;
28805 ++ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
28806 ++ } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
28807 ++ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
28808 ++ if (!atomic_read(&cp->n_control)) {
28809 ++ resched = true;
28810 ++ } else {
28811 ++ /* Do not reschedule controlling connection
28812 ++ * that uses conntrack while it is still
28813 ++ * referenced by controlled connection(s).
28814 ++ */
28815 ++ resched = !uses_ct;
28816 ++ }
28817 ++ }
28818 ++
28819 ++ if (resched) {
28820 ++ if (!atomic_read(&cp->n_control))
28821 ++ ip_vs_conn_expire_now(cp);
28822 ++ __ip_vs_conn_put(cp);
28823 ++ if (uses_ct)
28824 ++ return NF_DROP;
28825 ++ cp = NULL;
28826 ++ }
28827 + }
28828 +
28829 + if (unlikely(!cp)) {
28830 +diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
28831 +index 1b8d594e493a..0a6eb5c0d9e9 100644
28832 +--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
28833 ++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
28834 +@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
28835 + const char *dptr;
28836 + int retc;
28837 +
28838 +- ip_vs_fill_iph_skb(p->af, skb, false, &iph);
28839 ++ retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
28840 +
28841 + /* Only useful with UDP */
28842 +- if (iph.protocol != IPPROTO_UDP)
28843 ++ if (!retc || iph.protocol != IPPROTO_UDP)
28844 + return -EINVAL;
28845 + /* todo: IPv6 fragments:
28846 + * I think this only should be done for the first fragment. /HS
28847 +@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
28848 + dptr = skb->data + dataoff;
28849 + datalen = skb->len - dataoff;
28850 +
28851 +- if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
28852 ++ if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
28853 + return -EINVAL;
28854 +
28855 + /* N.B: pe_data is only set on success,
28856 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
28857 +index f60b4fdeeb8c..6b94f0bc11b8 100644
28858 +--- a/net/netfilter/nf_conntrack_core.c
28859 ++++ b/net/netfilter/nf_conntrack_core.c
28860 +@@ -1780,6 +1780,7 @@ void nf_conntrack_init_end(void)
28861 +
28862 + int nf_conntrack_init_net(struct net *net)
28863 + {
28864 ++ static atomic64_t unique_id;
28865 + int ret = -ENOMEM;
28866 + int cpu;
28867 +
28868 +@@ -1802,7 +1803,8 @@ int nf_conntrack_init_net(struct net *net)
28869 + if (!net->ct.stat)
28870 + goto err_pcpu_lists;
28871 +
28872 +- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
28873 ++ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
28874 ++ (u64)atomic64_inc_return(&unique_id));
28875 + if (!net->ct.slabname)
28876 + goto err_slabname;
28877 +
28878 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
28879 +index f1ffb34e253f..d2bc03f0b4d7 100644
28880 +--- a/net/netlink/af_netlink.c
28881 ++++ b/net/netlink/af_netlink.c
28882 +@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
28883 +
28884 + skb_queue_purge(&sk->sk_write_queue);
28885 +
28886 +- if (nlk->portid) {
28887 ++ if (nlk->portid && nlk->bound) {
28888 + struct netlink_notify n = {
28889 + .net = sock_net(sk),
28890 + .protocol = sk->sk_protocol,
28891 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
28892 +index 2d59df521915..879185fe183f 100644
28893 +--- a/net/openvswitch/actions.c
28894 ++++ b/net/openvswitch/actions.c
28895 +@@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
28896 + new_mpls_lse = (__be32 *)skb_mpls_header(skb);
28897 + *new_mpls_lse = mpls->mpls_lse;
28898 +
28899 +- if (skb->ip_summed == CHECKSUM_COMPLETE)
28900 +- skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
28901 +- MPLS_HLEN, 0));
28902 ++ skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
28903 +
28904 + hdr = eth_hdr(skb);
28905 + hdr->h_proto = mpls->mpls_ethertype;
28906 +@@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
28907 + ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
28908 + mask->eth_dst);
28909 +
28910 +- ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
28911 ++ skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
28912 +
28913 + ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
28914 + ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
28915 +@@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
28916 + mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
28917 +
28918 + if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
28919 +- set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
28920 ++ set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
28921 + true);
28922 + memcpy(&flow_key->ipv6.addr.src, masked,
28923 + sizeof(flow_key->ipv6.addr.src));
28924 +@@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
28925 + NULL, &flags)
28926 + != NEXTHDR_ROUTING);
28927 +
28928 +- set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
28929 ++ set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
28930 + recalc_csum);
28931 + memcpy(&flow_key->ipv6.addr.dst, masked,
28932 + sizeof(flow_key->ipv6.addr.dst));
28933 +@@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
28934 + /* Reconstruct the MAC header. */
28935 + skb_push(skb, data->l2_len);
28936 + memcpy(skb->data, &data->l2_data, data->l2_len);
28937 +- ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
28938 ++ skb_postpush_rcsum(skb, skb->data, data->l2_len);
28939 + skb_reset_mac_header(skb);
28940 +
28941 + ovs_vport_send(vport, skb);
28942 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
28943 +index ee6ff8ffc12d..9e5b9fc805fb 100644
28944 +--- a/net/openvswitch/conntrack.c
28945 ++++ b/net/openvswitch/conntrack.c
28946 +@@ -320,6 +320,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
28947 + } else if (key->eth.type == htons(ETH_P_IPV6)) {
28948 + enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
28949 +
28950 ++ skb_orphan(skb);
28951 + memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
28952 + err = nf_ct_frag6_gather(net, skb, user);
28953 + if (err)
28954 +diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
28955 +index 6a6adf314363..4e3972344aa6 100644
28956 +--- a/net/openvswitch/vport-netdev.c
28957 ++++ b/net/openvswitch/vport-netdev.c
28958 +@@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
28959 + return;
28960 +
28961 + skb_push(skb, ETH_HLEN);
28962 +- ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
28963 ++ skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
28964 + ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
28965 + return;
28966 + error:
28967 +diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
28968 +index c10899cb9040..f01f28a567ad 100644
28969 +--- a/net/openvswitch/vport.h
28970 ++++ b/net/openvswitch/vport.h
28971 +@@ -185,13 +185,6 @@ static inline struct vport *vport_from_priv(void *priv)
28972 + int ovs_vport_receive(struct vport *, struct sk_buff *,
28973 + const struct ip_tunnel_info *);
28974 +
28975 +-static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
28976 +- const void *start, unsigned int len)
28977 +-{
28978 +- if (skb->ip_summed == CHECKSUM_COMPLETE)
28979 +- skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
28980 +-}
28981 +-
28982 + static inline const char *ovs_vport_name(struct vport *vport)
28983 + {
28984 + return vport->dev->name;
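The openvswitch call sites above switch from the private ovs_skb_postpush_rcsum() (removed here) to the generic skb_postpush_rcsum() helper; as the removed body shows, both fold the ones'-complement sum of the freshly pushed bytes into skb->csum when the skb carries CHECKSUM_COMPLETE. A self-contained user-space model of that arithmetic - the kernel's csum_partial()/csum_add() are arch-optimized and operate on __wsum, but the folding logic is the same:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t csum_partial(const void *buf, size_t len, uint32_t sum)
    {
            const uint8_t *p = buf;

            while (len > 1) {
                    sum += (uint32_t)p[0] << 8 | p[1];  /* 16-bit words */
                    p += 2;
                    len -= 2;
            }
            if (len)
                    sum += (uint32_t)p[0] << 8;         /* trailing byte */
            while (sum >> 16)                           /* end-around carry */
                    sum = (sum & 0xffff) + (sum >> 16);
            return sum;
    }

    static uint32_t csum_add(uint32_t a, uint32_t b)
    {
            uint32_t s = a + b;

            return (s & 0xffff) + (s >> 16);            /* fold carry */
    }

    int main(void)
    {
            uint8_t hdr[4] = { 0x12, 0x34, 0x56, 0x78 }; /* pushed bytes */
            uint32_t skb_csum = 0x0abc;                  /* running sum */

            skb_csum = csum_add(skb_csum, csum_partial(hdr, sizeof(hdr), 0));
            printf("updated csum: 0x%04x\n", skb_csum);  /* 0x7368 */
            return 0;
    }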
28985 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
28986 +index 992396aa635c..9cc7b512b472 100644
28987 +--- a/net/packet/af_packet.c
28988 ++++ b/net/packet/af_packet.c
28989 +@@ -1916,6 +1916,10 @@ retry:
28990 + goto retry;
28991 + }
28992 +
28993 ++ if (!dev_validate_header(dev, skb->data, len)) {
28994 ++ err = -EINVAL;
28995 ++ goto out_unlock;
28996 ++ }
28997 + if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
28998 + !packet_extra_vlan_len_allowed(dev, skb)) {
28999 + err = -EMSGSIZE;
29000 +@@ -2326,18 +2330,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
29001 + sock_wfree(skb);
29002 + }
29003 +
29004 +-static bool ll_header_truncated(const struct net_device *dev, int len)
29005 +-{
29006 +- /* net device doesn't like empty head */
29007 +- if (unlikely(len < dev->hard_header_len)) {
29008 +- net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
29009 +- current->comm, len, dev->hard_header_len);
29010 +- return true;
29011 +- }
29012 +-
29013 +- return false;
29014 +-}
29015 +-
29016 + static void tpacket_set_protocol(const struct net_device *dev,
29017 + struct sk_buff *skb)
29018 + {
29019 +@@ -2420,19 +2412,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
29020 + if (unlikely(err < 0))
29021 + return -EINVAL;
29022 + } else if (dev->hard_header_len) {
29023 +- if (ll_header_truncated(dev, tp_len))
29024 +- return -EINVAL;
29025 ++ int hdrlen = min_t(int, dev->hard_header_len, tp_len);
29026 +
29027 + skb_push(skb, dev->hard_header_len);
29028 +- err = skb_store_bits(skb, 0, data,
29029 +- dev->hard_header_len);
29030 ++ err = skb_store_bits(skb, 0, data, hdrlen);
29031 + if (unlikely(err))
29032 + return err;
29033 ++ if (!dev_validate_header(dev, skb->data, hdrlen))
29034 ++ return -EINVAL;
29035 + if (!skb->protocol)
29036 + tpacket_set_protocol(dev, skb);
29037 +
29038 +- data += dev->hard_header_len;
29039 +- to_write -= dev->hard_header_len;
29040 ++ data += hdrlen;
29041 ++ to_write -= hdrlen;
29042 + }
29043 +
29044 + offset = offset_in_page(data);
29045 +@@ -2763,9 +2755,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
29046 + offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
29047 + if (unlikely(offset < 0))
29048 + goto out_free;
29049 +- } else {
29050 +- if (ll_header_truncated(dev, len))
29051 +- goto out_free;
29052 + }
29053 +
29054 + /* Returns -EFAULT on error */
29055 +@@ -2773,6 +2762,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
29056 + if (err)
29057 + goto out_free;
29058 +
29059 ++ if (sock->type == SOCK_RAW &&
29060 ++ !dev_validate_header(dev, skb->data, len)) {
29061 ++ err = -EINVAL;
29062 ++ goto out_free;
29063 ++ }
29064 ++
29065 + sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
29066 +
29067 + if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
29068 +@@ -3441,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
29069 + i->ifindex = mreq->mr_ifindex;
29070 + i->alen = mreq->mr_alen;
29071 + memcpy(i->addr, mreq->mr_address, i->alen);
29072 ++ memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
29073 + i->count = 1;
29074 + i->next = po->mclist;
29075 + po->mclist = i;
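All three af_packet send paths above now funnel link-layer header checking through dev_validate_header(), replacing the length-only ll_header_truncated() test. A simplified rendering of its contract (the real helper lives in include/linux/netdevice.h; this is a hedged sketch, not the exact source): a full-length header passes, a short header is tolerated for CAP_SYS_RAWIO callers after zero-padding, a device whose header_ops provide ->validate() may accept variable-length headers, and anything else is rejected:

    /* kernel-context sketch, approximating dev_validate_header() */
    static bool dev_validate_header(const struct net_device *dev,
                                    char *ll_header, int len)
    {
            if (likely(len >= dev->hard_header_len))
                    return true;            /* full header present */

            if (capable(CAP_SYS_RAWIO)) {
                    /* privileged senders may give a short header;
                     * pad the remainder with zeroes */
                    memset(ll_header + len, 0,
                           dev->hard_header_len - len);
                    return true;
            }

            if (dev->header_ops && dev->header_ops->validate)
                    return dev->header_ops->validate(ll_header, len);

            return false;                   /* truncated, unverifiable */
    }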
29076 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
29077 +index af1acf009866..95b560f0b253 100644
29078 +--- a/net/sched/sch_api.c
29079 ++++ b/net/sched/sch_api.c
29080 +@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
29081 + return 0;
29082 + }
29083 +
29084 +-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
29085 ++void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
29086 ++ unsigned int len)
29087 + {
29088 + const struct Qdisc_class_ops *cops;
29089 + unsigned long cl;
29090 + u32 parentid;
29091 + int drops;
29092 +
29093 +- if (n == 0)
29094 ++ if (n == 0 && len == 0)
29095 + return;
29096 + drops = max_t(int, n, 0);
29097 + rcu_read_lock();
29098 +@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
29099 + cops->put(sch, cl);
29100 + }
29101 + sch->q.qlen -= n;
29102 ++ sch->qstats.backlog -= len;
29103 + __qdisc_qstats_drop(sch, drops);
29104 + }
29105 + rcu_read_unlock();
29106 + }
29107 +-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
29108 ++EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
29109 +
29110 + static void notify_and_destroy(struct net *net, struct sk_buff *skb,
29111 + struct nlmsghdr *n, u32 clid,
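Many of the graft paths in the scheduler hunks below collapse into a new qdisc_replace() helper, added to include/net/sch_generic.h earlier in this patch. Roughly, it performs the locked child swap each qdisc used to open-code, charging the old child's full packet and byte counts back to the ancestors via the renamed qdisc_tree_reduce_backlog() before resetting it:

    /* kernel-context sketch of the qdisc_replace() helper */
    static inline struct Qdisc *qdisc_replace(struct Qdisc *sch,
                                              struct Qdisc *new,
                                              struct Qdisc **pold)
    {
            struct Qdisc *old;

            sch_tree_lock(sch);
            old = *pold;
            *pold = new;
            if (old != NULL) {
                    /* tell every ancestor that old's packets and
                     * bytes are gone before the queue is purged */
                    qdisc_tree_reduce_backlog(old, old->q.qlen,
                                              old->qstats.backlog);
                    qdisc_reset(old);
            }
            sch_tree_unlock(sch);

            return old;
    }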
29112 +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
29113 +index c538d9e4a8f6..baafddf229ce 100644
29114 +--- a/net/sched/sch_cbq.c
29115 ++++ b/net/sched/sch_cbq.c
29116 +@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29117 + new->reshape_fail = cbq_reshape_fail;
29118 + #endif
29119 + }
29120 +- sch_tree_lock(sch);
29121 +- *old = cl->q;
29122 +- cl->q = new;
29123 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29124 +- qdisc_reset(*old);
29125 +- sch_tree_unlock(sch);
29126 +
29127 ++ *old = qdisc_replace(sch, new, &cl->q);
29128 + return 0;
29129 + }
29130 +
29131 +@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
29132 + {
29133 + struct cbq_sched_data *q = qdisc_priv(sch);
29134 + struct cbq_class *cl = (struct cbq_class *)arg;
29135 +- unsigned int qlen;
29136 ++ unsigned int qlen, backlog;
29137 +
29138 + if (cl->filters || cl->children || cl == &q->link)
29139 + return -EBUSY;
29140 +@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
29141 + sch_tree_lock(sch);
29142 +
29143 + qlen = cl->q->q.qlen;
29144 ++ backlog = cl->q->qstats.backlog;
29145 + qdisc_reset(cl->q);
29146 +- qdisc_tree_decrease_qlen(cl->q, qlen);
29147 ++ qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
29148 +
29149 + if (cl->next_alive)
29150 + cbq_deactivate_class(cl);
29151 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
29152 +index 5ffb8b8337c7..0a08c860eee4 100644
29153 +--- a/net/sched/sch_choke.c
29154 ++++ b/net/sched/sch_choke.c
29155 +@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
29156 + choke_zap_tail_holes(q);
29157 +
29158 + qdisc_qstats_backlog_dec(sch, skb);
29159 ++ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
29160 + qdisc_drop(skb, sch);
29161 +- qdisc_tree_decrease_qlen(sch, 1);
29162 + --sch->q.qlen;
29163 + }
29164 +
29165 +@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
29166 + old = q->tab;
29167 + if (old) {
29168 + unsigned int oqlen = sch->q.qlen, tail = 0;
29169 ++ unsigned dropped = 0;
29170 +
29171 + while (q->head != q->tail) {
29172 + struct sk_buff *skb = q->tab[q->head];
29173 +@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
29174 + ntab[tail++] = skb;
29175 + continue;
29176 + }
29177 ++ dropped += qdisc_pkt_len(skb);
29178 + qdisc_qstats_backlog_dec(sch, skb);
29179 + --sch->q.qlen;
29180 + qdisc_drop(skb, sch);
29181 + }
29182 +- qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
29183 ++ qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
29184 + q->head = 0;
29185 + q->tail = tail;
29186 + }
29187 +diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
29188 +index 535007d5f0b5..9b7e2980ee5c 100644
29189 +--- a/net/sched/sch_codel.c
29190 ++++ b/net/sched/sch_codel.c
29191 +@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
29192 +
29193 + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
29194 +
29195 +- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
29196 ++ /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
29197 + * or HTB crashes. Defer it for next round.
29198 + */
29199 + if (q->stats.drop_count && sch->q.qlen) {
29200 +- qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
29201 ++ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
29202 + q->stats.drop_count = 0;
29203 ++ q->stats.drop_len = 0;
29204 + }
29205 + if (skb)
29206 + qdisc_bstats_update(sch, skb);
29207 +@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
29208 + {
29209 + struct codel_sched_data *q = qdisc_priv(sch);
29210 + struct nlattr *tb[TCA_CODEL_MAX + 1];
29211 +- unsigned int qlen;
29212 ++ unsigned int qlen, dropped = 0;
29213 + int err;
29214 +
29215 + if (!opt)
29216 +@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
29217 + while (sch->q.qlen > sch->limit) {
29218 + struct sk_buff *skb = __skb_dequeue(&sch->q);
29219 +
29220 ++ dropped += qdisc_pkt_len(skb);
29221 + qdisc_qstats_backlog_dec(sch, skb);
29222 + qdisc_drop(skb, sch);
29223 + }
29224 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
29225 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
29226 +
29227 + sch_tree_unlock(sch);
29228 + return 0;
29229 +diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
29230 +index a1cd778240cd..a63e879e8975 100644
29231 +--- a/net/sched/sch_drr.c
29232 ++++ b/net/sched/sch_drr.c
29233 +@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
29234 + static void drr_purge_queue(struct drr_class *cl)
29235 + {
29236 + unsigned int len = cl->qdisc->q.qlen;
29237 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
29238 +
29239 + qdisc_reset(cl->qdisc);
29240 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
29241 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
29242 + }
29243 +
29244 + static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
29245 +@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
29246 + new = &noop_qdisc;
29247 + }
29248 +
29249 +- sch_tree_lock(sch);
29250 +- drr_purge_queue(cl);
29251 +- *old = cl->qdisc;
29252 +- cl->qdisc = new;
29253 +- sch_tree_unlock(sch);
29254 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
29255 + return 0;
29256 + }
29257 +
29258 +diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
29259 +index f357f34d02d2..d0dff0cd8186 100644
29260 +--- a/net/sched/sch_dsmark.c
29261 ++++ b/net/sched/sch_dsmark.c
29262 +@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
29263 + new = &noop_qdisc;
29264 + }
29265 +
29266 +- sch_tree_lock(sch);
29267 +- *old = p->q;
29268 +- p->q = new;
29269 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29270 +- qdisc_reset(*old);
29271 +- sch_tree_unlock(sch);
29272 +-
29273 ++ *old = qdisc_replace(sch, new, &p->q);
29274 + return 0;
29275 + }
29276 +
29277 +@@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29278 + return err;
29279 + }
29280 +
29281 ++ qdisc_qstats_backlog_inc(sch, skb);
29282 + sch->q.qlen++;
29283 +
29284 + return NET_XMIT_SUCCESS;
29285 +@@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
29286 + return NULL;
29287 +
29288 + qdisc_bstats_update(sch, skb);
29289 ++ qdisc_qstats_backlog_dec(sch, skb);
29290 + sch->q.qlen--;
29291 +
29292 + index = skb->tc_index & (p->indices - 1);
29293 +@@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
29294 +
29295 + pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
29296 + qdisc_reset(p->q);
29297 ++ sch->qstats.backlog = 0;
29298 + sch->q.qlen = 0;
29299 + }
29300 +
29301 +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
29302 +index 109b2322778f..3c6a47d66a04 100644
29303 +--- a/net/sched/sch_fq.c
29304 ++++ b/net/sched/sch_fq.c
29305 +@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
29306 + struct fq_sched_data *q = qdisc_priv(sch);
29307 + struct nlattr *tb[TCA_FQ_MAX + 1];
29308 + int err, drop_count = 0;
29309 ++ unsigned drop_len = 0;
29310 + u32 fq_log;
29311 +
29312 + if (!opt)
29313 +@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
29314 +
29315 + if (!skb)
29316 + break;
29317 ++ drop_len += qdisc_pkt_len(skb);
29318 + kfree_skb(skb);
29319 + drop_count++;
29320 + }
29321 +- qdisc_tree_decrease_qlen(sch, drop_count);
29322 ++ qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
29323 +
29324 + sch_tree_unlock(sch);
29325 + return err;
29326 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
29327 +index 4c834e93dafb..d3fc8f9dd3d4 100644
29328 +--- a/net/sched/sch_fq_codel.c
29329 ++++ b/net/sched/sch_fq_codel.c
29330 +@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
29331 + static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29332 + {
29333 + struct fq_codel_sched_data *q = qdisc_priv(sch);
29334 +- unsigned int idx;
29335 ++ unsigned int idx, prev_backlog;
29336 + struct fq_codel_flow *flow;
29337 + int uninitialized_var(ret);
29338 +
29339 +@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29340 + if (++sch->q.qlen <= sch->limit)
29341 + return NET_XMIT_SUCCESS;
29342 +
29343 ++ prev_backlog = sch->qstats.backlog;
29344 + q->drop_overlimit++;
29345 + /* Return Congestion Notification only if we dropped a packet
29346 + * from this flow.
29347 +@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29348 + return NET_XMIT_CN;
29349 +
29350 + /* As we dropped a packet, better let upper stack know this */
29351 +- qdisc_tree_decrease_qlen(sch, 1);
29352 ++ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
29353 + return NET_XMIT_SUCCESS;
29354 + }
29355 +
29356 +@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
29357 + struct fq_codel_flow *flow;
29358 + struct list_head *head;
29359 + u32 prev_drop_count, prev_ecn_mark;
29360 ++ unsigned int prev_backlog;
29361 +
29362 + begin:
29363 + head = &q->new_flows;
29364 +@@ -259,6 +261,7 @@ begin:
29365 +
29366 + prev_drop_count = q->cstats.drop_count;
29367 + prev_ecn_mark = q->cstats.ecn_mark;
29368 ++ prev_backlog = sch->qstats.backlog;
29369 +
29370 + skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
29371 + dequeue);
29372 +@@ -276,12 +279,14 @@ begin:
29373 + }
29374 + qdisc_bstats_update(sch, skb);
29375 + flow->deficit -= qdisc_pkt_len(skb);
29376 +- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
29377 ++ /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
29378 + * or HTB crashes. Defer it for next round.
29379 + */
29380 + if (q->cstats.drop_count && sch->q.qlen) {
29381 +- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
29382 ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
29383 ++ q->cstats.drop_len);
29384 + q->cstats.drop_count = 0;
29385 ++ q->cstats.drop_len = 0;
29386 + }
29387 + return skb;
29388 + }
29389 +@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
29390 + while (sch->q.qlen > sch->limit) {
29391 + struct sk_buff *skb = fq_codel_dequeue(sch);
29392 +
29393 ++ q->cstats.drop_len += qdisc_pkt_len(skb);
29394 + kfree_skb(skb);
29395 + q->cstats.drop_count++;
29396 + }
29397 +- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
29398 ++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
29399 + q->cstats.drop_count = 0;
29400 ++ q->cstats.drop_len = 0;
29401 +
29402 + sch_tree_unlock(sch);
29403 + return 0;
29404 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
29405 +index 16bc83b2842a..aa4725038f94 100644
29406 +--- a/net/sched/sch_generic.c
29407 ++++ b/net/sched/sch_generic.c
29408 +@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
29409 + if (validate)
29410 + skb = validate_xmit_skb_list(skb, dev);
29411 +
29412 +- if (skb) {
29413 ++ if (likely(skb)) {
29414 + HARD_TX_LOCK(dev, txq, smp_processor_id());
29415 + if (!netif_xmit_frozen_or_stopped(txq))
29416 + skb = dev_hard_start_xmit(skb, dev, txq, &ret);
29417 +
29418 + HARD_TX_UNLOCK(dev, txq);
29419 ++ } else {
29420 ++ spin_lock(root_lock);
29421 ++ return qdisc_qlen(q);
29422 + }
29423 + spin_lock(root_lock);
29424 +
29425 +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
29426 +index b7ebe2c87586..d783d7cc3348 100644
29427 +--- a/net/sched/sch_hfsc.c
29428 ++++ b/net/sched/sch_hfsc.c
29429 +@@ -895,9 +895,10 @@ static void
29430 + hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
29431 + {
29432 + unsigned int len = cl->qdisc->q.qlen;
29433 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
29434 +
29435 + qdisc_reset(cl->qdisc);
29436 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
29437 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
29438 + }
29439 +
29440 + static void
29441 +@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29442 + new = &noop_qdisc;
29443 + }
29444 +
29445 +- sch_tree_lock(sch);
29446 +- hfsc_purge_queue(sch, cl);
29447 +- *old = cl->qdisc;
29448 +- cl->qdisc = new;
29449 +- sch_tree_unlock(sch);
29450 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
29451 + return 0;
29452 + }
29453 +
29454 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
29455 +index 86b04e31e60b..13d6f83ec491 100644
29456 +--- a/net/sched/sch_hhf.c
29457 ++++ b/net/sched/sch_hhf.c
29458 +@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29459 + struct hhf_sched_data *q = qdisc_priv(sch);
29460 + enum wdrr_bucket_idx idx;
29461 + struct wdrr_bucket *bucket;
29462 ++ unsigned int prev_backlog;
29463 +
29464 + idx = hhf_classify(skb, sch);
29465 +
29466 +@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29467 + if (++sch->q.qlen <= sch->limit)
29468 + return NET_XMIT_SUCCESS;
29469 +
29470 ++ prev_backlog = sch->qstats.backlog;
29471 + q->drop_overlimit++;
29472 + /* Return Congestion Notification only if we dropped a packet from this
29473 + * bucket.
29474 +@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29475 + return NET_XMIT_CN;
29476 +
29477 + /* As we dropped a packet, better let upper stack know this. */
29478 +- qdisc_tree_decrease_qlen(sch, 1);
29479 ++ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
29480 + return NET_XMIT_SUCCESS;
29481 + }
29482 +
29483 +@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
29484 + {
29485 + struct hhf_sched_data *q = qdisc_priv(sch);
29486 + struct nlattr *tb[TCA_HHF_MAX + 1];
29487 +- unsigned int qlen;
29488 ++ unsigned int qlen, prev_backlog;
29489 + int err;
29490 + u64 non_hh_quantum;
29491 + u32 new_quantum = q->quantum;
29492 +@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
29493 + }
29494 +
29495 + qlen = sch->q.qlen;
29496 ++ prev_backlog = sch->qstats.backlog;
29497 + while (sch->q.qlen > sch->limit) {
29498 + struct sk_buff *skb = hhf_dequeue(sch);
29499 +
29500 + kfree_skb(skb);
29501 + }
29502 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
29503 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
29504 ++ prev_backlog - sch->qstats.backlog);
29505 +
29506 + sch_tree_unlock(sch);
29507 + return 0;
29508 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
29509 +index 15ccd7f8fb2a..87b02ed3d5f2 100644
29510 +--- a/net/sched/sch_htb.c
29511 ++++ b/net/sched/sch_htb.c
29512 +@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29513 + htb_activate(q, cl);
29514 + }
29515 +
29516 ++ qdisc_qstats_backlog_inc(sch, skb);
29517 + sch->q.qlen++;
29518 + return NET_XMIT_SUCCESS;
29519 + }
29520 +@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
29521 + ok:
29522 + qdisc_bstats_update(sch, skb);
29523 + qdisc_unthrottled(sch);
29524 ++ qdisc_qstats_backlog_dec(sch, skb);
29525 + sch->q.qlen--;
29526 + return skb;
29527 + }
29528 +@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
29529 + unsigned int len;
29530 + if (cl->un.leaf.q->ops->drop &&
29531 + (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
29532 ++ sch->qstats.backlog -= len;
29533 + sch->q.qlen--;
29534 + if (!cl->un.leaf.q->q.qlen)
29535 + htb_deactivate(q, cl);
29536 +@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
29537 + }
29538 + cl->prio_activity = 0;
29539 + cl->cmode = HTB_CAN_SEND;
29540 +-
29541 + }
29542 + }
29543 + qdisc_watchdog_cancel(&q->watchdog);
29544 + __skb_queue_purge(&q->direct_queue);
29545 + sch->q.qlen = 0;
29546 ++ sch->qstats.backlog = 0;
29547 + memset(q->hlevel, 0, sizeof(q->hlevel));
29548 + memset(q->row_mask, 0, sizeof(q->row_mask));
29549 + for (i = 0; i < TC_HTB_NUMPRIO; i++)
29550 +@@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29551 + cl->common.classid)) == NULL)
29552 + return -ENOBUFS;
29553 +
29554 +- sch_tree_lock(sch);
29555 +- *old = cl->un.leaf.q;
29556 +- cl->un.leaf.q = new;
29557 +- if (*old != NULL) {
29558 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29559 +- qdisc_reset(*old);
29560 +- }
29561 +- sch_tree_unlock(sch);
29562 ++ *old = qdisc_replace(sch, new, &cl->un.leaf.q);
29563 + return 0;
29564 + }
29565 +
29566 +@@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
29567 + {
29568 + struct htb_sched *q = qdisc_priv(sch);
29569 + struct htb_class *cl = (struct htb_class *)arg;
29570 +- unsigned int qlen;
29571 + struct Qdisc *new_q = NULL;
29572 + int last_child = 0;
29573 +
29574 +@@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
29575 + sch_tree_lock(sch);
29576 +
29577 + if (!cl->level) {
29578 +- qlen = cl->un.leaf.q->q.qlen;
29579 ++ unsigned int qlen = cl->un.leaf.q->q.qlen;
29580 ++ unsigned int backlog = cl->un.leaf.q->qstats.backlog;
29581 ++
29582 + qdisc_reset(cl->un.leaf.q);
29583 +- qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
29584 ++ qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
29585 + }
29586 +
29587 + /* delete from hash and active; remainder in destroy_class */
29588 +@@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
29589 + sch_tree_lock(sch);
29590 + if (parent && !parent->level) {
29591 + unsigned int qlen = parent->un.leaf.q->q.qlen;
29592 ++ unsigned int backlog = parent->un.leaf.q->qstats.backlog;
29593 +
29594 + /* turn parent into inner node */
29595 + qdisc_reset(parent->un.leaf.q);
29596 +- qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
29597 ++ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
29598 + qdisc_destroy(parent->un.leaf.q);
29599 + if (parent->prio_activity)
29600 + htb_deactivate(q, parent);
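The dsmark and htb hunks above pair every qlen update with a matching qstats.backlog update; the invariant being restored is that backlog tracks the queue's byte count exactly as qlen tracks its packet count: plus the packet length on enqueue, minus it on dequeue and drop, and zero on reset. A toy user-space model of the bookkeeping (toy_qdisc and its helpers are invented names):

    #include <stdio.h>

    struct toy_qdisc {
            unsigned int qlen;      /* packets queued */
            unsigned int backlog;   /* bytes queued; must mirror qlen */
    };

    static void enqueue(struct toy_qdisc *q, unsigned int len)
    {
            q->qlen++;
            q->backlog += len;      /* qdisc_qstats_backlog_inc() */
    }

    static void dequeue(struct toy_qdisc *q, unsigned int len)
    {
            q->qlen--;
            q->backlog -= len;      /* qdisc_qstats_backlog_dec() */
    }

    static void reset(struct toy_qdisc *q)
    {
            q->qlen = 0;
            q->backlog = 0;         /* what htb_reset()/dsmark_reset() now do */
    }

    int main(void)
    {
            struct toy_qdisc q = { 0, 0 };

            enqueue(&q, 1500);
            enqueue(&q, 60);
            dequeue(&q, 1500);
            printf("qlen=%u backlog=%u\n", q.qlen, q.backlog); /* 1 60 */
            reset(&q);
            return 0;
    }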
29601 +diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
29602 +index 4e904ca0af9d..bcdd54bb101c 100644
29603 +--- a/net/sched/sch_multiq.c
29604 ++++ b/net/sched/sch_multiq.c
29605 +@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
29606 + if (q->queues[i] != &noop_qdisc) {
29607 + struct Qdisc *child = q->queues[i];
29608 + q->queues[i] = &noop_qdisc;
29609 +- qdisc_tree_decrease_qlen(child, child->q.qlen);
29610 ++ qdisc_tree_reduce_backlog(child, child->q.qlen,
29611 ++ child->qstats.backlog);
29612 + qdisc_destroy(child);
29613 + }
29614 + }
29615 +@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
29616 + q->queues[i] = child;
29617 +
29618 + if (old != &noop_qdisc) {
29619 +- qdisc_tree_decrease_qlen(old,
29620 +- old->q.qlen);
29621 ++ qdisc_tree_reduce_backlog(old,
29622 ++ old->q.qlen,
29623 ++ old->qstats.backlog);
29624 + qdisc_destroy(old);
29625 + }
29626 + sch_tree_unlock(sch);
29627 +@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29628 + if (new == NULL)
29629 + new = &noop_qdisc;
29630 +
29631 +- sch_tree_lock(sch);
29632 +- *old = q->queues[band];
29633 +- q->queues[band] = new;
29634 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29635 +- qdisc_reset(*old);
29636 +- sch_tree_unlock(sch);
29637 +-
29638 ++ *old = qdisc_replace(sch, new, &q->queues[band]);
29639 + return 0;
29640 + }
29641 +
29642 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
29643 +index 5abd1d9de989..4befe97a9034 100644
29644 +--- a/net/sched/sch_netem.c
29645 ++++ b/net/sched/sch_netem.c
29646 +@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
29647 + sch->q.qlen++;
29648 + }
29649 +
29650 ++/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
29651 ++ * when we statistically choose to corrupt one, we instead segment it, returning
29652 ++ * the first packet to be corrupted, and re-enqueue the remaining frames
29653 ++ */
29654 ++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
29655 ++{
29656 ++ struct sk_buff *segs;
29657 ++ netdev_features_t features = netif_skb_features(skb);
29658 ++
29659 ++ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
29660 ++
29661 ++ if (IS_ERR_OR_NULL(segs)) {
29662 ++ qdisc_reshape_fail(skb, sch);
29663 ++ return NULL;
29664 ++ }
29665 ++ consume_skb(skb);
29666 ++ return segs;
29667 ++}
29668 ++
29669 + /*
29670 + * Insert one skb into qdisc.
29671 + * Note: parent depends on return value to account for queue length.
29672 +@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29673 + /* We don't fill cb now as skb_unshare() may invalidate it */
29674 + struct netem_skb_cb *cb;
29675 + struct sk_buff *skb2;
29676 ++ struct sk_buff *segs = NULL;
29677 ++ unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
29678 ++ int nb = 0;
29679 + int count = 1;
29680 ++ int rc = NET_XMIT_SUCCESS;
29681 +
29682 + /* Random duplication */
29683 + if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
29684 +@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29685 + * do it now in software before we mangle it.
29686 + */
29687 + if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
29688 ++ if (skb_is_gso(skb)) {
29689 ++ segs = netem_segment(skb, sch);
29690 ++ if (!segs)
29691 ++ return NET_XMIT_DROP;
29692 ++ } else {
29693 ++ segs = skb;
29694 ++ }
29695 ++
29696 ++ skb = segs;
29697 ++ segs = segs->next;
29698 ++
29699 + if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
29700 + (skb->ip_summed == CHECKSUM_PARTIAL &&
29701 +- skb_checksum_help(skb)))
29702 +- return qdisc_drop(skb, sch);
29703 ++ skb_checksum_help(skb))) {
29704 ++ rc = qdisc_drop(skb, sch);
29705 ++ goto finish_segs;
29706 ++ }
29707 +
29708 + skb->data[prandom_u32() % skb_headlen(skb)] ^=
29709 + 1<<(prandom_u32() % 8);
29710 +@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29711 + sch->qstats.requeues++;
29712 + }
29713 +
29714 ++finish_segs:
29715 ++ if (segs) {
29716 ++ while (segs) {
29717 ++ skb2 = segs->next;
29718 ++ segs->next = NULL;
29719 ++ qdisc_skb_cb(segs)->pkt_len = segs->len;
29720 ++ last_len = segs->len;
29721 ++ rc = qdisc_enqueue(segs, sch);
29722 ++ if (rc != NET_XMIT_SUCCESS) {
29723 ++ if (net_xmit_drop_count(rc))
29724 ++ qdisc_qstats_drop(sch);
29725 ++ } else {
29726 ++ nb++;
29727 ++ len += last_len;
29728 ++ }
29729 ++ segs = skb2;
29730 ++ }
29731 ++ sch->q.qlen += nb;
29732 ++ if (nb > 1)
29733 ++ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
29734 ++ }
29735 + return NET_XMIT_SUCCESS;
29736 + }
29737 +
29738 +@@ -598,7 +655,8 @@ deliver:
29739 + if (unlikely(err != NET_XMIT_SUCCESS)) {
29740 + if (net_xmit_drop_count(err)) {
29741 + qdisc_qstats_drop(sch);
29742 +- qdisc_tree_decrease_qlen(sch, 1);
29743 ++ qdisc_tree_reduce_backlog(sch, 1,
29744 ++ qdisc_pkt_len(skb));
29745 + }
29746 + }
29747 + goto tfifo_dequeue;
29748 +@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29749 + {
29750 + struct netem_sched_data *q = qdisc_priv(sch);
29751 +
29752 +- sch_tree_lock(sch);
29753 +- *old = q->qdisc;
29754 +- q->qdisc = new;
29755 +- if (*old) {
29756 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29757 +- qdisc_reset(*old);
29758 +- }
29759 +- sch_tree_unlock(sch);
29760 +-
29761 ++ *old = qdisc_replace(sch, new, &q->qdisc);
29762 + return 0;
29763 + }
29764 +
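After segmentation, netem has turned one accounted GSO packet into nb individually accounted segments, so finish_segs calls qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len) with negative deltas, i.e. it increases the ancestors' packet count by nb - 1 and adjusts their byte count by the header overhead the split introduced. A quick worked example of that arithmetic, with hypothetical sizes:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical: a 3000-byte GSO skb split into two segments
             * whose repeated headers add 80 bytes overall */
            unsigned int prev_len = 3000;            /* qdisc_pkt_len before */
            unsigned int seg_len[2] = { 1540, 1540 };
            int nb = 2;                              /* segments enqueued */
            unsigned int len = seg_len[0] + seg_len[1];

            /* negative values mean "grow the counts upstream" */
            int dpkts  = 1 - nb;                     /* -1 */
            int dbytes = (int)prev_len - (int)len;   /* -80 */

            printf("qdisc_tree_reduce_backlog(sch, %d, %d)\n", dpkts, dbytes);
            return 0;
    }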
29765 +diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
29766 +index b783a446d884..71ae3b9629f9 100644
29767 +--- a/net/sched/sch_pie.c
29768 ++++ b/net/sched/sch_pie.c
29769 +@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
29770 + {
29771 + struct pie_sched_data *q = qdisc_priv(sch);
29772 + struct nlattr *tb[TCA_PIE_MAX + 1];
29773 +- unsigned int qlen;
29774 ++ unsigned int qlen, dropped = 0;
29775 + int err;
29776 +
29777 + if (!opt)
29778 +@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
29779 + while (sch->q.qlen > sch->limit) {
29780 + struct sk_buff *skb = __skb_dequeue(&sch->q);
29781 +
29782 ++ dropped += qdisc_pkt_len(skb);
29783 + qdisc_qstats_backlog_dec(sch, skb);
29784 + qdisc_drop(skb, sch);
29785 + }
29786 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
29787 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
29788 +
29789 + sch_tree_unlock(sch);
29790 + return 0;
29791 +diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
29792 +index ba6487f2741f..fee1b15506b2 100644
29793 +--- a/net/sched/sch_prio.c
29794 ++++ b/net/sched/sch_prio.c
29795 +@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
29796 + struct Qdisc *child = q->queues[i];
29797 + q->queues[i] = &noop_qdisc;
29798 + if (child != &noop_qdisc) {
29799 +- qdisc_tree_decrease_qlen(child, child->q.qlen);
29800 ++ qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
29801 + qdisc_destroy(child);
29802 + }
29803 + }
29804 +@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
29805 + q->queues[i] = child;
29806 +
29807 + if (old != &noop_qdisc) {
29808 +- qdisc_tree_decrease_qlen(old,
29809 +- old->q.qlen);
29810 ++ qdisc_tree_reduce_backlog(old,
29811 ++ old->q.qlen,
29812 ++ old->qstats.backlog);
29813 + qdisc_destroy(old);
29814 + }
29815 + sch_tree_unlock(sch);
29816 +@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29817 + if (new == NULL)
29818 + new = &noop_qdisc;
29819 +
29820 +- sch_tree_lock(sch);
29821 +- *old = q->queues[band];
29822 +- q->queues[band] = new;
29823 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29824 +- qdisc_reset(*old);
29825 +- sch_tree_unlock(sch);
29826 +-
29827 ++ *old = qdisc_replace(sch, new, &q->queues[band]);
29828 + return 0;
29829 + }
29830 +
29831 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
29832 +index 3dc3a6e56052..8d2d8d953432 100644
29833 +--- a/net/sched/sch_qfq.c
29834 ++++ b/net/sched/sch_qfq.c
29835 +@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
29836 + static void qfq_purge_queue(struct qfq_class *cl)
29837 + {
29838 + unsigned int len = cl->qdisc->q.qlen;
29839 ++ unsigned int backlog = cl->qdisc->qstats.backlog;
29840 +
29841 + qdisc_reset(cl->qdisc);
29842 +- qdisc_tree_decrease_qlen(cl->qdisc, len);
29843 ++ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
29844 + }
29845 +
29846 + static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
29847 +@@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
29848 + new = &noop_qdisc;
29849 + }
29850 +
29851 +- sch_tree_lock(sch);
29852 +- qfq_purge_queue(cl);
29853 +- *old = cl->qdisc;
29854 +- cl->qdisc = new;
29855 +- sch_tree_unlock(sch);
29856 ++ *old = qdisc_replace(sch, new, &cl->qdisc);
29857 + return 0;
29858 + }
29859 +
29860 +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
29861 +index 6c0534cc7758..8c0508c0e287 100644
29862 +--- a/net/sched/sch_red.c
29863 ++++ b/net/sched/sch_red.c
29864 +@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
29865 + q->flags = ctl->flags;
29866 + q->limit = ctl->limit;
29867 + if (child) {
29868 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
29869 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
29870 ++ q->qdisc->qstats.backlog);
29871 + qdisc_destroy(q->qdisc);
29872 + q->qdisc = child;
29873 + }
29874 +@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29875 + if (new == NULL)
29876 + new = &noop_qdisc;
29877 +
29878 +- sch_tree_lock(sch);
29879 +- *old = q->qdisc;
29880 +- q->qdisc = new;
29881 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29882 +- qdisc_reset(*old);
29883 +- sch_tree_unlock(sch);
29884 ++ *old = qdisc_replace(sch, new, &q->qdisc);
29885 + return 0;
29886 + }
29887 +
29888 +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
29889 +index 5bbb6332ec57..c69611640fa5 100644
29890 +--- a/net/sched/sch_sfb.c
29891 ++++ b/net/sched/sch_sfb.c
29892 +@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
29893 +
29894 + sch_tree_lock(sch);
29895 +
29896 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
29897 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
29898 ++ q->qdisc->qstats.backlog);
29899 + qdisc_destroy(q->qdisc);
29900 + q->qdisc = child;
29901 +
29902 +@@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
29903 + if (new == NULL)
29904 + new = &noop_qdisc;
29905 +
29906 +- sch_tree_lock(sch);
29907 +- *old = q->qdisc;
29908 +- q->qdisc = new;
29909 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
29910 +- qdisc_reset(*old);
29911 +- sch_tree_unlock(sch);
29912 ++ *old = qdisc_replace(sch, new, &q->qdisc);
29913 + return 0;
29914 + }
29915 +
29916 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
29917 +index 3abab534eb5c..498f0a2cb47f 100644
29918 +--- a/net/sched/sch_sfq.c
29919 ++++ b/net/sched/sch_sfq.c
29920 +@@ -346,7 +346,7 @@ static int
29921 + sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
29922 + {
29923 + struct sfq_sched_data *q = qdisc_priv(sch);
29924 +- unsigned int hash;
29925 ++ unsigned int hash, dropped;
29926 + sfq_index x, qlen;
29927 + struct sfq_slot *slot;
29928 + int uninitialized_var(ret);
29929 +@@ -461,7 +461,7 @@ enqueue:
29930 + return NET_XMIT_SUCCESS;
29931 +
29932 + qlen = slot->qlen;
29933 +- sfq_drop(sch);
29934 ++ dropped = sfq_drop(sch);
29935 + /* Return Congestion Notification only if we dropped a packet
29936 + * from this flow.
29937 + */
29938 +@@ -469,7 +469,7 @@ enqueue:
29939 + return NET_XMIT_CN;
29940 +
29941 + /* As we dropped a packet, better let upper stack know this */
29942 +- qdisc_tree_decrease_qlen(sch, 1);
29943 ++ qdisc_tree_reduce_backlog(sch, 1, dropped);
29944 + return NET_XMIT_SUCCESS;
29945 + }
29946 +
29947 +@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
29948 + struct sfq_slot *slot;
29949 + struct sk_buff_head list;
29950 + int dropped = 0;
29951 ++ unsigned int drop_len = 0;
29952 +
29953 + __skb_queue_head_init(&list);
29954 +
29955 +@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
29956 + if (x >= SFQ_MAX_FLOWS) {
29957 + drop:
29958 + qdisc_qstats_backlog_dec(sch, skb);
29959 ++ drop_len += qdisc_pkt_len(skb);
29960 + kfree_skb(skb);
29961 + dropped++;
29962 + continue;
29963 +@@ -594,7 +596,7 @@ drop:
29964 + }
29965 + }
29966 + sch->q.qlen -= dropped;
29967 +- qdisc_tree_decrease_qlen(sch, dropped);
29968 ++ qdisc_tree_reduce_backlog(sch, dropped, drop_len);
29969 + }
29970 +
29971 + static void sfq_perturbation(unsigned long arg)
29972 +@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
29973 + struct sfq_sched_data *q = qdisc_priv(sch);
29974 + struct tc_sfq_qopt *ctl = nla_data(opt);
29975 + struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
29976 +- unsigned int qlen;
29977 ++ unsigned int qlen, dropped = 0;
29978 + struct red_parms *p = NULL;
29979 +
29980 + if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
29981 +@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
29982 +
29983 + qlen = sch->q.qlen;
29984 + while (sch->q.qlen > q->limit)
29985 +- sfq_drop(sch);
29986 +- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
29987 ++ dropped += sfq_drop(sch);
29988 ++ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
29989 +
29990 + del_timer(&q->perturb_timer);
29991 + if (q->perturb_period) {
29992 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
29993 +index a4afde14e865..c2fbde742f37 100644
29994 +--- a/net/sched/sch_tbf.c
29995 ++++ b/net/sched/sch_tbf.c
29996 +@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
29997 + struct tbf_sched_data *q = qdisc_priv(sch);
29998 + struct sk_buff *segs, *nskb;
29999 + netdev_features_t features = netif_skb_features(skb);
30000 ++ unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
30001 + int ret, nb;
30002 +
30003 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
30004 +@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
30005 + nskb = segs->next;
30006 + segs->next = NULL;
30007 + qdisc_skb_cb(segs)->pkt_len = segs->len;
30008 ++ len += segs->len;
30009 + ret = qdisc_enqueue(segs, q->qdisc);
30010 + if (ret != NET_XMIT_SUCCESS) {
30011 + if (net_xmit_drop_count(ret))
30012 +@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
30013 + }
30014 + sch->q.qlen += nb;
30015 + if (nb > 1)
30016 +- qdisc_tree_decrease_qlen(sch, 1 - nb);
30017 ++ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
30018 + consume_skb(skb);
30019 + return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
30020 + }
30021 +@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
30022 +
30023 + sch_tree_lock(sch);
30024 + if (child) {
30025 +- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
30026 ++ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
30027 ++ q->qdisc->qstats.backlog);
30028 + qdisc_destroy(q->qdisc);
30029 + q->qdisc = child;
30030 + }
30031 +@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
30032 + if (new == NULL)
30033 + new = &noop_qdisc;
30034 +
30035 +- sch_tree_lock(sch);
30036 +- *old = q->qdisc;
30037 +- q->qdisc = new;
30038 +- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
30039 +- qdisc_reset(*old);
30040 +- sch_tree_unlock(sch);
30041 +-
30042 ++ *old = qdisc_replace(sch, new, &q->qdisc);
30043 + return 0;
30044 + }
30045 +
30046 +diff --git a/net/socket.c b/net/socket.c
30047 +index c044d1e8508c..db13ae893dce 100644
30048 +--- a/net/socket.c
30049 ++++ b/net/socket.c
30050 +@@ -2240,31 +2240,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
30051 + cond_resched();
30052 + }
30053 +
30054 +-out_put:
30055 +- fput_light(sock->file, fput_needed);
30056 +-
30057 + if (err == 0)
30058 +- return datagrams;
30059 ++ goto out_put;
30060 +
30061 +- if (datagrams != 0) {
30062 ++ if (datagrams == 0) {
30063 ++ datagrams = err;
30064 ++ goto out_put;
30065 ++ }
30066 ++
30067 ++ /*
30068 ++ * We may return less entries than requested (vlen) if the
30069 ++ * sock is non block and there aren't enough datagrams...
30070 ++ */
30071 ++ if (err != -EAGAIN) {
30072 + /*
30073 +- * We may return less entries than requested (vlen) if the
30074 +- * sock is non block and there aren't enough datagrams...
30075 ++ * ... or if recvmsg returns an error after we
30076 ++ * received some datagrams, where we record the
30077 ++ * error to return on the next call or if the
30078 ++ * app asks about it using getsockopt(SO_ERROR).
30079 + */
30080 +- if (err != -EAGAIN) {
30081 +- /*
30082 +- * ... or if recvmsg returns an error after we
30083 +- * received some datagrams, where we record the
30084 +- * error to return on the next call or if the
30085 +- * app asks about it using getsockopt(SO_ERROR).
30086 +- */
30087 +- sock->sk->sk_err = -err;
30088 +- }
30089 +-
30090 +- return datagrams;
30091 ++ sock->sk->sk_err = -err;
30092 + }
30093 ++out_put:
30094 ++ fput_light(sock->file, fput_needed);
30095 +
30096 +- return err;
30097 ++ return datagrams;
30098 + }
30099 +
30100 + SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
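The rewritten exit path above makes __sys_recvmmsg always report the datagrams it already delivered, parking any later error in sk->sk_err to be surfaced by the next receive call or by getsockopt(SO_ERROR). From user space the contract looks roughly like this (a hedged sketch; drain is an invented helper and error handling is trimmed):

    #define _GNU_SOURCE
    #include <sys/socket.h>

    static int drain(int fd, struct mmsghdr *vec, unsigned int vlen)
    {
            int n = recvmmsg(fd, vec, vlen, MSG_DONTWAIT, NULL);

            if (n > 0)
                    return n;   /* some datagrams arrived: a trailing
                                 * error is deferred into sk_err, not
                                 * reported on this call */
            return -1;          /* no datagrams: errno holds the failure;
                                 * a deferred one from the previous call
                                 * can also be read via SO_ERROR */
    }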
30101 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
30102 +index 273bc3a35425..008c25d1b9f9 100644
30103 +--- a/net/sunrpc/cache.c
30104 ++++ b/net/sunrpc/cache.c
30105 +@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
30106 + }
30107 +
30108 + crq->q.reader = 0;
30109 +- crq->item = cache_get(h);
30110 + crq->buf = buf;
30111 + crq->len = 0;
30112 + crq->readers = 0;
30113 + spin_lock(&queue_lock);
30114 +- if (test_bit(CACHE_PENDING, &h->flags))
30115 ++ if (test_bit(CACHE_PENDING, &h->flags)) {
30116 ++ crq->item = cache_get(h);
30117 + list_add_tail(&crq->q.list, &detail->queue);
30118 +- else
30119 ++ } else
30120 + /* Lost a race, no longer PENDING, so don't enqueue */
30121 + ret = -EAGAIN;
30122 + spin_unlock(&queue_lock);
30123 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
30124 +index bbe65dcb9738..c93e67beaea7 100644
30125 +--- a/net/vmw_vsock/af_vsock.c
30126 ++++ b/net/vmw_vsock/af_vsock.c
30127 +@@ -1789,27 +1789,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
30128 + else if (sk->sk_shutdown & RCV_SHUTDOWN)
30129 + err = 0;
30130 +
30131 +- if (copied > 0) {
30132 +- /* We only do these additional bookkeeping/notification steps
30133 +- * if we actually copied something out of the queue pair
30134 +- * instead of just peeking ahead.
30135 +- */
30136 +-
30137 +- if (!(flags & MSG_PEEK)) {
30138 +- /* If the other side has shutdown for sending and there
30139 +- * is nothing more to read, then modify the socket
30140 +- * state.
30141 +- */
30142 +- if (vsk->peer_shutdown & SEND_SHUTDOWN) {
30143 +- if (vsock_stream_has_data(vsk) <= 0) {
30144 +- sk->sk_state = SS_UNCONNECTED;
30145 +- sock_set_flag(sk, SOCK_DONE);
30146 +- sk->sk_state_change(sk);
30147 +- }
30148 +- }
30149 +- }
30150 ++ if (copied > 0)
30151 + err = copied;
30152 +- }
30153 +
30154 + out:
30155 + release_sock(sk);
30156 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
30157 +index 711cb7ad6ae0..ab62d305b48b 100644
30158 +--- a/net/wireless/nl80211.c
30159 ++++ b/net/wireless/nl80211.c
30160 +@@ -13201,7 +13201,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
30161 + struct wireless_dev *wdev;
30162 + struct cfg80211_beacon_registration *reg, *tmp;
30163 +
30164 +- if (state != NETLINK_URELEASE)
30165 ++ if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
30166 + return NOTIFY_DONE;
30167 +
30168 + rcu_read_lock();
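The netlink release notifier chain fires for sockets of every netlink family, so a listener that keys state on the portid must also filter on the protocol; together with the af_netlink change earlier in this patch (notify only for sockets that were actually bound), this stops nl80211 from tearing down beacon registrations on unrelated socket releases. The guard pattern, roughly (example_netlink_notify is an invented name):

    /* kernel-context sketch of a filtered netlink notifier */
    static int example_netlink_notify(struct notifier_block *nb,
                                      unsigned long state, void *_notify)
    {
            struct netlink_notify *notify = _notify;

            /* ignore everything except generic-netlink releases */
            if (state != NETLINK_URELEASE ||
                notify->protocol != NETLINK_GENERIC)
                    return NOTIFY_DONE;

            /* ... drop per-socket state matched by notify->portid ... */
            return NOTIFY_OK;
    }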
30169 +diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
30170 +index 7ecd04c21360..997ff7b2509b 100644
30171 +--- a/net/x25/x25_facilities.c
30172 ++++ b/net/x25/x25_facilities.c
30173 +@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
30174 +
30175 + memset(&theirs, 0, sizeof(theirs));
30176 + memcpy(new, ours, sizeof(*new));
30177 ++ memset(dte, 0, sizeof(*dte));
30178 +
30179 + len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
30180 + if (len < 0)
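The added memset in x25_negotiate_facilities() follows the standard infoleak defence: an output structure that the parser may only partially fill must be zeroed first, otherwise the untouched bytes carry stale stack contents that can later be copied to user space. In miniature (struct reply and build_reply are invented for illustration):

    #include <string.h>

    struct reply {
            unsigned char filled[4];
            unsigned char maybe_unset[12];  /* parser may never touch this */
    };

    static void build_reply(struct reply *r, const unsigned char *src)
    {
            memset(r, 0, sizeof(*r));       /* no stale stack bytes escape */
            memcpy(r->filled, src, sizeof(r->filled));
    }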
30181 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
30182 +index ad7f5b3f9b61..1c4ad477ce93 100644
30183 +--- a/net/xfrm/xfrm_input.c
30184 ++++ b/net/xfrm/xfrm_input.c
30185 +@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
30186 + XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
30187 +
30188 + skb_dst_force(skb);
30189 ++ dev_hold(skb->dev);
30190 +
30191 + nexthdr = x->type->input(x, skb);
30192 +
30193 + if (nexthdr == -EINPROGRESS)
30194 + return 0;
30195 + resume:
30196 ++ dev_put(skb->dev);
30197 ++
30198 + spin_lock(&x->lock);
30199 + if (nexthdr <= 0) {
30200 + if (nexthdr == -EBADMSG) {
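The dev_hold()/dev_put() pair added to xfrm_input() keeps skb->dev pinned while x->type->input() runs: async crypto returns -EINPROGRESS and resumes much later, and without the hold the device could be unregistered out from under the resumed packet. The general shape of pinning an object across an async completion (submit_async, resume and start_async_op are hypothetical names):

    /* kernel-context sketch; submit_async() and resume() are invented */
    static int resume(struct sk_buff *skb, int err)
    {
            dev_put(skb->dev);          /* balance the hold exactly once */
            /* ... finish processing the packet ... */
            return err;
    }

    static int start_async_op(struct sk_buff *skb)
    {
            int err;

            dev_hold(skb->dev);         /* resume may run long after */
            err = submit_async(skb);    /* hypothetical async submit */
            if (err == -EINPROGRESS)
                    return 0;           /* completion invokes resume() */
            return resume(skb, err);    /* synchronous finish */
    }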
30201 +diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
30202 +index 8d8d1ec429eb..9b96f4fb8cea 100644
30203 +--- a/samples/bpf/trace_output_kern.c
30204 ++++ b/samples/bpf/trace_output_kern.c
30205 +@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
30206 + u64 cookie;
30207 + } data;
30208 +
30209 +- memset(&data, 0, sizeof(data));
30210 + data.pid = bpf_get_current_pid_tgid();
30211 + data.cookie = 0x12345678;
30212 +
30213 +diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
30214 +index f9e47a70509c..53449a6ff6aa 100644
30215 +--- a/scripts/Makefile.extrawarn
30216 ++++ b/scripts/Makefile.extrawarn
30217 +@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
30218 + warning-1 += -Wold-style-definition
30219 + warning-1 += $(call cc-option, -Wmissing-include-dirs)
30220 + warning-1 += $(call cc-option, -Wunused-but-set-variable)
30221 ++warning-1 += $(call cc-option, -Wunused-const-variable)
30222 + warning-1 += $(call cc-disable-warning, missing-field-initializers)
30223 + warning-1 += $(call cc-disable-warning, sign-compare)
30224 +
30225 +diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
30226 +index f085f5968c52..ce8cc9c006e5 100644
30227 +--- a/scripts/coccinelle/iterators/use_after_iter.cocci
30228 ++++ b/scripts/coccinelle/iterators/use_after_iter.cocci
30229 +@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
30230 + |
30231 + sizeof(<+...c...+>)
30232 + |
30233 +-&c->member
30234 ++ &c->member
30235 + |
30236 + c = E
30237 + |
30238 +diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
30239 +index 25db8cff44a2..0a35d6dbfb80 100644
30240 +--- a/scripts/gdb/linux/modules.py
30241 ++++ b/scripts/gdb/linux/modules.py
30242 +@@ -73,10 +73,11 @@ class LxLsmod(gdb.Command):
30243 + " " if utils.get_long_type().sizeof == 8 else ""))
30244 +
30245 + for module in module_list():
30246 ++ layout = module['core_layout']
30247 + gdb.write("{address} {name:<19} {size:>8} {ref}".format(
30248 +- address=str(module['module_core']).split()[0],
30249 ++ address=str(layout['base']).split()[0],
30250 + name=module['name'].string(),
30251 +- size=str(module['core_size']),
30252 ++ size=str(layout['size']),
30253 + ref=str(module['refcnt']['counter'])))
30254 +
30255 + source_list = module['source_list']
30256 +diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
30257 +index 627750cb420d..9a0f8923f67c 100644
30258 +--- a/scripts/gdb/linux/symbols.py
30259 ++++ b/scripts/gdb/linux/symbols.py
30260 +@@ -108,7 +108,7 @@ lx-symbols command."""
30261 +
30262 + def load_module_symbols(self, module):
30263 + module_name = module['name'].string()
30264 +- module_addr = str(module['module_core']).split()[0]
30265 ++ module_addr = str(module['core_layout']['base']).split()[0]
30266 +
30267 + module_file = self._get_module_file(module_name)
30268 + if not module_file and not self.module_files_updated:
30269 +diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
30270 +index d79cba4ce3eb..ebced77deb9c 100644
30271 +--- a/scripts/kconfig/Makefile
30272 ++++ b/scripts/kconfig/Makefile
30273 +@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
30274 + defconfig: $(obj)/conf
30275 + ifeq ($(KBUILD_DEFCONFIG),)
30276 + $< $(silent) --defconfig $(Kconfig)
30277 +-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
30278 ++else
30279 ++ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
30280 + @$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
30281 + $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
30282 + else
30283 + @$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
30284 + $(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
30285 + endif
30286 ++endif
30287 +
30288 + %_defconfig: $(obj)/conf
30289 + $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
30290 +diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
30291 +index 0b7dc2fd7bac..dd243d2abd87 100644
30292 +--- a/scripts/kconfig/confdata.c
30293 ++++ b/scripts/kconfig/confdata.c
30294 +@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
30295 + if (in)
30296 + goto load;
30297 + sym_add_change_count(1);
30298 +- if (!sym_defconfig_list) {
30299 +- sym_calc_value(modules_sym);
30300 ++ if (!sym_defconfig_list)
30301 + return 1;
30302 +- }
30303 +
30304 + for_all_defaults(sym_defconfig_list, prop) {
30305 + if (expr_calc_value(prop->visible.expr) == no ||
30306 +@@ -403,7 +401,6 @@ setsym:
30307 + }
30308 + free(line);
30309 + fclose(in);
30310 +- sym_calc_value(modules_sym);
30311 + return 0;
30312 + }
30313 +
30314 +@@ -414,8 +411,12 @@ int conf_read(const char *name)
30315 +
30316 + sym_set_change_count(0);
30317 +
30318 +- if (conf_read_simple(name, S_DEF_USER))
30319 ++ if (conf_read_simple(name, S_DEF_USER)) {
30320 ++ sym_calc_value(modules_sym);
30321 + return 1;
30322 ++ }
30323 ++
30324 ++ sym_calc_value(modules_sym);
30325 +
30326 + for_all_symbols(i, sym) {
30327 + sym_calc_value(sym);
30328 +@@ -846,6 +847,7 @@ static int conf_split_config(void)
30329 +
30330 + name = conf_get_autoconfig_name();
30331 + conf_read_simple(name, S_DEF_AUTO);
30332 ++ sym_calc_value(modules_sym);
30333 +
30334 + if (chdir("include/config"))
30335 + return 1;
30336 +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
30337 +index 161dd0d67da8..a9155077feef 100644
30338 +--- a/scripts/mod/file2alias.c
30339 ++++ b/scripts/mod/file2alias.c
30340 +@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
30341 + do_usb_entry_multi(symval + i, mod);
30342 + }
30343 +
30344 ++static void do_of_entry_multi(void *symval, struct module *mod)
30345 ++{
30346 ++ char alias[500];
30347 ++ int len;
30348 ++ char *tmp;
30349 ++
30350 ++ DEF_FIELD_ADDR(symval, of_device_id, name);
30351 ++ DEF_FIELD_ADDR(symval, of_device_id, type);
30352 ++ DEF_FIELD_ADDR(symval, of_device_id, compatible);
30353 ++
30354 ++ len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
30355 ++ (*type)[0] ? *type : "*");
30356 ++
30357 ++ if (compatible[0])
30358 ++ sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
30359 ++ *compatible);
30360 ++
30361 ++ /* Replace all whitespace with underscores */
30362 ++ for (tmp = alias; tmp && *tmp; tmp++)
30363 ++ if (isspace(*tmp))
30364 ++ *tmp = '_';
30365 ++
30366 ++ buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
30367 ++ strcat(alias, "C");
30368 ++ add_wildcard(alias);
30369 ++ buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
30370 ++}
30371 ++
30372 ++static void do_of_table(void *symval, unsigned long size,
30373 ++ struct module *mod)
30374 ++{
30375 ++ unsigned int i;
30376 ++ const unsigned long id_size = SIZE_of_device_id;
30377 ++
30378 ++ device_id_check(mod->name, "of", size, id_size, symval);
30379 ++
30380 ++ /* Leave last one: it's the terminator. */
30381 ++ size -= id_size;
30382 ++
30383 ++ for (i = 0; i < size; i += id_size)
30384 ++ do_of_entry_multi(symval + i, mod);
30385 ++}
30386 ++
30387 + /* Looks like: hid:bNvNpN */
30388 + static int do_hid_entry(const char *filename,
30389 + void *symval, char *alias)
30390 +@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
30391 + }
30392 + ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
30393 +
30394 +-static int do_of_entry (const char *filename, void *symval, char *alias)
30395 +-{
30396 +- int len;
30397 +- char *tmp;
30398 +- DEF_FIELD_ADDR(symval, of_device_id, name);
30399 +- DEF_FIELD_ADDR(symval, of_device_id, type);
30400 +- DEF_FIELD_ADDR(symval, of_device_id, compatible);
30401 +-
30402 +- len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
30403 +- (*type)[0] ? *type : "*");
30404 +-
30405 +- if (compatible[0])
30406 +- sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
30407 +- *compatible);
30408 +-
30409 +- /* Replace all whitespace with underscores */
30410 +- for (tmp = alias; tmp && *tmp; tmp++)
30411 +- if (isspace (*tmp))
30412 +- *tmp = '_';
30413 +-
30414 +- return 1;
30415 +-}
30416 +-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
30417 +-
30418 + static int do_vio_entry(const char *filename, void *symval,
30419 + char *alias)
30420 + {
30421 +@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
30422 + /* First handle the "special" cases */
30423 + if (sym_is(name, namelen, "usb"))
30424 + do_usb_table(symval, sym->st_size, mod);
30425 ++ if (sym_is(name, namelen, "of"))
30426 ++ do_of_table(symval, sym->st_size, mod);
30427 + else if (sym_is(name, namelen, "pnp"))
30428 + do_pnp_device_entry(symval, sym->st_size, mod);
30429 + else if (sym_is(name, namelen, "pnp_card"))
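do_of_table() above emits two MODULE_ALIAS() stubs per entry instead of the single wildcarded one the old table-driven do_of_entry() produced: the exact alias and a second one with a trailing "C*", so the module keeps matching whether the matching compatible string is the last in the device's list or is followed by further entries. For a hypothetical id { .compatible = "acme,uart" } with empty name and type, the generated .mod.c lines would read approximately:

    /* emitted into the module's .mod.c (hypothetical device id) */
    MODULE_ALIAS("of:N*T*Cacme,uart");
    MODULE_ALIAS("of:N*T*Cacme,uartC*");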
30430 +diff --git a/scripts/package/mkspec b/scripts/package/mkspec
30431 +index 71004daefe31..fe44d68e9344 100755
30432 +--- a/scripts/package/mkspec
30433 ++++ b/scripts/package/mkspec
30434 +@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
30435 + echo ""
30436 + echo "%post"
30437 + echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
30438 +-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
30439 +-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
30440 ++echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
30441 ++echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
30442 + echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
30443 +-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
30444 +-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
30445 ++echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
30446 ++echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
30447 + echo "fi"
30448 + echo ""
30449 + echo "%files"
30450 +diff --git a/security/keys/trusted.c b/security/keys/trusted.c
30451 +index 0dcab20cdacd..90d61751ff12 100644
30452 +--- a/security/keys/trusted.c
30453 ++++ b/security/keys/trusted.c
30454 +@@ -744,6 +744,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
30455 + unsigned long handle;
30456 + unsigned long lock;
30457 + unsigned long token_mask = 0;
30458 ++ unsigned int digest_len;
30459 + int i;
30460 + int tpm2;
30461 +
30462 +@@ -752,7 +753,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
30463 + return tpm2;
30464 +
30465 + opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
30466 +- opt->digest_len = hash_digest_size[opt->hash];
30467 +
30468 + while ((p = strsep(&c, " \t"))) {
30469 + if (*p == '\0' || *p == ' ' || *p == '\t')
30470 +@@ -812,8 +812,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
30471 + for (i = 0; i < HASH_ALGO__LAST; i++) {
30472 + if (!strcmp(args[0].from, hash_algo_name[i])) {
30473 + opt->hash = i;
30474 +- opt->digest_len =
30475 +- hash_digest_size[opt->hash];
30476 + break;
30477 + }
30478 + }
30479 +@@ -825,13 +823,14 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
30480 + }
30481 + break;
30482 + case Opt_policydigest:
30483 +- if (!tpm2 ||
30484 +- strlen(args[0].from) != (2 * opt->digest_len))
30485 ++ digest_len = hash_digest_size[opt->hash];
30486 ++ if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
30487 + return -EINVAL;
30488 + res = hex2bin(opt->policydigest, args[0].from,
30489 +- opt->digest_len);
30490 ++ digest_len);
30491 + if (res < 0)
30492 + return -EINVAL;
30493 ++ opt->policydigest_len = digest_len;
30494 + break;
30495 + case Opt_policyhandle:
30496 + if (!tpm2)
30497 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
30498 +index 6b5a811e01a5..3a9b66c6e09c 100644
30499 +--- a/sound/core/pcm_lib.c
30500 ++++ b/sound/core/pcm_lib.c
30501 +@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
30502 + char name[16];
30503 + snd_pcm_debug_name(substream, name, sizeof(name));
30504 + pcm_err(substream->pcm,
30505 +- "BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
30506 ++ "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
30507 + name, pos, runtime->buffer_size,
30508 + runtime->period_size);
30509 + }
30510 +diff --git a/sound/core/timer.c b/sound/core/timer.c
30511 +index dca817fc7894..e5e7e4368996 100644
30512 +--- a/sound/core/timer.c
30513 ++++ b/sound/core/timer.c
30514 +@@ -1041,8 +1041,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
30515 + njiff += timer->sticks - priv->correction;
30516 + priv->correction = 0;
30517 + }
30518 +- priv->last_expires = priv->tlist.expires = njiff;
30519 +- add_timer(&priv->tlist);
30520 ++ priv->last_expires = njiff;
30521 ++ mod_timer(&priv->tlist, njiff);
30522 + return 0;
30523 + }
30524 +
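
The snd_timer_s_start() change swaps add_timer() for mod_timer(): add_timer() may only be called on an inactive timer, while mod_timer() re-arms a pending one safely. A userspace analogue of that contract (the real kernel API takes a jiffies expiry; this is only a sketch):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct timer { bool pending; long expires; };

    static void add_timer(struct timer *t, long exp)
    {
        assert(!t->pending);     /* arming an already-pending timer is a bug */
        t->pending = true;
        t->expires = exp;
    }

    static void mod_timer(struct timer *t, long exp)
    {
        t->pending = true;       /* arms if idle, re-arms if pending */
        t->expires = exp;
    }

    int main(void)
    {
        struct timer t = { false, 0 };

        add_timer(&t, 100);      /* first start: fine */
        mod_timer(&t, 200);      /* restart while pending: fine */
        /* add_timer(&t, 300) here would trip the assertion */
        printf("expires at %ld\n", t.expires);
        return 0;
    }
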
30525 +diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
30526 +index e361024eabb6..d1a4d6973330 100644
30527 +--- a/sound/hda/hdac_device.c
30528 ++++ b/sound/hda/hdac_device.c
30529 +@@ -611,6 +611,22 @@ int snd_hdac_power_up_pm(struct hdac_device *codec)
30530 + }
30531 + EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
30532 +
30533 ++/* like snd_hdac_power_up_pm(), but only increment the pm count when
30534 ++ * already powered up. Returns -1 if not powered up, 1 if incremented
30535 ++ * or 0 if unchanged. Only used in hdac_regmap.c
30536 ++ */
30537 ++int snd_hdac_keep_power_up(struct hdac_device *codec)
30538 ++{
30539 ++ if (!atomic_inc_not_zero(&codec->in_pm)) {
30540 ++ int ret = pm_runtime_get_if_in_use(&codec->dev);
30541 ++ if (!ret)
30542 ++ return -1;
30543 ++ if (ret < 0)
30544 ++ return 0;
30545 ++ }
30546 ++ return 1;
30547 ++}
30548 ++
30549 + /**
30550 + * snd_hdac_power_down_pm - power down the codec
30551 + * @codec: the codec object
30552 +diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
30553 +index f6854dbd7d8d..69ead7150a5c 100644
30554 +--- a/sound/hda/hdac_i915.c
30555 ++++ b/sound/hda/hdac_i915.c
30556 +@@ -20,6 +20,7 @@
30557 + #include <sound/core.h>
30558 + #include <sound/hdaudio.h>
30559 + #include <sound/hda_i915.h>
30560 ++#include <sound/hda_register.h>
30561 +
30562 + static struct i915_audio_component *hdac_acomp;
30563 +
30564 +@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
30565 + }
30566 + EXPORT_SYMBOL_GPL(snd_hdac_display_power);
30567 +
30568 ++#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
30569 ++ ((pci)->device == 0x0c0c) || \
30570 ++ ((pci)->device == 0x0d0c) || \
30571 ++ ((pci)->device == 0x160c))
30572 ++
30573 + /**
30574 +- * snd_hdac_get_display_clk - Get CDCLK in kHz
30575 ++ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
30576 + * @bus: HDA core bus
30577 + *
30578 +- * This function is supposed to be used only by a HD-audio controller
30579 +- * driver that needs the interaction with i915 graphics.
30580 ++ * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
30581 ++ * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
30582 ++ * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
30583 ++ * BCLK = CDCLK * M / N
30584 ++ * The values will be lost when the display power well is disabled and need to
30585 ++ * be restored to avoid abnormal playback speed.
30586 + *
30587 +- * This function queries CDCLK value in kHz from the graphics driver and
30588 +- * returns the value. A negative code is returned in error.
30589 ++ * Call this function at initializing and changing power well, as well as
30590 ++ * at ELD notifier for the hotplug.
30591 + */
30592 +-int snd_hdac_get_display_clk(struct hdac_bus *bus)
30593 ++void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
30594 + {
30595 + struct i915_audio_component *acomp = bus->audio_component;
30596 ++ struct pci_dev *pci = to_pci_dev(bus->dev);
30597 ++ int cdclk_freq;
30598 ++ unsigned int bclk_m, bclk_n;
30599 ++
30600 ++ if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
30601 ++ return; /* only for i915 binding */
30602 ++ if (!CONTROLLER_IN_GPU(pci))
30603 ++ return; /* only HSW/BDW */
30604 ++
30605 ++ cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
30606 ++ switch (cdclk_freq) {
30607 ++ case 337500:
30608 ++ bclk_m = 16;
30609 ++ bclk_n = 225;
30610 ++ break;
30611 ++
30612 ++ case 450000:
30613 ++ default: /* default CDCLK 450MHz */
30614 ++ bclk_m = 4;
30615 ++ bclk_n = 75;
30616 ++ break;
30617 ++
30618 ++ case 540000:
30619 ++ bclk_m = 4;
30620 ++ bclk_n = 90;
30621 ++ break;
30622 ++
30623 ++ case 675000:
30624 ++ bclk_m = 8;
30625 ++ bclk_n = 225;
30626 ++ break;
30627 ++ }
30628 +
30629 +- if (!acomp || !acomp->ops)
30630 +- return -ENODEV;
30631 +-
30632 +- return acomp->ops->get_cdclk_freq(acomp->dev);
30633 ++ snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
30634 ++ snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
30635 + }
30636 +-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
30637 ++EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
30638 +
30639 + /* There is a fixed mapping between audio pin node and display port
30640 + * on current Intel platforms:
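
The comment in the relocated snd_hdac_i915_set_bclk() gives BCLK = CDCLK * M / N with a 24MHz target, so the M/N table can be checked arithmetically. A small, runnable verification of the pairs from the switch above (frequencies in kHz, as in the code):

    #include <stdio.h>

    int main(void)
    {
        static const struct { long cdclk, m, n; } tab[] = {
            { 337500, 16, 225 },
            { 450000,  4,  75 },     /* also the default case */
            { 540000,  4,  90 },
            { 675000,  8, 225 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
            printf("CDCLK %6ld kHz -> BCLK %ld kHz\n",
                   tab[i].cdclk, tab[i].cdclk * tab[i].m / tab[i].n);
        return 0;    /* every row prints 24000, the 24MHz link clock */
    }
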
30641 +diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
30642 +index eb8f7c30cb09..bdbcd6b75ff6 100644
30643 +--- a/sound/hda/hdac_regmap.c
30644 ++++ b/sound/hda/hdac_regmap.c
30645 +@@ -21,13 +21,16 @@
30646 + #include <sound/hdaudio.h>
30647 + #include <sound/hda_regmap.h>
30648 +
30649 +-#ifdef CONFIG_PM
30650 +-#define codec_is_running(codec) \
30651 +- (atomic_read(&(codec)->in_pm) || \
30652 +- !pm_runtime_suspended(&(codec)->dev))
30653 +-#else
30654 +-#define codec_is_running(codec) true
30655 +-#endif
30656 ++static int codec_pm_lock(struct hdac_device *codec)
30657 ++{
30658 ++ return snd_hdac_keep_power_up(codec);
30659 ++}
30660 ++
30661 ++static void codec_pm_unlock(struct hdac_device *codec, int lock)
30662 ++{
30663 ++ if (lock == 1)
30664 ++ snd_hdac_power_down_pm(codec);
30665 ++}
30666 +
30667 + #define get_verb(reg) (((reg) >> 8) & 0xfff)
30668 +
30669 +@@ -238,20 +241,28 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
30670 + struct hdac_device *codec = context;
30671 + int verb = get_verb(reg);
30672 + int err;
30673 ++ int pm_lock = 0;
30674 +
30675 +- if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
30676 +- return -EAGAIN;
30677 ++ if (verb != AC_VERB_GET_POWER_STATE) {
30678 ++ pm_lock = codec_pm_lock(codec);
30679 ++ if (pm_lock < 0)
30680 ++ return -EAGAIN;
30681 ++ }
30682 + reg |= (codec->addr << 28);
30683 +- if (is_stereo_amp_verb(reg))
30684 +- return hda_reg_read_stereo_amp(codec, reg, val);
30685 +- if (verb == AC_VERB_GET_PROC_COEF)
30686 +- return hda_reg_read_coef(codec, reg, val);
30687 ++ if (is_stereo_amp_verb(reg)) {
30688 ++ err = hda_reg_read_stereo_amp(codec, reg, val);
30689 ++ goto out;
30690 ++ }
30691 ++ if (verb == AC_VERB_GET_PROC_COEF) {
30692 ++ err = hda_reg_read_coef(codec, reg, val);
30693 ++ goto out;
30694 ++ }
30695 + if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
30696 + reg &= ~AC_AMP_FAKE_MUTE;
30697 +
30698 + err = snd_hdac_exec_verb(codec, reg, 0, val);
30699 + if (err < 0)
30700 +- return err;
30701 ++ goto out;
30702 + /* special handling for asymmetric reads */
30703 + if (verb == AC_VERB_GET_POWER_STATE) {
30704 + if (*val & AC_PWRST_ERROR)
30705 +@@ -259,7 +270,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
30706 + else /* take only the actual state */
30707 + *val = (*val >> 4) & 0x0f;
30708 + }
30709 +- return 0;
30710 ++ out:
30711 ++ codec_pm_unlock(codec, pm_lock);
30712 ++ return err;
30713 + }
30714 +
30715 + static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
30716 +@@ -267,6 +280,7 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
30717 + struct hdac_device *codec = context;
30718 + unsigned int verb;
30719 + int i, bytes, err;
30720 ++ int pm_lock = 0;
30721 +
30722 + if (codec->caps_overwriting)
30723 + return 0;
30724 +@@ -275,14 +289,21 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
30725 + reg |= (codec->addr << 28);
30726 + verb = get_verb(reg);
30727 +
30728 +- if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
30729 +- return codec->lazy_cache ? 0 : -EAGAIN;
30730 ++ if (verb != AC_VERB_SET_POWER_STATE) {
30731 ++ pm_lock = codec_pm_lock(codec);
30732 ++ if (pm_lock < 0)
30733 ++ return codec->lazy_cache ? 0 : -EAGAIN;
30734 ++ }
30735 +
30736 +- if (is_stereo_amp_verb(reg))
30737 +- return hda_reg_write_stereo_amp(codec, reg, val);
30738 ++ if (is_stereo_amp_verb(reg)) {
30739 ++ err = hda_reg_write_stereo_amp(codec, reg, val);
30740 ++ goto out;
30741 ++ }
30742 +
30743 +- if (verb == AC_VERB_SET_PROC_COEF)
30744 +- return hda_reg_write_coef(codec, reg, val);
30745 ++ if (verb == AC_VERB_SET_PROC_COEF) {
30746 ++ err = hda_reg_write_coef(codec, reg, val);
30747 ++ goto out;
30748 ++ }
30749 +
30750 + switch (verb & 0xf00) {
30751 + case AC_VERB_SET_AMP_GAIN_MUTE:
30752 +@@ -319,10 +340,12 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
30753 + reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
30754 + err = snd_hdac_exec_verb(codec, reg, 0, NULL);
30755 + if (err < 0)
30756 +- return err;
30757 ++ goto out;
30758 + }
30759 +
30760 +- return 0;
30761 ++ out:
30762 ++ codec_pm_unlock(codec, pm_lock);
30763 ++ return err;
30764 + }
30765 +
30766 + static const struct regmap_config hda_regmap_cfg = {
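
The codec_pm_lock()/codec_pm_unlock() pair built on snd_hdac_keep_power_up() has a three-way result: -1 means the codec is not powered (the caller bails out with -EAGAIN), 0 means the access is allowed but no reference was taken, and 1 means a reference was taken and must be dropped in the unlock. A userspace analogue of that asymmetric pairing (a plain counter stands in for the runtime-PM state):

    #include <stdio.h>

    static int usage;                /* stand-in for runtime-PM usage count */

    static int pm_lock(void)
    {
        if (usage == 0)
            return -1;               /* suspended: caller must bail out */
        usage++;                     /* powered: take a reference */
        return 1;                    /* (0 would mean: counted elsewhere) */
    }

    static void pm_unlock(int lock)
    {
        if (lock == 1)               /* only drop what pm_lock() took */
            usage--;
    }

    int main(void)
    {
        int lock;

        usage = 1;                   /* device is up */
        lock = pm_lock();
        if (lock < 0)
            return 1;
        printf("register access with usage=%d\n", usage);
        pm_unlock(lock);
        printf("after unlock usage=%d\n", usage);
        return 0;
    }
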
30767 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
30768 +index 7ca5b89f088a..dfaf1a93fb8a 100644
30769 +--- a/sound/pci/hda/hda_generic.c
30770 ++++ b/sound/pci/hda/hda_generic.c
30771 +@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
30772 + bool allow_powerdown)
30773 + {
30774 + hda_nid_t nid, changed = 0;
30775 +- int i, state;
30776 ++ int i, state, power;
30777 +
30778 + for (i = 0; i < path->depth; i++) {
30779 + nid = path->path[i];
30780 +@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
30781 + state = AC_PWRST_D0;
30782 + else
30783 + state = AC_PWRST_D3;
30784 +- if (!snd_hda_check_power_state(codec, nid, state)) {
30785 ++ power = snd_hda_codec_read(codec, nid, 0,
30786 ++ AC_VERB_GET_POWER_STATE, 0);
30787 ++ if (power != (state | (state << 4))) {
30788 + snd_hda_codec_write(codec, nid, 0,
30789 + AC_VERB_SET_POWER_STATE, state);
30790 + changed = nid;
30791 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
30792 +index e5240cb3749f..c0b772bb49af 100644
30793 +--- a/sound/pci/hda/hda_intel.c
30794 ++++ b/sound/pci/hda/hda_intel.c
30795 +@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
30796 + #define azx_del_card_list(chip) /* NOP */
30797 + #endif /* CONFIG_PM */
30798 +
30799 +-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
30800 +- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
30801 +- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
30802 +- * BCLK = CDCLK * M / N
30803 +- * The values will be lost when the display power well is disabled and need to
30804 +- * be restored to avoid abnormal playback speed.
30805 +- */
30806 +-static void haswell_set_bclk(struct hda_intel *hda)
30807 +-{
30808 +- struct azx *chip = &hda->chip;
30809 +- int cdclk_freq;
30810 +- unsigned int bclk_m, bclk_n;
30811 +-
30812 +- if (!hda->need_i915_power)
30813 +- return;
30814 +-
30815 +- cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
30816 +- switch (cdclk_freq) {
30817 +- case 337500:
30818 +- bclk_m = 16;
30819 +- bclk_n = 225;
30820 +- break;
30821 +-
30822 +- case 450000:
30823 +- default: /* default CDCLK 450MHz */
30824 +- bclk_m = 4;
30825 +- bclk_n = 75;
30826 +- break;
30827 +-
30828 +- case 540000:
30829 +- bclk_m = 4;
30830 +- bclk_n = 90;
30831 +- break;
30832 +-
30833 +- case 675000:
30834 +- bclk_m = 8;
30835 +- bclk_n = 225;
30836 +- break;
30837 +- }
30838 +-
30839 +- azx_writew(chip, HSW_EM4, bclk_m);
30840 +- azx_writew(chip, HSW_EM5, bclk_n);
30841 +-}
30842 +-
30843 + #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
30844 + /*
30845 + * power management
30846 +@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
30847 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
30848 + && hda->need_i915_power) {
30849 + snd_hdac_display_power(azx_bus(chip), true);
30850 +- haswell_set_bclk(hda);
30851 ++ snd_hdac_i915_set_bclk(azx_bus(chip));
30852 + }
30853 + if (chip->msi)
30854 + if (pci_enable_msi(pci) < 0)
30855 +@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
30856 + bus = azx_bus(chip);
30857 + if (hda->need_i915_power) {
30858 + snd_hdac_display_power(bus, true);
30859 +- haswell_set_bclk(hda);
30860 ++ snd_hdac_i915_set_bclk(bus);
30861 + } else {
30862 + /* toggle codec wakeup bit for STATESTS read */
30863 + snd_hdac_set_codec_wakeup(bus, true);
30864 +@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
30865 + /* initialize chip */
30866 + azx_init_pci(chip);
30867 +
30868 +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
30869 +- struct hda_intel *hda;
30870 +-
30871 +- hda = container_of(chip, struct hda_intel, chip);
30872 +- haswell_set_bclk(hda);
30873 +- }
30874 ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
30875 ++ snd_hdac_i915_set_bclk(bus);
30876 +
30877 + hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
30878 +
30879 +@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
30880 + /* Broxton-P(Apollolake) */
30881 + { PCI_DEVICE(0x8086, 0x5a98),
30882 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
30883 ++ /* Broxton-T */
30884 ++ { PCI_DEVICE(0x8086, 0x1a98),
30885 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
30886 + /* Haswell */
30887 + { PCI_DEVICE(0x8086, 0x0a0c),
30888 + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
30889 +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
30890 +index 64e0d1d81ca5..9739fce9e032 100644
30891 +--- a/sound/pci/hda/hda_sysfs.c
30892 ++++ b/sound/pci/hda/hda_sysfs.c
30893 +@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
30894 + err = snd_hda_codec_configure(codec);
30895 + if (err < 0)
30896 + goto error;
30897 +- /* rebuild PCMs */
30898 +- err = snd_hda_codec_build_pcms(codec);
30899 +- if (err < 0)
30900 +- goto error;
30901 +- /* rebuild mixers */
30902 +- err = snd_hda_codec_build_controls(codec);
30903 +- if (err < 0)
30904 +- goto error;
30905 + err = snd_card_register(codec->card);
30906 + error:
30907 + snd_hda_power_down(codec);
30908 +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
30909 +index c1c855a6c0af..80bbadc83721 100644
30910 +--- a/sound/pci/hda/patch_cirrus.c
30911 ++++ b/sound/pci/hda/patch_cirrus.c
30912 +@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
30913 + snd_hda_gen_update_outputs(codec);
30914 +
30915 + if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
30916 +- spec->gpio_data = spec->gen.hp_jack_present ?
30917 +- spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
30918 ++ if (spec->gen.automute_speaker)
30919 ++ spec->gpio_data = spec->gen.hp_jack_present ?
30920 ++ spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
30921 ++ else
30922 ++ spec->gpio_data =
30923 ++ spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
30924 + snd_hda_codec_write(codec, 0x01, 0,
30925 + AC_VERB_SET_GPIO_DATA, spec->gpio_data);
30926 + }
30927 +@@ -357,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
30928 + {
30929 + struct cs_spec *spec = codec->spec;
30930 + int err;
30931 ++ int i;
30932 +
30933 + err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
30934 + if (err < 0)
30935 +@@ -366,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
30936 + if (err < 0)
30937 + return err;
30938 +
30939 ++ /* keep the ADCs powered up when it's dynamically switchable */
30940 ++ if (spec->gen.dyn_adc_switch) {
30941 ++ unsigned int done = 0;
30942 ++ for (i = 0; i < spec->gen.input_mux.num_items; i++) {
30943 ++ int idx = spec->gen.dyn_adc_idx[i];
30944 ++ if (done & (1 << idx))
30945 ++ continue;
30946 ++ snd_hda_gen_fix_pin_power(codec,
30947 ++ spec->gen.adc_nids[idx]);
30948 ++ done |= 1 << idx;
30949 ++ }
30950 ++ }
30951 ++
30952 + return 0;
30953 + }
30954 +
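
The dyn_adc loop added to cs_parse_auto_config() must power up each ADC pin only once even when several input-mux items map to the same ADC index, so it tracks handled indexes in a bitmask. The same dedup pattern as a runnable sketch (the index array is made up):

    #include <stdio.h>

    int main(void)
    {
        int dyn_adc_idx[] = { 0, 1, 0, 2, 1 };   /* mux items -> ADC index */
        unsigned int done = 0;
        int i;

        for (i = 0; i < 5; i++) {
            int idx = dyn_adc_idx[i];
            if (done & (1u << idx))
                continue;                        /* this ADC already handled */
            printf("fix pin power for ADC %d\n", idx);
            done |= 1u << idx;
        }
        return 0;
    }
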
30955 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
30956 +index 6122b8ca872f..56fefbd85782 100644
30957 +--- a/sound/pci/hda/patch_conexant.c
30958 ++++ b/sound/pci/hda/patch_conexant.c
30959 +@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
30960 + {
30961 + struct conexant_spec *spec = codec->spec;
30962 +
30963 +- if (codec->core.vendor_id != 0x14f150f2)
30964 ++ switch (codec->core.vendor_id) {
30965 ++ case 0x14f150f2: /* CX20722 */
30966 ++ case 0x14f150f4: /* CX20724 */
30967 ++ break;
30968 ++ default:
30969 + return;
30970 ++ }
30971 +
30972 + /* Turn the CX20722 codec into D3 to avoid spurious noises
30973 + from the internal speaker during (and after) reboot */
30974 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
30975 +index bcbc4ee10130..c98e404afbe0 100644
30976 +--- a/sound/pci/hda/patch_hdmi.c
30977 ++++ b/sound/pci/hda/patch_hdmi.c
30978 +@@ -152,13 +152,17 @@ struct hdmi_spec {
30979 + struct hda_pcm_stream pcm_playback;
30980 +
30981 + /* i915/powerwell (Haswell+/Valleyview+) specific */
30982 ++ bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
30983 + struct i915_audio_component_audio_ops i915_audio_ops;
30984 + bool i915_bound; /* was i915 bound in this driver? */
30985 + };
30986 +
30987 + #ifdef CONFIG_SND_HDA_I915
30988 +-#define codec_has_acomp(codec) \
30989 +- ((codec)->bus->core.audio_component != NULL)
30990 ++static inline bool codec_has_acomp(struct hda_codec *codec)
30991 ++{
30992 ++ struct hdmi_spec *spec = codec->spec;
30993 ++ return spec->use_acomp_notifier;
30994 ++}
30995 + #else
30996 + #define codec_has_acomp(codec) false
30997 + #endif
30998 +@@ -1562,6 +1566,7 @@ static void update_eld(struct hda_codec *codec,
30999 + eld->eld_size) != 0)
31000 + eld_changed = true;
31001 +
31002 ++ pin_eld->monitor_present = eld->monitor_present;
31003 + pin_eld->eld_valid = eld->eld_valid;
31004 + pin_eld->eld_size = eld->eld_size;
31005 + if (eld->eld_valid)
31006 +@@ -1618,6 +1623,8 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
31007 +
31008 + mutex_lock(&per_pin->lock);
31009 + pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
31010 ++ eld->monitor_present = pin_eld->monitor_present;
31011 ++
31012 + if (pin_eld->monitor_present)
31013 + eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
31014 + else
31015 +@@ -1665,11 +1672,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
31016 + int size;
31017 +
31018 + mutex_lock(&per_pin->lock);
31019 ++ eld->monitor_present = false;
31020 + size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
31021 + &eld->monitor_present, eld->eld_buffer,
31022 + ELD_MAX_SIZE);
31023 +- if (size < 0)
31024 +- goto unlock;
31025 + if (size > 0) {
31026 + size = min(size, ELD_MAX_SIZE);
31027 + if (snd_hdmi_parse_eld(codec, &eld->info,
31028 +@@ -1873,7 +1879,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
31029 +
31030 + /* Call sync_audio_rate to set the N/CTS/M manually if necessary */
31031 + /* Todo: add DP1.2 MST audio support later */
31032 +- snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
31033 ++ if (codec_has_acomp(codec))
31034 ++ snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
31035 +
31036 + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
31037 + mutex_lock(&per_pin->lock);
31038 +@@ -2432,6 +2439,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
31039 + struct hda_codec *codec = audio_ptr;
31040 + int pin_nid = port + 0x04;
31041 +
31042 ++ /* we assume only from port-B to port-D */
31043 ++ if (port < 1 || port > 3)
31044 ++ return;
31045 ++
31046 + /* skip notification during system suspend (but not in runtime PM);
31047 + * the state will be updated at resume
31048 + */
31049 +@@ -2441,6 +2452,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
31050 + if (atomic_read(&(codec)->core.in_pm))
31051 + return;
31052 +
31053 ++ snd_hdac_i915_set_bclk(&codec->bus->core);
31054 + check_presence_and_report(codec, pin_nid);
31055 + }
31056 +
31057 +@@ -2456,11 +2468,24 @@ static int patch_generic_hdmi(struct hda_codec *codec)
31058 + codec->spec = spec;
31059 + hdmi_array_init(spec, 4);
31060 +
31061 +- /* Try to bind with i915 for any Intel codecs (if not done yet) */
31062 +- if (!codec_has_acomp(codec) &&
31063 +- (codec->core.vendor_id >> 16) == 0x8086)
31064 +- if (!snd_hdac_i915_init(&codec->bus->core))
31065 +- spec->i915_bound = true;
31066 ++#ifdef CONFIG_SND_HDA_I915
31067 ++ /* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
31068 ++ if ((codec->core.vendor_id >> 16) == 0x8086 &&
31069 ++ is_haswell_plus(codec)) {
31070 ++#if 0
31071 ++ /* on-demand binding leads to an unbalanced refcount when
31072 ++ * both i915 and hda drivers are probed concurrently;
31073 ++ * disabled temporarily for now
31074 ++ */
31075 ++ if (!codec->bus->core.audio_component)
31076 ++ if (!snd_hdac_i915_init(&codec->bus->core))
31077 ++ spec->i915_bound = true;
31078 ++#endif
31079 ++ /* use i915 audio component notifier for hotplug */
31080 ++ if (codec->bus->core.audio_component)
31081 ++ spec->use_acomp_notifier = true;
31082 ++ }
31083 ++#endif
31084 +
31085 + if (is_haswell_plus(codec)) {
31086 + intel_haswell_enable_all_pins(codec, true);
31087 +@@ -3659,6 +3684,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
31088 + HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
31089 + HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
31090 + HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
31091 ++HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
31092 + HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
31093 + HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
31094 + HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
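
intel_pin_eld_notify() now rejects out-of-range port numbers before converting them: per the hunk above, the audio pin NID is port + 0x04 and only ports B..D (1..3) are assumed wired. A tiny sketch of the mapping with its new range check:

    #include <stdio.h>

    static int port_to_pin_nid(int port)
    {
        if (port < 1 || port > 3)    /* only port B..D are assumed wired */
            return -1;
        return port + 0x04;
    }

    int main(void)
    {
        int port;

        for (port = 0; port <= 4; port++)
            printf("port %d -> pin %d\n", port, port_to_pin_nid(port));
        return 0;
    }
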
31095 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
31096 +index 93d2156b6241..d53c25e7a1c1 100644
31097 +--- a/sound/pci/hda/patch_realtek.c
31098 ++++ b/sound/pci/hda/patch_realtek.c
31099 +@@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
31100 + case 0x10ec0283:
31101 + case 0x10ec0286:
31102 + case 0x10ec0288:
31103 ++ case 0x10ec0295:
31104 + case 0x10ec0298:
31105 + alc_update_coef_idx(codec, 0x10, 1<<9, 0);
31106 + break;
31107 +@@ -342,6 +343,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
31108 + case 0x10ec0293:
31109 + alc_update_coef_idx(codec, 0xa, 1<<13, 0);
31110 + break;
31111 ++ case 0x10ec0234:
31112 ++ case 0x10ec0274:
31113 ++ case 0x10ec0294:
31114 ++ alc_update_coef_idx(codec, 0x10, 1<<15, 0);
31115 ++ break;
31116 + case 0x10ec0662:
31117 + if ((coef & 0x00f0) == 0x0030)
31118 + alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
31119 +@@ -902,6 +908,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
31120 + { 0x10ec0298, 0x1028, 0, "ALC3266" },
31121 + { 0x10ec0256, 0x1028, 0, "ALC3246" },
31122 + { 0x10ec0225, 0x1028, 0, "ALC3253" },
31123 ++ { 0x10ec0295, 0x1028, 0, "ALC3254" },
31124 + { 0x10ec0670, 0x1025, 0, "ALC669X" },
31125 + { 0x10ec0676, 0x1025, 0, "ALC679X" },
31126 + { 0x10ec0282, 0x1043, 0, "ALC3229" },
31127 +@@ -2647,6 +2654,7 @@ enum {
31128 + ALC269_TYPE_ALC255,
31129 + ALC269_TYPE_ALC256,
31130 + ALC269_TYPE_ALC225,
31131 ++ ALC269_TYPE_ALC294,
31132 + };
31133 +
31134 + /*
31135 +@@ -2677,6 +2685,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
31136 + case ALC269_TYPE_ALC255:
31137 + case ALC269_TYPE_ALC256:
31138 + case ALC269_TYPE_ALC225:
31139 ++ case ALC269_TYPE_ALC294:
31140 + ssids = alc269_ssids;
31141 + break;
31142 + default:
31143 +@@ -3690,6 +3699,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
31144 + alc_process_coef_fw(codec, coef0668);
31145 + break;
31146 + case 0x10ec0225:
31147 ++ case 0x10ec0295:
31148 + alc_process_coef_fw(codec, coef0225);
31149 + break;
31150 + }
31151 +@@ -3790,6 +3800,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
31152 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
31153 + break;
31154 + case 0x10ec0225:
31155 ++ case 0x10ec0295:
31156 + alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
31157 + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
31158 + alc_process_coef_fw(codec, coef0225);
31159 +@@ -3847,6 +3858,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
31160 +
31161 + switch (codec->core.vendor_id) {
31162 + case 0x10ec0225:
31163 ++ case 0x10ec0295:
31164 + alc_process_coef_fw(codec, coef0225);
31165 + break;
31166 + case 0x10ec0255:
31167 +@@ -3950,6 +3962,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
31168 + alc_process_coef_fw(codec, coef0688);
31169 + break;
31170 + case 0x10ec0225:
31171 ++ case 0x10ec0295:
31172 + alc_process_coef_fw(codec, coef0225);
31173 + break;
31174 + }
31175 +@@ -4031,6 +4044,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
31176 + alc_process_coef_fw(codec, coef0688);
31177 + break;
31178 + case 0x10ec0225:
31179 ++ case 0x10ec0295:
31180 + alc_process_coef_fw(codec, coef0225);
31181 + break;
31182 + }
31183 +@@ -4114,6 +4128,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
31184 + is_ctia = (val & 0x1c02) == 0x1c02;
31185 + break;
31186 + case 0x10ec0225:
31187 ++ case 0x10ec0295:
31188 + alc_process_coef_fw(codec, coef0225);
31189 + msleep(800);
31190 + val = alc_read_coef_idx(codec, 0x46);
31191 +@@ -4759,6 +4774,8 @@ enum {
31192 + ALC255_FIXUP_DELL_SPK_NOISE,
31193 + ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
31194 + ALC280_FIXUP_HP_HEADSET_MIC,
31195 ++ ALC221_FIXUP_HP_FRONT_MIC,
31196 ++ ALC292_FIXUP_TPT460,
31197 + };
31198 +
31199 + static const struct hda_fixup alc269_fixups[] = {
31200 +@@ -5401,6 +5418,19 @@ static const struct hda_fixup alc269_fixups[] = {
31201 + .chained = true,
31202 + .chain_id = ALC269_FIXUP_HEADSET_MIC,
31203 + },
31204 ++ [ALC221_FIXUP_HP_FRONT_MIC] = {
31205 ++ .type = HDA_FIXUP_PINS,
31206 ++ .v.pins = (const struct hda_pintbl[]) {
31207 ++ { 0x19, 0x02a19020 }, /* Front Mic */
31208 ++ { }
31209 ++ },
31210 ++ },
31211 ++ [ALC292_FIXUP_TPT460] = {
31212 ++ .type = HDA_FIXUP_FUNC,
31213 ++ .v.func = alc_fixup_tpt440_dock,
31214 ++ .chained = true,
31215 ++ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
31216 ++ },
31217 + };
31218 +
31219 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
31220 +@@ -5434,6 +5464,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
31221 + SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
31222 + SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
31223 + SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
31224 ++ SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
31225 + SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
31226 + SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
31227 + SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
31228 +@@ -5443,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
31229 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
31230 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
31231 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
31232 +- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
31233 ++ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
31234 + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
31235 ++ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
31236 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
31237 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
31238 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
31239 +@@ -5506,6 +5538,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
31240 + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
31241 + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
31242 + SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
31243 ++ SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
31244 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
31245 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
31246 + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
31247 +@@ -5554,8 +5587,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
31248 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
31249 + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
31250 + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
31251 +- SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
31252 ++ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
31253 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
31254 ++ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
31255 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
31256 + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
31257 + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
31258 +@@ -5566,6 +5600,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
31259 + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
31260 + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
31261 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
31262 ++ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
31263 + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
31264 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
31265 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
31266 +@@ -5648,6 +5683,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
31267 + {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
31268 + {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
31269 + {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
31270 ++ {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
31271 + {}
31272 + };
31273 + #define ALC225_STANDARD_PINS \
31274 +@@ -5684,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
31275 + {0x14, 0x90170110},
31276 + {0x21, 0x02211020}),
31277 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
31278 ++ {0x14, 0x90170130},
31279 ++ {0x21, 0x02211040}),
31280 ++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
31281 + {0x12, 0x90a60140},
31282 + {0x14, 0x90170110},
31283 + {0x21, 0x02211020}),
31284 +@@ -6006,8 +6045,14 @@ static int patch_alc269(struct hda_codec *codec)
31285 + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
31286 + break;
31287 + case 0x10ec0225:
31288 ++ case 0x10ec0295:
31289 + spec->codec_variant = ALC269_TYPE_ALC225;
31290 + break;
31291 ++ case 0x10ec0234:
31292 ++ case 0x10ec0274:
31293 ++ case 0x10ec0294:
31294 ++ spec->codec_variant = ALC269_TYPE_ALC294;
31295 ++ break;
31296 + }
31297 +
31298 + if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
31299 +@@ -6405,6 +6450,8 @@ enum {
31300 + ALC668_FIXUP_AUTO_MUTE,
31301 + ALC668_FIXUP_DELL_DISABLE_AAMIX,
31302 + ALC668_FIXUP_DELL_XPS13,
31303 ++ ALC662_FIXUP_ASUS_Nx50,
31304 ++ ALC668_FIXUP_ASUS_Nx51,
31305 + };
31306 +
31307 + static const struct hda_fixup alc662_fixups[] = {
31308 +@@ -6645,6 +6692,21 @@ static const struct hda_fixup alc662_fixups[] = {
31309 + .type = HDA_FIXUP_FUNC,
31310 + .v.func = alc_fixup_bass_chmap,
31311 + },
31312 ++ [ALC662_FIXUP_ASUS_Nx50] = {
31313 ++ .type = HDA_FIXUP_FUNC,
31314 ++ .v.func = alc_fixup_auto_mute_via_amp,
31315 ++ .chained = true,
31316 ++ .chain_id = ALC662_FIXUP_BASS_1A
31317 ++ },
31318 ++ [ALC668_FIXUP_ASUS_Nx51] = {
31319 ++ .type = HDA_FIXUP_PINS,
31320 ++ .v.pins = (const struct hda_pintbl[]) {
31321 ++ {0x1a, 0x90170151}, /* bass speaker */
31322 ++ {}
31323 ++ },
31324 ++ .chained = true,
31325 ++ .chain_id = ALC662_FIXUP_BASS_CHMAP,
31326 ++ },
31327 + };
31328 +
31329 + static const struct snd_pci_quirk alc662_fixup_tbl[] = {
31330 +@@ -6667,10 +6729,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
31331 + SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
31332 + SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
31333 + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
31334 +- SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
31335 ++ SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
31336 ++ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
31337 + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
31338 ++ SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
31339 + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
31340 + SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
31341 ++ SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
31342 ++ SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
31343 + SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
31344 + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
31345 + SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
31346 +@@ -6901,6 +6967,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
31347 + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
31348 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
31349 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
31350 ++ HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
31351 + HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
31352 + HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
31353 + HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
31354 +@@ -6911,6 +6978,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
31355 + HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269),
31356 + HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269),
31357 + HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662),
31358 ++ HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269),
31359 + HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269),
31360 + HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269),
31361 + HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269),
31362 +@@ -6923,6 +6991,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
31363 + HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
31364 + HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
31365 + HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
31366 ++ HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
31367 ++ HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
31368 + HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
31369 + HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
31370 + HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
31371 +diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
31372 +index 42bcbac801a3..ccdab29a8b66 100644
31373 +--- a/sound/pci/intel8x0.c
31374 ++++ b/sound/pci/intel8x0.c
31375 +@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
31376 +
31377 + static struct snd_pci_quirk intel8x0_clock_list[] = {
31378 + SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
31379 ++ SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
31380 + SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
31381 + SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
31382 + SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
31383 +diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
31384 +index c5194f5b150a..d7e71f309299 100644
31385 +--- a/sound/pci/pcxhr/pcxhr_core.c
31386 ++++ b/sound/pci/pcxhr/pcxhr_core.c
31387 +@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
31388 + }
31389 +
31390 + pcxhr_msg_thread(mgr);
31391 ++ mutex_unlock(&mgr->lock);
31392 + return IRQ_HANDLED;
31393 + }
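
The one-line pcxhr fix restores lock balance: the threaded IRQ handler acquires mgr->lock earlier in the function (outside this hunk), and this return path was missing the release. A pthread sketch of the every-exit-stays-balanced shape (function names are invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int handler(int got_work)
    {
        pthread_mutex_lock(&lock);
        if (!got_work) {
            pthread_mutex_unlock(&lock);  /* early exit also unlocks */
            return 0;
        }
        /* ... process messages under the lock ... */
        pthread_mutex_unlock(&lock);      /* the path the fix repairs */
        return 1;
    }

    int main(void)
    {
        printf("handled=%d\n", handler(1));
        printf("handled=%d\n", handler(0));
        return 0;
    }
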
31394 +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
31395 +index 11d032cdc658..48dbb2fdeb09 100644
31396 +--- a/sound/soc/codecs/rt5640.c
31397 ++++ b/sound/soc/codecs/rt5640.c
31398 +@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
31399 +
31400 + /* Interface data select */
31401 + static const char * const rt5640_data_select[] = {
31402 +- "Normal", "left copy to right", "right copy to left", "Swap"};
31403 ++ "Normal", "Swap", "left copy to right", "right copy to left"};
31404 +
31405 + static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
31406 + RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
31407 +diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
31408 +index 83a7150ddc24..f84231e7d1dd 100644
31409 +--- a/sound/soc/codecs/rt5640.h
31410 ++++ b/sound/soc/codecs/rt5640.h
31411 +@@ -442,39 +442,39 @@
31412 + #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
31413 + #define RT5640_IF1_DAC_SEL_SFT 14
31414 + #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
31415 +-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
31416 +-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
31417 +-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
31418 ++#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
31419 ++#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
31420 ++#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
31421 + #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
31422 + #define RT5640_IF1_ADC_SEL_SFT 12
31423 + #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
31424 +-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
31425 +-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
31426 +-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
31427 ++#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
31428 ++#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
31429 ++#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
31430 + #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
31431 + #define RT5640_IF2_DAC_SEL_SFT 10
31432 + #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
31433 +-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
31434 +-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
31435 +-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
31436 ++#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
31437 ++#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
31438 ++#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
31439 + #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
31440 + #define RT5640_IF2_ADC_SEL_SFT 8
31441 + #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
31442 +-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
31443 +-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
31444 +-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
31445 ++#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
31446 ++#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
31447 ++#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
31448 + #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
31449 + #define RT5640_IF3_DAC_SEL_SFT 6
31450 + #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
31451 +-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
31452 +-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
31453 +-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
31454 ++#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
31455 ++#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
31456 ++#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
31457 + #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
31458 + #define RT5640_IF3_ADC_SEL_SFT 4
31459 + #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
31460 +-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
31461 +-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
31462 +-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
31463 ++#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
31464 ++#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
31465 ++#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
31466 +
31467 + /* REC Left Mixer Control 1 (0x3b) */
31468 + #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
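
The rt5640 change is one fix applied twice: the ALSA enum's string order must match the hardware encoding, because the item index written to the control becomes the register field value. After the fix, index 1 is "Swap" to match the 0x1 encoding, and the header constants are renumbered to agree. A sketch of the invariant, with the table index standing in for the register value:

    #include <stdio.h>

    int main(void)
    {
        static const char * const sel[] = {
            "Normal", "Swap", "left copy to right", "right copy to left"
        };
        unsigned int regval;

        for (regval = 0; regval < 4; regval++)
            printf("IF1_DAC_SEL=0x%x -> %s\n", regval, sel[regval]);
        return 0;
    }
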
31469 +diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
31470 +index e619d5651b09..080c78e88e10 100644
31471 +--- a/sound/soc/codecs/ssm4567.c
31472 ++++ b/sound/soc/codecs/ssm4567.c
31473 +@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
31474 + regcache_cache_only(ssm4567->regmap, !enable);
31475 +
31476 + if (enable) {
31477 ++ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
31478 ++ 0x00);
31479 ++ if (ret)
31480 ++ return ret;
31481 ++
31482 + ret = regmap_update_bits(ssm4567->regmap,
31483 + SSM4567_REG_POWER_CTRL,
31484 + SSM4567_POWER_SPWDN, 0x00);
31485 +diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
31486 +index df65c5b494b1..b6ab3fc5789e 100644
31487 +--- a/sound/soc/samsung/s3c-i2s-v2.c
31488 ++++ b/sound/soc/samsung/s3c-i2s-v2.c
31489 +@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
31490 + #endif
31491 +
31492 + int s3c_i2sv2_register_component(struct device *dev, int id,
31493 +- struct snd_soc_component_driver *cmp_drv,
31494 ++ const struct snd_soc_component_driver *cmp_drv,
31495 + struct snd_soc_dai_driver *dai_drv)
31496 + {
31497 + struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
31498 +diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
31499 +index 90abab364b49..d0684145ed1f 100644
31500 +--- a/sound/soc/samsung/s3c-i2s-v2.h
31501 ++++ b/sound/soc/samsung/s3c-i2s-v2.h
31502 +@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
31503 + * soc core.
31504 + */
31505 + extern int s3c_i2sv2_register_component(struct device *dev, int id,
31506 +- struct snd_soc_component_driver *cmp_drv,
31507 ++ const struct snd_soc_component_driver *cmp_drv,
31508 + struct snd_soc_dai_driver *dai_drv);
31509 +
31510 + #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
31511 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
31512 +index 581175a51ecf..5e811dc02fb9 100644
31513 +--- a/sound/soc/soc-dapm.c
31514 ++++ b/sound/soc/soc-dapm.c
31515 +@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
31516 + int count = 0;
31517 + char *state = "not set";
31518 +
31519 ++ /* card won't be set for the dummy component, as a spot fix
31520 ++ * we're checking for that case specifically here but in future
31521 ++ * we will ensure that the dummy component looks like others.
31522 ++ */
31523 ++ if (!cmpnt->card)
31524 ++ return 0;
31525 ++
31526 + list_for_each_entry(w, &cmpnt->card->widgets, list) {
31527 + if (w->dapm != dapm)
31528 + continue;
31529 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
31530 +index 2ed260b10f6d..7ccbcaf6a147 100644
31531 +--- a/sound/usb/clock.c
31532 ++++ b/sound/usb/clock.c
31533 +@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
31534 + unsigned char data[3];
31535 + int err, crate;
31536 +
31537 ++ if (get_iface_desc(alts)->bNumEndpoints < 1)
31538 ++ return -EINVAL;
31539 + ep = get_endpoint(alts, 0)->bEndpointAddress;
31540 +
31541 + /* if endpoint doesn't have sampling rate control, bail out */
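
This is the first of several hunks in the patch (clock.c, pcm.c, mixer_quirks.c, quirks.c) adding the same defensive check: never index endpoint 0 of an altsetting whose descriptor claims zero endpoints, since a broken or malicious device controls those fields. A minimal sketch of the guard (struct layout is illustrative):

    #include <stdio.h>

    struct iface_desc { int bNumEndpoints; int endpoint_addr[4]; };

    static int first_endpoint(const struct iface_desc *d, int *addr)
    {
        if (d->bNumEndpoints < 1)
            return -1;               /* -EINVAL in the kernel code */
        *addr = d->endpoint_addr[0];
        return 0;
    }

    int main(void)
    {
        struct iface_desc bad = { 0, { 0 } };
        int addr;

        if (first_endpoint(&bad, &addr) < 0)
            printf("rejected malformed descriptor\n");
        return 0;
    }
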
31542 +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
31543 +index 7b1cb365ffab..c07a7eda42a2 100644
31544 +--- a/sound/usb/endpoint.c
31545 ++++ b/sound/usb/endpoint.c
31546 +@@ -438,6 +438,9 @@ exit_clear:
31547 + *
31548 + * New endpoints will be added to chip->ep_list and must be freed by
31549 + * calling snd_usb_endpoint_free().
31550 ++ *
31551 ++ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
31552 ++ * bNumEndpoints > 1 beforehand.
31553 + */
31554 + struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
31555 + struct usb_host_interface *alts,
31556 +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
31557 +index ddca6547399b..1f8fb0d904e0 100644
31558 +--- a/sound/usb/mixer_maps.c
31559 ++++ b/sound/usb/mixer_maps.c
31560 +@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
31561 + };
31562 +
31563 + /*
31564 ++ * Dell usb dock with ALC4020 codec had a firmware problem where it got
31565 ++ * screwed up when zero volume is passed; just skip it as a workaround
31566 ++ */
31567 ++static const struct usbmix_name_map dell_alc4020_map[] = {
31568 ++ { 16, NULL },
31569 ++ { 19, NULL },
31570 ++ { 0 }
31571 ++};
31572 ++
31573 ++/*
31574 + * Control map entries
31575 + */
31576 +
31577 +@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
31578 + .map = aureon_51_2_map,
31579 + },
31580 + {
31581 ++ .id = USB_ID(0x0bda, 0x4014),
31582 ++ .map = dell_alc4020_map,
31583 ++ },
31584 ++ {
31585 + .id = USB_ID(0x0dba, 0x1000),
31586 + .map = mbox1_map,
31587 + },
31588 +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
31589 +index 279025650568..f6c3bf79af9a 100644
31590 +--- a/sound/usb/mixer_quirks.c
31591 ++++ b/sound/usb/mixer_quirks.c
31592 +@@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
31593 +
31594 + /* use known values for that card: interface#1 altsetting#1 */
31595 + iface = usb_ifnum_to_if(chip->dev, 1);
31596 ++ if (!iface || iface->num_altsetting < 2)
31597 ++ return -EINVAL;
31598 + alts = &iface->altsetting[1];
31599 ++ if (get_iface_desc(alts)->bNumEndpoints < 1)
31600 ++ return -EINVAL;
31601 + ep = get_endpoint(alts, 0)->bEndpointAddress;
31602 +
31603 + err = snd_usb_ctl_msg(chip->dev,
31604 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
31605 +index 9245f52d43bd..44d178ee9177 100644
31606 +--- a/sound/usb/pcm.c
31607 ++++ b/sound/usb/pcm.c
31608 +@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
31609 + unsigned char data[1];
31610 + int err;
31611 +
31612 ++ if (get_iface_desc(alts)->bNumEndpoints < 1)
31613 ++ return -EINVAL;
31614 + ep = get_endpoint(alts, 0)->bEndpointAddress;
31615 +
31616 + data[0] = 1;
31617 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
31618 +index c458d60d5030..db11ecf0b74d 100644
31619 +--- a/sound/usb/quirks.c
31620 ++++ b/sound/usb/quirks.c
31621 +@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
31622 + usb_audio_err(chip, "cannot memdup\n");
31623 + return -ENOMEM;
31624 + }
31625 ++ INIT_LIST_HEAD(&fp->list);
31626 + if (fp->nr_rates > MAX_NR_RATES) {
31627 + kfree(fp);
31628 + return -EINVAL;
31629 +@@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
31630 + stream = (fp->endpoint & USB_DIR_IN)
31631 + ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
31632 + err = snd_usb_add_audio_stream(chip, stream, fp);
31633 +- if (err < 0) {
31634 +- kfree(fp);
31635 +- kfree(rate_table);
31636 +- return err;
31637 +- }
31638 ++ if (err < 0)
31639 ++ goto error;
31640 + if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
31641 + fp->altset_idx >= iface->num_altsetting) {
31642 +- kfree(fp);
31643 +- kfree(rate_table);
31644 +- return -EINVAL;
31645 ++ err = -EINVAL;
31646 ++ goto error;
31647 + }
31648 + alts = &iface->altsetting[fp->altset_idx];
31649 + altsd = get_iface_desc(alts);
31650 ++ if (altsd->bNumEndpoints < 1) {
31651 ++ err = -EINVAL;
31652 ++ goto error;
31653 ++ }
31654 ++
31655 + fp->protocol = altsd->bInterfaceProtocol;
31656 +
31657 + if (fp->datainterval == 0)
31658 +@@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
31659 + snd_usb_init_pitch(chip, fp->iface, alts, fp);
31660 + snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
31661 + return 0;
31662 ++
31663 ++ error:
31664 ++ list_del(&fp->list); /* unlink for avoiding double-free */
31665 ++ kfree(fp);
31666 ++ kfree(rate_table);
31667 ++ return err;
31668 + }
31669 +
31670 + static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
31671 +@@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
31672 + fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
31673 + fp->datainterval = 0;
31674 + fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
31675 ++ INIT_LIST_HEAD(&fp->list);
31676 +
31677 + switch (fp->maxpacksize) {
31678 + case 0x120:
31679 +@@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
31680 + ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
31681 + err = snd_usb_add_audio_stream(chip, stream, fp);
31682 + if (err < 0) {
31683 ++ list_del(&fp->list); /* unlink for avoiding double-free */
31684 + kfree(fp);
31685 + return err;
31686 + }
31687 +@@ -1121,12 +1131,18 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
31688 + switch (chip->usb_id) {
31689 + case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
31690 + case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
31691 ++ case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
31692 + case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
31693 + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
31694 + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
31695 ++ case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
31696 + case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
31697 + case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
31698 ++ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
31699 + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
31700 ++ case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
31701 ++ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
31702 ++ case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
31703 + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
31704 + return true;
31705 + }
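
The quirks.c hunks (and the stream.c ones that follow) guard against a double free: once fp is linked into a list owned by the chip instance, an error path must list_del() it before kfree(), and the list head must be initialized so the unlink is safe even if fp was never added. A userspace sketch with a kernel-style circular list:

    #include <stdio.h>
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        INIT_LIST_HEAD(n);           /* safe to delete again */
    }

    struct fmt { struct list_head list; };

    int main(void)
    {
        struct list_head fmt_list;
        struct fmt *fp = malloc(sizeof(*fp));

        INIT_LIST_HEAD(&fmt_list);
        INIT_LIST_HEAD(&fp->list);   /* what the fix adds */
        list_add(&fp->list, &fmt_list);
        /* error path: unlink before freeing, so the owner's later walk of
         * fmt_list cannot reach (and free) fp a second time */
        list_del(&fp->list);
        free(fp);
        printf("list empty: %d\n", fmt_list.next == &fmt_list);
        return 0;
    }
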
31706 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
31707 +index c4dc577ab1bd..8e9548bc1f1a 100644
31708 +--- a/sound/usb/stream.c
31709 ++++ b/sound/usb/stream.c
31710 +@@ -314,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
31711 + /*
31712 + * add this endpoint to the chip instance.
31713 + * if a stream with the same endpoint already exists, append to it.
31714 +- * if not, create a new pcm stream.
31715 ++ * if not, create a new pcm stream. note, fp is added to the substream
31716 ++ * fmt_list and will be freed on the chip instance release. do not free
31717 ++ * fp or do remove it from the substream fmt_list to avoid double-free.
31718 + */
31719 + int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
31720 + int stream,
31721 +@@ -675,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
31722 + * (fp->maxpacksize & 0x7ff);
31723 + fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
31724 + fp->clock = clock;
31725 ++ INIT_LIST_HEAD(&fp->list);
31726 +
31727 + /* some quirks for attributes here */
31728 +
31729 +@@ -723,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
31730 + dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
31731 + err = snd_usb_add_audio_stream(chip, stream, fp);
31732 + if (err < 0) {
31733 ++ list_del(&fp->list); /* unlink for avoiding double-free */
31734 + kfree(fp->rate_table);
31735 + kfree(fp->chmap);
31736 + kfree(fp);
31737 +diff --git a/tools/hv/Makefile b/tools/hv/Makefile
31738 +index a8ab79556926..a8c4644022a6 100644
31739 +--- a/tools/hv/Makefile
31740 ++++ b/tools/hv/Makefile
31741 +@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
31742 + WARNINGS = -Wall -Wextra
31743 + CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
31744 +
31745 ++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
31746 ++
31747 + all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
31748 + %: %.c
31749 + $(CC) $(CFLAGS) -o $@ $^
31750 +diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
31751 +index 0144b3d1bb77..88cccea3ca99 100644
31752 +--- a/tools/lib/traceevent/parse-filter.c
31753 ++++ b/tools/lib/traceevent/parse-filter.c
31754 +@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
31755 + current_op = current_exp;
31756 +
31757 + ret = collapse_tree(current_op, parg, error_str);
31758 ++ /* collapse_tree() may free current_op, and updates parg accordingly */
31759 ++ current_op = NULL;
31760 + if (ret < 0)
31761 + goto fail;
31762 +
31763 +- *parg = current_op;
31764 +-
31765 + free(token);
31766 + return 0;
31767 +
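
The parse-filter fix clears current_op right after collapse_tree(), which may free its argument and publish the replacement through parg; the old code then stored the stale pointer, a use-after-free. A runnable sketch of the drop-your-copy-after-handing-off pattern (names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct arg { int v; };

    /* Consumes 'op': frees it and hands back a replacement via *out. */
    static int collapse(struct arg *op, struct arg **out)
    {
        struct arg *n = malloc(sizeof(*n));

        n->v = op->v + 1;
        free(op);                    /* 'op' is dead after this call */
        *out = n;
        return 0;
    }

    int main(void)
    {
        struct arg *op = malloc(sizeof(*op)), *result = NULL;

        op->v = 41;
        if (collapse(op, &result) == 0)
            op = NULL;               /* the fix: never touch the stale pointer */
        printf("%d\n", result->v);
        free(result);
        return 0;
    }
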
31768 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
31769 +index 52ef7a9d50aa..14d9e8ffaff7 100644
31770 +--- a/tools/perf/Documentation/perf-stat.txt
31771 ++++ b/tools/perf/Documentation/perf-stat.txt
31772 +@@ -69,6 +69,14 @@ report::
31773 + --scale::
31774 + scale/normalize counter values
31775 +
31776 ++-d::
31777 ++--detailed::
31778 ++ print more detailed statistics, can be specified up to 3 times
31779 ++
31780 ++ -d: detailed events, L1 and LLC data cache
31781 ++ -d -d: more detailed events, dTLB and iTLB events
31782 ++ -d -d -d: very detailed events, adding prefetch events
31783 ++
31784 + -r::
31785 + --repeat=<n>::
31786 + repeat command and print average + stddev (max: 100). 0 means forever.
31787 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
31788 +index 08c09ad755d2..7bb47424bc49 100644
31789 +--- a/tools/perf/ui/browsers/hists.c
31790 ++++ b/tools/perf/ui/browsers/hists.c
31791 +@@ -302,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
31792 + chain = list_entry(node->val.next, struct callchain_list, list);
31793 + chain->has_children = has_sibling;
31794 +
31795 +- if (node->val.next != node->val.prev) {
31796 ++ if (!list_empty(&node->val)) {
31797 + chain = list_entry(node->val.prev, struct callchain_list, list);
31798 + chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
31799 + }
31800 +@@ -844,7 +844,7 @@ next:
31801 + return row - first_row;
31802 + }
31803 +
31804 +-static int hist_browser__show_callchain(struct hist_browser *browser,
31805 ++static int hist_browser__show_callchain_graph(struct hist_browser *browser,
31806 + struct rb_root *root, int level,
31807 + unsigned short row, u64 total,
31808 + print_callchain_entry_fn print,
31809 +@@ -898,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
31810 + else
31811 + new_total = total;
31812 +
31813 +- row += hist_browser__show_callchain(browser, &child->rb_root,
31814 ++ row += hist_browser__show_callchain_graph(browser, &child->rb_root,
31815 + new_level, row, new_total,
31816 + print, arg, is_output_full);
31817 + }
31818 +@@ -910,6 +910,43 @@ out:
31819 + return row - first_row;
31820 + }
31821 +
31822 ++static int hist_browser__show_callchain(struct hist_browser *browser,
31823 ++ struct hist_entry *entry, int level,
31824 ++ unsigned short row,
31825 ++ print_callchain_entry_fn print,
31826 ++ struct callchain_print_arg *arg,
31827 ++ check_output_full_fn is_output_full)
31828 ++{
31829 ++ u64 total = hists__total_period(entry->hists);
31830 ++ int printed;
31831 ++
31832 ++ if (callchain_param.mode == CHAIN_GRAPH_REL) {
31833 ++ if (symbol_conf.cumulate_callchain)
31834 ++ total = entry->stat_acc->period;
31835 ++ else
31836 ++ total = entry->stat.period;
31837 ++ }
31838 ++
31839 ++ if (callchain_param.mode == CHAIN_FLAT) {
31840 ++ printed = hist_browser__show_callchain_flat(browser,
31841 ++ &entry->sorted_chain, row, total,
31842 ++ print, arg, is_output_full);
31843 ++ } else if (callchain_param.mode == CHAIN_FOLDED) {
31844 ++ printed = hist_browser__show_callchain_folded(browser,
31845 ++ &entry->sorted_chain, row, total,
31846 ++ print, arg, is_output_full);
31847 ++ } else {
31848 ++ printed = hist_browser__show_callchain_graph(browser,
31849 ++ &entry->sorted_chain, level, row, total,
31850 ++ print, arg, is_output_full);
31851 ++ }
31852 ++
31853 ++ if (arg->is_current_entry)
31854 ++ browser->he_selection = entry;
31855 ++
31856 ++ return printed;
31857 ++}
31858 ++
31859 + struct hpp_arg {
31860 + struct ui_browser *b;
31861 + char folded_sign;
31862 +@@ -1084,38 +1121,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
31863 + --row_offset;
31864 +
31865 + if (folded_sign == '-' && row != browser->b.rows) {
31866 +- u64 total = hists__total_period(entry->hists);
31867 + struct callchain_print_arg arg = {
31868 + .row_offset = row_offset,
31869 + .is_current_entry = current_entry,
31870 + };
31871 +
31872 +- if (callchain_param.mode == CHAIN_GRAPH_REL) {
31873 +- if (symbol_conf.cumulate_callchain)
31874 +- total = entry->stat_acc->period;
31875 +- else
31876 +- total = entry->stat.period;
31877 +- }
31878 +-
31879 +- if (callchain_param.mode == CHAIN_FLAT) {
31880 +- printed += hist_browser__show_callchain_flat(browser,
31881 +- &entry->sorted_chain, row, total,
31882 ++ printed += hist_browser__show_callchain(browser, entry, 1, row,
31883 + hist_browser__show_callchain_entry, &arg,
31884 + hist_browser__check_output_full);
31885 +- } else if (callchain_param.mode == CHAIN_FOLDED) {
31886 +- printed += hist_browser__show_callchain_folded(browser,
31887 +- &entry->sorted_chain, row, total,
31888 +- hist_browser__show_callchain_entry, &arg,
31889 +- hist_browser__check_output_full);
31890 +- } else {
31891 +- printed += hist_browser__show_callchain(browser,
31892 +- &entry->sorted_chain, 1, row, total,
31893 +- hist_browser__show_callchain_entry, &arg,
31894 +- hist_browser__check_output_full);
31895 +- }
31896 +-
31897 +- if (arg.is_current_entry)
31898 +- browser->he_selection = entry;
31899 + }
31900 +
31901 + return printed;
31902 +@@ -1380,15 +1393,11 @@ do_offset:
31903 + static int hist_browser__fprintf_callchain(struct hist_browser *browser,
31904 + struct hist_entry *he, FILE *fp)
31905 + {
31906 +- u64 total = hists__total_period(he->hists);
31907 + struct callchain_print_arg arg = {
31908 + .fp = fp,
31909 + };
31910 +
31911 +- if (symbol_conf.cumulate_callchain)
31912 +- total = he->stat_acc->period;
31913 +-
31914 +- hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
31915 ++ hist_browser__show_callchain(browser, he, 1, 0,
31916 + hist_browser__fprintf_callchain_entry, &arg,
31917 + hist_browser__check_dump_full);
31918 + return arg.printed;
31919 +@@ -2320,10 +2329,12 @@ skip_annotation:
31920 + *
31921 + * See hist_browser__show_entry.
31922 + */
31923 +- nr_options += add_script_opt(browser,
31924 +- &actions[nr_options],
31925 +- &options[nr_options],
31926 +- NULL, browser->selection->sym);
31927 ++ if (sort__has_sym && browser->selection->sym) {
31928 ++ nr_options += add_script_opt(browser,
31929 ++ &actions[nr_options],
31930 ++ &options[nr_options],
31931 ++ NULL, browser->selection->sym);
31932 ++ }
31933 + }
31934 + nr_options += add_script_opt(browser, &actions[nr_options],
31935 + &options[nr_options], NULL, NULL);
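Three fixes travel together here. First, `node->val.next != node->val.prev`
is only true for lists with at least two entries, so a single-entry callchain
never had has_children derived from node->rb_root; `!list_empty()` is the
test that was meant. Second, the graph-mode walker is renamed to
hist_browser__show_callchain_graph(), and a new hist_browser__show_callchain()
front end computes the period total and dispatches to the flat, folded, or
graph printer, so the entry-display and fprintf paths stop duplicating that
logic (the fprintf path even computed the total differently). Third, the
scripting menu only offers the per-symbol option when a symbol is actually
selected, instead of dereferencing browser->selection->sym unconditionally.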
31936 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
31937 +index 85155e91b61b..7bad5c3fa7b7 100644
31938 +--- a/tools/perf/util/event.c
31939 ++++ b/tools/perf/util/event.c
31940 +@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
31941 + strcpy(execname, "");
31942 +
31943 + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
31944 +- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
31945 ++ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
31946 + &event->mmap2.start, &event->mmap2.len, prot,
31947 + &event->mmap2.pgoff, &event->mmap2.maj,
31948 + &event->mmap2.min,
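%s stops at the first whitespace character, so a mapped file whose path
contains spaces was recorded truncated; %[^\n] keeps everything up to the end
of the line. A trimmed illustration (the real format string also scans the
offset, device, and inode fields):

    #include <stdio.h>

    /* %s stops at the first blank, %[^\n] keeps the rest of the line,
     * so a path like "/tmp/my file.so" survives intact. */
    int main(void)
    {
        const char line[] = "00400000-0040c000 r-xp /tmp/my file.so\n";
        unsigned long start, end;
        char prot[8], name_s[64], name_set[64];

        sscanf(line, "%lx-%lx %7s %63s", &start, &end, prot, name_s);
        sscanf(line, "%lx-%lx %7s %63[^\n]", &start, &end, prot, name_set);

        printf("%%s     -> \"%s\"\n", name_s);    /* "/tmp/my"         */
        printf("%%[^\\n] -> \"%s\"\n", name_set); /* "/tmp/my file.so" */
        return 0;
    }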
31949 +diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
31950 +index d81f13de2476..a7eb0eae9938 100644
31951 +--- a/tools/perf/util/evlist.c
31952 ++++ b/tools/perf/util/evlist.c
31953 +@@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
31954 + */
31955 + if (cpus != evlist->cpus) {
31956 + cpu_map__put(evlist->cpus);
31957 +- evlist->cpus = cpus;
31958 ++ evlist->cpus = cpu_map__get(cpus);
31959 + }
31960 +
31961 + if (threads != evlist->threads) {
31962 + thread_map__put(evlist->threads);
31963 +- evlist->threads = threads;
31964 ++ evlist->threads = thread_map__get(threads);
31965 + }
31966 +
31967 + perf_evlist__propagate_maps(evlist);
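perf_evlist__set_maps() used to store the caller's pointers without taking
references of its own, so a caller that later put its maps left the evlist
pointing at freed cpu/thread maps. Getting a reference when storing (while
still putting the old one) follows the usual refcount convention, sketched
below with a hypothetical refcounted type:

    #include <stdlib.h>

    /* A setter that stores a pointer must take its own reference
     * instead of silently adopting the caller's. */
    struct map { int refcnt; };

    static struct map *map_get(struct map *m)
    {
        if (m)
            m->refcnt++;
        return m;
    }

    static void map_put(struct map *m)
    {
        if (m && --m->refcnt == 0)
            free(m);
    }

    struct holder { struct map *map; };

    static void holder_set_map(struct holder *h, struct map *m)
    {
        if (h->map != m) {
            map_put(h->map);     /* drop the old reference      */
            h->map = map_get(m); /* take our own on the new one */
        }
    }

    int main(void)
    {
        struct map *m = calloc(1, sizeof(*m));
        struct holder h = { NULL };

        m->refcnt = 1;           /* the caller's reference        */
        holder_set_map(&h, m);   /* holder's own: refcnt is now 2 */
        map_put(m);              /* caller drops hers: refcnt 1   */
        map_put(h.map);          /* holder's put finally frees it */
        return 0;
    }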
31968 +diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
31969 +index 8e75434bd01c..4d8037a3d8a4 100644
31970 +--- a/tools/perf/util/evsel.h
31971 ++++ b/tools/perf/util/evsel.h
31972 +@@ -93,10 +93,8 @@ struct perf_evsel {
31973 + const char *unit;
31974 + struct event_format *tp_format;
31975 + off_t id_offset;
31976 +- union {
31977 +- void *priv;
31978 +- u64 db_id;
31979 +- };
31980 ++ void *priv;
31981 ++ u64 db_id;
31982 + struct cgroup_sel *cgrp;
31983 + void *handler;
31984 + struct cpu_map *cpus;
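priv and db_id previously shared storage in a union, which is only safe if
the two are never live at once; evidently they can be (the database export
code tracks db_id while other tools hang state off priv), so the overlay let
one user silently clobber the other. Splitting them into separate fields
trades a few bytes per evsel for correctness.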
31985 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
31986 +index 05d815851be1..4e1590ba8902 100644
31987 +--- a/tools/perf/util/intel-pt.c
31988 ++++ b/tools/perf/util/intel-pt.c
31989 +@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
31990 + pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
31991 + ret);
31992 +
31993 +- if (pt->synth_opts.callchain)
31994 ++ if (pt->synth_opts.last_branch)
31995 + intel_pt_reset_last_branch_rb(ptq);
31996 +
31997 + return ret;
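The last-branch ring buffer is only allocated when the last_branch synthesis
option is set, but the reset after emitting a transaction sample tested
synth_opts.callchain; with callchains enabled and last-branch disabled that
reset touched a buffer that was never allocated. Keying the reset off the
option that actually allocates the buffer fixes the crash.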
31998 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
31999 +index 813d9b272c81..48a1c5e7dc0d 100644
32000 +--- a/tools/perf/util/parse-events.c
32001 ++++ b/tools/perf/util/parse-events.c
32002 +@@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
32003 +
32004 + /* valid terms */
32005 + if (additional_terms) {
32006 +- if (!asprintf(&str, "valid terms: %s,%s",
32007 +- additional_terms, static_terms))
32008 ++ if (asprintf(&str, "valid terms: %s,%s",
32009 ++ additional_terms, static_terms) < 0)
32010 + goto fail;
32011 + } else {
32012 +- if (!asprintf(&str, "valid terms: %s", static_terms))
32013 ++ if (asprintf(&str, "valid terms: %s", static_terms) < 0)
32014 + goto fail;
32015 + }
32016 + return str;
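asprintf() returns the number of bytes printed, or -1 on error, and leaves
the output pointer undefined on failure. `!asprintf(...)` therefore only
catches the impossible zero-length result and sails past a real allocation
failure; `< 0` is the documented error test:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *str = NULL;

        /* `!asprintf(...)` would only fire for a zero-length result and
         * would treat -1, the actual error return, as success. */
        if (asprintf(&str, "valid terms: %s", "config,name") < 0)
            return 1;    /* on failure str's contents are undefined */

        puts(str);
        free(str);
        return 0;
    }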
32017 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
32018 +index b597bcc8fc78..37b4f5070353 100644
32019 +--- a/tools/perf/util/pmu.c
32020 ++++ b/tools/perf/util/pmu.c
32021 +@@ -284,13 +284,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
32022 + {
32023 + struct dirent *evt_ent;
32024 + DIR *event_dir;
32025 +- int ret = 0;
32026 +
32027 + event_dir = opendir(dir);
32028 + if (!event_dir)
32029 + return -EINVAL;
32030 +
32031 +- while (!ret && (evt_ent = readdir(event_dir))) {
32032 ++ while ((evt_ent = readdir(event_dir))) {
32033 + char path[PATH_MAX];
32034 + char *name = evt_ent->d_name;
32035 + FILE *file;
32036 +@@ -306,17 +305,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
32037 +
32038 + snprintf(path, PATH_MAX, "%s/%s", dir, name);
32039 +
32040 +- ret = -EINVAL;
32041 + file = fopen(path, "r");
32042 +- if (!file)
32043 +- break;
32044 ++ if (!file) {
32045 ++ pr_debug("Cannot open %s\n", path);
32046 ++ continue;
32047 ++ }
32048 +
32049 +- ret = perf_pmu__new_alias(head, dir, name, file);
32050 ++ if (perf_pmu__new_alias(head, dir, name, file) < 0)
32051 ++ pr_debug("Cannot set up %s\n", name);
32052 + fclose(file);
32053 + }
32054 +
32055 + closedir(event_dir);
32056 +- return ret;
32057 ++ return 0;
32058 + }
32059 +
32060 + /*
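pmu_aliases_parse() used to give up with -EINVAL on the first alias file it
could not open or parse, discarding every alias that would have followed.
Now a bad sysfs entry is logged with pr_debug() and skipped, and the
directory scan always completes.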
32061 +diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
32062 +index 1833103768cb..c8680984d2d6 100644
32063 +--- a/tools/perf/util/setup.py
32064 ++++ b/tools/perf/util/setup.py
32065 +@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
32066 + # switch off several checks (need to be at the end of cflags list)
32067 + cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
32068 +
32069 ++src_perf = getenv('srctree') + '/tools/perf'
32070 + build_lib = getenv('PYTHON_EXTBUILD_LIB')
32071 + build_tmp = getenv('PYTHON_EXTBUILD_TMP')
32072 + libtraceevent = getenv('LIBTRACEEVENT')
32073 +@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
32074 + ext_sources = [f.strip() for f in file('util/python-ext-sources')
32075 + if len(f.strip()) > 0 and f[0] != '#']
32076 +
32077 ++# use full paths with source files
32078 ++ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
32079 ++
32080 + perf = Extension('perf',
32081 + sources = ext_sources,
32082 + include_dirs = ['util/include'],
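util/python-ext-sources lists the extension sources relative to tools/perf,
which only resolves when the build runs in that directory; prefixing each
entry with $(srctree)/tools/perf lets the python binding build from a
separate output directory (e.g. an O= build).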
32083 +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
32084 +index ec722346e6ff..16892a7ca27e 100644
32085 +--- a/tools/perf/util/sort.c
32086 ++++ b/tools/perf/util/sort.c
32087 +@@ -2272,6 +2272,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
32088 +
32089 + static char *setup_overhead(char *keys)
32090 + {
32091 ++ if (sort__mode == SORT_MODE__DIFF)
32092 ++ return keys;
32093 ++
32094 + keys = prefix_if_not_in("overhead", keys);
32095 +
32096 + if (symbol_conf.cumulate_callchain)
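perf diff provides its own comparison columns rather than the usual overhead
field, so setup_overhead() now returns the sort keys untouched in diff mode
instead of forcing an 'overhead' key onto a mode that does not set that
column up.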
32097 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
32098 +index ea6064696fe4..a7b9022b5c8f 100644
32099 +--- a/virt/kvm/arm/arch_timer.c
32100 ++++ b/virt/kvm/arm/arch_timer.c
32101 +@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
32102 + vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
32103 + vcpu->arch.timer_cpu.armed = false;
32104 +
32105 ++ WARN_ON(!kvm_timer_should_fire(vcpu));
32106 ++
32107 + /*
32108 + * If the vcpu is blocked we want to wake it up so that it will see
32109 + * the timer has expired when entering the guest.
32110 +@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
32111 + kvm_vcpu_kick(vcpu);
32112 + }
32113 +
32114 ++static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
32115 ++{
32116 ++ cycle_t cval, now;
32117 ++
32118 ++ cval = vcpu->arch.timer_cpu.cntv_cval;
32119 ++ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
32120 ++
32121 ++ if (now < cval) {
32122 ++ u64 ns;
32123 ++
32124 ++ ns = cyclecounter_cyc2ns(timecounter->cc,
32125 ++ cval - now,
32126 ++ timecounter->mask,
32127 ++ &timecounter->frac);
32128 ++ return ns;
32129 ++ }
32130 ++
32131 ++ return 0;
32132 ++}
32133 ++
32134 + static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
32135 + {
32136 + struct arch_timer_cpu *timer;
32137 ++ struct kvm_vcpu *vcpu;
32138 ++ u64 ns;
32139 ++
32140 + timer = container_of(hrt, struct arch_timer_cpu, timer);
32141 ++ vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
32142 ++
32143 ++ /*
32144 ++ * Check that the timer has really expired from the guest's
32145 ++ * PoV (NTP on the host may have forced it to expire
32146 ++ * early). If we should have slept longer, restart it.
32147 ++ */
32148 ++ ns = kvm_timer_compute_delta(vcpu);
32149 ++ if (unlikely(ns)) {
32150 ++ hrtimer_forward_now(hrt, ns_to_ktime(ns));
32151 ++ return HRTIMER_RESTART;
32152 ++ }
32153 ++
32154 + queue_work(wqueue, &timer->expired);
32155 + return HRTIMER_NORESTART;
32156 + }
32157 +@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
32158 + void kvm_timer_schedule(struct kvm_vcpu *vcpu)
32159 + {
32160 + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
32161 +- u64 ns;
32162 +- cycle_t cval, now;
32163 +
32164 + BUG_ON(timer_is_armed(timer));
32165 +
32166 +@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
32167 + return;
32168 +
32169 + /* The timer has not yet expired, schedule a background timer */
32170 +- cval = timer->cntv_cval;
32171 +- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
32172 +-
32173 +- ns = cyclecounter_cyc2ns(timecounter->cc,
32174 +- cval - now,
32175 +- timecounter->mask,
32176 +- &timecounter->frac);
32177 +- timer_arm(timer, ns);
32178 ++ timer_arm(timer, kvm_timer_compute_delta(vcpu));
32179 + }
32180 +
32181 + void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
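Two changes here: the cycles-to-nanoseconds delta moves into
kvm_timer_compute_delta() so that arming and expiry share one computation,
and the hrtimer callback now re-checks the guest timer on wakeup. The host's
hrtimer can fire before the guest's compare value is actually reached (the
comment blames NTP adjusting the host clock); when that happens the handler
forwards the timer by the remaining delta and returns HRTIMER_RESTART rather
than waking the vcpu early. The guard, reduced to a userspace sketch with
made-up names:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t now_ns;    /* stand-in for the guest-visible clock */

    static uint64_t compute_delta(uint64_t deadline)
    {
        return deadline > now_ns ? deadline - now_ns : 0;
    }

    /* Returns 1 (and the delay to re-arm with) on a premature wakeup,
     * 0 when the timer really expired and the event may be delivered. */
    static int handle_expiry(uint64_t deadline, uint64_t *delay)
    {
        uint64_t ns = compute_delta(deadline);

        if (ns) {
            *delay = ns;    /* hrtimer_forward_now() analogue   */
            return 1;       /* HRTIMER_RESTART analogue         */
        }
        return 0;           /* HRTIMER_NORESTART: deliver event */
    }

    int main(void)
    {
        uint64_t delay;

        now_ns = 900;
        printf("early:   restart=%d\n", handle_expiry(1000, &delay));
        now_ns = 1000;
        printf("expired: restart=%d\n", handle_expiry(1000, &delay));
        return 0;
    }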
32182 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
32183 +index 9102ae172d2a..298473707f17 100644
32184 +--- a/virt/kvm/kvm_main.c
32185 ++++ b/virt/kvm/kvm_main.c
32186 +@@ -537,6 +537,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
32187 + if (!kvm)
32188 + return ERR_PTR(-ENOMEM);
32189 +
32190 ++ spin_lock_init(&kvm->mmu_lock);
32191 ++ atomic_inc(&current->mm->mm_count);
32192 ++ kvm->mm = current->mm;
32193 ++ kvm_eventfd_init(kvm);
32194 ++ mutex_init(&kvm->lock);
32195 ++ mutex_init(&kvm->irq_lock);
32196 ++ mutex_init(&kvm->slots_lock);
32197 ++ atomic_set(&kvm->users_count, 1);
32198 ++ INIT_LIST_HEAD(&kvm->devices);
32199 ++
32200 + r = kvm_arch_init_vm(kvm, type);
32201 + if (r)
32202 + goto out_err_no_disable;
32203 +@@ -569,16 +579,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
32204 + goto out_err;
32205 + }
32206 +
32207 +- spin_lock_init(&kvm->mmu_lock);
32208 +- kvm->mm = current->mm;
32209 +- atomic_inc(&kvm->mm->mm_count);
32210 +- kvm_eventfd_init(kvm);
32211 +- mutex_init(&kvm->lock);
32212 +- mutex_init(&kvm->irq_lock);
32213 +- mutex_init(&kvm->slots_lock);
32214 +- atomic_set(&kvm->users_count, 1);
32215 +- INIT_LIST_HEAD(&kvm->devices);
32216 +-
32217 + r = kvm_init_mmu_notifier(kvm);
32218 + if (r)
32219 + goto out_err;
32220 +@@ -603,6 +603,7 @@ out_err_no_disable:
32221 + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
32222 + kvm_free_memslots(kvm, kvm->memslots[i]);
32223 + kvm_arch_free_vm(kvm);
32224 ++ mmdrop(current->mm);
32225 + return ERR_PTR(r);
32226 + }
32227 +
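The block of lock, eventfd, refcount, and device-list initialization moves
from after kvm_arch_init_vm() to before it, so any of that state the
architecture hook touches during VM creation is already valid; the mm
reference is likewise taken up front, which is why the out_err_no_disable
path gains the matching mmdrop().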