commit: 3c976b60908d193c1db1d704b18307583d62fd79
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 30 17:28:15 2014 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 30 17:28:15 2014 +0000
URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=3c976b60

Linux patch 3.17.2

---
 0000_README | 4 +
 1001_linux-3.17.2.patch | 8219 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8223 insertions(+)

diff --git a/0000_README b/0000_README
index 907360b..eb0c55b 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch: 1000_linux-3.17.1.patch
 From: http://www.kernel.org
 Desc: Linux 3.17.1
 
+Patch: 1001_linux-3.17.2.patch
+From: http://www.kernel.org
+Desc: Linux 3.17.2
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1001_linux-3.17.2.patch b/1001_linux-3.17.2.patch
new file mode 100644
index 0000000..7791836
--- /dev/null
+++ b/1001_linux-3.17.2.patch
@@ -0,0 +1,8219 @@
+diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
+new file mode 100644
+index 000000000000..ea45dd3901e3
+--- /dev/null
++++ b/Documentation/lzo.txt
+@@ -0,0 +1,164 @@
++
++LZO stream format as understood by Linux's LZO decompressor
++===========================================================
++
++Introduction
++
++ This is not a specification. No specification seems to be publicly available
++ for the LZO stream format. This document describes what input format the LZO
++ decompressor as implemented in the Linux kernel understands. The file under
++ analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made of the
++ compressor or of any other implementation, though it seems likely that
++ the format matches the standard one. The purpose of this document is to
++ better understand what the code does in order to propose more efficient fixes
++ for future bug reports.
++
++Description
++
++ The stream is composed of a series of instructions, operands, and data. The
++ instructions consist of a few bits representing an opcode, and bits forming
++ the operands for the instruction, whose size and position depend on the
++ opcode and on the number of literals copied by the previous instruction. The
++ operands are used to indicate:
++
++ - a distance when copying data from the dictionary (past output buffer)
++ - a length (number of bytes to copy from the dictionary)
++ - the number of literals to copy, which is retained in the variable "state"
++ as a piece of information for subsequent instructions.
++
++ Depending on the opcode and operands, extra data may optionally follow. This
++ extra data can be a complement for the operand (e.g. a length or a distance
++ encoded on larger values), or a literal to be copied to the output buffer.
++
++ The first byte of the block follows a different encoding from other bytes; it
++ seems to be optimized for literal use only, since there is no dictionary yet
++ prior to that byte.
++
++ Lengths are always encoded with a variable size, starting with a small number
++ of bits in the operand. If that number of bits is not enough to represent the
++ length, extra bytes are consumed, each adding at most 255 to the length (thus
++ the compression ratio cannot exceed around 255:1). The variable length
++ encoding using #bits is always the same:
++
++ length = byte & ((1 << #bits) - 1)
++ if (!length) {
++         length = ((1 << #bits) - 1)
++         length += 255*(number of zero bytes)
++         length += first-non-zero-byte
++ }
++ length += constant (generally 2 or 3)
++
++ For references to the dictionary, distances are relative to the output
++ pointer. Distances are encoded using very few bits belonging to certain
++ ranges, resulting in multiple copy instructions using different encodings.
++ Certain encodings involve one extra byte, others involve two extra bytes
++ forming a little-endian 16-bit quantity (marked LE16 below).
++
++ After any instruction except the large literal copy, 0, 1, 2 or 3 literals
++ are copied before starting the next instruction. The number of literals that
++ were copied may change the meaning and behaviour of the next instruction. In
++ practice, only one instruction needs to know whether 0, less than 4, or more
++ literals were copied. This is the information stored in the <state> variable
++ in this implementation. This number of immediate literals to be copied is
++ generally encoded in the last two bits of the instruction but may also be
++ taken from the last two bits of an extra operand (e.g. a distance).
++
++ End of stream is declared when a block copy of distance 0 is seen. Only one
++ instruction may encode this distance (0001HLLL); it takes one LE16 operand
++ for the distance, thus requiring 3 bytes.
++
++ IMPORTANT NOTE: in the code some length checks are missing because certain
++ instructions are called under the assumption that a certain number of bytes
++ follow, because this has already been guaranteed before parsing the
++ instructions. They just have to "refill" this credit if they consume extra
++ bytes. This is an implementation design choice independent of the algorithm
++ or encoding.
++
++Byte sequences
++
++ First byte encoding:
++
++ 0..17   : follow regular instruction encoding, see below. It is worth
++           noting that codes 16 and 17 will represent a block copy from
++           the dictionary which is empty, and that they will always be
++           invalid at this place.
++
++ 18..21  : copy 0..3 literals
++           state = (byte - 17) = 0..3  [ copy <state> literals ]
++           skip byte
++
++ 22..255 : copy literal string
++           length = (byte - 17) = 4..238
++           state = 4  [ don't copy extra literals ]
++           skip byte
++
++ Instruction encoding:
++
++ 0 0 0 0 X X X X  (0..15)
++   Depends on the number of literals copied by the last instruction.
++   If the last instruction did not copy any literals (state == 0), this
++   encoding will be a copy of 4 or more literals, and must be interpreted
++   like this:
++
++   0 0 0 0 L L L L  (0..15) : copy long literal string
++     length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
++     state = 4  (no extra literals are copied)
++
++   If the last instruction copied 1 to 3 literals (encoded in the
++   instruction's opcode or distance), the instruction is a copy of a
++   2-byte block from the dictionary within a 1kB distance. It is worth
++   noting that this instruction provides little savings since it uses 2
++   bytes to encode a copy of 2 other bytes, but it encodes the number of
++   following literals for free. It must be interpreted like this:
++
++   0 0 0 0 D D S S  (0..15) : copy 2 bytes from <= 1kB distance
++     length = 2
++     state = S  (copy S literals after this block)
++   Always followed by exactly one byte: H H H H H H H H
++     distance = (H << 2) + D + 1
++
++   If the last instruction copied 4 or more literals (as detected by
++   state == 4), the instruction becomes a copy of a 3-byte block from the
++   dictionary from a 2..3kB distance, and must be interpreted like this:
++
++   0 0 0 0 D D S S  (0..15) : copy 3 bytes from 2..3 kB distance
++     length = 3
++     state = S  (copy S literals after this block)
++   Always followed by exactly one byte: H H H H H H H H
++     distance = (H << 2) + D + 2049
++
++ 0 0 0 1 H L L L  (16..31)
++   Copy of a block within 16..48kB distance (preferably less than 10B)
++     length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
++   Always followed by exactly one LE16: D D D D D D D D : D D D D D D S S
++     distance = 16384 + (H << 14) + D
++     state = S  (copy S literals after this block)
++   End of stream is reached if distance == 16384
++
++ 0 0 1 L L L L L  (32..63)
++   Copy of small block within 16kB distance (preferably less than 34B)
++     length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
++   Always followed by exactly one LE16: D D D D D D D D : D D D D D D S S
++     distance = D + 1
++     state = S  (copy S literals after this block)
++
++ 0 1 L D D D S S  (64..127)
++   Copy 3-4 bytes from block within 2kB distance
++     state = S  (copy S literals after this block)
++     length = 3 + L
++   Always followed by exactly one byte: H H H H H H H H
++     distance = (H << 3) + D + 1
++
++ 1 L L D D D S S  (128..255)
++   Copy 5-8 bytes from block within 2kB distance
++     state = S  (copy S literals after this block)
++     length = 5 + L
++   Always followed by exactly one byte: H H H H H H H H
++     distance = (H << 3) + D + 1
++
++Authors
++
++ This document was written by Willy Tarreau <w@×××.eu> on 2014/07/19 during an
++ analysis of the decompression code available in Linux 3.16-rc5. The code is
++ tricky; it is possible that this document contains mistakes or that a few
++ corner cases were overlooked. In any case, please report any doubt, fix, or
++ proposed update to the author(s) so that the document can be updated.
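
Aside: the variable-length scheme described in the lzo.txt text above condenses
to a small C helper. This is a minimal sketch only, not part of the patch; the
function name and calling convention are invented, and a real decompressor must
also enforce the input-length "credit" covered by the IMPORTANT NOTE above.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical decoder for "length = byte & ((1 << #bits) - 1)" above.
     * *pp points at the byte holding the operand in its low `bits` bits;
     * `base` is the per-opcode constant (generally 2 or 3). The caller
     * must guarantee that enough input bytes are readable.
     */
    static size_t lzo_decode_length(const uint8_t **pp, unsigned int bits,
                                    size_t base)
    {
            const uint8_t *p = *pp;
            size_t mask = ((size_t)1 << bits) - 1;
            size_t len = *p++ & mask;

            if (len == 0) {
                    len = mask;             /* (1 << #bits) - 1 */
                    while (*p == 0) {       /* each zero byte adds 255 */
                            len += 255;
                            p++;
                    }
                    len += *p++;            /* first non-zero byte */
            }
            *pp = p;
            return len + base;
    }

As a worked check against the 0001HLLL rule: the conventional end-of-stream
marker is the byte sequence 0x11 0x00 0x00. 0x11 is 0001 0001, so H = 0 and
L = 1 (length = 2 + 1 = 3), and the LE16 operand 0x0000 gives D = 0 and S = 0,
hence distance = 16384 + (0 << 14) + 0 = 16384, which the rules above declare
to be end of stream.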
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 290894176142..53838d9c6295 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -425,6 +425,20 @@ fault through the slow path.
+ Since only 19 bits are used to store generation-number on mmio spte, all
+ pages are zapped when there is an overflow.
+ 
++Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
++times, the last one happening when the generation number is retrieved and
++stored into the MMIO spte. Thus, the MMIO spte might be created based on
++out-of-date information, but with an up-to-date generation number.
++
++To avoid this, the generation number is incremented again after synchronize_srcu
++returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
++memslot update, while some SRCU readers might be using the old copy. We do not
++want to use an MMIO spte created with an odd generation number, and we can do
++this without losing a bit in the MMIO spte. The low bit of the generation
++is not stored in the MMIO spte, and is presumed zero when it is extracted out
++of the spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1,
++the next access to the spte will always be a cache miss.
++
+ 
+ Further reading
+ ===============
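
Aside: read as C, the odd-generation rule above amounts to the following
hypothetical check. The identifiers are invented for illustration; the real
logic lives in KVM's MMIO spte handling, not in this patch.

    typedef unsigned long long u64;     /* stand-in for the kernel type */

    /* The spte never stores the generation's low bit, which is presumed
     * zero on extraction. An spte created mid-update (odd generation)
     * therefore stores a value no later lookup can match, so the next
     * access is always a cache miss, exactly as the text above says.
     */
    static int mmio_spte_gen_matches(u64 gen_at_creation, u64 gen_at_access)
    {
            return (gen_at_creation & ~1ULL) == gen_at_access;
    }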
+diff --git a/Makefile b/Makefile
+index 46694098725d..390afde6538e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 17
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Shuffling Zombie Juror
+ 
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index b8c5cd3ddeb9..e6aa6e77a3ec 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -144,8 +144,8 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += kirkwood-b3.dtb \
+ kirkwood-openrd-client.dtb \
+ kirkwood-openrd-ultimate.dtb \
+ kirkwood-rd88f6192.dtb \
+- kirkwood-rd88f6281-a0.dtb \
+- kirkwood-rd88f6281-a1.dtb \
++ kirkwood-rd88f6281-z0.dtb \
++ kirkwood-rd88f6281-a.dtb \
+ kirkwood-rs212.dtb \
+ kirkwood-rs409.dtb \
+ kirkwood-rs411.dtb \
+diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+index d6d572e5af32..285524fb915e 100644
+--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
++++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+@@ -143,6 +143,10 @@
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ 
++ /* Use Hardware BCH ECC */
++ nand-ecc-strength = <4>;
++ nand-ecc-step-size = <512>;
++
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>; /* 1.5MB */
+diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+index c5fe8b5dcdc7..4ec1ce561d34 100644
+--- a/arch/arm/boot/dts/armada-370-netgear-rn104.dts
++++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+@@ -145,6 +145,10 @@
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ 
++ /* Use Hardware BCH ECC */
++ nand-ecc-strength = <4>;
++ nand-ecc-step-size = <512>;
++
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>; /* 1.5MB */
+diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+index 0cf999abc4ed..c5ed85a70ed9 100644
+--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
++++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+@@ -223,6 +223,10 @@
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ 
++ /* Use Hardware BCH ECC */
++ nand-ecc-strength = <4>;
++ nand-ecc-step-size = <512>;
++
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>; /* 1.5MB */
+diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
+index bb23c2d33cf8..5e95a8053445 100644
+--- a/arch/arm/boot/dts/at91sam9263.dtsi
++++ b/arch/arm/boot/dts/at91sam9263.dtsi
+@@ -834,6 +834,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff80000 0x600>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&mci0_clk>;
+@@ -845,6 +846,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff84000 0x600>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&mci1_clk>;
+diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
+index e4cc44c98585..41a983405e7d 100644
+--- a/arch/arm/boot/dts/imx28-evk.dts
++++ b/arch/arm/boot/dts/imx28-evk.dts
+@@ -193,7 +193,6 @@
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+- clock-frequency = <400000>;
+ status = "okay";
+ 
+ sgtl5000: codec@0a {
+diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+index 8f76d28759a3..f82827d6fcff 100644
+--- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
++++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+@@ -123,11 +123,11 @@
+ 
+ dsa@0 {
+ compatible = "marvell,dsa";
+- #address-cells = <2>;
++ #address-cells = <1>;
+ #size-cells = <0>;
+ 
+- dsa,ethernet = <&eth0>;
+- dsa,mii-bus = <&ethphy0>;
++ dsa,ethernet = <&eth0port>;
++ dsa,mii-bus = <&mdio>;
+ 
+ switch@0 {
+ #address-cells = <1>;
+@@ -169,17 +169,13 @@
+ 
+ &mdio {
+ status = "okay";
+-
+- ethphy0: ethernet-phy@ff {
+- reg = <0xff>; /* No phy attached */
+- speed = <1000>;
+- duplex = <1>;
+- };
+ };
+ 
+ &eth0 {
+ status = "okay";
++
+ ethernet0-port@0 {
+- phy-handle = <&ethphy0>;
++ speed = <1000>;
++ duplex = <1>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts
+new file mode 100644
+index 000000000000..f2e08b3b33ea
+--- /dev/null
++++ b/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts
+@@ -0,0 +1,43 @@
++/*
++ * Marvell RD88F6181 A Board descrition
++ *
++ * Andrew Lunn <andrew@××××.ch>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ *
++ * This file contains the definitions for the board with the A0 or
++ * higher stepping of the SoC. The ethernet switch does not have a
++ * "wan" port.
++ */
++
++/dts-v1/;
++#include "kirkwood-rd88f6281.dtsi"
++
++/ {
++ model = "Marvell RD88f6281 Reference design, with A0 or higher SoC";
++ compatible = "marvell,rd88f6281-a", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
++
++ dsa@0 {
++ switch@0 {
++ reg = <10 0>; /* MDIO address 10, switch 0 in tree */
++ };
++ };
++};
++
++&mdio {
++ status = "okay";
++
++ ethphy1: ethernet-phy@11 {
++ reg = <11>;
++ };
++};
++
++&eth1 {
++ status = "okay";
++
++ ethernet1-port@0 {
++ phy-handle = <&ethphy1>;
++ };
++};
+diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-a0.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-a0.dts
+deleted file mode 100644
+index a803bbb70bc8..000000000000
+--- a/arch/arm/boot/dts/kirkwood-rd88f6281-a0.dts
++++ /dev/null
+@@ -1,26 +0,0 @@
+-/*
+- * Marvell RD88F6181 A0 Board descrition
+- *
+- * Andrew Lunn <andrew@××××.ch>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- *
+- * This file contains the definitions for the board with the A0 variant of
+- * the SoC. The ethernet switch does not have a "wan" port.
+- */
+-
+-/dts-v1/;
+-#include "kirkwood-rd88f6281.dtsi"
+-
+-/ {
+- model = "Marvell RD88f6281 Reference design, with A0 SoC";
+- compatible = "marvell,rd88f6281-a0", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
+-
+- dsa@0 {
+- switch@0 {
+- reg = <10 0>; /* MDIO address 10, switch 0 in tree */
+- };
+- };
+-};
+\ No newline at end of file
+diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-a1.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-a1.dts
+deleted file mode 100644
+index baeebbf1d8c7..000000000000
+--- a/arch/arm/boot/dts/kirkwood-rd88f6281-a1.dts
++++ /dev/null
+@@ -1,31 +0,0 @@
+-/*
+- * Marvell RD88F6181 A1 Board descrition
+- *
+- * Andrew Lunn <andrew@××××.ch>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- *
+- * This file contains the definitions for the board with the A1 variant of
+- * the SoC. The ethernet switch has a "wan" port.
+- */
+-
+-/dts-v1/;
+-
+-#include "kirkwood-rd88f6281.dtsi"
+-
+-/ {
+- model = "Marvell RD88f6281 Reference design, with A1 SoC";
+- compatible = "marvell,rd88f6281-a1", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
+-
+- dsa@0 {
+- switch@0 {
+- reg = <0 0>; /* MDIO address 0, switch 0 in tree */
+- port@4 {
+- reg = <4>;
+- label = "wan";
+- };
+- };
+- };
+-};
+\ No newline at end of file
+diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
+new file mode 100644
+index 000000000000..f4272b64ed7f
+--- /dev/null
++++ b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
+@@ -0,0 +1,35 @@
++/*
++ * Marvell RD88F6181 Z0 stepping descrition
++ *
++ * Andrew Lunn <andrew@××××.ch>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ *
++ * This file contains the definitions for the board using the Z0
++ * stepping of the SoC. The ethernet switch has a "wan" port.
++*/
++
++/dts-v1/;
++
++#include "kirkwood-rd88f6281.dtsi"
++
++/ {
++ model = "Marvell RD88f6281 Reference design, with Z0 SoC";
++ compatible = "marvell,rd88f6281-z0", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
++
++ dsa@0 {
++ switch@0 {
++ reg = <0 0>; /* MDIO address 0, switch 0 in tree */
++ port@4 {
++ reg = <4>;
++ label = "wan";
++ };
++ };
++ };
++};
++
++&eth1 {
++ status = "disabled";
++};
+diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
+index 26cf0e0ccefd..d195e884b3b5 100644
+--- a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
++++ b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
+@@ -37,7 +37,6 @@
+ 
+ ocp@f1000000 {
+ pinctrl: pin-controller@10000 {
+- pinctrl-0 = <&pmx_sdio_cd>;
+ pinctrl-names = "default";
+ 
+ pmx_sdio_cd: pmx-sdio-cd {
+@@ -69,8 +68,8 @@
+ #address-cells = <2>;
+ #size-cells = <0>;
+ 
+- dsa,ethernet = <&eth0>;
+- dsa,mii-bus = <&ethphy1>;
++ dsa,ethernet = <&eth0port>;
++ dsa,mii-bus = <&mdio>;
+ 
+ switch@0 {
+ #address-cells = <1>;
+@@ -119,35 +118,19 @@
+ };
+ 
+ partition@300000 {
+- label = "data";
++ label = "rootfs";
+ reg = <0x0300000 0x500000>;
+ };
+ };
+ 
+ &mdio {
+ status = "okay";
+-
+- ethphy0: ethernet-phy@0 {
+- reg = <0>;
+- };
+-
+- ethphy1: ethernet-phy@ff {
+- reg = <0xff>; /* No PHY attached */
+- speed = <1000>;
+- duple = <1>;
+- };
+ };
+ 
+ &eth0 {
+ status = "okay";
+ ethernet0-port@0 {
+- phy-handle = <&ethphy0>;
+- };
+-};
+-
+-&eth1 {
+- status = "okay";
+- ethernet1-port@0 {
+- phy-handle = <&ethphy1>;
++ speed = <1000>;
++ duplex = <1>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
+index afc640cd80c5..464f09a1a4a5 100644
+--- a/arch/arm/boot/dts/kirkwood.dtsi
++++ b/arch/arm/boot/dts/kirkwood.dtsi
+@@ -309,7 +309,7 @@
+ marvell,tx-checksum-limit = <1600>;
+ status = "disabled";
+ 
+- ethernet0-port@0 {
++ eth0port: ethernet0-port@0 {
+ compatible = "marvell,kirkwood-eth-port";
+ reg = <0>;
+ interrupts = <11>;
+@@ -342,7 +342,7 @@
+ pinctrl-names = "default";
+ status = "disabled";
+ 
+- ethernet1-port@0 {
++ eth1port: ethernet1-port@0 {
+ compatible = "marvell,kirkwood-eth-port";
+ reg = <0>;
+ interrupts = <15>;
+diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
+index a0775851cce5..eaf41451ad0c 100644
+--- a/arch/arm/boot/dts/sama5d3_can.dtsi
++++ b/arch/arm/boot/dts/sama5d3_can.dtsi
+@@ -40,7 +40,7 @@
+ atmel,clk-output-range = <0 66000000>;
+ };
+ 
+- can1_clk: can0_clk {
++ can1_clk: can1_clk {
+ #clock-cells = <0>;
+ reg = <41>;
+ atmel,clk-output-range = <0 66000000>;
+diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
+index 034529d801b2..d66f102c352a 100644
+--- a/arch/arm/mach-at91/clock.c
++++ b/arch/arm/mach-at91/clock.c
+@@ -962,6 +962,7 @@ static int __init at91_clock_reset(void)
+ }
+ 
+ at91_pmc_write(AT91_PMC_SCDR, scdr);
++ at91_pmc_write(AT91_PMC_PCDR, pcdr);
+ if (cpu_is_sama5d3())
+ at91_pmc_write(AT91_PMC_PCDR1, pcdr1);
+ 
+diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
+index 253e33bc94fb..56de5aadede2 100644
+--- a/arch/arm64/include/asm/compat.h
++++ b/arch/arm64/include/asm/compat.h
+@@ -37,8 +37,8 @@ typedef s32 compat_ssize_t;
+ typedef s32 compat_time_t;
+ typedef s32 compat_clock_t;
+ typedef s32 compat_pid_t;
+-typedef u32 __compat_uid_t;
+-typedef u32 __compat_gid_t;
++typedef u16 __compat_uid_t;
++typedef u16 __compat_gid_t;
+ typedef u16 __compat_uid16_t;
+ typedef u16 __compat_gid16_t;
+ typedef u32 __compat_uid32_t;
+diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
+index 8e24ef3f7c82..b4f6b19a8a68 100644
+--- a/arch/arm64/include/asm/irq_work.h
++++ b/arch/arm64/include/asm/irq_work.h
+@@ -1,6 +1,8 @@
+ #ifndef __ASM_IRQ_WORK_H
+ #define __ASM_IRQ_WORK_H
+ 
++#ifdef CONFIG_SMP
++
+ #include <asm/smp.h>
+ 
+ static inline bool arch_irq_work_has_interrupt(void)
+@@ -8,4 +10,13 @@ static inline bool arch_irq_work_has_interrupt(void)
+ return !!__smp_cross_call;
+ }
+ 
++#else
++
++static inline bool arch_irq_work_has_interrupt(void)
++{
++ return false;
++}
++
++#endif
++
+ #endif /* __ASM_IRQ_WORK_H */
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index f0b5e5120a87..726b910fe6ec 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -324,7 +324,6 @@ el1_dbg:
+ mrs x0, far_el1
+ mov x2, sp // struct pt_regs
+ bl do_debug_exception
+- enable_dbg
+ kernel_exit 1
+ el1_inv:
+ // TODO: add support for undefined instructions in kernel mode
+diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
+index 2c7dde3c6430..2a5259fd23eb 100644
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -28,9 +28,11 @@
+ int hwreg_present( volatile void *regp )
+ {
+ int ret = 0;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+ 
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr1,%4@(8)\n\t"
+@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors)
+ );
++ local_irq_restore(flags);
+ 
+ return( ret );
+ }
+@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
+ int hwreg_write( volatile void *regp, unsigned short val )
+ {
+ int ret;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+ 
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr2,%4@(8)\n\t"
+@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors), "g" (val)
+ );
++ local_irq_restore(flags);
+ 
+ return( ret );
+ }
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 00e3844525a6..eef08f0bca73 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -584,6 +584,8 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
+ {
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+ int state = *((int *)flag);
++ struct eeh_dev *edev, *tmp;
++ struct pci_dev *pdev;
+ 
+ /* Keep the state of permanently removed PE intact */
+ if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) &&
+@@ -592,9 +594,22 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
+ 
+ pe->state &= ~state;
+ 
+- /* Clear check count since last isolation */
+- if (state & EEH_PE_ISOLATED)
+- pe->check_count = 0;
++ /*
++ * Special treatment on clearing isolated state. Clear
++ * check count since last isolation and put all affected
++ * devices to normal state.
++ */
++ if (!(state & EEH_PE_ISOLATED))
++ return NULL;
++
++ pe->check_count = 0;
++ eeh_pe_for_each_dev(pe, edev, tmp) {
++ pdev = eeh_dev_to_pci_dev(edev);
++ if (!pdev)
++ continue;
++
++ pdev->error_state = pci_channel_io_normal;
++ }
+ 
+ return NULL;
+ }
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index a0738af4aba6..dc0e7744d2a8 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -379,8 +379,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ /*
+ * numa_node_id() works after this.
+ */
+- set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
+- set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu]));
++ if (cpu_present(cpu)) {
++ set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
++ set_cpu_numa_mem(cpu,
++ local_memory_node(numa_cpu_lookup_table[cpu]));
++ }
+ }
+ 
+ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+@@ -728,6 +731,9 @@ void start_secondary(void *unused)
+ }
+ traverse_core_siblings(cpu, true);
+ 
++ set_numa_node(numa_cpu_lookup_table[cpu]);
++ set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
++
+ smp_wmb();
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index d7737a542fd7..3a9061e9f5dd 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1127,9 +1127,8 @@ void __init do_init_bootmem(void)
+ * even before we online them, so that we can use cpu_to_{node,mem}
+ * early in boot, cf. smp_prepare_cpus().
+ */
+- for_each_possible_cpu(cpu) {
+- cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+- (void *)(unsigned long)cpu);
++ for_each_present_cpu(cpu) {
++ numa_setup_cpu((unsigned long)cpu);
+ }
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 4642d6a4d356..de1ec54a2a57 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -329,16 +329,16 @@ struct direct_window {
+ 
+ /* Dynamic DMA Window support */
+ struct ddw_query_response {
+- __be32 windows_available;
+- __be32 largest_available_block;
+- __be32 page_size;
+- __be32 migration_capable;
++ u32 windows_available;
++ u32 largest_available_block;
++ u32 page_size;
++ u32 migration_capable;
+ };
+ 
+ struct ddw_create_response {
+- __be32 liobn;
+- __be32 addr_hi;
+- __be32 addr_lo;
++ u32 liobn;
++ u32 addr_hi;
++ u32 addr_lo;
+ };
+ 
+ static LIST_HEAD(direct_window_list);
+@@ -725,16 +725,18 @@ static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ struct dynamic_dma_window_prop *dwp;
+ struct property *win64;
+- const u32 *ddw_avail;
++ u32 ddw_avail[3];
+ u64 liobn;
+- int len, ret = 0;
++ int ret = 0;
++
++ ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
++ &ddw_avail[0], 3);
+ 
+- ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+ if (!win64)
+ return;
+ 
+- if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
++ if (ret || win64->length < sizeof(*dwp))
+ goto delprop;
+ 
+ dwp = win64->value;
+@@ -872,8 +874,9 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 
+ do {
+ /* extra outputs are LIOBN and dma-addr (hi, lo) */
+- ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
+- BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
++ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
++ cfg_addr, BUID_HI(buid), BUID_LO(buid),
++ page_shift, window_shift);
+ } while (rtas_busy_delay(ret));
+ dev_info(&dev->dev,
+ "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
+@@ -910,7 +913,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ int page_shift;
+ u64 dma_addr, max_addr;
+ struct device_node *dn;
+- const u32 *uninitialized_var(ddw_avail);
++ u32 ddw_avail[3];
+ struct direct_window *window;
+ struct property *win64;
+ struct dynamic_dma_window_prop *ddwprop;
+@@ -942,8 +945,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ * for the given node in that order.
+ * the property is actually in the parent, not the PE
+ */
+- ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
+- if (!ddw_avail || len < 3 * sizeof(u32))
++ ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
++ &ddw_avail[0], 3);
++ if (ret)
+ goto out_failed;
+ 
+ /*
+@@ -966,11 +970,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ dev_dbg(&dev->dev, "no free dynamic windows");
+ goto out_failed;
+ }
+- if (be32_to_cpu(query.page_size) & 4) {
++ if (query.page_size & 4) {
+ page_shift = 24; /* 16MB */
+- } else if (be32_to_cpu(query.page_size) & 2) {
++ } else if (query.page_size & 2) {
+ page_shift = 16; /* 64kB */
+- } else if (be32_to_cpu(query.page_size) & 1) {
++ } else if (query.page_size & 1) {
+ page_shift = 12; /* 4kB */
+ } else {
+ dev_dbg(&dev->dev, "no supported direct page size in mask %x",
+@@ -980,7 +984,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ /* verify the window * number of ptes will map the partition */
+ /* check largest block * page size > max memory hotplug addr */
+ max_addr = memory_hotplug_max();
+- if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
++ if (query.largest_available_block < (max_addr >> page_shift)) {
+ dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
+ "%llu-sized pages\n", max_addr, query.largest_available_block,
+ 1ULL << page_shift);
+@@ -1006,8 +1010,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ if (ret != 0)
+ goto out_free_prop;
+ 
+- ddwprop->liobn = create.liobn;
+- ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
++ ddwprop->liobn = cpu_to_be32(create.liobn);
++ ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
++ create.addr_lo);
+ ddwprop->tce_shift = cpu_to_be32(page_shift);
+ ddwprop->window_shift = cpu_to_be32(len);
+ 
+@@ -1039,7 +1044,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
+ 
+- dma_addr = of_read_number(&create.addr_hi, 2);
++ dma_addr = be64_to_cpu(ddwprop->dma_base);
+ goto out_unlock;
+ 
+ out_free_window:
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index f4c819bfc193..fe482ec99bae 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -85,6 +85,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
+ return 1;
++ return 0;
+ case KVM_S390_INT_EMERGENCY:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index a537816613f9..96ac69c5eba0 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -67,6 +67,7 @@ config SPARC64
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_DEBUG_KMEMLEAK
++ select SPARSE_IRQ
+ select RTC_DRV_CMOS
+ select RTC_DRV_BQ4802
+ select RTC_DRV_SUN4V
+diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
+index 94b39caea3eb..4f6725ff4c33 100644
+--- a/arch/sparc/include/asm/hypervisor.h
++++ b/arch/sparc/include/asm/hypervisor.h
+@@ -2947,6 +2947,16 @@ unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+ #endif
+ 
++#define HV_FAST_T5_GET_PERFREG 0x1a8
++#define HV_FAST_T5_SET_PERFREG 0x1a9
++
++#ifndef __ASSEMBLY__
++unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
++ unsigned long *reg_val);
++unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
++ unsigned long reg_val);
++#endif
++
+ /* Function numbers for HV_CORE_TRAP. */
+ #define HV_CORE_SET_VER 0x00
+ #define HV_CORE_PUTCHAR 0x01
+@@ -2978,6 +2988,7 @@ unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ #define HV_GRP_VF_CPU 0x0205
+ #define HV_GRP_KT_CPU 0x0209
+ #define HV_GRP_VT_CPU 0x020c
++#define HV_GRP_T5_CPU 0x0211
+ #define HV_GRP_DIAG 0x0300
+ 
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
+index 91d219381306..3f70f900e834 100644
+--- a/arch/sparc/include/asm/irq_64.h
++++ b/arch/sparc/include/asm/irq_64.h
+@@ -37,7 +37,7 @@
+ *
+ * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
+ */
+-#define NR_IRQS 255
++#define NR_IRQS (2048)
+ 
+ void irq_install_pre_handler(int irq,
+ void (*func)(unsigned int, void *, void *),
+@@ -57,11 +57,8 @@ unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+ unsigned long iclr_base);
+ void sun4u_destroy_msi(unsigned int irq);
+ 
+-unsigned char irq_alloc(unsigned int dev_handle,
+- unsigned int dev_ino);
+-#ifdef CONFIG_PCI_MSI
++unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
+ void irq_free(unsigned int irq);
+-#endif
+ 
+ void __init init_IRQ(void);
+ void fixup_irqs(void);
+diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
+index c8c67f621f4f..58ab64de25d2 100644
+--- a/arch/sparc/include/asm/ldc.h
++++ b/arch/sparc/include/asm/ldc.h
+@@ -53,13 +53,14 @@ struct ldc_channel;
+ /* Allocate state for a channel. */
+ struct ldc_channel *ldc_alloc(unsigned long id,
+ const struct ldc_channel_config *cfgp,
+- void *event_arg);
++ void *event_arg,
++ const char *name);
+ 
+ /* Shut down and free state for a channel. */
+ void ldc_free(struct ldc_channel *lp);
+ 
+ /* Register TX and RX queues of the link with the hypervisor. */
+-int ldc_bind(struct ldc_channel *lp, const char *name);
++int ldc_bind(struct ldc_channel *lp);
+ 
+ /* For non-RAW protocols we need to complete a handshake before
+ * communication can proceed. ldc_connect() does that, if the
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index f34682430fcf..2e3a4add8591 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+ * preferably as early as possible. Pass it the romvec pointer.
+ */
+-void prom_init(void *cif_handler, void *cif_stack);
++void prom_init(void *cif_handler);
++void prom_init_report(void);
+ 
+ /* Boot argument acquisition, returns the boot command line string. */
+ char *prom_getbootargs(void);
+diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
+index bf109984a032..8c2a8c937540 100644
+--- a/arch/sparc/include/asm/page_64.h
++++ b/arch/sparc/include/asm/page_64.h
+@@ -57,18 +57,21 @@ void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topa
+ typedef struct { unsigned long pte; } pte_t;
+ typedef struct { unsigned long iopte; } iopte_t;
+ typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
+ typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ 
+ #define pte_val(x) ((x).pte)
+ #define iopte_val(x) ((x).iopte)
+ #define pmd_val(x) ((x).pmd)
++#define pud_val(x) ((x).pud)
+ #define pgd_val(x) ((x).pgd)
+ #define pgprot_val(x) ((x).pgprot)
+ 
+ #define __pte(x) ((pte_t) { (x) } )
+ #define __iopte(x) ((iopte_t) { (x) } )
+ #define __pmd(x) ((pmd_t) { (x) } )
++#define __pud(x) ((pud_t) { (x) } )
+ #define __pgd(x) ((pgd_t) { (x) } )
+ #define __pgprot(x) ((pgprot_t) { (x) } )
+ 
+@@ -77,18 +80,21 @@ typedef struct { unsigned long pgprot; } pgprot_t;
+ typedef unsigned long pte_t;
+ typedef unsigned long iopte_t;
+ typedef unsigned long pmd_t;
++typedef unsigned long pud_t;
+ typedef unsigned long pgd_t;
+ typedef unsigned long pgprot_t;
+ 
+ #define pte_val(x) (x)
+ #define iopte_val(x) (x)
+ #define pmd_val(x) (x)
++#define pud_val(x) (x)
+ #define pgd_val(x) (x)
+ #define pgprot_val(x) (x)
+ 
+ #define __pte(x) (x)
+ #define __iopte(x) (x)
+ #define __pmd(x) (x)
++#define __pud(x) (x)
+ #define __pgd(x) (x)
+ #define __pgprot(x) (x)
+ 
+@@ -96,21 +102,14 @@ typedef unsigned long pgprot_t;
+ 
+ typedef pte_t *pgtable_t;
+ 
+-/* These two values define the virtual address space range in which we
+- * must forbid 64-bit user processes from making mappings. It used to
+- * represent precisely the virtual address space hole present in most
+- * early sparc64 chips including UltraSPARC-I. But now it also is
+- * further constrained by the limits of our page tables, which is
+- * 43-bits of virtual address.
+- */
+-#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL)
+-#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL)
++extern unsigned long sparc64_va_hole_top;
++extern unsigned long sparc64_va_hole_bottom;
+ 
+ /* The next two defines specify the actual exclusion region we
+ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
+ */
+-#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
+-#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL))
++#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
++#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL))
+ 
+ #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
+ _AC(0x0000000070000000,UL) : \
+@@ -118,20 +117,16 @@ typedef pte_t *pgtable_t;
+ 
+ #include <asm-generic/memory_model.h>
+ 
+-#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X)))
+ extern unsigned long PAGE_OFFSET;
+ 
+ #endif /* !(__ASSEMBLY__) */
+ 
+-/* The maximum number of physical memory address bits we support, this
+- * is used to size various tables used to manage kernel TLB misses and
+- * also the sparsemem code.
++/* The maximum number of physical memory address bits we support. The
++ * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
++ * evaluates to.
+ */
+-#define MAX_PHYS_ADDRESS_BITS 47
++#define MAX_PHYS_ADDRESS_BITS 53
+ 
+-/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
+- * and kpte_linear_bitmap.
+- */
+ #define ILOG2_4MB 22
+ #define ILOG2_256MB 28
+ 
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 39a7ac49b00c..5e3187185b4a 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -15,6 +15,13 @@
+ 
+ extern struct kmem_cache *pgtable_cache;
+ 
++static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
++{
++ pgd_set(pgd, pud);
++}
++
++#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
+@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ kmem_cache_free(pgtable_cache, pgd);
+ }
+ 
+-#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
++static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
++{
++ pud_set(pud, pmd);
++}
++
++#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ return kmem_cache_alloc(pgtable_cache,
++ GFP_KERNEL|__GFP_REPEAT);
++}
++
++static inline void pud_free(struct mm_struct *mm, pud_t *pud)
++{
++ kmem_cache_free(pgtable_cache, pud);
++}
+ 
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
+ #define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, false)
+ 
++#define __pud_free_tlb(tlb, pud, addr) \
++ pgtable_free_tlb(tlb, pud, false)
++
+ #endif /* _SPARC64_PGALLOC_H */
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 3770bf5c6e1b..bfeb626085ac 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -20,8 +20,6 @@
+ #include <asm/page.h>
+ #include <asm/processor.h>
+ 
+-#include <asm-generic/pgtable-nopud.h>
+-
+ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
+ * The page copy blockops can use 0x6000000 to 0x8000000.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+@@ -42,10 +40,7 @@
+ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
+ #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
+ #define VMALLOC_START _AC(0x0000000100000000,UL)
+-#define VMALLOC_END _AC(0x0000010000000000,UL)
+-#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
+-
+-#define vmemmap ((struct page *)VMEMMAP_BASE)
++#define VMEMMAP_BASE VMALLOC_END
+ 
+ /* PMD_SHIFT determines the size of the area a second-level page
+ * table can map
+@@ -55,13 +50,25 @@
+ #define PMD_MASK (~(PMD_SIZE-1))
+ #define PMD_BITS (PAGE_SHIFT - 3)
+ 
+-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
++/* PUD_SHIFT determines the size of the area a third-level page
++ * table can map
++ */
++#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++#define PUD_BITS (PAGE_SHIFT - 3)
++
++/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
++#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
+ #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE-1))
+ #define PGDIR_BITS (PAGE_SHIFT - 3)
+ 
+-#if (PGDIR_SHIFT + PGDIR_BITS) != 43
++#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
++#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
++#endif
++
++#if (PGDIR_SHIFT + PGDIR_BITS) != 53
+ #error Page table parameters do not cover virtual address space properly.
+ #endif
+ 
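
Aside: a quick sanity check of the new "!= 53" guard, assuming sparc64's 8KB
pages (PAGE_SHIFT = 13) and the usual definition PMD_SHIFT = PAGE_SHIFT +
(PAGE_SHIFT - 3), so that each table level contributes PAGE_SHIFT - 3 = 10
bits:

    PMD_SHIFT   = 13 + 10 = 23
    PUD_SHIFT   = PMD_SHIFT + PMD_BITS = 23 + 10 = 33
    PGDIR_SHIFT = PUD_SHIFT + PUD_BITS = 33 + 10 = 43
    PGDIR_SHIFT + PGDIR_BITS = 43 + 10 = 53

which is exactly what the guard asserts, and which also satisfies the new
MAX_PHYS_ADDRESS_BITS of 53 checked just above it.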
1274 |
+@@ -71,28 +78,18 @@ |
1275 |
+ |
1276 |
+ #ifndef __ASSEMBLY__ |
1277 |
+ |
1278 |
+-#include <linux/sched.h> |
1279 |
+- |
1280 |
+-extern unsigned long sparc64_valid_addr_bitmap[]; |
1281 |
++extern unsigned long VMALLOC_END; |
1282 |
+ |
1283 |
+-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ |
1284 |
+-static inline bool __kern_addr_valid(unsigned long paddr) |
1285 |
+-{ |
1286 |
+- if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL) |
1287 |
+- return false; |
1288 |
+- return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap); |
1289 |
+-} |
1290 |
++#define vmemmap ((struct page *)VMEMMAP_BASE) |
1291 |
+ |
1292 |
+-static inline bool kern_addr_valid(unsigned long addr) |
1293 |
+-{ |
1294 |
+- unsigned long paddr = __pa(addr); |
1295 |
++#include <linux/sched.h> |
1296 |
+ |
1297 |
+- return __kern_addr_valid(paddr); |
1298 |
+-} |
1299 |
++bool kern_addr_valid(unsigned long addr); |
1300 |
+ |
1301 |
+ /* Entries per page directory level. */ |
1302 |
+ #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) |
1303 |
+ #define PTRS_PER_PMD (1UL << PMD_BITS) |
1304 |
++#define PTRS_PER_PUD (1UL << PUD_BITS) |
1305 |
+ #define PTRS_PER_PGD (1UL << PGDIR_BITS) |
1306 |
+ |
1307 |
+ /* Kernel has a separate 44bit address space. */ |
1308 |
+@@ -101,6 +98,9 @@ static inline bool kern_addr_valid(unsigned long addr) |
1309 |
+ #define pmd_ERROR(e) \ |
1310 |
+ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \ |
1311 |
+ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0)) |
1312 |
++#define pud_ERROR(e) \ |
1313 |
++ pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \ |
1314 |
++ __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0)) |
1315 |
+ #define pgd_ERROR(e) \ |
1316 |
+ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \ |
1317 |
+ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0)) |
1318 |
+@@ -112,6 +112,7 @@ static inline bool kern_addr_valid(unsigned long addr) |
1319 |
+ #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ |
1320 |
+ #define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */ |
1321 |
+ #define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */ |
1322 |
++#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE |
1323 |
+ |
1324 |
+ /* Advertise support for _PAGE_SPECIAL */ |
1325 |
+ #define __HAVE_ARCH_PTE_SPECIAL |
1326 |
+@@ -658,26 +659,26 @@ static inline unsigned long pmd_large(pmd_t pmd) |
1327 |
+ return pte_val(pte) & _PAGE_PMD_HUGE; |
1328 |
+ } |
1329 |
+ |
1330 |
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1331 |
+-static inline unsigned long pmd_young(pmd_t pmd) |
1332 |
++static inline unsigned long pmd_pfn(pmd_t pmd) |
1333 |
+ { |
1334 |
+ pte_t pte = __pte(pmd_val(pmd)); |
1335 |
+ |
1336 |
+- return pte_young(pte); |
1337 |
++ return pte_pfn(pte); |
1338 |
+ } |
1339 |
+ |
1340 |
+-static inline unsigned long pmd_write(pmd_t pmd) |
1341 |
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1342 |
++static inline unsigned long pmd_young(pmd_t pmd) |
1343 |
+ { |
1344 |
+ pte_t pte = __pte(pmd_val(pmd)); |
1345 |
+ |
1346 |
+- return pte_write(pte); |
1347 |
++ return pte_young(pte); |
1348 |
+ } |
1349 |
+ |
1350 |
+-static inline unsigned long pmd_pfn(pmd_t pmd) |
1351 |
++static inline unsigned long pmd_write(pmd_t pmd) |
1352 |
+ { |
1353 |
+ pte_t pte = __pte(pmd_val(pmd)); |
1354 |
+ |
1355 |
+- return pte_pfn(pte); |
1356 |
++ return pte_write(pte); |
1357 |
+ } |
1358 |
+ |
1359 |
+ static inline unsigned long pmd_trans_huge(pmd_t pmd) |
1360 |
+@@ -771,13 +772,15 @@ static inline int pmd_present(pmd_t pmd) |
1361 |
+ * the top bits outside of the range of any physical address size we |
1362 |
+ * support are clear as well. We also validate the physical itself. |
1363 |
+ */ |
1364 |
+-#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \ |
1365 |
+- !__kern_addr_valid(pmd_val(pmd))) |
1366 |
++#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) |
1367 |
+ |
1368 |
+ #define pud_none(pud) (!pud_val(pud)) |
1369 |
+ |
1370 |
+-#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \ |
1371 |
+- !__kern_addr_valid(pud_val(pud))) |
1372 |
++#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK) |
1373 |
++ |
1374 |
++#define pgd_none(pgd) (!pgd_val(pgd)) |
1375 |
++ |
1376 |
++#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK) |
1377 |
+ |
1378 |
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1379 |
+ void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
1380 |
+@@ -815,10 +818,31 @@ static inline unsigned long __pmd_page(pmd_t pmd) |
1381 |
+ #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) |
1382 |
+ #define pud_present(pud) (pud_val(pud) != 0U) |
1383 |
+ #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) |
1384 |
++#define pgd_page_vaddr(pgd) \ |
1385 |
++ ((unsigned long) __va(pgd_val(pgd))) |
1386 |
++#define pgd_present(pgd) (pgd_val(pgd) != 0U) |
1387 |
++#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL) |
1388 |
++ |
1389 |
++static inline unsigned long pud_large(pud_t pud) |
1390 |
++{ |
1391 |
++ pte_t pte = __pte(pud_val(pud)); |
1392 |
++ |
1393 |
++ return pte_val(pte) & _PAGE_PMD_HUGE; |
1394 |
++} |
1395 |
++ |
1396 |
++static inline unsigned long pud_pfn(pud_t pud) |
1397 |
++{ |
1398 |
++ pte_t pte = __pte(pud_val(pud)); |
1399 |
++ |
1400 |
++ return pte_pfn(pte); |
1401 |
++} |
1402 |
+ |
1403 |
+ /* Same in both SUN4V and SUN4U. */ |
1404 |
+ #define pte_none(pte) (!pte_val(pte)) |
1405 |
+ |
1406 |
++#define pgd_set(pgdp, pudp) \ |
1407 |
++ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp)))) |
1408 |
++ |
1409 |
+ /* to find an entry in a page-table-directory. */ |
1410 |
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
1411 |
+ #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
1412 |
+@@ -826,6 +850,11 @@ static inline unsigned long __pmd_page(pmd_t pmd) |
1413 |
+ /* to find an entry in a kernel page-table-directory */ |
1414 |
+ #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
1415 |
+ |
1416 |
++/* Find an entry in the third-level page table.. */ |
1417 |
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) |
1418 |
++#define pud_offset(pgdp, address) \ |
1419 |
++ ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address)) |
1420 |
++ |
1421 |
+ /* Find an entry in the second-level page table.. */ |
1422 |
+ #define pmd_offset(pudp, address) \ |
1423 |
+ ((pmd_t *) pud_page_vaddr(*(pudp)) + \ |
1424 |
+@@ -898,7 +927,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, |
1425 |
+ #endif |
1426 |
+ |
1427 |
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
1428 |
+-extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD]; |
1429 |
+ |
1430 |
+ void paging_init(void); |
1431 |
+ unsigned long find_ecache_flush_span(unsigned long size); |
1432 |
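The pgtable_64.h hunks above grow sparc64's kernel page tables from three
levels to four (PGD -> PUD -> PMD -> PTE). A minimal sketch of the walk the
new macros compose to, assuming the usual pmd_none()/pte_offset_kernel()
helpers from the same header; error handling is elided:

	static pte_t *walk_example(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);	/* the new PUD level */
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}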
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h |
1433 |
+index f5fffd84d0dd..29d64b1758ed 100644 |
1434 |
+--- a/arch/sparc/include/asm/setup.h |
1435 |
++++ b/arch/sparc/include/asm/setup.h |
1436 |
+@@ -48,6 +48,8 @@ unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int); |
1437 |
+ #endif |
1438 |
+ |
1439 |
+ #ifdef CONFIG_SPARC64 |
1440 |
++void __init start_early_boot(void); |
1441 |
++ |
1442 |
+ /* unaligned_64.c */ |
1443 |
+ int handle_ldf_stq(u32 insn, struct pt_regs *regs); |
1444 |
+ void handle_ld_nf(u32 insn, struct pt_regs *regs); |
1445 |
+diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h |
1446 |
+index 3fc58691dbd0..56f933816144 100644 |
1447 |
+--- a/arch/sparc/include/asm/spitfire.h |
1448 |
++++ b/arch/sparc/include/asm/spitfire.h |
1449 |
+@@ -45,6 +45,8 @@ |
1450 |
+ #define SUN4V_CHIP_NIAGARA3 0x03 |
1451 |
+ #define SUN4V_CHIP_NIAGARA4 0x04 |
1452 |
+ #define SUN4V_CHIP_NIAGARA5 0x05 |
1453 |
++#define SUN4V_CHIP_SPARC_M6 0x06 |
1454 |
++#define SUN4V_CHIP_SPARC_M7 0x07 |
1455 |
+ #define SUN4V_CHIP_SPARC64X 0x8a |
1456 |
+ #define SUN4V_CHIP_UNKNOWN 0xff |
1457 |
+ |
1458 |
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h |
1459 |
+index a5f01ac6d0f1..cc6275c931a5 100644 |
1460 |
+--- a/arch/sparc/include/asm/thread_info_64.h |
1461 |
++++ b/arch/sparc/include/asm/thread_info_64.h |
1462 |
+@@ -63,7 +63,8 @@ struct thread_info { |
1463 |
+ struct pt_regs *kern_una_regs; |
1464 |
+ unsigned int kern_una_insn; |
1465 |
+ |
1466 |
+- unsigned long fpregs[0] __attribute__ ((aligned(64))); |
1467 |
++ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)] |
1468 |
++ __attribute__ ((aligned(64))); |
1469 |
+ }; |
1470 |
+ |
1471 |
+ #endif /* !(__ASSEMBLY__) */ |
1472 |
+@@ -102,6 +103,7 @@ struct thread_info { |
1473 |
+ #define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */ |
1474 |
+ #define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */ |
1475 |
+ #define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */ |
1476 |
++#define FAULT_CODE_BAD_RA 0x20 /* Bad RA for sun4v */ |
1477 |
+ |
1478 |
+ #if PAGE_SHIFT == 13 |
1479 |
+ #define THREAD_SIZE (2*PAGE_SIZE) |
1480 |
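The thread_info change above turns fpregs from a zero-length tail array into
a fixed reservation of 7 * 256 bytes (1792 bytes, i.e. 224 unsigned longs on
64-bit), still 64-byte aligned. On our reading the seven 256-byte blocks hold
one full floating-point register file per save depth, but treat that
interpretation as an assumption; the sizing arithmetic itself is just:

	unsigned long bytes = 7 * 256;				/* 1792 */
	unsigned long longs = bytes / sizeof(unsigned long);	/* 224  */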
+diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h |
1481 |
+index 90916f955cac..ecb49cfa3be9 100644 |
1482 |
+--- a/arch/sparc/include/asm/tsb.h |
1483 |
++++ b/arch/sparc/include/asm/tsb.h |
1484 |
+@@ -133,9 +133,24 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
1485 |
+ sub TSB, 0x8, TSB; \ |
1486 |
+ TSB_STORE(TSB, TAG); |
1487 |
+ |
1488 |
+- /* Do a kernel page table walk. Leaves physical PTE pointer in |
1489 |
+- * REG1. Jumps to FAIL_LABEL on early page table walk termination. |
1490 |
+- * VADDR will not be clobbered, but REG2 will. |
1491 |
++ /* Do a kernel page table walk. Leaves valid PTE value in |
1492 |
++ * REG1. Jumps to FAIL_LABEL on early page table walk |
1493 |
++ * termination. VADDR will not be clobbered, but REG2 will. |
1494 |
++ * |
1495 |
++ * There are two masks we must apply to propagate bits from |
1496 |
++ * the virtual address into the PTE physical address field |
1497 |
++ * when dealing with huge pages. This is because the page |
1498 |
++ * table boundaries do not match the huge page size(s) the |
1499 |
++ * hardware supports. |
1500 |
++ * |
1501 |
++ * In these cases we propagate the bits that are below the |
1502 |
++ * page table level where we saw the huge page mapping, but |
1503 |
++ * are still within the relevant physical bits for the huge |
1504 |
++ * page size in question. So for PMD mappings (which fall on |
1505 |
++ * bit 23, for 8MB per PMD) we must propagate bit 22 for a |
1506 |
++ * 4MB huge page. For huge PUDs (which fall on bit 33, for |
1507 |
++ * 8GB per PUD), we have to accommodate 256MB and 2GB huge |
1508 |
++ * pages. So for those we propagate bits 32 to 28. |
1509 |
+ */ |
1510 |
+ #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ |
1511 |
+ sethi %hi(swapper_pg_dir), REG1; \ |
1512 |
+@@ -145,15 +160,40 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
1513 |
+ andn REG2, 0x7, REG2; \ |
1514 |
+ ldx [REG1 + REG2], REG1; \ |
1515 |
+ brz,pn REG1, FAIL_LABEL; \ |
1516 |
+- sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ |
1517 |
++ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \ |
1518 |
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \ |
1519 |
+ andn REG2, 0x7, REG2; \ |
1520 |
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ |
1521 |
+ brz,pn REG1, FAIL_LABEL; \ |
1522 |
+- sllx VADDR, 64 - PMD_SHIFT, REG2; \ |
1523 |
++ sethi %uhi(_PAGE_PUD_HUGE), REG2; \ |
1524 |
++ brz,pn REG1, FAIL_LABEL; \ |
1525 |
++ sllx REG2, 32, REG2; \ |
1526 |
++ andcc REG1, REG2, %g0; \ |
1527 |
++ sethi %hi(0xf8000000), REG2; \ |
1528 |
++ bne,pt %xcc, 697f; \ |
1529 |
++ sllx REG2, 1, REG2; \ |
1530 |
++ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ |
1531 |
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \ |
1532 |
+ andn REG2, 0x7, REG2; \ |
1533 |
+- add REG1, REG2, REG1; |
1534 |
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ |
1535 |
++ sethi %uhi(_PAGE_PMD_HUGE), REG2; \ |
1536 |
++ brz,pn REG1, FAIL_LABEL; \ |
1537 |
++ sllx REG2, 32, REG2; \ |
1538 |
++ andcc REG1, REG2, %g0; \ |
1539 |
++ be,pn %xcc, 698f; \ |
1540 |
++ sethi %hi(0x400000), REG2; \ |
1541 |
++697: brgez,pn REG1, FAIL_LABEL; \ |
1542 |
++ andn REG1, REG2, REG1; \ |
1543 |
++ and VADDR, REG2, REG2; \ |
1544 |
++ ba,pt %xcc, 699f; \ |
1545 |
++ or REG1, REG2, REG1; \ |
1546 |
++698: sllx VADDR, 64 - PMD_SHIFT, REG2; \ |
1547 |
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \ |
1548 |
++ andn REG2, 0x7, REG2; \ |
1549 |
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ |
1550 |
++ brgez,pn REG1, FAIL_LABEL; \ |
1551 |
++ nop; \ |
1552 |
++699: |
1553 |
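In C, the bit propagation the new walk performs for huge mappings is simply
masking part of the virtual address into the PTE's physical address field (a
sketch, with the masks taken from the sethi/sllx sequences above):

	static unsigned long propagate(unsigned long pte, unsigned long vaddr,
				       unsigned long mask)
	{
		return (pte & ~mask) | (vaddr & mask);
	}

	/* PMD level (8MB per entry, 4MB pages):  mask = 0x400000    (bit 22)     */
	/* PUD level (8GB per entry, 256MB/2GB):  mask = 0x1f0000000 (bits 32-28) */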
+ |
1554 |
+ /* PMD has been loaded into REG1, interpret the value, seeing |
1555 |
+ * if it is a HUGE PMD or a normal one. If it is not valid |
1556 |
+@@ -198,6 +238,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
1557 |
+ andn REG2, 0x7, REG2; \ |
1558 |
+ ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ |
1559 |
+ brz,pn REG1, FAIL_LABEL; \ |
1560 |
++ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \ |
1561 |
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \ |
1562 |
++ andn REG2, 0x7, REG2; \ |
1563 |
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ |
1564 |
++ brz,pn REG1, FAIL_LABEL; \ |
1565 |
+ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ |
1566 |
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \ |
1567 |
+ andn REG2, 0x7, REG2; \ |
1568 |
+@@ -246,8 +291,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
1569 |
+ (KERNEL_TSB_SIZE_BYTES / 16) |
1570 |
+ #define KERNEL_TSB4M_NENTRIES 4096 |
1571 |
+ |
1572 |
+-#define KTSB_PHYS_SHIFT 15 |
1573 |
+- |
1574 |
+ /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL |
1575 |
+ * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries |
1576 |
+ * and the found TTE will be left in REG1. REG3 and REG4 must |
1577 |
+@@ -256,17 +299,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
1578 |
+ * VADDR and TAG will be preserved and not clobbered by this macro. |
1579 |
+ */ |
1580 |
+ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ |
1581 |
+-661: sethi %hi(swapper_tsb), REG1; \ |
1582 |
+- or REG1, %lo(swapper_tsb), REG1; \ |
1583 |
++661: sethi %uhi(swapper_tsb), REG1; \ |
1584 |
++ sethi %hi(swapper_tsb), REG2; \ |
1585 |
++ or REG1, %ulo(swapper_tsb), REG1; \ |
1586 |
++ or REG2, %lo(swapper_tsb), REG2; \ |
1587 |
+ .section .swapper_tsb_phys_patch, "ax"; \ |
1588 |
+ .word 661b; \ |
1589 |
+ .previous; \ |
1590 |
+-661: nop; \ |
1591 |
+- .section .tsb_ldquad_phys_patch, "ax"; \ |
1592 |
+- .word 661b; \ |
1593 |
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \ |
1594 |
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \ |
1595 |
+- .previous; \ |
1596 |
++ sllx REG1, 32, REG1; \ |
1597 |
++ or REG1, REG2, REG1; \ |
1598 |
+ srlx VADDR, PAGE_SHIFT, REG2; \ |
1599 |
+ and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ |
1600 |
+ sllx REG2, 4, REG2; \ |
1601 |
+@@ -281,17 +322,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
1602 |
+ * we can make use of that for the index computation. |
1603 |
+ */ |
1604 |
+ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ |
1605 |
+-661: sethi %hi(swapper_4m_tsb), REG1; \ |
1606 |
+- or REG1, %lo(swapper_4m_tsb), REG1; \ |
1607 |
++661: sethi %uhi(swapper_4m_tsb), REG1; \ |
1608 |
++ sethi %hi(swapper_4m_tsb), REG2; \ |
1609 |
++ or REG1, %ulo(swapper_4m_tsb), REG1; \ |
1610 |
++ or REG2, %lo(swapper_4m_tsb), REG2; \ |
1611 |
+ .section .swapper_4m_tsb_phys_patch, "ax"; \ |
1612 |
+ .word 661b; \ |
1613 |
+ .previous; \ |
1614 |
+-661: nop; \ |
1615 |
+- .section .tsb_ldquad_phys_patch, "ax"; \ |
1616 |
+- .word 661b; \ |
1617 |
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \ |
1618 |
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \ |
1619 |
+- .previous; \ |
1620 |
++ sllx REG1, 32, REG1; \ |
1621 |
++ or REG1, REG2, REG1; \ |
1622 |
+ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \ |
1623 |
+ sllx REG2, 4, REG2; \ |
1624 |
+ add REG1, REG2, REG2; \ |
1625 |
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h |
1626 |
+index b26673759283..1f0aa2024e94 100644 |
1627 |
+--- a/arch/sparc/include/asm/visasm.h |
1628 |
++++ b/arch/sparc/include/asm/visasm.h |
1629 |
+@@ -39,6 +39,14 @@ |
1630 |
+ 297: wr %o5, FPRS_FEF, %fprs; \ |
1631 |
+ 298: |
1632 |
+ |
1633 |
++#define VISEntryHalfFast(fail_label) \ |
1634 |
++ rd %fprs, %o5; \ |
1635 |
++ andcc %o5, FPRS_FEF, %g0; \ |
1636 |
++ be,pt %icc, 297f; \ |
1637 |
++ nop; \ |
1638 |
++ ba,a,pt %xcc, fail_label; \ |
1639 |
++297: wr %o5, FPRS_FEF, %fprs; |
1640 |
++ |
1641 |
+ #define VISExitHalf \ |
1642 |
+ wr %o5, 0, %fprs; |
1643 |
+ |
1644 |
+diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c |
1645 |
+index 82a3a71c451e..dfad8b1aea9f 100644 |
1646 |
+--- a/arch/sparc/kernel/cpu.c |
1647 |
++++ b/arch/sparc/kernel/cpu.c |
1648 |
+@@ -494,6 +494,18 @@ static void __init sun4v_cpu_probe(void) |
1649 |
+ sparc_pmu_type = "niagara5"; |
1650 |
+ break; |
1651 |
+ |
1652 |
++ case SUN4V_CHIP_SPARC_M6: |
1653 |
++ sparc_cpu_type = "SPARC-M6"; |
1654 |
++ sparc_fpu_type = "SPARC-M6 integrated FPU"; |
1655 |
++ sparc_pmu_type = "sparc-m6"; |
1656 |
++ break; |
1657 |
++ |
1658 |
++ case SUN4V_CHIP_SPARC_M7: |
1659 |
++ sparc_cpu_type = "SPARC-M7"; |
1660 |
++ sparc_fpu_type = "SPARC-M7 integrated FPU"; |
1661 |
++ sparc_pmu_type = "sparc-m7"; |
1662 |
++ break; |
1663 |
++ |
1664 |
+ case SUN4V_CHIP_SPARC64X: |
1665 |
+ sparc_cpu_type = "SPARC64-X"; |
1666 |
+ sparc_fpu_type = "SPARC64-X integrated FPU"; |
1667 |
+diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c |
1668 |
+index de1c844dfabc..e69ec0e3f155 100644 |
1669 |
+--- a/arch/sparc/kernel/cpumap.c |
1670 |
++++ b/arch/sparc/kernel/cpumap.c |
1671 |
+@@ -326,6 +326,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) |
1672 |
+ case SUN4V_CHIP_NIAGARA3: |
1673 |
+ case SUN4V_CHIP_NIAGARA4: |
1674 |
+ case SUN4V_CHIP_NIAGARA5: |
1675 |
++ case SUN4V_CHIP_SPARC_M6: |
1676 |
++ case SUN4V_CHIP_SPARC_M7: |
1677 |
+ case SUN4V_CHIP_SPARC64X: |
1678 |
+ rover_inc_table = niagara_iterate_method; |
1679 |
+ break; |
1680 |
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c |
1681 |
+index dff60abbea01..f87a55d77094 100644 |
1682 |
+--- a/arch/sparc/kernel/ds.c |
1683 |
++++ b/arch/sparc/kernel/ds.c |
1684 |
+@@ -1200,14 +1200,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id) |
1685 |
+ ds_cfg.tx_irq = vdev->tx_irq; |
1686 |
+ ds_cfg.rx_irq = vdev->rx_irq; |
1687 |
+ |
1688 |
+- lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); |
1689 |
++ lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS"); |
1690 |
+ if (IS_ERR(lp)) { |
1691 |
+ err = PTR_ERR(lp); |
1692 |
+ goto out_free_ds_states; |
1693 |
+ } |
1694 |
+ dp->lp = lp; |
1695 |
+ |
1696 |
+- err = ldc_bind(lp, "DS"); |
1697 |
++ err = ldc_bind(lp); |
1698 |
+ if (err) |
1699 |
+ goto out_free_ldc; |
1700 |
+ |
1701 |
+diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S |
1702 |
+index b2c2c5be281c..d668ca149e64 100644 |
1703 |
+--- a/arch/sparc/kernel/dtlb_prot.S |
1704 |
++++ b/arch/sparc/kernel/dtlb_prot.S |
1705 |
+@@ -24,11 +24,11 @@ |
1706 |
+ mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr |
1707 |
+ |
1708 |
+ /* PROT ** ICACHE line 2: More real fault processing */ |
1709 |
++ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5 |
1710 |
+ bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup |
1711 |
+- ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5 |
1712 |
+- ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault |
1713 |
+ mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 |
1714 |
+- nop |
1715 |
++ ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault |
1716 |
++ nop |
1717 |
+ nop |
1718 |
+ nop |
1719 |
+ nop |
1720 |
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h |
1721 |
+index ebaba6167dd4..88d322b67fac 100644 |
1722 |
+--- a/arch/sparc/kernel/entry.h |
1723 |
++++ b/arch/sparc/kernel/entry.h |
1724 |
+@@ -65,13 +65,10 @@ struct pause_patch_entry { |
1725 |
+ extern struct pause_patch_entry __pause_3insn_patch, |
1726 |
+ __pause_3insn_patch_end; |
1727 |
+ |
1728 |
+-void __init per_cpu_patch(void); |
1729 |
+ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, |
1730 |
+ struct sun4v_1insn_patch_entry *); |
1731 |
+ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, |
1732 |
+ struct sun4v_2insn_patch_entry *); |
1733 |
+-void __init sun4v_patch(void); |
1734 |
+-void __init boot_cpu_id_too_large(int cpu); |
1735 |
+ extern unsigned int dcache_parity_tl1_occurred; |
1736 |
+ extern unsigned int icache_parity_tl1_occurred; |
1737 |
+ |
1738 |
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S |
1739 |
+index 452f04fe8da6..3d61fcae7ee3 100644 |
1740 |
+--- a/arch/sparc/kernel/head_64.S |
1741 |
++++ b/arch/sparc/kernel/head_64.S |
1742 |
+@@ -427,6 +427,12 @@ sun4v_chip_type: |
1743 |
+ cmp %g2, '5' |
1744 |
+ be,pt %xcc, 5f |
1745 |
+ mov SUN4V_CHIP_NIAGARA5, %g4 |
1746 |
++ cmp %g2, '6' |
1747 |
++ be,pt %xcc, 5f |
1748 |
++ mov SUN4V_CHIP_SPARC_M6, %g4 |
1749 |
++ cmp %g2, '7' |
1750 |
++ be,pt %xcc, 5f |
1751 |
++ mov SUN4V_CHIP_SPARC_M7, %g4 |
1752 |
+ ba,pt %xcc, 49f |
1753 |
+ nop |
1754 |
+ |
1755 |
+@@ -585,6 +591,12 @@ niagara_tlb_fixup: |
1756 |
+ cmp %g1, SUN4V_CHIP_NIAGARA5 |
1757 |
+ be,pt %xcc, niagara4_patch |
1758 |
+ nop |
1759 |
++ cmp %g1, SUN4V_CHIP_SPARC_M6 |
1760 |
++ be,pt %xcc, niagara4_patch |
1761 |
++ nop |
1762 |
++ cmp %g1, SUN4V_CHIP_SPARC_M7 |
1763 |
++ be,pt %xcc, niagara4_patch |
1764 |
++ nop |
1765 |
+ |
1766 |
+ call generic_patch_copyops |
1767 |
+ nop |
1768 |
+@@ -660,14 +672,12 @@ tlb_fixup_done: |
1769 |
+ sethi %hi(init_thread_union), %g6 |
1770 |
+ or %g6, %lo(init_thread_union), %g6 |
1771 |
+ ldx [%g6 + TI_TASK], %g4 |
1772 |
+- mov %sp, %l6 |
1773 |
+ |
1774 |
+ wr %g0, ASI_P, %asi |
1775 |
+ mov 1, %g1 |
1776 |
+ sllx %g1, THREAD_SHIFT, %g1 |
1777 |
+ sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 |
1778 |
+ add %g6, %g1, %sp |
1779 |
+- mov 0, %fp |
1780 |
+ |
1781 |
+ /* Set per-cpu pointer initially to zero, this makes |
1782 |
+ * the boot-cpu use the in-kernel-image per-cpu areas |
1783 |
+@@ -694,44 +704,14 @@ tlb_fixup_done: |
1784 |
+ nop |
1785 |
+ #endif |
1786 |
+ |
1787 |
+- mov %l6, %o1 ! OpenPROM stack |
1788 |
+ call prom_init |
1789 |
+ mov %l7, %o0 ! OpenPROM cif handler |
1790 |
+ |
1791 |
+- /* Initialize current_thread_info()->cpu as early as possible. |
1792 |
+- * In order to do that accurately we have to patch up the get_cpuid() |
1793 |
+- * assembler sequences. And that, in turn, requires that we know |
1794 |
+- * if we are on a Starfire box or not. While we're here, patch up |
1795 |
+- * the sun4v sequences as well. |
1796 |
++ /* To create a one-register-window buffer between the kernel's |
1797 |
++ * initial stack and the last stack frame we use from the firmware, |
1798 |
++ * do the rest of the boot from a C helper function. |
1799 |
+ */ |
1800 |
+- call check_if_starfire |
1801 |
+- nop |
1802 |
+- call per_cpu_patch |
1803 |
+- nop |
1804 |
+- call sun4v_patch |
1805 |
+- nop |
1806 |
+- |
1807 |
+-#ifdef CONFIG_SMP |
1808 |
+- call hard_smp_processor_id |
1809 |
+- nop |
1810 |
+- cmp %o0, NR_CPUS |
1811 |
+- blu,pt %xcc, 1f |
1812 |
+- nop |
1813 |
+- call boot_cpu_id_too_large |
1814 |
+- nop |
1815 |
+- /* Not reached... */ |
1816 |
+- |
1817 |
+-1: |
1818 |
+-#else |
1819 |
+- mov 0, %o0 |
1820 |
+-#endif |
1821 |
+- sth %o0, [%g6 + TI_CPU] |
1822 |
+- |
1823 |
+- call prom_init_report |
1824 |
+- nop |
1825 |
+- |
1826 |
+- /* Off we go.... */ |
1827 |
+- call start_kernel |
1828 |
++ call start_early_boot |
1829 |
+ nop |
1830 |
+ /* Not reached... */ |
1831 |
+ |
1832 |
+diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c |
1833 |
+index c0a2de0fd624..5c55145bfbf0 100644 |
1834 |
+--- a/arch/sparc/kernel/hvapi.c |
1835 |
++++ b/arch/sparc/kernel/hvapi.c |
1836 |
+@@ -46,6 +46,7 @@ static struct api_info api_table[] = { |
1837 |
+ { .group = HV_GRP_VF_CPU, }, |
1838 |
+ { .group = HV_GRP_KT_CPU, }, |
1839 |
+ { .group = HV_GRP_VT_CPU, }, |
1840 |
++ { .group = HV_GRP_T5_CPU, }, |
1841 |
+ { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, |
1842 |
+ }; |
1843 |
+ |
1844 |
+diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S |
1845 |
+index f3ab509b76a8..caedf8320416 100644 |
1846 |
+--- a/arch/sparc/kernel/hvcalls.S |
1847 |
++++ b/arch/sparc/kernel/hvcalls.S |
1848 |
+@@ -821,3 +821,19 @@ ENTRY(sun4v_vt_set_perfreg) |
1849 |
+ retl |
1850 |
+ nop |
1851 |
+ ENDPROC(sun4v_vt_set_perfreg) |
1852 |
++ |
1853 |
++ENTRY(sun4v_t5_get_perfreg) |
1854 |
++ mov %o1, %o4 |
1855 |
++ mov HV_FAST_T5_GET_PERFREG, %o5 |
1856 |
++ ta HV_FAST_TRAP |
1857 |
++ stx %o1, [%o4] |
1858 |
++ retl |
1859 |
++ nop |
1860 |
++ENDPROC(sun4v_t5_get_perfreg) |
1861 |
++ |
1862 |
++ENTRY(sun4v_t5_set_perfreg) |
1863 |
++ mov HV_FAST_T5_SET_PERFREG, %o5 |
1864 |
++ ta HV_FAST_TRAP |
1865 |
++ retl |
1866 |
++ nop |
1867 |
++ENDPROC(sun4v_t5_set_perfreg) |
1868 |
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S |
1869 |
+index b7ddcdd1dea9..cdbfec299f2f 100644 |
1870 |
+--- a/arch/sparc/kernel/hvtramp.S |
1871 |
++++ b/arch/sparc/kernel/hvtramp.S |
1872 |
+@@ -109,7 +109,6 @@ hv_cpu_startup: |
1873 |
+ sllx %g5, THREAD_SHIFT, %g5 |
1874 |
+ sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 |
1875 |
+ add %g6, %g5, %sp |
1876 |
+- mov 0, %fp |
1877 |
+ |
1878 |
+ call init_irqwork_curcpu |
1879 |
+ nop |
1880 |
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c |
1881 |
+index 7f08ec8a7c68..28fed53b13a0 100644 |
1882 |
+--- a/arch/sparc/kernel/ioport.c |
1883 |
++++ b/arch/sparc/kernel/ioport.c |
1884 |
+@@ -278,7 +278,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len, |
1885 |
+ } |
1886 |
+ |
1887 |
+ order = get_order(len_total); |
1888 |
+- if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0) |
1889 |
++ va = __get_free_pages(gfp, order); |
1890 |
++ if (va == 0) |
1891 |
+ goto err_nopages; |
1892 |
+ |
1893 |
+ if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) |
1894 |
+@@ -443,7 +444,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len, |
1895 |
+ } |
1896 |
+ |
1897 |
+ order = get_order(len_total); |
1898 |
+- va = (void *) __get_free_pages(GFP_KERNEL, order); |
1899 |
++ va = (void *) __get_free_pages(gfp, order); |
1900 |
+ if (va == NULL) { |
1901 |
+ printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT); |
1902 |
+ goto err_nopages; |
1903 |
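Both ioport.c hunks make the sparc32 coherent allocators honor the caller's
gfp flags instead of hardcoding GFP_KERNEL, which matters once a driver
allocates from atomic context. A hypothetical call the fix makes safe:

	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_ATOMIC);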
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c |
1904 |
+index 666193f4e8bb..4033c23bdfa6 100644 |
1905 |
+--- a/arch/sparc/kernel/irq_64.c |
1906 |
++++ b/arch/sparc/kernel/irq_64.c |
1907 |
+@@ -47,8 +47,6 @@ |
1908 |
+ #include "cpumap.h" |
1909 |
+ #include "kstack.h" |
1910 |
+ |
1911 |
+-#define NUM_IVECS (IMAP_INR + 1) |
1912 |
+- |
1913 |
+ struct ino_bucket *ivector_table; |
1914 |
+ unsigned long ivector_table_pa; |
1915 |
+ |
1916 |
+@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq) |
1917 |
+ |
1918 |
+ #define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa) |
1919 |
+ |
1920 |
+-static struct { |
1921 |
+- unsigned int dev_handle; |
1922 |
+- unsigned int dev_ino; |
1923 |
+- unsigned int in_use; |
1924 |
+-} irq_table[NR_IRQS]; |
1925 |
+-static DEFINE_SPINLOCK(irq_alloc_lock); |
1926 |
++static unsigned long hvirq_major __initdata; |
1927 |
++static int __init early_hvirq_major(char *p) |
1928 |
++{ |
1929 |
++ int rc = kstrtoul(p, 10, &hvirq_major); |
1930 |
++ |
1931 |
++ return rc; |
1932 |
++} |
1933 |
++early_param("hvirq", early_hvirq_major); |
1934 |
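The early_param() above makes the negotiated interrupt API selectable from
the boot command line; irq_init_hv() below requests major version 3 unless
overridden. Illustrative boot parameters:

	hvirq=1		force the legacy sysino interfaces
	hvirq=3		request cookie-only VIRQs (the default)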
++ |
1935 |
++static int hv_irq_version; |
1936 |
++ |
1937 |
++/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie |
1938 |
++ * based interfaces, but: |
1939 |
++ * |
1940 |
++ * 1) Several OSs, Solaris and Linux included, use them even when only |
1941 |
++ * negotiating version 1.0 (or failing to negotiate at all). So the |
1942 |
++ * hypervisor has a workaround that provides the VIRQ interfaces even |
1943 |
++ * when only version 1.0 of the API is in use. |
1944 |
++ * |
1945 |
++ * 2) More importantly, with major version 2.0 these VIRQ |
1946 |
++ * interfaces were only actually hooked up for LDC interrupts, even |
1947 |
++ * though the Hypervisor specification clearly stated: |
1948 |
++ * |
1949 |
++ * The new interrupt API functions will be available to a guest |
1950 |
++ * when it negotiates version 2.0 in the interrupt API group 0x2. When |
1951 |
++ * a guest negotiates version 2.0, all interrupt sources will only |
1952 |
++ * support using the cookie interface, and any attempt to use the |
1953 |
++ * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the |
1954 |
++ * ENOTSUPPORTED error being returned. |
1955 |
++ * |
1956 |
++ * with an emphasis on "all interrupt sources". |
1957 |
++ * |
1958 |
++ * To correct this, major version 3.0 was created which does actually |
1959 |
++ * support VIRQs for all interrupt sources (not just LDC devices). So |
1960 |
++ * if we want to move completely over the cookie based VIRQs we must |
1961 |
++ * negotiate major version 3.0 or later of HV_GRP_INTR. |
1962 |
++ */ |
1963 |
++static bool sun4v_cookie_only_virqs(void) |
1964 |
++{ |
1965 |
++ if (hv_irq_version >= 3) |
1966 |
++ return true; |
1967 |
++ return false; |
1968 |
++} |
1969 |
+ |
1970 |
+-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino) |
1971 |
++static void __init irq_init_hv(void) |
1972 |
+ { |
1973 |
+- unsigned long flags; |
1974 |
+- unsigned char ent; |
1975 |
++ unsigned long hv_error, major, minor = 0; |
1976 |
++ |
1977 |
++ if (tlb_type != hypervisor) |
1978 |
++ return; |
1979 |
+ |
1980 |
+- BUILD_BUG_ON(NR_IRQS >= 256); |
1981 |
++ if (hvirq_major) |
1982 |
++ major = hvirq_major; |
1983 |
++ else |
1984 |
++ major = 3; |
1985 |
+ |
1986 |
+- spin_lock_irqsave(&irq_alloc_lock, flags); |
1987 |
++ hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor); |
1988 |
++ if (!hv_error) |
1989 |
++ hv_irq_version = major; |
1990 |
++ else |
1991 |
++ hv_irq_version = 1; |
1992 |
+ |
1993 |
+- for (ent = 1; ent < NR_IRQS; ent++) { |
1994 |
+- if (!irq_table[ent].in_use) |
1995 |
++ pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n", |
1996 |
++ hv_irq_version, |
1997 |
++ sun4v_cookie_only_virqs() ? "enabled" : "disabled"); |
1998 |
++} |
1999 |
++ |
2000 |
++/* This function is for the timer interrupt. */ |
2001 |
++int __init arch_probe_nr_irqs(void) |
2002 |
++{ |
2003 |
++ return 1; |
2004 |
++} |
2005 |
++ |
2006 |
++#define DEFAULT_NUM_IVECS (0xfffU) |
2007 |
++static unsigned int nr_ivec = DEFAULT_NUM_IVECS; |
2008 |
++#define NUM_IVECS (nr_ivec) |
2009 |
++ |
2010 |
++static unsigned int __init size_nr_ivec(void) |
2011 |
++{ |
2012 |
++ if (tlb_type == hypervisor) { |
2013 |
++ switch (sun4v_chip_type) { |
2014 |
++		/* Athena's devhandle|devino is large. */ |
2015 |
++ case SUN4V_CHIP_SPARC64X: |
2016 |
++ nr_ivec = 0xffff; |
2017 |
+ break; |
2018 |
++ } |
2019 |
+ } |
2020 |
+- if (ent >= NR_IRQS) { |
2021 |
+- printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); |
2022 |
+- ent = 0; |
2023 |
+- } else { |
2024 |
+- irq_table[ent].dev_handle = dev_handle; |
2025 |
+- irq_table[ent].dev_ino = dev_ino; |
2026 |
+- irq_table[ent].in_use = 1; |
2027 |
+- } |
2028 |
++ return nr_ivec; |
2029 |
++} |
2030 |
++ |
2031 |
++struct irq_handler_data { |
2032 |
++ union { |
2033 |
++ struct { |
2034 |
++ unsigned int dev_handle; |
2035 |
++ unsigned int dev_ino; |
2036 |
++ }; |
2037 |
++ unsigned long sysino; |
2038 |
++ }; |
2039 |
++ struct ino_bucket bucket; |
2040 |
++ unsigned long iclr; |
2041 |
++ unsigned long imap; |
2042 |
++}; |
2043 |
++ |
2044 |
++static inline unsigned int irq_data_to_handle(struct irq_data *data) |
2045 |
++{ |
2046 |
++ struct irq_handler_data *ihd = data->handler_data; |
2047 |
++ |
2048 |
++ return ihd->dev_handle; |
2049 |
++} |
2050 |
++ |
2051 |
++static inline unsigned int irq_data_to_ino(struct irq_data *data) |
2052 |
++{ |
2053 |
++ struct irq_handler_data *ihd = data->handler_data; |
2054 |
+ |
2055 |
+- spin_unlock_irqrestore(&irq_alloc_lock, flags); |
2056 |
++ return ihd->dev_ino; |
2057 |
++} |
2058 |
++ |
2059 |
++static inline unsigned long irq_data_to_sysino(struct irq_data *data) |
2060 |
++{ |
2061 |
++ struct irq_handler_data *ihd = data->handler_data; |
2062 |
+ |
2063 |
+- return ent; |
2064 |
++ return ihd->sysino; |
2065 |
+ } |
2066 |
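The accessors above rely on dev_handle/dev_ino exactly overlaying sysino in
the union. A compile-time check one could add (a sketch, not part of the
patch):

	_Static_assert(2 * sizeof(unsigned int) == sizeof(unsigned long),
		       "dev_handle/dev_ino must overlay sysino exactly");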
+ |
2067 |
+-#ifdef CONFIG_PCI_MSI |
2068 |
+ void irq_free(unsigned int irq) |
2069 |
+ { |
2070 |
+- unsigned long flags; |
2071 |
++ void *data = irq_get_handler_data(irq); |
2072 |
+ |
2073 |
+- if (irq >= NR_IRQS) |
2074 |
+- return; |
2075 |
++ kfree(data); |
2076 |
++ irq_set_handler_data(irq, NULL); |
2077 |
++ irq_free_descs(irq, 1); |
2078 |
++} |
2079 |
+ |
2080 |
+- spin_lock_irqsave(&irq_alloc_lock, flags); |
2081 |
++unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino) |
2082 |
++{ |
2083 |
++ int irq; |
2084 |
+ |
2085 |
+- irq_table[irq].in_use = 0; |
2086 |
++ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL); |
2087 |
++ if (irq <= 0) |
2088 |
++ goto out; |
2089 |
+ |
2090 |
+- spin_unlock_irqrestore(&irq_alloc_lock, flags); |
2091 |
++ return irq; |
2092 |
++out: |
2093 |
++ return 0; |
2094 |
++} |
2095 |
++ |
2096 |
++static unsigned int cookie_exists(u32 devhandle, unsigned int devino) |
2097 |
++{ |
2098 |
++ unsigned long hv_err, cookie; |
2099 |
++ struct ino_bucket *bucket; |
2100 |
++ unsigned int irq = 0U; |
2101 |
++ |
2102 |
++ hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie); |
2103 |
++ if (hv_err) { |
2104 |
++ pr_err("HV get cookie failed hv_err = %ld\n", hv_err); |
2105 |
++ goto out; |
2106 |
++ } |
2107 |
++ |
2108 |
++ if (cookie & ((1UL << 63UL))) { |
2109 |
++ cookie = ~cookie; |
2110 |
++ bucket = (struct ino_bucket *) __va(cookie); |
2111 |
++ irq = bucket->__irq; |
2112 |
++ } |
2113 |
++out: |
2114 |
++ return irq; |
2115 |
++} |
2116 |
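The cookie encoding cookie_exists() undoes here (and cookie_assign() creates
later in this patch) is the bitwise NOT of the bucket's physical address, so
a set bit 63 marks a cookie rather than a sysino. As a round-trip sketch:

	unsigned long encode(struct ino_bucket *bucket)
	{
		return ~__pa(bucket);
	}

	struct ino_bucket *decode(unsigned long cookie)
	{
		if (!(cookie & (1UL << 63)))
			return NULL;		/* not a cookie */
		return (struct ino_bucket *) __va(~cookie);
	}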
++ |
2117 |
++static unsigned int sysino_exists(u32 devhandle, unsigned int devino) |
2118 |
++{ |
2119 |
++ unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); |
2120 |
++ struct ino_bucket *bucket; |
2121 |
++ unsigned int irq; |
2122 |
++ |
2123 |
++ bucket = &ivector_table[sysino]; |
2124 |
++ irq = bucket_get_irq(__pa(bucket)); |
2125 |
++ |
2126 |
++ return irq; |
2127 |
++} |
2128 |
++ |
2129 |
++void ack_bad_irq(unsigned int irq) |
2130 |
++{ |
2131 |
++ pr_crit("BAD IRQ ack %d\n", irq); |
2132 |
++} |
2133 |
++ |
2134 |
++void irq_install_pre_handler(int irq, |
2135 |
++ void (*func)(unsigned int, void *, void *), |
2136 |
++ void *arg1, void *arg2) |
2137 |
++{ |
2138 |
++ pr_warn("IRQ pre handler NOT supported.\n"); |
2139 |
+ } |
2140 |
+-#endif |
2141 |
+ |
2142 |
+ /* |
2143 |
+ * /proc/interrupts printing: |
2144 |
+@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) |
2145 |
+ return tid; |
2146 |
+ } |
2147 |
+ |
2148 |
+-struct irq_handler_data { |
2149 |
+- unsigned long iclr; |
2150 |
+- unsigned long imap; |
2151 |
+- |
2152 |
+- void (*pre_handler)(unsigned int, void *, void *); |
2153 |
+- void *arg1; |
2154 |
+- void *arg2; |
2155 |
+-}; |
2156 |
+- |
2157 |
+ #ifdef CONFIG_SMP |
2158 |
+ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) |
2159 |
+ { |
2160 |
+@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data) |
2161 |
+ |
2162 |
+ static void sun4v_irq_enable(struct irq_data *data) |
2163 |
+ { |
2164 |
+- unsigned int ino = irq_table[data->irq].dev_ino; |
2165 |
+ unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity); |
2166 |
++ unsigned int ino = irq_data_to_sysino(data); |
2167 |
+ int err; |
2168 |
+ |
2169 |
+ err = sun4v_intr_settarget(ino, cpuid); |
2170 |
+@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data) |
2171 |
+ static int sun4v_set_affinity(struct irq_data *data, |
2172 |
+ const struct cpumask *mask, bool force) |
2173 |
+ { |
2174 |
+- unsigned int ino = irq_table[data->irq].dev_ino; |
2175 |
+ unsigned long cpuid = irq_choose_cpu(data->irq, mask); |
2176 |
++ unsigned int ino = irq_data_to_sysino(data); |
2177 |
+ int err; |
2178 |
+ |
2179 |
+ err = sun4v_intr_settarget(ino, cpuid); |
2180 |
+@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data, |
2181 |
+ |
2182 |
+ static void sun4v_irq_disable(struct irq_data *data) |
2183 |
+ { |
2184 |
+- unsigned int ino = irq_table[data->irq].dev_ino; |
2185 |
++ unsigned int ino = irq_data_to_sysino(data); |
2186 |
+ int err; |
2187 |
+ |
2188 |
+ err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); |
2189 |
+@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data) |
2190 |
+ |
2191 |
+ static void sun4v_irq_eoi(struct irq_data *data) |
2192 |
+ { |
2193 |
+- unsigned int ino = irq_table[data->irq].dev_ino; |
2194 |
++ unsigned int ino = irq_data_to_sysino(data); |
2195 |
+ int err; |
2196 |
+ |
2197 |
+ err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); |
2198 |
+@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data) |
2199 |
+ |
2200 |
+ static void sun4v_virq_enable(struct irq_data *data) |
2201 |
+ { |
2202 |
+- unsigned long cpuid, dev_handle, dev_ino; |
2203 |
++ unsigned long dev_handle = irq_data_to_handle(data); |
2204 |
++ unsigned long dev_ino = irq_data_to_ino(data); |
2205 |
++ unsigned long cpuid; |
2206 |
+ int err; |
2207 |
+ |
2208 |
+ cpuid = irq_choose_cpu(data->irq, data->affinity); |
2209 |
+ |
2210 |
+- dev_handle = irq_table[data->irq].dev_handle; |
2211 |
+- dev_ino = irq_table[data->irq].dev_ino; |
2212 |
+- |
2213 |
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); |
2214 |
+ if (err != HV_EOK) |
2215 |
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " |
2216 |
+@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data) |
2217 |
+ static int sun4v_virt_set_affinity(struct irq_data *data, |
2218 |
+ const struct cpumask *mask, bool force) |
2219 |
+ { |
2220 |
+- unsigned long cpuid, dev_handle, dev_ino; |
2221 |
++ unsigned long dev_handle = irq_data_to_handle(data); |
2222 |
++ unsigned long dev_ino = irq_data_to_ino(data); |
2223 |
++ unsigned long cpuid; |
2224 |
+ int err; |
2225 |
+ |
2226 |
+ cpuid = irq_choose_cpu(data->irq, mask); |
2227 |
+ |
2228 |
+- dev_handle = irq_table[data->irq].dev_handle; |
2229 |
+- dev_ino = irq_table[data->irq].dev_ino; |
2230 |
+- |
2231 |
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); |
2232 |
+ if (err != HV_EOK) |
2233 |
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " |
2234 |
+@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data, |
2235 |
+ |
2236 |
+ static void sun4v_virq_disable(struct irq_data *data) |
2237 |
+ { |
2238 |
+- unsigned long dev_handle, dev_ino; |
2239 |
++ unsigned long dev_handle = irq_data_to_handle(data); |
2240 |
++ unsigned long dev_ino = irq_data_to_ino(data); |
2241 |
+ int err; |
2242 |
+ |
2243 |
+- dev_handle = irq_table[data->irq].dev_handle; |
2244 |
+- dev_ino = irq_table[data->irq].dev_ino; |
2245 |
+ |
2246 |
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino, |
2247 |
+ HV_INTR_DISABLED); |
2248 |
+@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data) |
2249 |
+ |
2250 |
+ static void sun4v_virq_eoi(struct irq_data *data) |
2251 |
+ { |
2252 |
+- unsigned long dev_handle, dev_ino; |
2253 |
++ unsigned long dev_handle = irq_data_to_handle(data); |
2254 |
++ unsigned long dev_ino = irq_data_to_ino(data); |
2255 |
+ int err; |
2256 |
+ |
2257 |
+- dev_handle = irq_table[data->irq].dev_handle; |
2258 |
+- dev_ino = irq_table[data->irq].dev_ino; |
2259 |
+- |
2260 |
+ err = sun4v_vintr_set_state(dev_handle, dev_ino, |
2261 |
+ HV_INTR_STATE_IDLE); |
2262 |
+ if (err != HV_EOK) |
2263 |
+@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = { |
2264 |
+ .flags = IRQCHIP_EOI_IF_HANDLED, |
2265 |
+ }; |
2266 |
+ |
2267 |
+-static void pre_flow_handler(struct irq_data *d) |
2268 |
+-{ |
2269 |
+- struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d); |
2270 |
+- unsigned int ino = irq_table[d->irq].dev_ino; |
2271 |
+- |
2272 |
+- handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); |
2273 |
+-} |
2274 |
+- |
2275 |
+-void irq_install_pre_handler(int irq, |
2276 |
+- void (*func)(unsigned int, void *, void *), |
2277 |
+- void *arg1, void *arg2) |
2278 |
+-{ |
2279 |
+- struct irq_handler_data *handler_data = irq_get_handler_data(irq); |
2280 |
+- |
2281 |
+- handler_data->pre_handler = func; |
2282 |
+- handler_data->arg1 = arg1; |
2283 |
+- handler_data->arg2 = arg2; |
2284 |
+- |
2285 |
+- __irq_set_preflow_handler(irq, pre_flow_handler); |
2286 |
+-} |
2287 |
+- |
2288 |
+ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) |
2289 |
+ { |
2290 |
+- struct ino_bucket *bucket; |
2291 |
+ struct irq_handler_data *handler_data; |
2292 |
++ struct ino_bucket *bucket; |
2293 |
+ unsigned int irq; |
2294 |
+ int ino; |
2295 |
+ |
2296 |
+@@ -537,119 +641,166 @@ out: |
2297 |
+ return irq; |
2298 |
+ } |
2299 |
+ |
2300 |
+-static unsigned int sun4v_build_common(unsigned long sysino, |
2301 |
+- struct irq_chip *chip) |
2302 |
++static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino, |
2303 |
++ void (*handler_data_init)(struct irq_handler_data *data, |
2304 |
++ u32 devhandle, unsigned int devino), |
2305 |
++ struct irq_chip *chip) |
2306 |
+ { |
2307 |
+- struct ino_bucket *bucket; |
2308 |
+- struct irq_handler_data *handler_data; |
2309 |
++ struct irq_handler_data *data; |
2310 |
+ unsigned int irq; |
2311 |
+ |
2312 |
+- BUG_ON(tlb_type != hypervisor); |
2313 |
++ irq = irq_alloc(devhandle, devino); |
2314 |
++ if (!irq) |
2315 |
++ goto out; |
2316 |
+ |
2317 |
+- bucket = &ivector_table[sysino]; |
2318 |
+- irq = bucket_get_irq(__pa(bucket)); |
2319 |
+- if (!irq) { |
2320 |
+- irq = irq_alloc(0, sysino); |
2321 |
+- bucket_set_irq(__pa(bucket), irq); |
2322 |
+- irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, |
2323 |
+- "IVEC"); |
2324 |
++ data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); |
2325 |
++ if (unlikely(!data)) { |
2326 |
++ pr_err("IRQ handler data allocation failed.\n"); |
2327 |
++ irq_free(irq); |
2328 |
++ irq = 0; |
2329 |
++ goto out; |
2330 |
+ } |
2331 |
+ |
2332 |
+- handler_data = irq_get_handler_data(irq); |
2333 |
+- if (unlikely(handler_data)) |
2334 |
+- goto out; |
2335 |
++ irq_set_handler_data(irq, data); |
2336 |
++ handler_data_init(data, devhandle, devino); |
2337 |
++ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC"); |
2338 |
++ data->imap = ~0UL; |
2339 |
++ data->iclr = ~0UL; |
2340 |
++out: |
2341 |
++ return irq; |
2342 |
++} |
2343 |
+ |
2344 |
+- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); |
2345 |
+- if (unlikely(!handler_data)) { |
2346 |
+- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); |
2347 |
+- prom_halt(); |
2348 |
+- } |
2349 |
+- irq_set_handler_data(irq, handler_data); |
2350 |
++static unsigned long cookie_assign(unsigned int irq, u32 devhandle, |
2351 |
++ unsigned int devino) |
2352 |
++{ |
2353 |
++ struct irq_handler_data *ihd = irq_get_handler_data(irq); |
2354 |
++ unsigned long hv_error, cookie; |
2355 |
+ |
2356 |
+- /* Catch accidental accesses to these things. IMAP/ICLR handling |
2357 |
+- * is done by hypervisor calls on sun4v platforms, not by direct |
2358 |
+- * register accesses. |
2359 |
++	/* handler_irq needs to find the irq. The cookie is seen as signed in |
2360 |
++	 * sun4v_dev_mondo and treated as a non-ivector_table delivery. |
2361 |
+ */ |
2362 |
+- handler_data->imap = ~0UL; |
2363 |
+- handler_data->iclr = ~0UL; |
2364 |
++ ihd->bucket.__irq = irq; |
2365 |
++ cookie = ~__pa(&ihd->bucket); |
2366 |
+ |
2367 |
+-out: |
2368 |
+- return irq; |
2369 |
++ hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie); |
2370 |
++ if (hv_error) |
2371 |
++ pr_err("HV vintr set cookie failed = %ld\n", hv_error); |
2372 |
++ |
2373 |
++ return hv_error; |
2374 |
+ } |
2375 |
+ |
2376 |
+-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) |
2377 |
++static void cookie_handler_data(struct irq_handler_data *data, |
2378 |
++ u32 devhandle, unsigned int devino) |
2379 |
+ { |
2380 |
+- unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); |
2381 |
++ data->dev_handle = devhandle; |
2382 |
++ data->dev_ino = devino; |
2383 |
++} |
2384 |
+ |
2385 |
+- return sun4v_build_common(sysino, &sun4v_irq); |
2386 |
++static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino, |
2387 |
++ struct irq_chip *chip) |
2388 |
++{ |
2389 |
++ unsigned long hv_error; |
2390 |
++ unsigned int irq; |
2391 |
++ |
2392 |
++ irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip); |
2393 |
++ |
2394 |
++ hv_error = cookie_assign(irq, devhandle, devino); |
2395 |
++ if (hv_error) { |
2396 |
++ irq_free(irq); |
2397 |
++ irq = 0; |
2398 |
++ } |
2399 |
++ |
2400 |
++ return irq; |
2401 |
+ } |
2402 |
+ |
2403 |
+-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) |
2404 |
++static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino) |
2405 |
+ { |
2406 |
+- struct irq_handler_data *handler_data; |
2407 |
+- unsigned long hv_err, cookie; |
2408 |
+- struct ino_bucket *bucket; |
2409 |
+ unsigned int irq; |
2410 |
+ |
2411 |
+- bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); |
2412 |
+- if (unlikely(!bucket)) |
2413 |
+- return 0; |
2414 |
++ irq = cookie_exists(devhandle, devino); |
2415 |
++ if (irq) |
2416 |
++ goto out; |
2417 |
+ |
2418 |
+- /* The only reference we store to the IRQ bucket is |
2419 |
+- * by physical address which kmemleak can't see, tell |
2420 |
+- * it that this object explicitly is not a leak and |
2421 |
+- * should be scanned. |
2422 |
+- */ |
2423 |
+- kmemleak_not_leak(bucket); |
2424 |
++ irq = cookie_build_irq(devhandle, devino, &sun4v_virq); |
2425 |
+ |
2426 |
+- __flush_dcache_range((unsigned long) bucket, |
2427 |
+- ((unsigned long) bucket + |
2428 |
+- sizeof(struct ino_bucket))); |
2429 |
++out: |
2430 |
++ return irq; |
2431 |
++} |
2432 |
+ |
2433 |
+- irq = irq_alloc(devhandle, devino); |
2434 |
++static void sysino_set_bucket(unsigned int irq) |
2435 |
++{ |
2436 |
++ struct irq_handler_data *ihd = irq_get_handler_data(irq); |
2437 |
++ struct ino_bucket *bucket; |
2438 |
++ unsigned long sysino; |
2439 |
++ |
2440 |
++ sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino); |
2441 |
++ BUG_ON(sysino >= nr_ivec); |
2442 |
++ bucket = &ivector_table[sysino]; |
2443 |
+ bucket_set_irq(__pa(bucket), irq); |
2444 |
++} |
2445 |
+ |
2446 |
+- irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq, |
2447 |
+- "IVEC"); |
2448 |
++static void sysino_handler_data(struct irq_handler_data *data, |
2449 |
++ u32 devhandle, unsigned int devino) |
2450 |
++{ |
2451 |
++ unsigned long sysino; |
2452 |
+ |
2453 |
+- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); |
2454 |
+- if (unlikely(!handler_data)) |
2455 |
+- return 0; |
2456 |
++ sysino = sun4v_devino_to_sysino(devhandle, devino); |
2457 |
++ data->sysino = sysino; |
2458 |
++} |
2459 |
+ |
2460 |
+- /* In order to make the LDC channel startup sequence easier, |
2461 |
+- * especially wrt. locking, we do not let request_irq() enable |
2462 |
+- * the interrupt. |
2463 |
+- */ |
2464 |
+- irq_set_status_flags(irq, IRQ_NOAUTOEN); |
2465 |
+- irq_set_handler_data(irq, handler_data); |
2466 |
++static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino, |
2467 |
++ struct irq_chip *chip) |
2468 |
++{ |
2469 |
++ unsigned int irq; |
2470 |
+ |
2471 |
+- /* Catch accidental accesses to these things. IMAP/ICLR handling |
2472 |
+- * is done by hypervisor calls on sun4v platforms, not by direct |
2473 |
+- * register accesses. |
2474 |
+- */ |
2475 |
+- handler_data->imap = ~0UL; |
2476 |
+- handler_data->iclr = ~0UL; |
2477 |
++ irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip); |
2478 |
++ if (!irq) |
2479 |
++ goto out; |
2480 |
+ |
2481 |
+- cookie = ~__pa(bucket); |
2482 |
+- hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); |
2483 |
+- if (hv_err) { |
2484 |
+- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " |
2485 |
+- "err=%lu\n", devhandle, devino, hv_err); |
2486 |
+- prom_halt(); |
2487 |
+- } |
2488 |
++ sysino_set_bucket(irq); |
2489 |
++out: |
2490 |
++ return irq; |
2491 |
++} |
2492 |
+ |
2493 |
++static int sun4v_build_sysino(u32 devhandle, unsigned int devino) |
2494 |
++{ |
2495 |
++ int irq; |
2496 |
++ |
2497 |
++ irq = sysino_exists(devhandle, devino); |
2498 |
++ if (irq) |
2499 |
++ goto out; |
2500 |
++ |
2501 |
++ irq = sysino_build_irq(devhandle, devino, &sun4v_irq); |
2502 |
++out: |
2503 |
+ return irq; |
2504 |
+ } |
2505 |
+ |
2506 |
+-void ack_bad_irq(unsigned int irq) |
2507 |
++unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) |
2508 |
+ { |
2509 |
+- unsigned int ino = irq_table[irq].dev_ino; |
2510 |
++ unsigned int irq; |
2511 |
+ |
2512 |
+- if (!ino) |
2513 |
+- ino = 0xdeadbeef; |
2514 |
++ if (sun4v_cookie_only_virqs()) |
2515 |
++ irq = sun4v_build_cookie(devhandle, devino); |
2516 |
++ else |
2517 |
++ irq = sun4v_build_sysino(devhandle, devino); |
2518 |
+ |
2519 |
+- printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n", |
2520 |
+- ino, irq); |
2521 |
++ return irq; |
2522 |
++} |
2523 |
++ |
2524 |
++unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) |
2525 |
++{ |
2526 |
++ int irq; |
2527 |
++ |
2528 |
++ irq = cookie_build_irq(devhandle, devino, &sun4v_virq); |
2529 |
++ if (!irq) |
2530 |
++ goto out; |
2531 |
++ |
2532 |
++	/* As in the original function, do not let request_irq() |
2533 |
++	 * enable the interrupt (eases the LDC startup sequence). */ |
2534 |
++ irq_set_status_flags(irq, IRQ_NOAUTOEN); |
2535 |
++ |
2536 |
++out: |
2537 |
++ return irq; |
2538 |
+ } |
2539 |
+ |
2540 |
+ void *hardirq_stack[NR_CPUS]; |
2541 |
+@@ -720,9 +871,12 @@ void fixup_irqs(void) |
2542 |
+ |
2543 |
+ for (irq = 0; irq < NR_IRQS; irq++) { |
2544 |
+ struct irq_desc *desc = irq_to_desc(irq); |
2545 |
+- struct irq_data *data = irq_desc_get_irq_data(desc); |
2546 |
++ struct irq_data *data; |
2547 |
+ unsigned long flags; |
2548 |
+ |
2549 |
++ if (!desc) |
2550 |
++ continue; |
2551 |
++ data = irq_desc_get_irq_data(desc); |
2552 |
+ raw_spin_lock_irqsave(&desc->lock, flags); |
2553 |
+ if (desc->action && !irqd_is_per_cpu(data)) { |
2554 |
+ if (data->chip->irq_set_affinity) |
2555 |
+@@ -922,16 +1076,22 @@ static struct irqaction timer_irq_action = { |
2556 |
+ .name = "timer", |
2557 |
+ }; |
2558 |
+ |
2559 |
+-/* Only invoked on boot processor. */ |
2560 |
+-void __init init_IRQ(void) |
2561 |
++static void __init irq_ivector_init(void) |
2562 |
+ { |
2563 |
+- unsigned long size; |
2564 |
++ unsigned long size, order; |
2565 |
++ unsigned int ivecs; |
2566 |
+ |
2567 |
+- map_prom_timers(); |
2568 |
+- kill_prom_timer(); |
2569 |
++	/* If we are doing cookie-only VIRQs then we do not need the ivector |
2570 |
++ * table to process interrupts. |
2571 |
++ */ |
2572 |
++ if (sun4v_cookie_only_virqs()) |
2573 |
++ return; |
2574 |
+ |
2575 |
+- size = sizeof(struct ino_bucket) * NUM_IVECS; |
2576 |
+- ivector_table = kzalloc(size, GFP_KERNEL); |
2577 |
++ ivecs = size_nr_ivec(); |
2578 |
++ size = sizeof(struct ino_bucket) * ivecs; |
2579 |
++ order = get_order(size); |
2580 |
++ ivector_table = (struct ino_bucket *) |
2581 |
++ __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
2582 |
+ if (!ivector_table) { |
2583 |
+ prom_printf("Fatal error, cannot allocate ivector_table\n"); |
2584 |
+ prom_halt(); |
2585 |
+@@ -940,6 +1100,15 @@ void __init init_IRQ(void) |
2586 |
+ ((unsigned long) ivector_table) + size); |
2587 |
+ |
2588 |
+ ivector_table_pa = __pa(ivector_table); |
2589 |
++} |
2590 |
++ |
2591 |
++/* Only invoked on boot processor. */ |
2592 |
++void __init init_IRQ(void) |
2593 |
++{ |
2594 |
++ irq_init_hv(); |
2595 |
++ irq_ivector_init(); |
2596 |
++ map_prom_timers(); |
2597 |
++ kill_prom_timer(); |
2598 |
+ |
2599 |
+ if (tlb_type == hypervisor) |
2600 |
+ sun4v_init_mondo_queues(); |
2601 |
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S |
2602 |
+index 605d49204580..ef0d8e9e1210 100644 |
2603 |
+--- a/arch/sparc/kernel/ktlb.S |
2604 |
++++ b/arch/sparc/kernel/ktlb.S |
2605 |
+@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr: |
2606 |
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) |
2607 |
+ |
2608 |
+ TSB_LOCK_TAG(%g1, %g2, %g7) |
2609 |
+- |
2610 |
+- /* Load and check PTE. */ |
2611 |
+- ldxa [%g5] ASI_PHYS_USE_EC, %g5 |
2612 |
+- mov 1, %g7 |
2613 |
+- sllx %g7, TSB_TAG_INVALID_BIT, %g7 |
2614 |
+- brgez,a,pn %g5, kvmap_itlb_longpath |
2615 |
+- TSB_STORE(%g1, %g7) |
2616 |
+- |
2617 |
+ TSB_WRITE(%g1, %g5, %g6) |
2618 |
+ |
2619 |
+ /* fallthrough to TLB load */ |
2620 |
+@@ -118,6 +110,12 @@ kvmap_dtlb_obp: |
2621 |
+ ba,pt %xcc, kvmap_dtlb_load |
2622 |
+ nop |
2623 |
+ |
2624 |
++kvmap_linear_early: |
2625 |
++ sethi %hi(kern_linear_pte_xor), %g7 |
2626 |
++ ldx [%g7 + %lo(kern_linear_pte_xor)], %g2 |
2627 |
++ ba,pt %xcc, kvmap_dtlb_tsb4m_load |
2628 |
++ xor %g2, %g4, %g5 |
2629 |
++ |
2630 |
+ .align 32 |
2631 |
+ kvmap_dtlb_tsb4m_load: |
2632 |
+ TSB_LOCK_TAG(%g1, %g2, %g7) |
2633 |
+@@ -146,105 +144,17 @@ kvmap_dtlb_4v: |
2634 |
+ /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ |
2635 |
+ KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) |
2636 |
+ #endif |
2637 |
+- /* TSB entry address left in %g1, lookup linear PTE. |
2638 |
+- * Must preserve %g1 and %g6 (TAG). |
2639 |
+- */ |
2640 |
+-kvmap_dtlb_tsb4m_miss: |
2641 |
+- /* Clear the PAGE_OFFSET top virtual bits, shift |
2642 |
+- * down to get PFN, and make sure PFN is in range. |
2643 |
+- */ |
2644 |
+-661: sllx %g4, 0, %g5 |
2645 |
+- .section .page_offset_shift_patch, "ax" |
2646 |
+- .word 661b |
2647 |
+- .previous |
2648 |
+- |
2649 |
+- /* Check to see if we know about valid memory at the 4MB |
2650 |
+- * chunk this physical address will reside within. |
2651 |
++ /* Linear mapping TSB lookup failed. Fallthrough to kernel |
2652 |
++ * page table based lookup. |
2653 |
+ */ |
2654 |
+-661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2 |
2655 |
+- .section .page_offset_shift_patch, "ax" |
2656 |
+- .word 661b |
2657 |
+- .previous |
2658 |
+- |
2659 |
+- brnz,pn %g2, kvmap_dtlb_longpath |
2660 |
+- nop |
2661 |
+- |
2662 |
+- /* This unconditional branch and delay-slot nop gets patched |
2663 |
+- * by the sethi sequence once the bitmap is properly setup. |
2664 |
+- */ |
2665 |
+- .globl valid_addr_bitmap_insn |
2666 |
+-valid_addr_bitmap_insn: |
2667 |
+- ba,pt %xcc, 2f |
2668 |
+- nop |
2669 |
+- .subsection 2 |
2670 |
+- .globl valid_addr_bitmap_patch |
2671 |
+-valid_addr_bitmap_patch: |
2672 |
+- sethi %hi(sparc64_valid_addr_bitmap), %g7 |
2673 |
+- or %g7, %lo(sparc64_valid_addr_bitmap), %g7 |
2674 |
+- .previous |
2675 |
+- |
2676 |
+-661: srlx %g5, ILOG2_4MB, %g2 |
2677 |
+- .section .page_offset_shift_patch, "ax" |
2678 |
+- .word 661b |
2679 |
+- .previous |
2680 |
+- |
2681 |
+- srlx %g2, 6, %g5 |
2682 |
+- and %g2, 63, %g2 |
2683 |
+- sllx %g5, 3, %g5 |
2684 |
+- ldx [%g7 + %g5], %g5 |
2685 |
+- mov 1, %g7 |
2686 |
+- sllx %g7, %g2, %g7 |
2687 |
+- andcc %g5, %g7, %g0 |
2688 |
+- be,pn %xcc, kvmap_dtlb_longpath |
2689 |
+- |
2690 |
+-2: sethi %hi(kpte_linear_bitmap), %g2 |
2691 |
+- |
2692 |
+- /* Get the 256MB physical address index. */ |
2693 |
+-661: sllx %g4, 0, %g5 |
2694 |
+- .section .page_offset_shift_patch, "ax" |
2695 |
+- .word 661b |
2696 |
+- .previous |
2697 |
+- |
2698 |
+- or %g2, %lo(kpte_linear_bitmap), %g2 |
2699 |
+- |
2700 |
+-661: srlx %g5, ILOG2_256MB, %g5 |
2701 |
+- .section .page_offset_shift_patch, "ax" |
2702 |
+- .word 661b |
2703 |
+- .previous |
2704 |
+- |
2705 |
+- and %g5, (32 - 1), %g7 |
2706 |
+- |
2707 |
+- /* Divide by 32 to get the offset into the bitmask. */ |
2708 |
+- srlx %g5, 5, %g5 |
2709 |
+- add %g7, %g7, %g7 |
2710 |
+- sllx %g5, 3, %g5 |
2711 |
+- |
2712 |
+- /* kern_linear_pte_xor[(mask >> shift) & 3)] */ |
2713 |
+- ldx [%g2 + %g5], %g2 |
2714 |
+- srlx %g2, %g7, %g7 |
2715 |
+- sethi %hi(kern_linear_pte_xor), %g5 |
2716 |
+- and %g7, 3, %g7 |
2717 |
+- or %g5, %lo(kern_linear_pte_xor), %g5 |
2718 |
+- sllx %g7, 3, %g7 |
2719 |
+- ldx [%g5 + %g7], %g2 |
2720 |
+- |
2721 |
+ .globl kvmap_linear_patch |
2722 |
+ kvmap_linear_patch: |
2723 |
+- ba,pt %xcc, kvmap_dtlb_tsb4m_load |
2724 |
+- xor %g2, %g4, %g5 |
2725 |
++ ba,a,pt %xcc, kvmap_linear_early |
2726 |
+ |
2727 |
+ kvmap_dtlb_vmalloc_addr: |
2728 |
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) |
2729 |
+ |
2730 |
+ TSB_LOCK_TAG(%g1, %g2, %g7) |
2731 |
+- |
2732 |
+- /* Load and check PTE. */ |
2733 |
+- ldxa [%g5] ASI_PHYS_USE_EC, %g5 |
2734 |
+- mov 1, %g7 |
2735 |
+- sllx %g7, TSB_TAG_INVALID_BIT, %g7 |
2736 |
+- brgez,a,pn %g5, kvmap_dtlb_longpath |
2737 |
+- TSB_STORE(%g1, %g7) |
2738 |
+- |
2739 |
+ TSB_WRITE(%g1, %g5, %g6) |
2740 |
+ |
2741 |
+ /* fallthrough to TLB load */ |
2742 |
+@@ -276,13 +186,8 @@ kvmap_dtlb_load: |
2743 |
+ |
2744 |
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP |
2745 |
+ kvmap_vmemmap: |
2746 |
+- sub %g4, %g5, %g5 |
2747 |
+- srlx %g5, ILOG2_4MB, %g5 |
2748 |
+- sethi %hi(vmemmap_table), %g1 |
2749 |
+- sllx %g5, 3, %g5 |
2750 |
+- or %g1, %lo(vmemmap_table), %g1 |
2751 |
+- ba,pt %xcc, kvmap_dtlb_load |
2752 |
+- ldx [%g1 + %g5], %g5 |
2753 |
++ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) |
2754 |
++ ba,a,pt %xcc, kvmap_dtlb_load |
2755 |
+ #endif |
2756 |
+ |
2757 |
+ kvmap_dtlb_nonlinear: |
2758 |
+@@ -294,8 +199,8 @@ kvmap_dtlb_nonlinear: |
2759 |
+ |
2760 |
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP |
2761 |
+ /* Do not use the TSB for vmemmap. */ |
2762 |
+- mov (VMEMMAP_BASE >> 40), %g5 |
2763 |
+- sllx %g5, 40, %g5 |
2764 |
++ sethi %hi(VMEMMAP_BASE), %g5 |
2765 |
++ ldx [%g5 + %lo(VMEMMAP_BASE)], %g5 |
2766 |
+ cmp %g4,%g5 |
2767 |
+ bgeu,pn %xcc, kvmap_vmemmap |
2768 |
+ nop |
2769 |
+@@ -307,8 +212,8 @@ kvmap_dtlb_tsbmiss: |
2770 |
+ sethi %hi(MODULES_VADDR), %g5 |
2771 |
+ cmp %g4, %g5 |
2772 |
+ blu,pn %xcc, kvmap_dtlb_longpath |
2773 |
+- mov (VMALLOC_END >> 40), %g5 |
2774 |
+- sllx %g5, 40, %g5 |
2775 |
++ sethi %hi(VMALLOC_END), %g5 |
2776 |
++ ldx [%g5 + %lo(VMALLOC_END)], %g5 |
2777 |
+ cmp %g4, %g5 |
2778 |
+ bgeu,pn %xcc, kvmap_dtlb_longpath |
2779 |
+ nop |
2780 |
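Note the pattern change in the last two ktlb.S hunks: VMALLOC_END and
VMEMMAP_BASE are no longer assemble-time constants but are loaded from memory
with sethi/ldx, since this series makes the kernel address-space layout a
boot-time decision. The C analogue is comparing against a variable rather
than a macro (a sketch, with the variable name a stand-in):

	extern unsigned long vmalloc_end_var;

	static bool past_vmalloc_end(unsigned long va)
	{
		return va >= vmalloc_end_var;
	}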
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c |
2781 |
+index 66dacd56bb10..27bb55485472 100644 |
2782 |
+--- a/arch/sparc/kernel/ldc.c |
2783 |
++++ b/arch/sparc/kernel/ldc.c |
2784 |
+@@ -1078,7 +1078,8 @@ static void ldc_iommu_release(struct ldc_channel *lp) |
2785 |
+ |
2786 |
+ struct ldc_channel *ldc_alloc(unsigned long id, |
2787 |
+ const struct ldc_channel_config *cfgp, |
2788 |
+- void *event_arg) |
2789 |
++ void *event_arg, |
2790 |
++ const char *name) |
2791 |
+ { |
2792 |
+ struct ldc_channel *lp; |
2793 |
+ const struct ldc_mode_ops *mops; |
2794 |
+@@ -1093,6 +1094,8 @@ struct ldc_channel *ldc_alloc(unsigned long id, |
2795 |
+ err = -EINVAL; |
2796 |
+ if (!cfgp) |
2797 |
+ goto out_err; |
2798 |
++ if (!name) |
2799 |
++ goto out_err; |
2800 |
+ |
2801 |
+ switch (cfgp->mode) { |
2802 |
+ case LDC_MODE_RAW: |
2803 |
+@@ -1185,6 +1188,21 @@ struct ldc_channel *ldc_alloc(unsigned long id, |
2804 |
+ |
2805 |
+ INIT_HLIST_HEAD(&lp->mh_list); |
2806 |
+ |
2807 |
++ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); |
2808 |
++ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); |
2809 |
++ |
2810 |
++ err = request_irq(lp->cfg.rx_irq, ldc_rx, 0, |
2811 |
++ lp->rx_irq_name, lp); |
2812 |
++ if (err) |
2813 |
++ goto out_free_txq; |
2814 |
++ |
2815 |
++ err = request_irq(lp->cfg.tx_irq, ldc_tx, 0, |
2816 |
++ lp->tx_irq_name, lp); |
2817 |
++ if (err) { |
2818 |
++ free_irq(lp->cfg.rx_irq, lp); |
2819 |
++ goto out_free_txq; |
2820 |
++ } |
2821 |
++ |
2822 |
+ return lp; |
2823 |
+ |
2824 |
+ out_free_txq: |
2825 |
+@@ -1237,31 +1255,14 @@ EXPORT_SYMBOL(ldc_free); |
2826 |
+ * state. This does not initiate a handshake, ldc_connect() does |
2827 |
+ * that. |
2828 |
+ */ |
2829 |
+-int ldc_bind(struct ldc_channel *lp, const char *name) |
2830 |
++int ldc_bind(struct ldc_channel *lp) |
2831 |
+ { |
2832 |
+ unsigned long hv_err, flags; |
2833 |
+ int err = -EINVAL; |
2834 |
+ |
2835 |
+- if (!name || |
2836 |
+- (lp->state != LDC_STATE_INIT)) |
2837 |
++ if (lp->state != LDC_STATE_INIT) |
2838 |
+ return -EINVAL; |
2839 |
+ |
2840 |
+- snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); |
2841 |
+- snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); |
2842 |
+- |
2843 |
+- err = request_irq(lp->cfg.rx_irq, ldc_rx, 0, |
2844 |
+- lp->rx_irq_name, lp); |
2845 |
+- if (err) |
2846 |
+- return err; |
2847 |
+- |
2848 |
+- err = request_irq(lp->cfg.tx_irq, ldc_tx, 0, |
2849 |
+- lp->tx_irq_name, lp); |
2850 |
+- if (err) { |
2851 |
+- free_irq(lp->cfg.rx_irq, lp); |
2852 |
+- return err; |
2853 |
+- } |
2854 |
+- |
2855 |
+- |
2856 |
+ spin_lock_irqsave(&lp->lock, flags); |
2857 |
+ |
2858 |
+ enable_irq(lp->cfg.rx_irq); |
2859 |
+diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c |
2860 |
+index 269af58497aa..7e967c8018c8 100644 |
2861 |
+--- a/arch/sparc/kernel/pcr.c |
2862 |
++++ b/arch/sparc/kernel/pcr.c |
2863 |
+@@ -191,12 +191,41 @@ static const struct pcr_ops n4_pcr_ops = { |
2864 |
+ .pcr_nmi_disable = PCR_N4_PICNPT, |
2865 |
+ }; |
2866 |
+ |
2867 |
++static u64 n5_pcr_read(unsigned long reg_num) |
2868 |
++{ |
2869 |
++ unsigned long val; |
2870 |
++ |
2871 |
++ (void) sun4v_t5_get_perfreg(reg_num, &val); |
2872 |
++ |
2873 |
++ return val; |
2874 |
++} |
2875 |
++ |
2876 |
++static void n5_pcr_write(unsigned long reg_num, u64 val) |
2877 |
++{ |
2878 |
++ (void) sun4v_t5_set_perfreg(reg_num, val); |
2879 |
++} |
2880 |
++ |
2881 |
++static const struct pcr_ops n5_pcr_ops = { |
2882 |
++ .read_pcr = n5_pcr_read, |
2883 |
++ .write_pcr = n5_pcr_write, |
2884 |
++ .read_pic = n4_pic_read, |
2885 |
++ .write_pic = n4_pic_write, |
2886 |
++ .nmi_picl_value = n4_picl_value, |
2887 |
++ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | |
2888 |
++ PCR_N4_UTRACE | PCR_N4_TOE | |
2889 |
++ (26 << PCR_N4_SL_SHIFT)), |
2890 |
++ .pcr_nmi_disable = PCR_N4_PICNPT, |
2891 |
++}; |
2892 |
++ |
2893 |
++ |
2894 |
+ static unsigned long perf_hsvc_group; |
2895 |
+ static unsigned long perf_hsvc_major; |
2896 |
+ static unsigned long perf_hsvc_minor; |
2897 |
+ |
2898 |
+ static int __init register_perf_hsvc(void) |
2899 |
+ { |
2900 |
++ unsigned long hverror; |
2901 |
++ |
2902 |
+ if (tlb_type == hypervisor) { |
2903 |
+ switch (sun4v_chip_type) { |
2904 |
+ case SUN4V_CHIP_NIAGARA1: |
2905 |
+@@ -215,6 +244,10 @@ static int __init register_perf_hsvc(void) |
2906 |
+ perf_hsvc_group = HV_GRP_VT_CPU; |
2907 |
+ break; |
2908 |
+ |
2909 |
++ case SUN4V_CHIP_NIAGARA5: |
2910 |
++ perf_hsvc_group = HV_GRP_T5_CPU; |
2911 |
++ break; |
2912 |
++ |
2913 |
+ default: |
2914 |
+ return -ENODEV; |
2915 |
+ } |
2916 |
+@@ -222,10 +255,12 @@ static int __init register_perf_hsvc(void) |
2917 |
+ |
2918 |
+ perf_hsvc_major = 1; |
2919 |
+ perf_hsvc_minor = 0; |
2920 |
+- if (sun4v_hvapi_register(perf_hsvc_group, |
2921 |
+- perf_hsvc_major, |
2922 |
+- &perf_hsvc_minor)) { |
2923 |
+- printk("perfmon: Could not register hvapi.\n"); |
2924 |
++ hverror = sun4v_hvapi_register(perf_hsvc_group, |
2925 |
++ perf_hsvc_major, |
2926 |
++ &perf_hsvc_minor); |
2927 |
++ if (hverror) { |
2928 |
++ pr_err("perfmon: Could not register hvapi(0x%lx).\n", |
2929 |
++ hverror); |
2930 |
+ return -ENODEV; |
2931 |
+ } |
2932 |
+ } |
2933 |
+@@ -254,6 +289,10 @@ static int __init setup_sun4v_pcr_ops(void) |
2934 |
+ pcr_ops = &n4_pcr_ops; |
2935 |
+ break; |
2936 |
+ |
2937 |
++ case SUN4V_CHIP_NIAGARA5: |
2938 |
++ pcr_ops = &n5_pcr_ops; |
2939 |
++ break; |
2940 |
++ |
2941 |
+ default: |
2942 |
+ ret = -ENODEV; |
2943 |
+ break; |
2944 |
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c |
2945 |
+index d35c490a91cb..c9759ad3f34a 100644 |
2946 |
+--- a/arch/sparc/kernel/perf_event.c |
2947 |
++++ b/arch/sparc/kernel/perf_event.c |
2948 |
+@@ -1662,7 +1662,8 @@ static bool __init supported_pmu(void) |
2949 |
+ sparc_pmu = &niagara2_pmu; |
2950 |
+ return true; |
2951 |
+ } |
2952 |
+- if (!strcmp(sparc_pmu_type, "niagara4")) { |
2953 |
++ if (!strcmp(sparc_pmu_type, "niagara4") || |
2954 |
++ !strcmp(sparc_pmu_type, "niagara5")) { |
2955 |
+ sparc_pmu = &niagara4_pmu; |
2956 |
+ return true; |
2957 |
+ } |
2958 |
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c |
2959 |
+index 3fdb455e3318..61a519808cb7 100644 |
2960 |
+--- a/arch/sparc/kernel/setup_64.c |
2961 |
++++ b/arch/sparc/kernel/setup_64.c |
2962 |
+@@ -30,6 +30,7 @@ |
2963 |
+ #include <linux/cpu.h> |
2964 |
+ #include <linux/initrd.h> |
2965 |
+ #include <linux/module.h> |
2966 |
++#include <linux/start_kernel.h> |
2967 |
+ |
2968 |
+ #include <asm/io.h> |
2969 |
+ #include <asm/processor.h> |
2970 |
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE]; |
2971 |
+ |
2972 |
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; |
2973 |
+ |
2974 |
+-void __init per_cpu_patch(void) |
2975 |
++static void __init per_cpu_patch(void) |
2976 |
+ { |
2977 |
+ struct cpuid_patch_entry *p; |
2978 |
+ unsigned long ver; |
2979 |
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, |
2980 |
+ } |
2981 |
+ } |
2982 |
+ |
2983 |
+-void __init sun4v_patch(void) |
2984 |
++static void __init sun4v_patch(void) |
2985 |
+ { |
2986 |
+ extern void sun4v_hvapi_init(void); |
2987 |
+ |
2988 |
+@@ -335,14 +336,25 @@ static void __init pause_patch(void) |
2989 |
+ } |
2990 |
+ } |
2991 |
+ |
2992 |
+-#ifdef CONFIG_SMP |
2993 |
+-void __init boot_cpu_id_too_large(int cpu) |
2994 |
++void __init start_early_boot(void) |
2995 |
+ { |
2996 |
+- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", |
2997 |
+- cpu, NR_CPUS); |
2998 |
+- prom_halt(); |
2999 |
++ int cpu; |
3000 |
++ |
3001 |
++ check_if_starfire(); |
3002 |
++ per_cpu_patch(); |
3003 |
++ sun4v_patch(); |
3004 |
++ |
3005 |
++ cpu = hard_smp_processor_id(); |
3006 |
++ if (cpu >= NR_CPUS) { |
3007 |
++ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", |
3008 |
++ cpu, NR_CPUS); |
3009 |
++ prom_halt(); |
3010 |
++ } |
3011 |
++ current_thread_info()->cpu = cpu; |
3012 |
++ |
3013 |
++ prom_init_report(); |
3014 |
++ start_kernel(); |
3015 |
+ } |
3016 |
+-#endif |
3017 |
+ |
3018 |
+ /* On Ultra, we support all of the v8 capabilities. */ |
3019 |
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | |
3020 |
+@@ -500,12 +512,16 @@ static void __init init_sparc64_elf_hwcap(void) |
3021 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
3022 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || |
3023 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
3024 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
3025 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
3026 |
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
3027 |
+ cap |= HWCAP_SPARC_BLKINIT; |
3028 |
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
3029 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
3030 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || |
3031 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
3032 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
3033 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
3034 |
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
3035 |
+ cap |= HWCAP_SPARC_N2; |
3036 |
+ } |
3037 |
+@@ -533,6 +549,8 @@ static void __init init_sparc64_elf_hwcap(void) |
3038 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
3039 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || |
3040 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
3041 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
3042 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
3043 |
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
3044 |
+ cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | |
3045 |
+ AV_SPARC_ASI_BLK_INIT | |
3046 |
+@@ -540,6 +558,8 @@ static void __init init_sparc64_elf_hwcap(void) |
3047 |
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
3048 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || |
3049 |
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
3050 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
3051 |
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
3052 |
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
3053 |
+ cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | |
3054 |
+ AV_SPARC_FMAF); |
3055 |
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c |
3056 |
+index f7ba87543e5f..c9300bfaee5a 100644 |
3057 |
+--- a/arch/sparc/kernel/smp_64.c |
3058 |
++++ b/arch/sparc/kernel/smp_64.c |
3059 |
+@@ -1467,6 +1467,13 @@ static void __init pcpu_populate_pte(unsigned long addr) |
3060 |
+ pud_t *pud; |
3061 |
+ pmd_t *pmd; |
3062 |
+ |
3063 |
++ if (pgd_none(*pgd)) { |
3064 |
++ pud_t *new; |
3065 |
++ |
3066 |
++ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
3067 |
++ pgd_populate(&init_mm, pgd, new); |
3068 |
++ } |
3069 |
++ |
3070 |
+ pud = pud_offset(pgd, addr); |
3071 |
+ if (pud_none(*pud)) { |
3072 |
+ pmd_t *new; |
3073 |
+diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S |
3074 |
+index e0c09bf85610..6179e19bc9b9 100644 |
3075 |
+--- a/arch/sparc/kernel/sun4v_tlb_miss.S |
3076 |
++++ b/arch/sparc/kernel/sun4v_tlb_miss.S |
3077 |
+@@ -195,6 +195,11 @@ sun4v_tsb_miss_common: |
3078 |
+ ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 |
3079 |
+ |
3080 |
+ sun4v_itlb_error: |
3081 |
++ rdpr %tl, %g1 |
3082 |
++ cmp %g1, 1 |
3083 |
++ ble,pt %icc, sun4v_bad_ra |
3084 |
++ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1 |
3085 |
++ |
3086 |
+ sethi %hi(sun4v_err_itlb_vaddr), %g1 |
3087 |
+ stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] |
3088 |
+ sethi %hi(sun4v_err_itlb_ctx), %g1 |
3089 |
+@@ -206,15 +211,10 @@ sun4v_itlb_error: |
3090 |
+ sethi %hi(sun4v_err_itlb_error), %g1 |
3091 |
+ stx %o0, [%g1 + %lo(sun4v_err_itlb_error)] |
3092 |
+ |
3093 |
++ sethi %hi(1f), %g7 |
3094 |
+ rdpr %tl, %g4 |
3095 |
+- cmp %g4, 1 |
3096 |
+- ble,pt %icc, 1f |
3097 |
+- sethi %hi(2f), %g7 |
3098 |
+ ba,pt %xcc, etraptl1 |
3099 |
+- or %g7, %lo(2f), %g7 |
3100 |
+- |
3101 |
+-1: ba,pt %xcc, etrap |
3102 |
+-2: or %g7, %lo(2b), %g7 |
3103 |
++1: or %g7, %lo(1f), %g7 |
3104 |
+ mov %l4, %o1 |
3105 |
+ call sun4v_itlb_error_report |
3106 |
+ add %sp, PTREGS_OFF, %o0 |
3107 |
+@@ -222,6 +222,11 @@ sun4v_itlb_error: |
3108 |
+ /* NOTREACHED */ |
3109 |
+ |
3110 |
+ sun4v_dtlb_error: |
3111 |
++ rdpr %tl, %g1 |
3112 |
++ cmp %g1, 1 |
3113 |
++ ble,pt %icc, sun4v_bad_ra |
3114 |
++ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1 |
3115 |
++ |
3116 |
+ sethi %hi(sun4v_err_dtlb_vaddr), %g1 |
3117 |
+ stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] |
3118 |
+ sethi %hi(sun4v_err_dtlb_ctx), %g1 |
3119 |
+@@ -233,21 +238,23 @@ sun4v_dtlb_error: |
3120 |
+ sethi %hi(sun4v_err_dtlb_error), %g1 |
3121 |
+ stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)] |
3122 |
+ |
3123 |
++ sethi %hi(1f), %g7 |
3124 |
+ rdpr %tl, %g4 |
3125 |
+- cmp %g4, 1 |
3126 |
+- ble,pt %icc, 1f |
3127 |
+- sethi %hi(2f), %g7 |
3128 |
+ ba,pt %xcc, etraptl1 |
3129 |
+- or %g7, %lo(2f), %g7 |
3130 |
+- |
3131 |
+-1: ba,pt %xcc, etrap |
3132 |
+-2: or %g7, %lo(2b), %g7 |
3133 |
++1: or %g7, %lo(1f), %g7 |
3134 |
+ mov %l4, %o1 |
3135 |
+ call sun4v_dtlb_error_report |
3136 |
+ add %sp, PTREGS_OFF, %o0 |
3137 |
+ |
3138 |
+ /* NOTREACHED */ |
3139 |
+ |
3140 |
++sun4v_bad_ra: |
3141 |
++ or %g0, %g4, %g5 |
3142 |
++ ba,pt %xcc, sparc64_realfault_common |
3143 |
++ or %g1, %g0, %g4 |
3144 |
++ |
3145 |
++ /* NOTREACHED */ |
3146 |
++ |
3147 |
+ /* Instruction Access Exception, tl0. */ |
3148 |
+ sun4v_iacc: |
3149 |
+ ldxa [%g0] ASI_SCRATCHPAD, %g2 |
3150 |
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S |
3151 |
+index 737f8cbc7d56..88ede1d53b4c 100644 |
3152 |
+--- a/arch/sparc/kernel/trampoline_64.S |
3153 |
++++ b/arch/sparc/kernel/trampoline_64.S |
3154 |
+@@ -109,10 +109,13 @@ startup_continue: |
3155 |
+ brnz,pn %g1, 1b |
3156 |
+ nop |
3157 |
+ |
3158 |
+- sethi %hi(p1275buf), %g2 |
3159 |
+- or %g2, %lo(p1275buf), %g2 |
3160 |
+- ldx [%g2 + 0x10], %l2 |
3161 |
+- add %l2, -(192 + 128), %sp |
3162 |
++ /* Get onto temporary stack which will be in the locked |
3163 |
++ * kernel image. |
3164 |
++ */ |
3165 |
++ sethi %hi(tramp_stack), %g1 |
3166 |
++ or %g1, %lo(tramp_stack), %g1 |
3167 |
++ add %g1, TRAMP_STACK_SIZE, %g1 |
3168 |
++ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp |
3169 |
+ flushw |
3170 |
+ |
3171 |
+ /* Setup the loop variables: |
3172 |
+@@ -394,7 +397,6 @@ after_lock_tlb: |
3173 |
+ sllx %g5, THREAD_SHIFT, %g5 |
3174 |
+ sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 |
3175 |
+ add %g6, %g5, %sp |
3176 |
+- mov 0, %fp |
3177 |
+ |
3178 |
+ rdpr %pstate, %o1 |
3179 |
+ or %o1, PSTATE_IE, %o1 |
3180 |
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c |
3181 |
+index fb6640ec8557..981a769b9558 100644 |
3182 |
+--- a/arch/sparc/kernel/traps_64.c |
3183 |
++++ b/arch/sparc/kernel/traps_64.c |
3184 |
+@@ -2104,6 +2104,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs) |
3185 |
+ atomic_inc(&sun4v_nonresum_oflow_cnt); |
3186 |
+ } |
3187 |
+ |
3188 |
++static void sun4v_tlb_error(struct pt_regs *regs) |
3189 |
++{ |
3190 |
++ die_if_kernel("TLB/TSB error", regs); |
3191 |
++} |
3192 |
++ |
3193 |
+ unsigned long sun4v_err_itlb_vaddr; |
3194 |
+ unsigned long sun4v_err_itlb_ctx; |
3195 |
+ unsigned long sun4v_err_itlb_pte; |
3196 |
+@@ -2111,8 +2116,7 @@ unsigned long sun4v_err_itlb_error; |
3197 |
+ |
3198 |
+ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) |
3199 |
+ { |
3200 |
+- if (tl > 1) |
3201 |
+- dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); |
3202 |
++ dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); |
3203 |
+ |
3204 |
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", |
3205 |
+ regs->tpc, tl); |
3206 |
+@@ -2125,7 +2129,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) |
3207 |
+ sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, |
3208 |
+ sun4v_err_itlb_pte, sun4v_err_itlb_error); |
3209 |
+ |
3210 |
+- prom_halt(); |
3211 |
++ sun4v_tlb_error(regs); |
3212 |
+ } |
3213 |
+ |
3214 |
+ unsigned long sun4v_err_dtlb_vaddr; |
3215 |
+@@ -2135,8 +2139,7 @@ unsigned long sun4v_err_dtlb_error; |
3216 |
+ |
3217 |
+ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) |
3218 |
+ { |
3219 |
+- if (tl > 1) |
3220 |
+- dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); |
3221 |
++ dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); |
3222 |
+ |
3223 |
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", |
3224 |
+ regs->tpc, tl); |
3225 |
+@@ -2149,7 +2152,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) |
3226 |
+ sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, |
3227 |
+ sun4v_err_dtlb_pte, sun4v_err_dtlb_error); |
3228 |
+ |
3229 |
+- prom_halt(); |
3230 |
++ sun4v_tlb_error(regs); |
3231 |
+ } |
3232 |
+ |
3233 |
+ void hypervisor_tlbop_error(unsigned long err, unsigned long op) |
3234 |
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S |
3235 |
+index 14158d40ba76..be98685c14c6 100644 |
3236 |
+--- a/arch/sparc/kernel/tsb.S |
3237 |
++++ b/arch/sparc/kernel/tsb.S |
3238 |
+@@ -162,10 +162,10 @@ tsb_miss_page_table_walk_sun4v_fastpath: |
3239 |
+ nop |
3240 |
+ .previous |
3241 |
+ |
3242 |
+- rdpr %tl, %g3 |
3243 |
+- cmp %g3, 1 |
3244 |
++ rdpr %tl, %g7 |
3245 |
++ cmp %g7, 1 |
3246 |
+ bne,pn %xcc, winfix_trampoline |
3247 |
+- nop |
3248 |
++ mov %g3, %g4 |
3249 |
+ ba,pt %xcc, etrap |
3250 |
+ rd %pc, %g7 |
3251 |
+ call hugetlb_setup |
3252 |
+diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c |
3253 |
+index f8e7dd53e1c7..9c5fbd0b8a04 100644 |
3254 |
+--- a/arch/sparc/kernel/viohs.c |
3255 |
++++ b/arch/sparc/kernel/viohs.c |
3256 |
+@@ -714,7 +714,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio, |
3257 |
+ cfg.tx_irq = vio->vdev->tx_irq; |
3258 |
+ cfg.rx_irq = vio->vdev->rx_irq; |
3259 |
+ |
3260 |
+- lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg); |
3261 |
++ lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name); |
3262 |
+ if (IS_ERR(lp)) |
3263 |
+ return PTR_ERR(lp); |
3264 |
+ |
3265 |
+@@ -746,7 +746,7 @@ void vio_port_up(struct vio_driver_state *vio) |
3266 |
+ |
3267 |
+ err = 0; |
3268 |
+ if (state == LDC_STATE_INIT) { |
3269 |
+- err = ldc_bind(vio->lp, vio->name); |
3270 |
++ err = ldc_bind(vio->lp); |
3271 |
+ if (err) |
3272 |
+ printk(KERN_WARNING "%s: Port %lu bind failed, " |
3273 |
+ "err=%d\n", |
3274 |
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S |
3275 |
+index 932ff90fd760..09243057cb0b 100644 |
3276 |
+--- a/arch/sparc/kernel/vmlinux.lds.S |
3277 |
++++ b/arch/sparc/kernel/vmlinux.lds.S |
3278 |
+@@ -35,8 +35,9 @@ jiffies = jiffies_64; |
3279 |
+ |
3280 |
+ SECTIONS |
3281 |
+ { |
3282 |
+- /* swapper_low_pmd_dir is sparc64 only */ |
3283 |
+- swapper_low_pmd_dir = 0x0000000000402000; |
3284 |
++#ifdef CONFIG_SPARC64 |
3285 |
++ swapper_pg_dir = 0x0000000000402000; |
3286 |
++#endif |
3287 |
+ . = INITIAL_ADDRESS; |
3288 |
+ .text TEXTSTART : |
3289 |
+ { |
3290 |
+@@ -122,11 +123,6 @@ SECTIONS |
3291 |
+ *(.swapper_4m_tsb_phys_patch) |
3292 |
+ __swapper_4m_tsb_phys_patch_end = .; |
3293 |
+ } |
3294 |
+- .page_offset_shift_patch : { |
3295 |
+- __page_offset_shift_patch = .; |
3296 |
+- *(.page_offset_shift_patch) |
3297 |
+- __page_offset_shift_patch_end = .; |
3298 |
+- } |
3299 |
+ .popc_3insn_patch : { |
3300 |
+ __popc_3insn_patch = .; |
3301 |
+ *(.popc_3insn_patch) |
3302 |
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S |
3303 |
+index 9cf2ee01cee3..140527a20e7d 100644 |
3304 |
+--- a/arch/sparc/lib/NG4memcpy.S |
3305 |
++++ b/arch/sparc/lib/NG4memcpy.S |
3306 |
+@@ -41,6 +41,10 @@ |
3307 |
+ #endif |
3308 |
+ #endif |
3309 |
+ |
3310 |
++#if !defined(EX_LD) && !defined(EX_ST) |
3311 |
++#define NON_USER_COPY |
3312 |
++#endif |
3313 |
++ |
3314 |
+ #ifndef EX_LD |
3315 |
+ #define EX_LD(x) x |
3316 |
+ #endif |
3317 |
+@@ -197,9 +201,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ |
3318 |
+ mov EX_RETVAL(%o3), %o0 |
3319 |
+ |
3320 |
+ .Llarge_src_unaligned: |
3321 |
++#ifdef NON_USER_COPY |
3322 |
++ VISEntryHalfFast(.Lmedium_vis_entry_fail) |
3323 |
++#else |
3324 |
++ VISEntryHalf |
3325 |
++#endif |
3326 |
+ andn %o2, 0x3f, %o4 |
3327 |
+ sub %o2, %o4, %o2 |
3328 |
+- VISEntryHalf |
3329 |
+ alignaddr %o1, %g0, %g1 |
3330 |
+ add %o1, %o4, %o1 |
3331 |
+ EX_LD(LOAD(ldd, %g1 + 0x00, %f0)) |
3332 |
+@@ -240,6 +248,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ |
3333 |
+ nop |
3334 |
+ ba,a,pt %icc, .Lmedium_unaligned |
3335 |
+ |
3336 |
++#ifdef NON_USER_COPY |
3337 |
++.Lmedium_vis_entry_fail: |
3338 |
++ or %o0, %o1, %g2 |
3339 |
++#endif |
3340 |
+ .Lmedium: |
3341 |
+ LOAD(prefetch, %o1 + 0x40, #n_reads_strong) |
3342 |
+ andcc %g2, 0x7, %g0 |
3343 |
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S |
3344 |
+index 99c017be8719..f75e6906df14 100644 |
3345 |
+--- a/arch/sparc/lib/memset.S |
3346 |
++++ b/arch/sparc/lib/memset.S |
3347 |
+@@ -3,8 +3,9 @@ |
3348 |
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@××××××××××××××××.cz) |
3349 |
+ * Copyright (C) 1996 David S. Miller (davem@××××××××××××.edu) |
3350 |
+ * |
3351 |
+- * Returns 0, if ok, and number of bytes not yet set if exception |
3352 |
+- * occurs and we were called as clear_user. |
3353 |
++ * Calls to memset return the initial %o0. Calls to bzero return 0, if ok, and |
3354 |
++ * number of bytes not yet set if exception occurs and we were called as |
3355 |
++ * clear_user. |
3356 |
+ */ |
3357 |
+ |
3358 |
+ #include <asm/ptrace.h> |
3359 |
+@@ -65,6 +66,8 @@ __bzero_begin: |
3360 |
+ .globl __memset_start, __memset_end |
3361 |
+ __memset_start: |
3362 |
+ memset: |
3363 |
++ mov %o0, %g1 |
3364 |
++ mov 1, %g4 |
3365 |
+ and %o1, 0xff, %g3 |
3366 |
+ sll %g3, 8, %g2 |
3367 |
+ or %g3, %g2, %g3 |
3368 |
+@@ -89,6 +92,7 @@ memset: |
3369 |
+ sub %o0, %o2, %o0 |
3370 |
+ |
3371 |
+ __bzero: |
3372 |
++ clr %g4 |
3373 |
+ mov %g0, %g3 |
3374 |
+ 1: |
3375 |
+ cmp %o1, 7 |
3376 |
+@@ -151,8 +155,8 @@ __bzero: |
3377 |
+ bne,a 8f |
3378 |
+ EX(stb %g3, [%o0], and %o1, 1) |
3379 |
+ 8: |
3380 |
+- retl |
3381 |
+- clr %o0 |
3382 |
++ b 0f |
3383 |
++ nop |
3384 |
+ 7: |
3385 |
+ be 13b |
3386 |
+ orcc %o1, 0, %g0 |
3387 |
+@@ -164,6 +168,12 @@ __bzero: |
3388 |
+ bne 8b |
3389 |
+ EX(stb %g3, [%o0 - 1], add %o1, 1) |
3390 |
+ 0: |
3391 |
++ andcc %g4, 1, %g0 |
3392 |
++ be 5f |
3393 |
++ nop |
3394 |
++ retl |
3395 |
++ mov %g1, %o0 |
3396 |
++5: |
3397 |
+ retl |
3398 |
+ clr %o0 |
3399 |
+ __memset_end: |
3400 |
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c |
3401 |
+index 587cd0565128..18fcd7167095 100644 |
3402 |
+--- a/arch/sparc/mm/fault_64.c |
3403 |
++++ b/arch/sparc/mm/fault_64.c |
3404 |
+@@ -346,6 +346,9 @@ retry: |
3405 |
+ down_read(&mm->mmap_sem); |
3406 |
+ } |
3407 |
+ |
3408 |
++ if (fault_code & FAULT_CODE_BAD_RA) |
3409 |
++ goto do_sigbus; |
3410 |
++ |
3411 |
+ vma = find_vma(mm, address); |
3412 |
+ if (!vma) |
3413 |
+ goto bad_area; |
3414 |
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c |
3415 |
+index 1aed0432c64b..ae6ce383d4df 100644 |
3416 |
+--- a/arch/sparc/mm/gup.c |
3417 |
++++ b/arch/sparc/mm/gup.c |
3418 |
+@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, |
3419 |
+ return 1; |
3420 |
+ } |
3421 |
+ |
3422 |
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write, |
3423 |
++ struct page **pages) |
3424 |
++{ |
3425 |
++ struct mm_struct *mm = current->mm; |
3426 |
++ unsigned long addr, len, end; |
3427 |
++ unsigned long next, flags; |
3428 |
++ pgd_t *pgdp; |
3429 |
++ int nr = 0; |
3430 |
++ |
3431 |
++ start &= PAGE_MASK; |
3432 |
++ addr = start; |
3433 |
++ len = (unsigned long) nr_pages << PAGE_SHIFT; |
3434 |
++ end = start + len; |
3435 |
++ |
3436 |
++ local_irq_save(flags); |
3437 |
++ pgdp = pgd_offset(mm, addr); |
3438 |
++ do { |
3439 |
++ pgd_t pgd = *pgdp; |
3440 |
++ |
3441 |
++ next = pgd_addr_end(addr, end); |
3442 |
++ if (pgd_none(pgd)) |
3443 |
++ break; |
3444 |
++ if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) |
3445 |
++ break; |
3446 |
++ } while (pgdp++, addr = next, addr != end); |
3447 |
++ local_irq_restore(flags); |
3448 |
++ |
3449 |
++ return nr; |
3450 |
++} |
3451 |
++ |
3452 |
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
3453 |
+ struct page **pages) |
3454 |
+ { |
3455 |
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c |
3456 |
+index 98ac8e80adae..04bc826135b4 100644 |
3457 |
+--- a/arch/sparc/mm/init_64.c |
3458 |
++++ b/arch/sparc/mm/init_64.c |
3459 |
+@@ -75,7 +75,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly; |
3460 |
+ * 'cpu' properties, but we need to have this table setup before the |
3461 |
+ * MDESC is initialized. |
3462 |
+ */ |
3463 |
+-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; |
3464 |
+ |
3465 |
+ #ifndef CONFIG_DEBUG_PAGEALLOC |
3466 |
+ /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings. |
3467 |
+@@ -84,10 +83,11 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; |
3468 |
+ */ |
3469 |
+ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; |
3470 |
+ #endif |
3471 |
++extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; |
3472 |
+ |
3473 |
+ static unsigned long cpu_pgsz_mask; |
3474 |
+ |
3475 |
+-#define MAX_BANKS 32 |
3476 |
++#define MAX_BANKS 1024 |
3477 |
+ |
3478 |
+ static struct linux_prom64_registers pavail[MAX_BANKS]; |
3479 |
+ static int pavail_ents; |
3480 |
+@@ -165,10 +165,6 @@ static void __init read_obp_memory(const char *property, |
3481 |
+ cmp_p64, NULL); |
3482 |
+ } |
3483 |
+ |
3484 |
+-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES / |
3485 |
+- sizeof(unsigned long)]; |
3486 |
+-EXPORT_SYMBOL(sparc64_valid_addr_bitmap); |
3487 |
+- |
3488 |
+ /* Kernel physical address base and size in bytes. */ |
3489 |
+ unsigned long kern_base __read_mostly; |
3490 |
+ unsigned long kern_size __read_mostly; |
3491 |
+@@ -840,7 +836,10 @@ static int find_node(unsigned long addr) |
3492 |
+ if ((addr & p->mask) == p->val) |
3493 |
+ return i; |
3494 |
+ } |
3495 |
+- return -1; |
3496 |
++ /* The following condition has been observed on LDOM guests. */ |
3497 |
++ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" |
3498 |
++ " rule. Some physical memory will be owned by node 0."); |
3499 |
++ return 0; |
3500 |
+ } |
3501 |
+ |
3502 |
+ static u64 memblock_nid_range(u64 start, u64 end, int *nid) |
3503 |
+@@ -1366,9 +1365,144 @@ static unsigned long __init bootmem_init(unsigned long phys_base) |
3504 |
+ static struct linux_prom64_registers pall[MAX_BANKS] __initdata; |
3505 |
+ static int pall_ents __initdata; |
3506 |
+ |
3507 |
+-#ifdef CONFIG_DEBUG_PAGEALLOC |
3508 |
++static unsigned long max_phys_bits = 40; |
3509 |
++ |
3510 |
++bool kern_addr_valid(unsigned long addr) |
3511 |
++{ |
3512 |
++ pgd_t *pgd; |
3513 |
++ pud_t *pud; |
3514 |
++ pmd_t *pmd; |
3515 |
++ pte_t *pte; |
3516 |
++ |
3517 |
++ if ((long)addr < 0L) { |
3518 |
++ unsigned long pa = __pa(addr); |
3519 |
++ |
3520 |
++ if ((addr >> max_phys_bits) != 0UL) |
3521 |
++ return false; |
3522 |
++ |
3523 |
++ return pfn_valid(pa >> PAGE_SHIFT); |
3524 |
++ } |
3525 |
++ |
3526 |
++ if (addr >= (unsigned long) KERNBASE && |
3527 |
++ addr < (unsigned long)&_end) |
3528 |
++ return true; |
3529 |
++ |
3530 |
++ pgd = pgd_offset_k(addr); |
3531 |
++ if (pgd_none(*pgd)) |
3532 |
++ return 0; |
3533 |
++ |
3534 |
++ pud = pud_offset(pgd, addr); |
3535 |
++ if (pud_none(*pud)) |
3536 |
++ return 0; |
3537 |
++ |
3538 |
++ if (pud_large(*pud)) |
3539 |
++ return pfn_valid(pud_pfn(*pud)); |
3540 |
++ |
3541 |
++ pmd = pmd_offset(pud, addr); |
3542 |
++ if (pmd_none(*pmd)) |
3543 |
++ return 0; |
3544 |
++ |
3545 |
++ if (pmd_large(*pmd)) |
3546 |
++ return pfn_valid(pmd_pfn(*pmd)); |
3547 |
++ |
3548 |
++ pte = pte_offset_kernel(pmd, addr); |
3549 |
++ if (pte_none(*pte)) |
3550 |
++ return 0; |
3551 |
++ |
3552 |
++ return pfn_valid(pte_pfn(*pte)); |
3553 |
++} |
3554 |
++EXPORT_SYMBOL(kern_addr_valid); |
3555 |
++ |
3556 |
++static unsigned long __ref kernel_map_hugepud(unsigned long vstart, |
3557 |
++ unsigned long vend, |
3558 |
++ pud_t *pud) |
3559 |
++{ |
3560 |
++ const unsigned long mask16gb = (1UL << 34) - 1UL; |
3561 |
++ u64 pte_val = vstart; |
3562 |
++ |
3563 |
++ /* Each PUD is 8GB */ |
3564 |
++ if ((vstart & mask16gb) || |
3565 |
++ (vend - vstart <= mask16gb)) { |
3566 |
++ pte_val ^= kern_linear_pte_xor[2]; |
3567 |
++ pud_val(*pud) = pte_val | _PAGE_PUD_HUGE; |
3568 |
++ |
3569 |
++ return vstart + PUD_SIZE; |
3570 |
++ } |
3571 |
++ |
3572 |
++ pte_val ^= kern_linear_pte_xor[3]; |
3573 |
++ pte_val |= _PAGE_PUD_HUGE; |
3574 |
++ |
3575 |
++ vend = vstart + mask16gb + 1UL; |
3576 |
++ while (vstart < vend) { |
3577 |
++ pud_val(*pud) = pte_val; |
3578 |
++ |
3579 |
++ pte_val += PUD_SIZE; |
3580 |
++ vstart += PUD_SIZE; |
3581 |
++ pud++; |
3582 |
++ } |
3583 |
++ return vstart; |
3584 |
++} |
3585 |
++ |
3586 |
++static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend, |
3587 |
++ bool guard) |
3588 |
++{ |
3589 |
++ if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) |
3590 |
++ return true; |
3591 |
++ |
3592 |
++ return false; |
3593 |
++} |
3594 |
++ |
3595 |
++static unsigned long __ref kernel_map_hugepmd(unsigned long vstart, |
3596 |
++ unsigned long vend, |
3597 |
++ pmd_t *pmd) |
3598 |
++{ |
3599 |
++ const unsigned long mask256mb = (1UL << 28) - 1UL; |
3600 |
++ const unsigned long mask2gb = (1UL << 31) - 1UL; |
3601 |
++ u64 pte_val = vstart; |
3602 |
++ |
3603 |
++ /* Each PMD is 8MB */ |
3604 |
++ if ((vstart & mask256mb) || |
3605 |
++ (vend - vstart <= mask256mb)) { |
3606 |
++ pte_val ^= kern_linear_pte_xor[0]; |
3607 |
++ pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; |
3608 |
++ |
3609 |
++ return vstart + PMD_SIZE; |
3610 |
++ } |
3611 |
++ |
3612 |
++ if ((vstart & mask2gb) || |
3613 |
++ (vend - vstart <= mask2gb)) { |
3614 |
++ pte_val ^= kern_linear_pte_xor[1]; |
3615 |
++ pte_val |= _PAGE_PMD_HUGE; |
3616 |
++ vend = vstart + mask256mb + 1UL; |
3617 |
++ } else { |
3618 |
++ pte_val ^= kern_linear_pte_xor[2]; |
3619 |
++ pte_val |= _PAGE_PMD_HUGE; |
3620 |
++ vend = vstart + mask2gb + 1UL; |
3621 |
++ } |
3622 |
++ |
3623 |
++ while (vstart < vend) { |
3624 |
++ pmd_val(*pmd) = pte_val; |
3625 |
++ |
3626 |
++ pte_val += PMD_SIZE; |
3627 |
++ vstart += PMD_SIZE; |
3628 |
++ pmd++; |
3629 |
++ } |
3630 |
++ |
3631 |
++ return vstart; |
3632 |
++} |
3633 |
++ |
3634 |
++static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend, |
3635 |
++ bool guard) |
3636 |
++{ |
3637 |
++ if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) |
3638 |
++ return true; |
3639 |
++ |
3640 |
++ return false; |
3641 |
++} |
3642 |
++ |
3643 |
+ static unsigned long __ref kernel_map_range(unsigned long pstart, |
3644 |
+- unsigned long pend, pgprot_t prot) |
3645 |
++ unsigned long pend, pgprot_t prot, |
3646 |
++ bool use_huge) |
3647 |
+ { |
3648 |
+ unsigned long vstart = PAGE_OFFSET + pstart; |
3649 |
+ unsigned long vend = PAGE_OFFSET + pend; |
3650 |
+@@ -1387,19 +1521,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart, |
3651 |
+ pmd_t *pmd; |
3652 |
+ pte_t *pte; |
3653 |
+ |
3654 |
++ if (pgd_none(*pgd)) { |
3655 |
++ pud_t *new; |
3656 |
++ |
3657 |
++ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
3658 |
++ alloc_bytes += PAGE_SIZE; |
3659 |
++ pgd_populate(&init_mm, pgd, new); |
3660 |
++ } |
3661 |
+ pud = pud_offset(pgd, vstart); |
3662 |
+ if (pud_none(*pud)) { |
3663 |
+ pmd_t *new; |
3664 |
+ |
3665 |
++ if (kernel_can_map_hugepud(vstart, vend, use_huge)) { |
3666 |
++ vstart = kernel_map_hugepud(vstart, vend, pud); |
3667 |
++ continue; |
3668 |
++ } |
3669 |
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
3670 |
+ alloc_bytes += PAGE_SIZE; |
3671 |
+ pud_populate(&init_mm, pud, new); |
3672 |
+ } |
3673 |
+ |
3674 |
+ pmd = pmd_offset(pud, vstart); |
3675 |
+- if (!pmd_present(*pmd)) { |
3676 |
++ if (pmd_none(*pmd)) { |
3677 |
+ pte_t *new; |
3678 |
+ |
3679 |
++ if (kernel_can_map_hugepmd(vstart, vend, use_huge)) { |
3680 |
++ vstart = kernel_map_hugepmd(vstart, vend, pmd); |
3681 |
++ continue; |
3682 |
++ } |
3683 |
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
3684 |
+ alloc_bytes += PAGE_SIZE; |
3685 |
+ pmd_populate_kernel(&init_mm, pmd, new); |
3686 |
+@@ -1422,100 +1571,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart, |
3687 |
+ return alloc_bytes; |
3688 |
+ } |
3689 |
+ |
3690 |
+-extern unsigned int kvmap_linear_patch[1]; |
3691 |
+-#endif /* CONFIG_DEBUG_PAGEALLOC */ |
3692 |
+- |
3693 |
+-static void __init kpte_set_val(unsigned long index, unsigned long val) |
3694 |
++static void __init flush_all_kernel_tsbs(void) |
3695 |
+ { |
3696 |
+- unsigned long *ptr = kpte_linear_bitmap; |
3697 |
+- |
3698 |
+- val <<= ((index % (BITS_PER_LONG / 2)) * 2); |
3699 |
+- ptr += (index / (BITS_PER_LONG / 2)); |
3700 |
+- |
3701 |
+- *ptr |= val; |
3702 |
+-} |
3703 |
+- |
3704 |
+-static const unsigned long kpte_shift_min = 28; /* 256MB */ |
3705 |
+-static const unsigned long kpte_shift_max = 34; /* 16GB */ |
3706 |
+-static const unsigned long kpte_shift_incr = 3; |
3707 |
+- |
3708 |
+-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end, |
3709 |
+- unsigned long shift) |
3710 |
+-{ |
3711 |
+- unsigned long size = (1UL << shift); |
3712 |
+- unsigned long mask = (size - 1UL); |
3713 |
+- unsigned long remains = end - start; |
3714 |
+- unsigned long val; |
3715 |
+- |
3716 |
+- if (remains < size || (start & mask)) |
3717 |
+- return start; |
3718 |
+- |
3719 |
+- /* VAL maps: |
3720 |
+- * |
3721 |
+- * shift 28 --> kern_linear_pte_xor index 1 |
3722 |
+- * shift 31 --> kern_linear_pte_xor index 2 |
3723 |
+- * shift 34 --> kern_linear_pte_xor index 3 |
3724 |
+- */ |
3725 |
+- val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1; |
3726 |
+- |
3727 |
+- remains &= ~mask; |
3728 |
+- if (shift != kpte_shift_max) |
3729 |
+- remains = size; |
3730 |
+- |
3731 |
+- while (remains) { |
3732 |
+- unsigned long index = start >> kpte_shift_min; |
3733 |
++ int i; |
3734 |
+ |
3735 |
+- kpte_set_val(index, val); |
3736 |
++ for (i = 0; i < KERNEL_TSB_NENTRIES; i++) { |
3737 |
++ struct tsb *ent = &swapper_tsb[i]; |
3738 |
+ |
3739 |
+- start += 1UL << kpte_shift_min; |
3740 |
+- remains -= 1UL << kpte_shift_min; |
3741 |
++ ent->tag = (1UL << TSB_TAG_INVALID_BIT); |
3742 |
+ } |
3743 |
++#ifndef CONFIG_DEBUG_PAGEALLOC |
3744 |
++ for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) { |
3745 |
++ struct tsb *ent = &swapper_4m_tsb[i]; |
3746 |
+ |
3747 |
+- return start; |
3748 |
+-} |
3749 |
+- |
3750 |
+-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) |
3751 |
+-{ |
3752 |
+- unsigned long smallest_size, smallest_mask; |
3753 |
+- unsigned long s; |
3754 |
+- |
3755 |
+- smallest_size = (1UL << kpte_shift_min); |
3756 |
+- smallest_mask = (smallest_size - 1UL); |
3757 |
+- |
3758 |
+- while (start < end) { |
3759 |
+- unsigned long orig_start = start; |
3760 |
+- |
3761 |
+- for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) { |
3762 |
+- start = kpte_mark_using_shift(start, end, s); |
3763 |
+- |
3764 |
+- if (start != orig_start) |
3765 |
+- break; |
3766 |
+- } |
3767 |
+- |
3768 |
+- if (start == orig_start) |
3769 |
+- start = (start + smallest_size) & ~smallest_mask; |
3770 |
++ ent->tag = (1UL << TSB_TAG_INVALID_BIT); |
3771 |
+ } |
3772 |
++#endif |
3773 |
+ } |
3774 |
+ |
3775 |
+-static void __init init_kpte_bitmap(void) |
3776 |
+-{ |
3777 |
+- unsigned long i; |
3778 |
+- |
3779 |
+- for (i = 0; i < pall_ents; i++) { |
3780 |
+- unsigned long phys_start, phys_end; |
3781 |
+- |
3782 |
+- phys_start = pall[i].phys_addr; |
3783 |
+- phys_end = phys_start + pall[i].reg_size; |
3784 |
+- |
3785 |
+- mark_kpte_bitmap(phys_start, phys_end); |
3786 |
+- } |
3787 |
+-} |
3788 |
++extern unsigned int kvmap_linear_patch[1]; |
3789 |
+ |
3790 |
+ static void __init kernel_physical_mapping_init(void) |
3791 |
+ { |
3792 |
+-#ifdef CONFIG_DEBUG_PAGEALLOC |
3793 |
+ unsigned long i, mem_alloced = 0UL; |
3794 |
++ bool use_huge = true; |
3795 |
+ |
3796 |
++#ifdef CONFIG_DEBUG_PAGEALLOC |
3797 |
++ use_huge = false; |
3798 |
++#endif |
3799 |
+ for (i = 0; i < pall_ents; i++) { |
3800 |
+ unsigned long phys_start, phys_end; |
3801 |
+ |
3802 |
+@@ -1523,7 +1606,7 @@ static void __init kernel_physical_mapping_init(void) |
3803 |
+ phys_end = phys_start + pall[i].reg_size; |
3804 |
+ |
3805 |
+ mem_alloced += kernel_map_range(phys_start, phys_end, |
3806 |
+- PAGE_KERNEL); |
3807 |
++ PAGE_KERNEL, use_huge); |
3808 |
+ } |
3809 |
+ |
3810 |
+ printk("Allocated %ld bytes for kernel page tables.\n", |
3811 |
+@@ -1532,8 +1615,9 @@ static void __init kernel_physical_mapping_init(void) |
3812 |
+ kvmap_linear_patch[0] = 0x01000000; /* nop */ |
3813 |
+ flushi(&kvmap_linear_patch[0]); |
3814 |
+ |
3815 |
++ flush_all_kernel_tsbs(); |
3816 |
++ |
3817 |
+ __flush_tlb_all(); |
3818 |
+-#endif |
3819 |
+ } |
3820 |
+ |
3821 |
+ #ifdef CONFIG_DEBUG_PAGEALLOC |
3822 |
+@@ -1543,7 +1627,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) |
3823 |
+ unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); |
3824 |
+ |
3825 |
+ kernel_map_range(phys_start, phys_end, |
3826 |
+- (enable ? PAGE_KERNEL : __pgprot(0))); |
3827 |
++ (enable ? PAGE_KERNEL : __pgprot(0)), false); |
3828 |
+ |
3829 |
+ flush_tsb_kernel_range(PAGE_OFFSET + phys_start, |
3830 |
+ PAGE_OFFSET + phys_end); |
3831 |
+@@ -1571,76 +1655,56 @@ unsigned long __init find_ecache_flush_span(unsigned long size) |
3832 |
+ unsigned long PAGE_OFFSET; |
3833 |
+ EXPORT_SYMBOL(PAGE_OFFSET); |
3834 |
+ |
3835 |
+-static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits) |
3836 |
+-{ |
3837 |
+- unsigned long final_shift; |
3838 |
+- unsigned int val = *insn; |
3839 |
+- unsigned int cnt; |
3840 |
+- |
3841 |
+- /* We are patching in ilog2(max_supported_phys_address), and |
3842 |
+- * we are doing so in a manner similar to a relocation addend. |
3843 |
+- * That is, we are adding the shift value to whatever value |
3844 |
+- * is in the shift instruction count field already. |
3845 |
+- */ |
3846 |
+- cnt = (val & 0x3f); |
3847 |
+- val &= ~0x3f; |
3848 |
+- |
3849 |
+- /* If we are trying to shift >= 64 bits, clear the destination |
3850 |
+- * register. This can happen when phys_bits ends up being equal |
3851 |
+- * to MAX_PHYS_ADDRESS_BITS. |
3852 |
+- */ |
3853 |
+- final_shift = (cnt + (64 - phys_bits)); |
3854 |
+- if (final_shift >= 64) { |
3855 |
+- unsigned int rd = (val >> 25) & 0x1f; |
3856 |
+- |
3857 |
+- val = 0x80100000 | (rd << 25); |
3858 |
+- } else { |
3859 |
+- val |= final_shift; |
3860 |
+- } |
3861 |
+- *insn = val; |
3862 |
+- |
3863 |
+- __asm__ __volatile__("flush %0" |
3864 |
+- : /* no outputs */ |
3865 |
+- : "r" (insn)); |
3866 |
+-} |
3867 |
+- |
3868 |
+-static void __init page_offset_shift_patch(unsigned long phys_bits) |
3869 |
+-{ |
3870 |
+- extern unsigned int __page_offset_shift_patch; |
3871 |
+- extern unsigned int __page_offset_shift_patch_end; |
3872 |
+- unsigned int *p; |
3873 |
+- |
3874 |
+- p = &__page_offset_shift_patch; |
3875 |
+- while (p < &__page_offset_shift_patch_end) { |
3876 |
+- unsigned int *insn = (unsigned int *)(unsigned long)*p; |
3877 |
++unsigned long VMALLOC_END = 0x0000010000000000UL; |
3878 |
++EXPORT_SYMBOL(VMALLOC_END); |
3879 |
+ |
3880 |
+- page_offset_shift_patch_one(insn, phys_bits); |
3881 |
+- |
3882 |
+- p++; |
3883 |
+- } |
3884 |
+-} |
3885 |
++unsigned long sparc64_va_hole_top = 0xfffff80000000000UL; |
3886 |
++unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL; |
3887 |
+ |
3888 |
+ static void __init setup_page_offset(void) |
3889 |
+ { |
3890 |
+- unsigned long max_phys_bits = 40; |
3891 |
+- |
3892 |
+ if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
3893 |
++ /* Cheetah/Panther support a full 64-bit virtual |
3894 |
++ * address, so we can use all that our page tables |
3895 |
++ * support. |
3896 |
++ */ |
3897 |
++ sparc64_va_hole_top = 0xfff0000000000000UL; |
3898 |
++ sparc64_va_hole_bottom = 0x0010000000000000UL; |
3899 |
++ |
3900 |
+ max_phys_bits = 42; |
3901 |
+ } else if (tlb_type == hypervisor) { |
3902 |
+ switch (sun4v_chip_type) { |
3903 |
+ case SUN4V_CHIP_NIAGARA1: |
3904 |
+ case SUN4V_CHIP_NIAGARA2: |
3905 |
++ /* T1 and T2 support 48-bit virtual addresses. */ |
3906 |
++ sparc64_va_hole_top = 0xffff800000000000UL; |
3907 |
++ sparc64_va_hole_bottom = 0x0000800000000000UL; |
3908 |
++ |
3909 |
+ max_phys_bits = 39; |
3910 |
+ break; |
3911 |
+ case SUN4V_CHIP_NIAGARA3: |
3912 |
++ /* T3 supports 48-bit virtual addresses. */ |
3913 |
++ sparc64_va_hole_top = 0xffff800000000000UL; |
3914 |
++ sparc64_va_hole_bottom = 0x0000800000000000UL; |
3915 |
++ |
3916 |
+ max_phys_bits = 43; |
3917 |
+ break; |
3918 |
+ case SUN4V_CHIP_NIAGARA4: |
3919 |
+ case SUN4V_CHIP_NIAGARA5: |
3920 |
+ case SUN4V_CHIP_SPARC64X: |
3921 |
+- default: |
3922 |
++ case SUN4V_CHIP_SPARC_M6: |
3923 |
++ /* T4 and later support 52-bit virtual addresses. */ |
3924 |
++ sparc64_va_hole_top = 0xfff8000000000000UL; |
3925 |
++ sparc64_va_hole_bottom = 0x0008000000000000UL; |
3926 |
+ max_phys_bits = 47; |
3927 |
+ break; |
3928 |
++ case SUN4V_CHIP_SPARC_M7: |
3929 |
++ default: |
3930 |
++ /* M7 and later support 52-bit virtual addresses. */ |
3931 |
++ sparc64_va_hole_top = 0xfff8000000000000UL; |
3932 |
++ sparc64_va_hole_bottom = 0x0008000000000000UL; |
3933 |
++ max_phys_bits = 49; |
3934 |
++ break; |
3935 |
+ } |
3936 |
+ } |
3937 |
+ |
3938 |
+@@ -1650,12 +1714,16 @@ static void __init setup_page_offset(void) |
3939 |
+ prom_halt(); |
3940 |
+ } |
3941 |
+ |
3942 |
+- PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits); |
3943 |
++ PAGE_OFFSET = sparc64_va_hole_top; |
3944 |
++ VMALLOC_END = ((sparc64_va_hole_bottom >> 1) + |
3945 |
++ (sparc64_va_hole_bottom >> 2)); |
3946 |
+ |
3947 |
+- pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", |
3948 |
++ pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", |
3949 |
+ PAGE_OFFSET, max_phys_bits); |
3950 |
+- |
3951 |
+- page_offset_shift_patch(max_phys_bits); |
3952 |
++ pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", |
3953 |
++ VMALLOC_START, VMALLOC_END); |
3954 |
++ pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", |
3955 |
++ VMEMMAP_BASE, VMEMMAP_BASE << 1); |
3956 |
+ } |
3957 |
+ |
3958 |
+ static void __init tsb_phys_patch(void) |
3959 |
+@@ -1700,21 +1768,42 @@ static void __init tsb_phys_patch(void) |
3960 |
+ #define NUM_KTSB_DESCR 1 |
3961 |
+ #endif |
3962 |
+ static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; |
3963 |
+-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; |
3964 |
++ |
3965 |
++/* The swapper TSBs are loaded with a base sequence of: |
3966 |
++ * |
3967 |
++ * sethi %uhi(SYMBOL), REG1 |
3968 |
++ * sethi %hi(SYMBOL), REG2 |
3969 |
++ * or REG1, %ulo(SYMBOL), REG1 |
3970 |
++ * or REG2, %lo(SYMBOL), REG2 |
3971 |
++ * sllx REG1, 32, REG1 |
3972 |
++ * or REG1, REG2, REG1 |
3973 |
++ * |
3974 |
++ * When we use physical addressing for the TSB accesses, we patch the |
3975 |
++ * first four instructions in the above sequence. |
3976 |
++ */ |
3977 |
+ |
3978 |
+ static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) |
3979 |
+ { |
3980 |
+- pa >>= KTSB_PHYS_SHIFT; |
3981 |
++ unsigned long high_bits, low_bits; |
3982 |
++ |
3983 |
++ high_bits = (pa >> 32) & 0xffffffff; |
3984 |
++ low_bits = (pa >> 0) & 0xffffffff; |
3985 |
+ |
3986 |
+ while (start < end) { |
3987 |
+ unsigned int *ia = (unsigned int *)(unsigned long)*start; |
3988 |
+ |
3989 |
+- ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10); |
3990 |
++ ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10); |
3991 |
+ __asm__ __volatile__("flush %0" : : "r" (ia)); |
3992 |
+ |
3993 |
+- ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff); |
3994 |
++ ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10); |
3995 |
+ __asm__ __volatile__("flush %0" : : "r" (ia + 1)); |
3996 |
+ |
3997 |
++ ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff); |
3998 |
++ __asm__ __volatile__("flush %0" : : "r" (ia + 2)); |
3999 |
++ |
4000 |
++ ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff); |
4001 |
++ __asm__ __volatile__("flush %0" : : "r" (ia + 3)); |
4002 |
++ |
4003 |
+ start++; |
4004 |
+ } |
4005 |
+ } |
4006 |
+@@ -1853,7 +1942,6 @@ static void __init sun4v_linear_pte_xor_finalize(void) |
4007 |
+ /* paging_init() sets up the page tables */ |
4008 |
+ |
4009 |
+ static unsigned long last_valid_pfn; |
4010 |
+-pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
4011 |
+ |
4012 |
+ static void sun4u_pgprot_init(void); |
4013 |
+ static void sun4v_pgprot_init(void); |
4014 |
+@@ -1956,16 +2044,10 @@ void __init paging_init(void) |
4015 |
+ */ |
4016 |
+ init_mm.pgd += ((shift) / (sizeof(pgd_t))); |
4017 |
+ |
4018 |
+- memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); |
4019 |
++ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); |
4020 |
+ |
4021 |
+- /* Now can init the kernel/bad page tables. */ |
4022 |
+- pud_set(pud_offset(&swapper_pg_dir[0], 0), |
4023 |
+- swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
4024 |
+- |
4025 |
+ inherit_prom_mappings(); |
4026 |
+ |
4027 |
+- init_kpte_bitmap(); |
4028 |
+- |
4029 |
+ /* Ok, we can use our TLB miss and window trap handlers safely. */ |
4030 |
+ setup_tba(); |
4031 |
+ |
4032 |
+@@ -2072,70 +2154,6 @@ int page_in_phys_avail(unsigned long paddr) |
4033 |
+ return 0; |
4034 |
+ } |
4035 |
+ |
4036 |
+-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; |
4037 |
+-static int pavail_rescan_ents __initdata; |
4038 |
+- |
4039 |
+-/* Certain OBP calls, such as fetching "available" properties, can |
4040 |
+- * claim physical memory. So, along with initializing the valid |
4041 |
+- * address bitmap, what we do here is refetch the physical available |
4042 |
+- * memory list again, and make sure it provides at least as much |
4043 |
+- * memory as 'pavail' does. |
4044 |
+- */ |
4045 |
+-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) |
4046 |
+-{ |
4047 |
+- int i; |
4048 |
+- |
4049 |
+- read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); |
4050 |
+- |
4051 |
+- for (i = 0; i < pavail_ents; i++) { |
4052 |
+- unsigned long old_start, old_end; |
4053 |
+- |
4054 |
+- old_start = pavail[i].phys_addr; |
4055 |
+- old_end = old_start + pavail[i].reg_size; |
4056 |
+- while (old_start < old_end) { |
4057 |
+- int n; |
4058 |
+- |
4059 |
+- for (n = 0; n < pavail_rescan_ents; n++) { |
4060 |
+- unsigned long new_start, new_end; |
4061 |
+- |
4062 |
+- new_start = pavail_rescan[n].phys_addr; |
4063 |
+- new_end = new_start + |
4064 |
+- pavail_rescan[n].reg_size; |
4065 |
+- |
4066 |
+- if (new_start <= old_start && |
4067 |
+- new_end >= (old_start + PAGE_SIZE)) { |
4068 |
+- set_bit(old_start >> ILOG2_4MB, bitmap); |
4069 |
+- goto do_next_page; |
4070 |
+- } |
4071 |
+- } |
4072 |
+- |
4073 |
+- prom_printf("mem_init: Lost memory in pavail\n"); |
4074 |
+- prom_printf("mem_init: OLD start[%lx] size[%lx]\n", |
4075 |
+- pavail[i].phys_addr, |
4076 |
+- pavail[i].reg_size); |
4077 |
+- prom_printf("mem_init: NEW start[%lx] size[%lx]\n", |
4078 |
+- pavail_rescan[i].phys_addr, |
4079 |
+- pavail_rescan[i].reg_size); |
4080 |
+- prom_printf("mem_init: Cannot continue, aborting.\n"); |
4081 |
+- prom_halt(); |
4082 |
+- |
4083 |
+- do_next_page: |
4084 |
+- old_start += PAGE_SIZE; |
4085 |
+- } |
4086 |
+- } |
4087 |
+-} |
4088 |
+- |
4089 |
+-static void __init patch_tlb_miss_handler_bitmap(void) |
4090 |
+-{ |
4091 |
+- extern unsigned int valid_addr_bitmap_insn[]; |
4092 |
+- extern unsigned int valid_addr_bitmap_patch[]; |
4093 |
+- |
4094 |
+- valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1]; |
4095 |
+- mb(); |
4096 |
+- valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0]; |
4097 |
+- flushi(&valid_addr_bitmap_insn[0]); |
4098 |
+-} |
4099 |
+- |
4100 |
+ static void __init register_page_bootmem_info(void) |
4101 |
+ { |
4102 |
+ #ifdef CONFIG_NEED_MULTIPLE_NODES |
4103 |
+@@ -2148,18 +2166,6 @@ static void __init register_page_bootmem_info(void) |
4104 |
+ } |
4105 |
+ void __init mem_init(void) |
4106 |
+ { |
4107 |
+- unsigned long addr, last; |
4108 |
+- |
4109 |
+- addr = PAGE_OFFSET + kern_base; |
4110 |
+- last = PAGE_ALIGN(kern_size) + addr; |
4111 |
+- while (addr < last) { |
4112 |
+- set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap); |
4113 |
+- addr += PAGE_SIZE; |
4114 |
+- } |
4115 |
+- |
4116 |
+- setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap); |
4117 |
+- patch_tlb_miss_handler_bitmap(); |
4118 |
+- |
4119 |
+ high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
4120 |
+ |
4121 |
+ register_page_bootmem_info(); |
4122 |
+@@ -2249,18 +2255,9 @@ unsigned long _PAGE_CACHE __read_mostly; |
4123 |
+ EXPORT_SYMBOL(_PAGE_CACHE); |
4124 |
+ |
4125 |
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP |
4126 |
+-unsigned long vmemmap_table[VMEMMAP_SIZE]; |
4127 |
+- |
4128 |
+-static long __meminitdata addr_start, addr_end; |
4129 |
+-static int __meminitdata node_start; |
4130 |
+- |
4131 |
+ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, |
4132 |
+ int node) |
4133 |
+ { |
4134 |
+- unsigned long phys_start = (vstart - VMEMMAP_BASE); |
4135 |
+- unsigned long phys_end = (vend - VMEMMAP_BASE); |
4136 |
+- unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; |
4137 |
+- unsigned long end = VMEMMAP_ALIGN(phys_end); |
4138 |
+ unsigned long pte_base; |
4139 |
+ |
4140 |
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | |
4141 |
+@@ -2271,47 +2268,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, |
4142 |
+ _PAGE_CP_4V | _PAGE_CV_4V | |
4143 |
+ _PAGE_P_4V | _PAGE_W_4V); |
4144 |
+ |
4145 |
+- for (; addr < end; addr += VMEMMAP_CHUNK) { |
4146 |
+- unsigned long *vmem_pp = |
4147 |
+- vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); |
4148 |
+- void *block; |
4149 |
++ pte_base |= _PAGE_PMD_HUGE; |
4150 |
+ |
4151 |
+- if (!(*vmem_pp & _PAGE_VALID)) { |
4152 |
+- block = vmemmap_alloc_block(1UL << ILOG2_4MB, node); |
4153 |
+- if (!block) |
4154 |
++ vstart = vstart & PMD_MASK; |
4155 |
++ vend = ALIGN(vend, PMD_SIZE); |
4156 |
++ for (; vstart < vend; vstart += PMD_SIZE) { |
4157 |
++ pgd_t *pgd = pgd_offset_k(vstart); |
4158 |
++ unsigned long pte; |
4159 |
++ pud_t *pud; |
4160 |
++ pmd_t *pmd; |
4161 |
++ |
4162 |
++ if (pgd_none(*pgd)) { |
4163 |
++ pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node); |
4164 |
++ |
4165 |
++ if (!new) |
4166 |
+ return -ENOMEM; |
4167 |
++ pgd_populate(&init_mm, pgd, new); |
4168 |
++ } |
4169 |
+ |
4170 |
+- *vmem_pp = pte_base | __pa(block); |
4171 |
++ pud = pud_offset(pgd, vstart); |
4172 |
++ if (pud_none(*pud)) { |
4173 |
++ pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node); |
4174 |
+ |
4175 |
+- /* check to see if we have contiguous blocks */ |
4176 |
+- if (addr_end != addr || node_start != node) { |
4177 |
+- if (addr_start) |
4178 |
+- printk(KERN_DEBUG " [%lx-%lx] on node %d\n", |
4179 |
+- addr_start, addr_end-1, node_start); |
4180 |
+- addr_start = addr; |
4181 |
+- node_start = node; |
4182 |
+- } |
4183 |
+- addr_end = addr + VMEMMAP_CHUNK; |
4184 |
++ if (!new) |
4185 |
++ return -ENOMEM; |
4186 |
++ pud_populate(&init_mm, pud, new); |
4187 |
+ } |
4188 |
+- } |
4189 |
+- return 0; |
4190 |
+-} |
4191 |
+ |
4192 |
+-void __meminit vmemmap_populate_print_last(void) |
4193 |
+-{ |
4194 |
+- if (addr_start) { |
4195 |
+- printk(KERN_DEBUG " [%lx-%lx] on node %d\n", |
4196 |
+- addr_start, addr_end-1, node_start); |
4197 |
+- addr_start = 0; |
4198 |
+- addr_end = 0; |
4199 |
+- node_start = 0; |
4200 |
++ pmd = pmd_offset(pud, vstart); |
4201 |
++ |
4202 |
++ pte = pmd_val(*pmd); |
4203 |
++ if (!(pte & _PAGE_VALID)) { |
4204 |
++ void *block = vmemmap_alloc_block(PMD_SIZE, node); |
4205 |
++ |
4206 |
++ if (!block) |
4207 |
++ return -ENOMEM; |
4208 |
++ |
4209 |
++ pmd_val(*pmd) = pte_base | __pa(block); |
4210 |
++ } |
4211 |
+ } |
4212 |
++ |
4213 |
++ return 0; |
4214 |
+ } |
4215 |
+ |
4216 |
+ void vmemmap_free(unsigned long start, unsigned long end) |
4217 |
+ { |
4218 |
+ } |
4219 |
+- |
4220 |
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
4221 |
+ |
4222 |
+ static void prot_init_common(unsigned long page_none, |
4223 |
+@@ -2787,8 +2789,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
4224 |
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); |
4225 |
+ } |
4226 |
+ if (end > HI_OBP_ADDRESS) { |
4227 |
+- flush_tsb_kernel_range(end, HI_OBP_ADDRESS); |
4228 |
+- do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS); |
4229 |
++ flush_tsb_kernel_range(HI_OBP_ADDRESS, end); |
4230 |
++ do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); |
4231 |
+ } |
4232 |
+ } else { |
4233 |
+ flush_tsb_kernel_range(start, end); |
4234 |
+diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h |
4235 |
+index 0668b364f44d..a4c09603b05c 100644 |
4236 |
+--- a/arch/sparc/mm/init_64.h |
4237 |
++++ b/arch/sparc/mm/init_64.h |
4238 |
+@@ -8,15 +8,8 @@ |
4239 |
+ */ |
4240 |
+ |
4241 |
+ #define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS) |
4242 |
+-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) |
4243 |
+-#define KPTE_BITMAP_BYTES \ |
4244 |
+- ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4) |
4245 |
+-#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL) |
4246 |
+-#define VALID_ADDR_BITMAP_BYTES \ |
4247 |
+- ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8) |
4248 |
+ |
4249 |
+ extern unsigned long kern_linear_pte_xor[4]; |
4250 |
+-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; |
4251 |
+ extern unsigned int sparc64_highest_unlocked_tlb_ent; |
4252 |
+ extern unsigned long sparc64_kern_pri_context; |
4253 |
+ extern unsigned long sparc64_kern_pri_nuc_bits; |
4254 |
+@@ -38,15 +31,4 @@ extern unsigned long kern_locked_tte_data; |
4255 |
+ |
4256 |
+ void prom_world(int enter); |
4257 |
+ |
4258 |
+-#ifdef CONFIG_SPARSEMEM_VMEMMAP |
4259 |
+-#define VMEMMAP_CHUNK_SHIFT 22 |
4260 |
+-#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT) |
4261 |
+-#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL) |
4262 |
+-#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK) |
4263 |
+- |
4264 |
+-#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \ |
4265 |
+- sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT) |
4266 |
+-extern unsigned long vmemmap_table[VMEMMAP_SIZE]; |
4267 |
+-#endif |
4268 |
+- |
4269 |
+ #endif /* _SPARC64_MM_INIT_H */ |
4270 |
+diff --git a/arch/sparc/power/hibernate_asm.S b/arch/sparc/power/hibernate_asm.S |
4271 |
+index 79942166df84..d7d9017dcb15 100644 |
4272 |
+--- a/arch/sparc/power/hibernate_asm.S |
4273 |
++++ b/arch/sparc/power/hibernate_asm.S |
4274 |
+@@ -54,8 +54,8 @@ ENTRY(swsusp_arch_resume) |
4275 |
+ nop |
4276 |
+ |
4277 |
+ /* Write PAGE_OFFSET to %g7 */ |
4278 |
+- sethi %uhi(PAGE_OFFSET), %g7 |
4279 |
+- sllx %g7, 32, %g7 |
4280 |
++ sethi %hi(PAGE_OFFSET), %g7 |
4281 |
++ ldx [%g7 + %lo(PAGE_OFFSET)], %g7 |
4282 |
+
+	setuw		(PAGE_SIZE-8), %g3
+
+diff --git a/arch/sparc/prom/bootstr_64.c b/arch/sparc/prom/bootstr_64.c
+index ab9ccc63b388..7149e77714a4 100644
+--- a/arch/sparc/prom/bootstr_64.c
++++ b/arch/sparc/prom/bootstr_64.c
+@@ -14,7 +14,10 @@
+  * the .bss section or it will break things.
+  */
+
+-#define BARG_LEN  256
++/* We limit BARG_LEN to 1024 because this is the size of the
++ * 'barg_out' command line buffer in the SILO bootloader.
++ */
++#define BARG_LEN 1024
+ struct {
+ 	int bootstr_len;
+ 	int bootstr_valid;
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b7d429..8050f381f518 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ 	.text
+ 	.globl	prom_cif_direct
+ prom_cif_direct:
++	save	%sp, -192, %sp
+ 	sethi	%hi(p1275buf), %o1
+ 	or	%o1, %lo(p1275buf), %o1
+-	ldx	[%o1 + 0x0010], %o2	! prom_cif_stack
+-	save	%o2, -192, %sp
+-	ldx	[%i1 + 0x0008], %l2	! prom_cif_handler
++	ldx	[%o1 + 0x0008], %l2	! prom_cif_handler
+ 	mov	%g4, %l0
+ 	mov	%g5, %l1
+ 	mov	%g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index d95db755828f..110b0d78b864 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+  * It gets passed the pointer to the PROM vector.
+  */
+
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ 	phandle node;
+
+-	prom_cif_init(cif_handler, cif_stack);
++	prom_cif_init(cif_handler);
+
+ 	prom_chosen_node = prom_finddevice(prom_chosen_path);
+ 	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index e58b81726319..545d8bb79b65 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -9,6 +9,7 @@
+ #include <linux/smp.h>
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
++#include <linux/irqflags.h>
+
+ #include <asm/openprom.h>
+ #include <asm/oplib.h>
+@@ -19,7 +20,6 @@
+ struct {
+ 	long prom_callback;	/* 0x00 */
+ 	void (*prom_cif_handler)(long *);	/* 0x08 */
+-	unsigned long prom_cif_stack;	/* 0x10 */
+ } p1275buf;
+
+ extern void prom_world(int);
+@@ -36,8 +36,8 @@ void p1275_cmd_direct(unsigned long *args)
+ {
+ 	unsigned long flags;
+
+-	raw_local_save_flags(flags);
+-	raw_local_irq_restore((unsigned long)PIL_NMI);
++	local_save_flags(flags);
++	local_irq_restore((unsigned long)PIL_NMI);
+ 	raw_spin_lock(&prom_entry_lock);
+
+ 	prom_world(1);
+@@ -45,11 +45,10 @@ void p1275_cmd_direct(unsigned long *args)
+ 	prom_world(0);
+
+ 	raw_spin_unlock(&prom_entry_lock);
+-	raw_local_irq_restore(flags);
++	local_irq_restore(flags);
+ }
+
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ 	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+-	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 7c492ed9087b..92d3486a6196 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -481,6 +481,7 @@ struct kvm_vcpu_arch {
+ 	u64 mmio_gva;
+ 	unsigned access;
+ 	gfn_t mmio_gfn;
++	u64 mmio_gen;
+
+ 	struct kvm_pmu pmu;
+
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 74e804ddc5c7..50ce7519ccef 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -144,6 +144,21 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ 			setup_clear_cpu_cap(X86_FEATURE_ERMS);
+ 		}
+ 	}
++
++	/*
++	 * Intel Quark Core DevMan_001.pdf section 6.4.11
++	 * "The operating system also is required to invalidate (i.e., flush)
++	 *  the TLB when any changes are made to any of the page table entries.
++	 *  The operating system must reload CR3 to cause the TLB to be flushed"
++	 *
++	 * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
++	 * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
++	 * to be modified
++	 */
++	if (c->x86 == 5 && c->x86_model == 9) {
++		pr_info("Disabling PGE capability bit\n");
++		setup_clear_cpu_cap(X86_FEATURE_PGE);
++	}
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 931467881da7..1cd2a5fbde07 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -199,16 +199,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+
+ /*
+- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
+- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
+- * number.
++ * the low bit of the generation number is always presumed to be zero.
++ * This disables mmio caching during memslot updates.  The concept is
++ * similar to a seqcount but instead of retrying the access we just punt
++ * and ignore the cache.
++ *
++ * spte bits 3-11 are used as bits 1-9 of the generation number,
++ * the bits 52-61 are used as bits 10-19 of the generation number.
+  */
+-#define MMIO_SPTE_GEN_LOW_SHIFT		3
++#define MMIO_SPTE_GEN_LOW_SHIFT		2
+ #define MMIO_SPTE_GEN_HIGH_SHIFT	52
+
+-#define MMIO_GEN_SHIFT			19
+-#define MMIO_GEN_LOW_SHIFT		9
+-#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
++#define MMIO_GEN_SHIFT			20
++#define MMIO_GEN_LOW_SHIFT		10
++#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)
+ #define MMIO_GEN_MASK			((1 << MMIO_GEN_SHIFT) - 1)
+ #define MMIO_MAX_GEN			((1 << MMIO_GEN_SHIFT) - 1)
+
+@@ -236,12 +240,7 @@ static unsigned int get_mmio_spte_generation(u64 spte)
+
+ static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
+ {
+-	/*
+-	 * Init kvm generation close to MMIO_MAX_GEN to easily test the
+-	 * code of handling generation number wrap-around.
+-	 */
+-	return (kvm_memslots(kvm)->generation +
+-		      MMIO_MAX_GEN - 150) & MMIO_GEN_MASK;
++	return kvm_memslots(kvm)->generation & MMIO_GEN_MASK;
+ }
+
+ static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
+@@ -3163,7 +3162,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+ 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ 		return;
+
+-	vcpu_clear_mmio_info(vcpu, ~0ul);
++	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+ 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+ 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+ 		hpa_t root = vcpu->arch.mmu.root_hpa;
+@@ -4433,7 +4432,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+ 	 * The very rare case: if the generation-number is round,
+ 	 * zap all shadow pages.
+ 	 */
+-	if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
++	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+ 		printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ 		kvm_mmu_invalidate_zap_all_pages(kvm);
+ 	}
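The mmu.c hunk above shifts the generation's low bits down so that bit 0 of the generation is never stored in an spte; as the new comment says, an in-progress memslot update (odd generation) then defeats the cache without any explicit flush. Below is a minimal userspace sketch of that bit packing, using the post-patch constants; the helper names are illustrative, not the kernel's:

    #include <assert.h>
    #include <stdint.h>

    #define MMIO_SPTE_GEN_LOW_SHIFT   2
    #define MMIO_SPTE_GEN_HIGH_SHIFT  52
    #define MMIO_GEN_LOW_SHIFT        10
    #define MMIO_GEN_LOW_MASK         ((1 << MMIO_GEN_LOW_SHIFT) - 2)
    #define MMIO_GEN_MASK             ((1 << 20) - 1)

    /* Scatter generation bits 1-9 into spte bits 3-11 and bits 10-19
     * into spte bits 52-61; bit 0 of the generation is never stored. */
    static uint64_t pack_gen(unsigned int gen)
    {
        uint64_t spte = 0;
        spte |= (uint64_t)(gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
        spte |= ((uint64_t)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
        return spte;
    }

    static unsigned int unpack_gen(uint64_t spte)
    {
        unsigned int gen;
        gen  = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
        gen |= (unsigned int)(spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
        return gen;
    }

    int main(void)
    {
        unsigned int g = 0x2468c & MMIO_GEN_MASK;   /* even generation */
        assert(unpack_gen(pack_gen(g)) == g);       /* round-trips */
        assert(unpack_gen(pack_gen(g | 1)) == g);   /* bit 0 is dropped */
        return 0;
    }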
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index bfe11cf124a1..6a118fa378b5 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -453,6 +453,7 @@ struct vcpu_vmx {
+ 		int           gs_ldt_reload_needed;
+ 		int           fs_reload_needed;
+ 		u64           msr_host_bndcfgs;
++		unsigned long vmcs_host_cr4;    /* May not match real cr4 */
+ 	} host_state;
+ 	struct {
+ 		int vm86_active;
+@@ -4235,11 +4236,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ 	u32 low32, high32;
+ 	unsigned long tmpl;
+ 	struct desc_ptr dt;
++	unsigned long cr4;
+
+ 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
+-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
+ 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+
++	/* Save the most likely value for this task's CR4 in the VMCS. */
++	cr4 = read_cr4();
++	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
++	vmx->host_state.vmcs_host_cr4 = cr4;
++
+ 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+ 	/*
+@@ -7376,7 +7382,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	unsigned long debugctlmsr;
++	unsigned long debugctlmsr, cr4;
+
+ 	/* Record the guest's net vcpu time for enforced NMI injections. */
+ 	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
+@@ -7397,6 +7403,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
++	cr4 = read_cr4();
++	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++		vmcs_writel(HOST_CR4, cr4);
++		vmx->host_state.vmcs_host_cr4 = cr4;
++	}
++
+ 	/* When single-stepping over STI and MOV SS, we must clear the
+ 	 * corresponding interruptibility bits in the guest state. Otherwise
+ 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
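The vmx.c hunks above keep a shadow of the CR4 value stored in the VMCS, so each guest entry can detect a host CR4 change and rewrite HOST_CR4 only when it has actually drifted. A generic userspace sketch of that compare-then-write pattern follows; the "register" and write function are stand-ins, not kernel APIs:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t shadow_cr4;     /* last value pushed to "hardware" */
    static unsigned long writes;    /* counts the expensive writes */

    static void expensive_write(uint64_t val)   /* stand-in for vmcs_writel() */
    {
        writes++;
        shadow_cr4 = val;
    }

    /* Called on every entry: only touch hardware when the value changed. */
    static void sync_cr4(uint64_t live_cr4)
    {
        if (live_cr4 != shadow_cr4)
            expensive_write(live_cr4);
    }

    int main(void)
    {
        sync_cr4(0x001406e0);   /* first entry: writes */
        sync_cr4(0x001406e0);   /* hot path: skipped */
        sync_cr4(0x001506e0);   /* a host feature bit toggled: writes again */
        printf("expensive writes: %lu\n", writes);   /* prints 2 */
        return 0;
    }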
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 306a1b77581f..985fb2c006fa 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -88,15 +88,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ 	vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ 	vcpu->arch.access = access;
+ 	vcpu->arch.mmio_gfn = gfn;
++	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++}
++
++static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
+ }
+
+ /*
+- * Clear the mmio cache info for the given gva,
+- * specially, if gva is ~0ul, we clear all mmio cache info.
++ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
++ * clear all mmio cache info.
+  */
++#define MMIO_GVA_ANY (~(gva_t)0)
++
+ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+-	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
++	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ 		return;
+
+ 	vcpu->arch.mmio_gva = 0;
+@@ -104,7 +112,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+
+ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+ {
+-	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
++	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
++	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ 		return true;
+
+ 	return false;
+@@ -112,7 +121,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+
+ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+ {
+-	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
++	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
++	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ 		return true;
+
+ 	return false;
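The x86.h hunks above stamp the per-vcpu MMIO fast-path cache with the memslot generation at fill time, so a single counter bump invalidates every entry with no explicit flush. A compact sketch of that idiom; the types and names here are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t memslots_generation;    /* bumped on every memslot update */

    struct mmio_cache {
        uint64_t gen;                       /* generation at fill time */
        uint64_t gva, gfn;
    };

    static void cache_fill(struct mmio_cache *c, uint64_t gva, uint64_t gfn)
    {
        c->gva = gva;
        c->gfn = gfn;
        c->gen = memslots_generation;       /* stamp the entry */
    }

    static bool cache_match(const struct mmio_cache *c, uint64_t gva)
    {
        /* A stale generation makes every entry miss automatically. */
        return c->gen == memslots_generation && c->gva == gva;
    }

    int main(void)
    {
        struct mmio_cache c = { 0 };
        cache_fill(&c, 0x1000, 42);
        memslots_generation++;              /* "memslot update" */
        return cache_match(&c, 0x1000);     /* 0: stale entry misses */
    }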
+diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
+index 3c562f5a60bb..e1bce26cd4f9 100644
+--- a/crypto/async_tx/async_xor.c
++++ b/crypto/async_tx/async_xor.c
+@@ -78,8 +78,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
+ 		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
+ 					      xor_src_cnt, unmap->len,
+ 					      dma_flags);
+-		src_list[0] = tmp;
+-
+
+ 		if (unlikely(!tx))
+ 			async_tx_quiesce(&submit->depend_tx);
+@@ -92,6 +90,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
+ 					      xor_src_cnt, unmap->len,
+ 					      dma_flags);
+ 		}
++		src_list[0] = tmp;
+
+ 		dma_set_unmap(tx, unmap);
+ 		async_tx_submit(chan, tx, submit);
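The async_xor hunk above is a save/restore ordering fix: src_list[0] is saved in tmp, temporarily clobbered for the prep call, and must be put back only after the retry path has also finished using the array. A distilled userspace sketch of the pitfall, with hypothetical names standing in for the dmaengine calls:

    #include <stdio.h>

    /* Stand-in for the DMA prep call; fails on the first attempt. */
    static void *prep_xor(void **src, int n)
    {
        static int calls;
        (void)n;
        return ++calls > 1 ? (void *)src : NULL;
    }

    static void do_xor(void **src, int n, void *dst)
    {
        void *tmp = src[0];             /* save the real first source */
        void *tx;

        src[0] = dst;                   /* engine expects dst in slot 0 */
        tx = prep_xor(src, n);
        /* Restoring src[0] here -- before the retry below -- is the bug
         * the patch fixes: the retry would see the wrong slot 0. */
        if (!tx)
            tx = prep_xor(src, n);      /* retry still needs dst there */

        src[0] = tmp;                   /* restore once all paths are done */
        printf("submitted: %p\n", tx);
    }

    int main(void)
    {
        void *pages[3] = { (void *)1, (void *)2, (void *)3 };
        do_xor(pages, 3, (void *)9);
        return 0;
    }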
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index bf424305f3dc..3d785ebb48d3 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -1105,6 +1105,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ 	if (!firmware_p)
+ 		return -EINVAL;
+
++	if (!name || name[0] == '\0')
++		return -EINVAL;
++
+ 	ret = _request_firmware_prepare(&fw, name, device);
+ 	if (ret <= 0) /* error or already assigned */
+ 		goto out;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 0c94b661c16f..5799a0b9e6cc 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -473,6 +473,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ {
+ 	struct rb_node *next;
+ 	struct regmap_range_node *range_node;
++	const char *devname = "dummy";
+
+ 	/* If we don't have the debugfs root yet, postpone init */
+ 	if (!regmap_debugfs_root) {
+@@ -491,12 +492,15 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ 	INIT_LIST_HEAD(&map->debugfs_off_cache);
+ 	mutex_init(&map->cache_lock);
+
++	if (map->dev)
++		devname = dev_name(map->dev);
++
+ 	if (name) {
+ 		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+-					      dev_name(map->dev), name);
++					      devname, name);
+ 		name = map->debugfs_name;
+ 	} else {
+-		name = dev_name(map->dev);
++		name = devname;
+ 	}
+
+ 	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 1cf427bc0d4a..3a785a4f4ff6 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1408,7 +1408,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
+ 	}
+
+ #ifdef LOG_DEVICE
+-	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ 		dev_info(map->dev, "%x <= %x\n", reg, val);
+ #endif
+
+@@ -1659,6 +1659,9 @@ out:
+ 	} else {
+ 		void *wval;
+
++		if (!val_count)
++			return -EINVAL;
++
+ 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ 		if (!wval) {
+ 			dev_err(map->dev, "Error in memory allocation\n");
+@@ -2058,7 +2061,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
+ 	ret = map->reg_read(context, reg, val);
+ 	if (ret == 0) {
+ #ifdef LOG_DEVICE
+-		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ 			dev_info(map->dev, "%x => %x\n", reg, *val);
+ #endif
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 292c38e8aa17..f0ea79064d4f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -330,6 +330,9 @@ static void btusb_intr_complete(struct urb *urb)
+ 			BT_ERR("%s corrupted event packet", hdev->name);
+ 			hdev->stat.err_rx++;
+ 		}
++	} else if (urb->status == -ENOENT) {
++		/* Avoid suspend failed when usb_kill_urb */
++		return;
+ 	}
+
+ 	if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
+@@ -418,6 +421,9 @@ static void btusb_bulk_complete(struct urb *urb)
+ 			BT_ERR("%s corrupted ACL packet", hdev->name);
+ 			hdev->stat.err_rx++;
+ 		}
++	} else if (urb->status == -ENOENT) {
++		/* Avoid suspend failed when usb_kill_urb */
++		return;
+ 	}
+
+ 	if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
+@@ -512,6 +518,9 @@ static void btusb_isoc_complete(struct urb *urb)
+ 				hdev->stat.err_rx++;
+ 			}
+ 		}
++	} else if (urb->status == -ENOENT) {
++		/* Avoid suspend failed when usb_kill_urb */
++		return;
+ 	}
+
+ 	if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index caacb422995d..a22838669b4e 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ 			break;
+
+ 		to_remove--;
+-		seq = (seq - 1) % 8;
++		seq = (seq - 1) & 0x07;
+ 	}
+
+ 	if (seq != h5->rx_ack)
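The hci_h5 change above swaps `% 8` for `& 0x07` when walking the 3-bit sequence window backwards. With an unsigned byte, `seq - 1` is promoted to a signed int, so at zero the modulo yields -1 rather than wrapping to 7; the mask version wraps correctly. A runnable demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t seq = 0;

        /* (seq - 1) promotes to int: 0 - 1 == -1, and -1 % 8 == -1 in C99,
         * which stored back into a u8 becomes 255 -- outside 0..7. */
        uint8_t bad  = (seq - 1) % 8;

        /* -1 & 0x07 == 7: the mask wraps as intended. */
        uint8_t good = (seq - 1) & 0x07;

        printf("modulo: %u  mask: %u\n", bad, good);   /* 255 vs 7 */
        return 0;
    }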
+diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
+index 3b83b7dd78c7..5cd62a709ac7 100644
+--- a/drivers/clk/qcom/gcc-ipq806x.c
++++ b/drivers/clk/qcom/gcc-ipq806x.c
+@@ -32,6 +32,33 @@
+ #include "clk-branch.h"
+ #include "reset.h"
+
++static struct clk_pll pll0 = {
++	.l_reg = 0x30c4,
++	.m_reg = 0x30c8,
++	.n_reg = 0x30cc,
++	.config_reg = 0x30d4,
++	.mode_reg = 0x30c0,
++	.status_reg = 0x30d8,
++	.status_bit = 16,
++	.clkr.hw.init = &(struct clk_init_data){
++		.name = "pll0",
++		.parent_names = (const char *[]){ "pxo" },
++		.num_parents = 1,
++		.ops = &clk_pll_ops,
++	},
++};
++
++static struct clk_regmap pll0_vote = {
++	.enable_reg = 0x34c0,
++	.enable_mask = BIT(0),
++	.hw.init = &(struct clk_init_data){
++		.name = "pll0_vote",
++		.parent_names = (const char *[]){ "pll0" },
++		.num_parents = 1,
++		.ops = &clk_pll_vote_ops,
++	},
++};
++
+ static struct clk_pll pll3 = {
+ 	.l_reg = 0x3164,
+ 	.m_reg = 0x3168,
+@@ -154,7 +181,7 @@ static const u8 gcc_pxo_pll8_pll0[] = {
+ static const char *gcc_pxo_pll8_pll0_map[] = {
+ 	"pxo",
+ 	"pll8_vote",
+-	"pll0",
++	"pll0_vote",
+ };
+
+ static struct freq_tbl clk_tbl_gsbi_uart[] = {
+@@ -2133,6 +2160,8 @@ static struct clk_branch usb_fs1_h_clk = {
+ };
+
+ static struct clk_regmap *gcc_ipq806x_clks[] = {
++	[PLL0] = &pll0.clkr,
++	[PLL0_VOTE] = &pll0_vote,
+ 	[PLL3] = &pll3.clkr,
+ 	[PLL8] = &pll8.clkr,
+ 	[PLL8_VOTE] = &pll8_vote,
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index d5149aacd2fe..026484ade10d 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2755,8 +2755,10 @@ probe_err3:
+ 		list_del(&pch->chan.device_node);
+
+ 		/* Flush the channel */
+-		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+-		pl330_free_chan_resources(&pch->chan);
++		if (pch->thread) {
++			pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
++			pl330_free_chan_resources(&pch->chan);
++		}
+ 	}
+ probe_err2:
+ 	pl330_del(pl330);
+@@ -2782,8 +2784,10 @@ static int pl330_remove(struct amba_device *adev)
+ 		list_del(&pch->chan.device_node);
+
+ 		/* Flush the channel */
+-		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+-		pl330_free_chan_resources(&pch->chan);
++		if (pch->thread) {
++			pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
++			pl330_free_chan_resources(&pch->chan);
++		}
+ 	}
+
+ 	pl330_del(pl330);
+diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
+index f4aec2e6ef56..7d3742edbaa2 100644
+--- a/drivers/edac/mpc85xx_edac.c
++++ b/drivers/edac/mpc85xx_edac.c
+@@ -633,7 +633,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
+ 	if (edac_op_state == EDAC_OPSTATE_INT) {
+ 		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ 		res = devm_request_irq(&op->dev, pdata->irq,
+-				       mpc85xx_l2_isr, 0,
++				       mpc85xx_l2_isr, IRQF_SHARED,
+ 				       "[EDAC] L2 err", edac_dev);
+ 		if (res < 0) {
+ 			printk(KERN_ERR
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index 8389e8109218..3cccff73b9b9 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -320,10 +320,7 @@ static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
+ 	int offset;
+ 	int i;
+
+-	if (size < hdata->f11.report_size)
+-		return 0;
+-
+-	if (!(irq & hdata->f11.irq_mask))
++	if (!(irq & hdata->f11.irq_mask) || size <= 0)
+ 		return 0;
+
+ 	offset = (hdata->max_fingers >> 2) + 1;
+@@ -332,9 +329,19 @@ static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
+ 		int fs_bit_position = (i & 0x3) << 1;
+ 		int finger_state = (data[fs_byte_position] >> fs_bit_position) &
+ 					0x03;
++		int position = offset + 5 * i;
++
++		if (position + 5 > size) {
++			/* partial report, go on with what we received */
++			printk_once(KERN_WARNING
++				"%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n",
++				 dev_driver_string(&hdev->dev),
++				 dev_name(&hdev->dev));
++			hid_dbg(hdev, "Incomplete finger report\n");
++			break;
++		}
+
+-		rmi_f11_process_touch(hdata, i, finger_state,
+-				&data[offset + 5 * i]);
++		rmi_f11_process_touch(hdata, i, finger_state, &data[position]);
+ 	}
+ 	input_mt_sync_frame(hdata->input);
+ 	input_sync(hdata->input);
+@@ -352,6 +359,11 @@ static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data,
+ 	if (!(irq & hdata->f30.irq_mask))
+ 		return 0;
+
++	if (size < (int)hdata->f30.report_size) {
++		hid_warn(hdev, "Click Button pressed, but the click data is missing\n");
++		return 0;
++	}
++
+ 	for (i = 0; i < hdata->gpio_led_count; i++) {
+ 		if (test_bit(i, &hdata->button_mask)) {
+ 			value = (data[i / 8] >> (i & 0x07)) & BIT(0);
+@@ -412,9 +424,29 @@ static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size)
+ 	return 1;
+ }
+
++static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size)
++{
++	int valid_size = size;
++	/*
++	 * On the Dell XPS 13 9333, the bus sometimes get confused and fills
++	 * the report with a sentinel value "ff". Synaptics told us that such
++	 * behavior does not comes from the touchpad itself, so we filter out
++	 * such reports here.
++	 */
++
++	while ((data[valid_size - 1] == 0xff) && valid_size > 0)
++		valid_size--;
++
++	return valid_size;
++}
++
+ static int rmi_raw_event(struct hid_device *hdev,
+ 		struct hid_report *report, u8 *data, int size)
+ {
++	size = rmi_check_sanity(hdev, data, size);
++	if (size < 2)
++		return 0;
++
+ 	switch (data[0]) {
+ 	case RMI_READ_DATA_REPORT_ID:
+ 		return rmi_read_data_event(hdev, data, size);
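rmi_check_sanity() above trims trailing 0xff sentinel bytes off a report before it is parsed. Below is a standalone sketch of the same trim; note this version tests the size before indexing, so an all-0xff buffer can never read data[-1]:

    #include <assert.h>
    #include <stdint.h>

    /* Trim trailing 0xff filler; returns the number of valid bytes. */
    static int trim_sentinel(const uint8_t *data, int size)
    {
        while (size > 0 && data[size - 1] == 0xff)
            size--;
        return size;
    }

    int main(void)
    {
        uint8_t report[] = { 0x0b, 0x12, 0x34, 0xff, 0xff, 0xff };
        uint8_t junk[]   = { 0xff, 0xff };

        assert(trim_sentinel(report, sizeof(report)) == 3);
        assert(trim_sentinel(junk, sizeof(junk)) == 0);
        return 0;
    }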
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index f0db7eca9023..129fd330dd27 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -23,13 +23,13 @@
+ #define WAC_CMD_ICON_BT_XFER	0x26
+ #define WAC_CMD_RETRIES		10
+
+-static int wacom_get_report(struct hid_device *hdev, u8 type, u8 id,
+-			    void *buf, size_t size, unsigned int retries)
++static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf,
++			    size_t size, unsigned int retries)
+ {
+ 	int retval;
+
+ 	do {
+-		retval = hid_hw_raw_request(hdev, id, buf, size, type,
++		retval = hid_hw_raw_request(hdev, buf[0], buf, size, type,
+ 				HID_REQ_GET_REPORT);
+ 	} while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
+
+@@ -106,12 +106,24 @@ static void wacom_feature_mapping(struct hid_device *hdev,
+ {
+ 	struct wacom *wacom = hid_get_drvdata(hdev);
+ 	struct wacom_features *features = &wacom->wacom_wac.features;
++	u8 *data;
++	int ret;
+
+ 	switch (usage->hid) {
+ 	case HID_DG_CONTACTMAX:
+ 		/* leave touch_max as is if predefined */
+-		if (!features->touch_max)
+-			features->touch_max = field->value[0];
++		if (!features->touch_max) {
++			/* read manually */
++			data = kzalloc(2, GFP_KERNEL);
++			if (!data)
++				break;
++			data[0] = field->report->id;
++			ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
++						data, 2, 0);
++			if (ret == 2)
++				features->touch_max = data[1];
++			kfree(data);
++		}
+ 		break;
+ 	}
+ }
+@@ -255,7 +267,7 @@ static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
+ 				 length, 1);
+ 		if (error >= 0)
+ 			error = wacom_get_report(hdev, HID_FEATURE_REPORT,
+-			                         report_id, rep_data, length, 1);
++			                         rep_data, length, 1);
+ 	} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
+
+ 	kfree(rep_data);
+@@ -1245,6 +1257,8 @@ static int wacom_probe(struct hid_device *hdev,
+ 	if (!id->driver_data)
+ 		return -EINVAL;
+
++	hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
++
+ 	wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
+ 	if (!wacom)
+ 		return -ENOMEM;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 531a593912ec..19bad59073e6 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -165,8 +165,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 	ret = vmbus_post_msg(open_msg,
+ 			       sizeof(struct vmbus_channel_open_channel));
+
+-	if (ret != 0)
++	if (ret != 0) {
++		err = ret;
+ 		goto error1;
++	}
+
+ 	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ 	if (t == 0) {
+@@ -363,7 +365,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ 	u32 next_gpadl_handle;
+ 	unsigned long flags;
+ 	int ret = 0;
+-	int t;
+
+ 	next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+ 	atomic_inc(&vmbus_connection.next_gpadl_handle);
+@@ -410,9 +411,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+
+ 		}
+ 	}
+-	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+-	BUG_ON(t == 0);
+-
++	wait_for_completion(&msginfo->waitevent);
+
+ 	/* At this point, we received the gpadl created msg */
+ 	*gpadl_handle = gpadlmsg->gpadl;
+@@ -435,7 +434,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ 	struct vmbus_channel_gpadl_teardown *msg;
+ 	struct vmbus_channel_msginfo *info;
+ 	unsigned long flags;
+-	int ret, t;
++	int ret;
+
+ 	info = kmalloc(sizeof(*info) +
+ 		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+@@ -457,11 +456,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ 	ret = vmbus_post_msg(msg,
+ 		       sizeof(struct vmbus_channel_gpadl_teardown));
+
+-	BUG_ON(ret != 0);
+-	t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
+-	BUG_ON(t == 0);
++	if (ret)
++		goto post_msg_err;
++
++	wait_for_completion(&info->waitevent);
+
+-	/* Received a torndown response */
++post_msg_err:
+ 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ 	list_del(&info->msglistentry);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -478,7 +478,7 @@ static void reset_channel_cb(void *arg)
+ 	channel->onchannel_callback = NULL;
+ }
+
+-static void vmbus_close_internal(struct vmbus_channel *channel)
++static int vmbus_close_internal(struct vmbus_channel *channel)
+ {
+ 	struct vmbus_channel_close_channel *msg;
+ 	int ret;
+@@ -501,11 +501,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+
+ 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+
+-	BUG_ON(ret != 0);
++	if (ret) {
++		pr_err("Close failed: close post msg return is %d\n", ret);
++		/*
++		 * If we failed to post the close msg,
++		 * it is perhaps better to leak memory.
++		 */
++		return ret;
++	}
++
+ 	/* Tear down the gpadl for the channel's ring buffer */
+-	if (channel->ringbuffer_gpadlhandle)
+-		vmbus_teardown_gpadl(channel,
+-					  channel->ringbuffer_gpadlhandle);
++	if (channel->ringbuffer_gpadlhandle) {
++		ret = vmbus_teardown_gpadl(channel,
++					  channel->ringbuffer_gpadlhandle);
++		if (ret) {
++			pr_err("Close failed: teardown gpadl return %d\n", ret);
++			/*
++			 * If we failed to teardown gpadl,
++			 * it is perhaps better to leak memory.
++			 */
++			return ret;
++		}
++	}
+
+ 	/* Cleanup the ring buffers for this channel */
+ 	hv_ringbuffer_cleanup(&channel->outbound);
+@@ -514,7 +531,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+ 	free_pages((unsigned long)channel->ringbuffer_pages,
+ 		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+-
++	return ret;
+ }
+
+ /*
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index ae22e3c1fc4c..e206619b946e 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -427,10 +427,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ 	 * insufficient resources. Retry the operation a couple of
+ 	 * times before giving up.
+ 	 */
+-	while (retries < 3) {
+-		ret =  hv_post_message(conn_id, 1, buffer, buflen);
+-		if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
++	while (retries < 10) {
++		ret = hv_post_message(conn_id, 1, buffer, buflen);
++
++		switch (ret) {
++		case HV_STATUS_INSUFFICIENT_BUFFERS:
++			ret = -ENOMEM;
++		case -ENOMEM:
++			break;
++		case HV_STATUS_SUCCESS:
+ 			return ret;
++		default:
++			pr_err("hv_post_msg() failed; error code:%d\n", ret);
++			return -EINVAL;
++		}
++
+ 		retries++;
+ 		msleep(100);
+ 	}
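The vmbus_post_msg() rework above retries only the transient "insufficient buffers" status (normalized to -ENOMEM), succeeds immediately on success, and fails fast on anything else. A userspace sketch of that retry shape; post_once() and the status codes are stand-ins, not the Hyper-V API:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define ST_SUCCESS      0
    #define ST_NO_BUFFERS   19     /* transient, worth retrying */

    /* Fails twice with the transient status, then succeeds. */
    static int post_once(void)
    {
        static int n;
        return ++n < 3 ? ST_NO_BUFFERS : ST_SUCCESS;
    }

    static int post_with_retry(void)
    {
        int retries, ret;

        for (retries = 0; retries < 10; retries++) {
            ret = post_once();
            switch (ret) {
            case ST_NO_BUFFERS:     /* transient: sleep and retry */
                break;
            case ST_SUCCESS:
                return 0;
            default:                /* permanent: give up immediately */
                return -EINVAL;
            }
            usleep(100 * 1000);
        }
        return -ENOMEM;             /* still congested after 10 tries */
    }

    int main(void) { printf("%d\n", post_with_retry()); return 0; }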
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index edfc8488cb03..3e4235c7a47f 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -138,6 +138,8 @@ int hv_init(void)
+ 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
+ 	memset(hv_context.synic_message_page, 0,
+ 	       sizeof(void *) * NR_CPUS);
++	memset(hv_context.post_msg_page, 0,
++	       sizeof(void *) * NR_CPUS);
+ 	memset(hv_context.vp_index, 0,
+ 	       sizeof(int) * NR_CPUS);
+ 	memset(hv_context.event_dpc, 0,
+@@ -217,26 +219,18 @@ int hv_post_message(union hv_connection_id connection_id,
+ 		  enum hv_message_type message_type,
+ 		  void *payload, size_t payload_size)
+ {
+-	struct aligned_input {
+-		u64 alignment8;
+-		struct hv_input_post_message msg;
+-	};
+
+ 	struct hv_input_post_message *aligned_msg;
+ 	u16 status;
+-	unsigned long addr;
+
+ 	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
+ 		return -EMSGSIZE;
+
+-	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
+-	if (!addr)
+-		return -ENOMEM;
+-
+ 	aligned_msg = (struct hv_input_post_message *)
+-			(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));
++			hv_context.post_msg_page[get_cpu()];
+
+ 	aligned_msg->connectionid = connection_id;
++	aligned_msg->reserved = 0;
+ 	aligned_msg->message_type = message_type;
+ 	aligned_msg->payload_size = payload_size;
+ 	memcpy((void *)aligned_msg->payload, payload, payload_size);
+@@ -244,8 +238,7 @@ int hv_post_message(union hv_connection_id connection_id,
+ 	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
+ 		& 0xFFFF;
+
+-	kfree((void *)addr);
+-
++	put_cpu();
+ 	return status;
+ }
+
+@@ -294,6 +287,14 @@ int hv_synic_alloc(void)
+ 			pr_err("Unable to allocate SYNIC event page\n");
+ 			goto err;
+ 		}
++
++		hv_context.post_msg_page[cpu] =
++			(void *)get_zeroed_page(GFP_ATOMIC);
++
++		if (hv_context.post_msg_page[cpu] == NULL) {
++			pr_err("Unable to allocate post msg page\n");
++			goto err;
++		}
+ 	}
+
+ 	return 0;
+@@ -308,6 +309,8 @@ static void hv_synic_free_cpu(int cpu)
+ 	free_page((unsigned long)hv_context.synic_event_page[cpu]);
+ 	if (hv_context.synic_message_page[cpu])
+ 		free_page((unsigned long)hv_context.synic_message_page[cpu]);
++	if (hv_context.post_msg_page[cpu])
++		free_page((unsigned long)hv_context.post_msg_page[cpu]);
+ }
+
+ void hv_synic_free(void)
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 22b750749a39..c386d8dc7223 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -515,6 +515,10 @@ struct hv_context {
+ 	 * per-cpu list of the channels based on their CPU affinity.
+ 	 */
+ 	struct list_head percpu_list[NR_CPUS];
++	/*
++	 * buffer to post messages to the host.
++	 */
++	void *post_msg_page[NR_CPUS];
+ };
+
+ extern struct hv_context hv_context;
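The hv.c and hyperv_vmbus.h hunks above replace a GFP_ATOMIC allocation on every hypercall with one page per CPU, allocated once at init and merely indexed at post time (get_cpu()/put_cpu() pin the caller to its slot). A userspace analogue of that preallocate-then-index pattern; a fixed worker count stands in for NR_CPUS and none of these names are kernel APIs:

    #include <stdlib.h>
    #include <string.h>

    #define NR_WORKERS  4
    #define BUF_SIZE    4096

    static void *post_msg_buf[NR_WORKERS];

    /* Pay the allocation cost once, up front, where failure is easy to
     * handle -- not inside the hot send path. */
    static int buffers_init(void)
    {
        for (int i = 0; i < NR_WORKERS; i++) {
            if (!(post_msg_buf[i] = aligned_alloc(BUF_SIZE, BUF_SIZE)))
                return -1;
        }
        return 0;
    }

    static void post_message(int worker, const void *payload, size_t len)
    {
        void *buf = post_msg_buf[worker];   /* no malloc on the hot path */
        memcpy(buf, payload, len);
        /* ... hand `buf` to the expensive call here ... */
    }

    int main(void)
    {
        if (buffers_init())
            return 1;
        post_message(0, "hello", 5);
        for (int i = 0; i < NR_WORKERS; i++)
            free(post_msg_buf[i]);
        return 0;
    }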
+diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
+index 787933d43d32..613231c16194 100644
+--- a/drivers/message/fusion/mptspi.c
++++ b/drivers/message/fusion/mptspi.c
+@@ -1419,6 +1419,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto out_mptspi_probe;
+         }
+
++	/* VMWare emulation doesn't properly implement WRITE_SAME
++	 */
++	if (pdev->subsystem_vendor == 0x15AD)
++		sh->no_write_same = 1;
++
+ 	spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ 	/* Attach the SCSI Host to the IOC structure
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 0e993ef28b94..8fd9466266b6 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -70,7 +70,7 @@ static int mei_cl_device_probe(struct device *dev)
+
+ 	dev_dbg(dev, "Device probe\n");
+
+-	strncpy(id.name, dev_name(dev), sizeof(id.name));
++	strlcpy(id.name, dev_name(dev), sizeof(id.name));
+
+ 	return driver->probe(device, &id);
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+index 00fb8badbacc..3b3e91057a4c 100644
+--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+@@ -1004,9 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
+ 	case ATH9K_ANI_FIRSTEP_LEVEL:{
+ 		u32 level = param;
+
+-		value = level;
++		value = level * 2;
+ 		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ 			      AR_PHY_FIND_SIG_FIRSTEP, value);
++		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
++			      AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
+
+ 		if (level != aniState->firstepLevel) {
+ 			ath_dbg(common, ANI,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
+index ca79f7160573..72da88d879c7 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
++++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
+@@ -82,7 +82,7 @@
+ #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH	62
+ #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH	65
+ #define IWL_MVM_BT_COEX_SYNC2SCO		1
+-#define IWL_MVM_BT_COEX_CORUNNING		1
++#define IWL_MVM_BT_COEX_CORUNNING		0
+ #define IWL_MVM_BT_COEX_MPLUT			1
+
+ #endif /* __MVM_CONSTANTS_H */
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 073a68b97a72..bc6a5db283f0 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -273,6 +273,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+@@ -316,6 +318,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index a394a9a95919..7cf6081a05a1 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -2039,7 +2039,7 @@ struct mac_iveiv_entry {
+  *	2 - drop tx power by 12dBm,
+  *	3 - increase tx power by 6dBm
+  */
+-#define BBP1_TX_POWER_CTRL		FIELD8(0x07)
++#define BBP1_TX_POWER_CTRL		FIELD8(0x03)
+ #define BBP1_TX_ANTENNA			FIELD8(0x18)
+
+ /*
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index a8c6f1a92e0f..b1315e197ffb 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -873,7 +873,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ 	rangesz = pna + na + ns;
+ 	nranges = rlen / sizeof(__be32) / rangesz;
+
+-	for (i = 0; i < nranges; i++) {
++	for (i = 0; i < nranges; i++, range += rangesz) {
+ 		u32 flags = of_read_number(range, 1);
+ 		u32 slot = of_read_number(range + 1, 1);
+ 		u64 cpuaddr = of_read_number(range + na, pna);
+@@ -883,14 +883,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ 			rtype = IORESOURCE_IO;
+ 		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ 			rtype = IORESOURCE_MEM;
++		else
++			continue;
+
+ 		if (slot == PCI_SLOT(devfn) && type == rtype) {
+ 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ 			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ 			return 0;
+ 		}
+-
+-		range += rangesz;
+ 	}
+
+ 	return -ENOENT;
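The pci-mvebu fix above moves `range += rangesz` into the for-statement so that the newly added `continue` cannot skip it; with the increment left at the loop bottom, skipping an unknown range type would re-parse the same entry on every iteration. A minimal reproduction of the pitfall:

    #include <stdio.h>

    int main(void)
    {
        int vals[] = { 1, -1, 3 };     /* -1 plays the "unknown type" entry */
        int sum = 0;

        /* The increment lives in the for-statement, so `continue` still
         * advances -- the shape the patch converts the driver loop to. */
        for (int i = 0; i < 3; i++) {
            if (vals[i] < 0)
                continue;              /* safe: i++ still runs */
            sum += vals[i];
        }
        printf("%d\n", sum);           /* 4 */

        /* Had the i++ lived at the bottom of a while-style loop body,
         * that same `continue` would never advance past vals[1]. */
        return 0;
    }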
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 9ff0a901ecf7..76ef7914c9aa 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -177,7 +177,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ {
+ 	struct pci_dev *pci_dev = to_pci_dev(dev);
+
+-	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
++	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ 		       pci_dev->vendor, pci_dev->device,
+ 		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ 		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 80c2d014283d..feaa5c23e991 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -24,6 +24,7 @@
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/ktime.h>
++#include <linux/mm.h>
+ #include <asm/dma.h>	/* isa_dma_bridge_buggy */
+ #include "pci.h"
+
+@@ -287,6 +288,25 @@ static void quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
+
++/*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
++static void quirk_extend_bar_to_page(struct pci_dev *dev)
++{
++	int i;
++
++	for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
++		struct resource *r = &dev->resource[i];
++
++		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
++			r->end = PAGE_SIZE - 1;
++			r->start = 0;
++			r->flags |= IORESOURCE_UNSET;
++			dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
++				 i, r);
++		}
++	}
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
++
+ /*
+  *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+  *  If it's needed, re-allocate the region.
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 6373985ad3f7..0482235eee92 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -1652,7 +1652,7 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
+ 	struct pci_dev_resource *fail_res;
+ 	int retval;
+ 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+-				  IORESOURCE_PREFETCH;
++				  IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+
+ again:
+ 	__pci_bus_size_bridges(parent, &add_list);
+diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
+index c756955bfcc5..0ce8e4e0fa73 100644
+--- a/drivers/regulator/ltc3589.c
++++ b/drivers/regulator/ltc3589.c
+@@ -372,6 +372,7 @@ static bool ltc3589_volatile_reg(struct device *dev, unsigned int reg)
+ 	switch (reg) {
+ 	case LTC3589_IRQSTAT:
+ 	case LTC3589_PGSTAT:
++	case LTC3589_VCCR:
+ 		return true;
+ 	}
+ 	return false;
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index b0e4a3eb33c7..5b2e76159b41 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -856,7 +856,7 @@ static void __exit cmos_do_remove(struct device *dev)
+ 	cmos->dev = NULL;
+ }
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+
+ static int cmos_suspend(struct device *dev)
+ {
+@@ -907,6 +907,8 @@ static inline int cmos_poweroff(struct device *dev)
+ 	return cmos_suspend(dev);
+ }
+
++#ifdef CONFIG_PM_SLEEP
++
+ static int cmos_resume(struct device *dev)
+ {
+ 	struct cmos_rtc *cmos = dev_get_drvdata(dev);
+@@ -954,6 +956,7 @@ static int cmos_resume(struct device *dev)
+ 	return 0;
+ }
+
++#endif
+ #else
+
+ static inline int cmos_poweroff(struct device *dev)
+diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
+index 665afcb74a56..3f3544f62259 100644
+--- a/drivers/scsi/be2iscsi/be_mgmt.c
++++ b/drivers/scsi/be2iscsi/be_mgmt.c
+@@ -943,17 +943,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
+
+ 	if (ip_action == IP_ACTION_ADD) {
+ 		memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+-		       ip_param->len);
++		       sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ 		if (subnet_param)
+ 			memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+-			       subnet_param->value, subnet_param->len);
++			       subnet_param->value,
++			       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ 	} else {
+ 		memcpy(req->ip_params.ip_record.ip_addr.addr,
+-		       if_info->ip_addr.addr, ip_param->len);
++		       if_info->ip_addr.addr,
++		       sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ 		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+-		       if_info->ip_addr.subnet_mask, ip_param->len);
++		       if_info->ip_addr.subnet_mask,
++		       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ 	}
+
+ 	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+@@ -981,7 +984,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+ 	req->action = gtway_action;
+ 	req->ip_addr.ip_type = BE2_IPV4;
+
+-	memcpy(req->ip_addr.addr, gt_addr, param_len);
++	memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+
+ 	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index be9698d920c2..8252c0e6682c 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3119,10 +3119,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
+ }
+
+ static void
+-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
++qla2x00_clear_drv_active(struct qla_hw_data *ha)
+ {
+-	struct qla_hw_data *ha = vha->hw;
+-
+ 	if (IS_QLA8044(ha)) {
+ 		qla8044_idc_lock(ha);
+ 		qla8044_clear_drv_active(ha);
+@@ -3193,7 +3191,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
+
+ 	scsi_host_put(base_vha->host);
+
+-	qla2x00_clear_drv_active(base_vha);
++	qla2x00_clear_drv_active(ha);
+
+ 	qla2x00_unmap_iobases(ha);
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index e632e14180cf..bcc449a0c3a7 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1431,12 +1431,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ 	uint32_t req_cnt)
+ {
+-	struct qla_hw_data *ha = vha->hw;
+-	device_reg_t __iomem *reg = ha->iobase;
+ 	uint32_t cnt;
+
+ 	if (vha->req->cnt < (req_cnt + 2)) {
+-		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
++		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+
+ 		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+ 		    "Request ring circled: cnt=%d, vha->->ring_index=%d, "
+@@ -3277,6 +3275,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ 		return -ENOMEM;
+
+ 	memcpy(&op->atio, atio, sizeof(*atio));
++	op->vha = vha;
+ 	INIT_WORK(&op->work, qlt_create_sess_from_atio);
+ 	queue_work(qla_tgt_wq, &op->work);
+ 	return 0;
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index 6d207afec8cb..a4c45ea8f688 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -89,7 +89,13 @@ err_exit:
+
+ static void mid_spi_dma_exit(struct dw_spi *dws)
+ {
++	if (!dws->dma_inited)
++		return;
++
++	dmaengine_terminate_all(dws->txchan);
+ 	dma_release_channel(dws->txchan);
++
++	dmaengine_terminate_all(dws->rxchan);
+ 	dma_release_channel(dws->rxchan);
+ }
+
+@@ -136,7 +142,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ 	txconf.dst_addr = dws->dma_addr;
+ 	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+ 	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+-	txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++	txconf.dst_addr_width = dws->dma_width;
+ 	txconf.device_fc = false;
+
+ 	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+@@ -159,7 +165,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ 	rxconf.src_addr = dws->dma_addr;
+ 	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+ 	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+-	rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++	rxconf.src_addr_width = dws->dma_width;
+ 	rxconf.device_fc = false;
+
+ 	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 3afc266b666d..f96ea8a38d64 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -415,7 +415,7 @@ static void rockchip_spi_dma_txcb(void *data)
+ 	spin_unlock_irqrestore(&rs->lock, flags);
+ }
+
+-static int rockchip_spi_dma_transfer(struct rockchip_spi *rs)
++static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
+ {
+ 	unsigned long flags;
+ 	struct dma_slave_config rxconf, txconf;
+@@ -474,8 +474,6 @@ static int rockchip_spi_dma_transfer(struct rockchip_spi *rs)
+ 		dmaengine_submit(txdesc);
+ 		dma_async_issue_pending(rs->dma_tx.ch);
+ 	}
+-
+-	return 1;
+ }
+
+ static void rockchip_spi_config(struct rockchip_spi *rs)
+@@ -557,16 +555,17 @@ static int rockchip_spi_transfer_one(
+ 	else if (rs->rx)
+ 		rs->tmode = CR0_XFM_RO;
+
+-	if (master->can_dma && master->can_dma(master, spi, xfer))
++	/* we need prepare dma before spi was enabled */
++	if (master->can_dma && master->can_dma(master, spi, xfer)) {
+ 		rs->use_dma = 1;
+-	else
++		rockchip_spi_prepare_dma(rs);
+++	} else {
+ 		rs->use_dma = 0;
++	}
+
+ 	rockchip_spi_config(rs);
+
+-	if (rs->use_dma)
+-		ret = rockchip_spi_dma_transfer(rs);
+-	else
++	if (!rs->use_dma)
+ 		ret = rockchip_spi_pio_transfer(rs);
+
+ 	return ret;
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index d017cec8a34a..e454b7c2ecd9 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -254,8 +254,16 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
+ {
+ 	unsigned int n13 = port->uartclk / (13 * baud);
+ 	unsigned int n16 = port->uartclk / (16 * baud);
+-	int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
+-	int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
++	int baudAbsDiff13;
++	int baudAbsDiff16;
++
++	if (n13 == 0)
++		n13 = 1;
++	if (n16 == 0)
++		n16 = 1;
++
++	baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
++	baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
+ 	if (baudAbsDiff13 < 0)
+ 		baudAbsDiff13 = -baudAbsDiff13;
+ 	if (baudAbsDiff16 < 0)
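The omap-serial hunk above computes the dividers first and clamps each to at least 1 before dividing by it again; with a high enough requested baud, `uartclk / (13 * baud)` truncates to 0 and the old initializers divided by zero. A runnable check of the clamped math:

    #include <stdio.h>

    int main(void)
    {
        unsigned int uartclk = 48000000, baud = 4000000;

        unsigned int n13 = uartclk / (13 * baud);   /* 48e6/52e6 -> 0 */
        unsigned int n16 = uartclk / (16 * baud);   /* 48e6/64e6 -> 0 */

        /* Clamp before using the values as divisors. */
        if (n13 == 0) n13 = 1;
        if (n16 == 0) n16 = 1;

        int diff13 = (int)baud - (int)(uartclk / (13 * n13));
        int diff16 = (int)baud - (int)(uartclk / (16 * n16));
        if (diff13 < 0) diff13 = -diff13;
        if (diff16 < 0) diff16 = -diff16;

        printf("mode16? %s\n", diff16 < diff13 ? "yes" : "no");
        return 0;
    }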
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c |
5634 |
+index f7825332a325..9558da3f06a0 100644 |
5635 |
+--- a/drivers/vfio/pci/vfio_pci.c |
5636 |
++++ b/drivers/vfio/pci/vfio_pci.c |
5637 |
+@@ -876,15 +876,11 @@ static void vfio_pci_remove(struct pci_dev *pdev) |
5638 |
+ { |
5639 |
+ struct vfio_pci_device *vdev; |
5640 |
+ |
5641 |
+- mutex_lock(&driver_lock); |
5642 |
+- |
5643 |
+ vdev = vfio_del_group_dev(&pdev->dev); |
5644 |
+ if (vdev) { |
5645 |
+ iommu_group_put(pdev->dev.iommu_group); |
5646 |
+ kfree(vdev); |
5647 |
+ } |
5648 |
+- |
5649 |
+- mutex_unlock(&driver_lock); |
5650 |
+ } |
5651 |
+ |
5652 |
+ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, |
5653 |
+@@ -927,108 +923,90 @@ static struct pci_driver vfio_pci_driver = { |
5654 |
+ .err_handler = &vfio_err_handlers, |
5655 |
+ }; |
5656 |
+ |
5657 |
+-/* |
5658 |
+- * Test whether a reset is necessary and possible. We mark devices as |
5659 |
+- * needs_reset when they are released, but don't have a function-local reset |
5660 |
+- * available. If any of these exist in the affected devices, we want to do |
5661 |
+- * a bus/slot reset. We also need all of the affected devices to be unused, |
5662 |
+- * so we abort if any device has a non-zero refcnt. driver_lock prevents a |
5663 |
+- * device from being opened during the scan or unbound from vfio-pci. |
5664 |
+- */ |
5665 |
+-static int vfio_pci_test_bus_reset(struct pci_dev *pdev, void *data) |
5666 |
+-{ |
5667 |
+- bool *needs_reset = data; |
5668 |
+- struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver); |
5669 |
+- int ret = -EBUSY; |
5670 |
+- |
5671 |
+- if (pci_drv == &vfio_pci_driver) { |
5672 |
+- struct vfio_device *device; |
5673 |
+- struct vfio_pci_device *vdev; |
5674 |
+- |
5675 |
+- device = vfio_device_get_from_dev(&pdev->dev); |
5676 |
+- if (!device) |
5677 |
+- return ret; |
5678 |
+- |
5679 |
+- vdev = vfio_device_data(device); |
5680 |
+- if (vdev) { |
5681 |
+- if (vdev->needs_reset) |
5682 |
+- *needs_reset = true; |
5683 |
+- |
5684 |
+- if (!vdev->refcnt) |
5685 |
+- ret = 0; |
5686 |
+- } |
5687 |
+- |
5688 |
+- vfio_device_put(device); |
5689 |
+- } |
5690 |
+- |
5691 |
+- /* |
5692 |
+- * TODO: vfio-core considers groups to be viable even if some devices |
5693 |
+- * are attached to known drivers, like pci-stub or pcieport. We can't |
5694 |
+- * freeze devices from being unbound to those drivers like we can |
5695 |
+- * here though, so it would be racy to test for them. We also can't |
5696 |
+- * use device_lock() to prevent changes as that would interfere with |
5697 |
+- * PCI-core taking device_lock during bus reset. For now, we require |
5698 |
+- * devices to be bound to vfio-pci to get a bus/slot reset on release. |
5699 |
+- */ |
5700 |
+- |
5701 |
+- return ret; |
5702 |
+-} |
5703 |
++struct vfio_devices { |
5704 |
++ struct vfio_device **devices; |
5705 |
++ int cur_index; |
5706 |
++ int max_index; |
5707 |
++}; |
5708 |
+ |
5709 |
+-/* Clear needs_reset on all affected devices after successful bus/slot reset */ |
5710 |
+-static int vfio_pci_clear_needs_reset(struct pci_dev *pdev, void *data) |
5711 |
++static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) |
5712 |
+ { |
5713 |
++ struct vfio_devices *devs = data; |
5714 |
+ struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver); |
5715 |
+ |
5716 |
+- if (pci_drv == &vfio_pci_driver) { |
5717 |
+- struct vfio_device *device; |
5718 |
+- struct vfio_pci_device *vdev; |
5719 |
++ if (pci_drv != &vfio_pci_driver) |
5720 |
++ return -EBUSY; |
5721 |
+ |
5722 |
+- device = vfio_device_get_from_dev(&pdev->dev); |
5723 |
+- if (!device) |
5724 |
+- return 0; |
5725 |
++ if (devs->cur_index == devs->max_index) |
5726 |
++ return -ENOSPC; |
5727 |
+ |
5728 |
+- vdev = vfio_device_data(device); |
5729 |
+- if (vdev) |
5730 |
+- vdev->needs_reset = false; |
5731 |
+- |
5732 |
+- vfio_device_put(device); |
5733 |
+- } |
5734 |
++ devs->devices[devs->cur_index] = vfio_device_get_from_dev(&pdev->dev); |
5735 |
++ if (!devs->devices[devs->cur_index]) |
5736 |
++ return -EINVAL; |
5737 |
+ |
5738 |
++ devs->cur_index++; |
5739 |
+ return 0; |
5740 |
+ } |
5741 |
+ |
5742 |
+ /* |
5743 |
+ * Attempt to do a bus/slot reset if there are devices affected by a reset for |
5744 |
+ * this device that are needs_reset and all of the affected devices are unused |
5745 |
+- * (!refcnt). Callers of this function are required to hold driver_lock such |
5746 |
+- * that devices can not be unbound from vfio-pci or opened by a user while we |
5747 |
+- * test for and perform a bus/slot reset. |
5748 |
++ * (!refcnt). Callers are required to hold driver_lock when calling this to |
5749 |
++ * prevent device opens and concurrent bus reset attempts. We prevent device |
5750 |
++ * unbinds by acquiring and holding a reference to the vfio_device. |
5751 |
++ * |
5752 |
++ * NB: vfio-core considers a group to be viable even if some devices are |
5753 |
++ * bound to drivers like pci-stub or pcieport. Here we require all devices |
5754 |
++ * to be bound to vfio_pci since that's the only way we can be sure they |
5755 |
++ * stay put. |
5756 |
+ */ |
5757 |
+ static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev) |
5758 |
+ { |
5759 |
++ struct vfio_devices devs = { .cur_index = 0 }; |
5760 |
++ int i = 0, ret = -EINVAL; |
5761 |
+ bool needs_reset = false, slot = false; |
5762 |
+- int ret; |
5763 |
++ struct vfio_pci_device *tmp; |
5764 |
+ |
5765 |
+ if (!pci_probe_reset_slot(vdev->pdev->slot)) |
5766 |
+ slot = true; |
5767 |
+ else if (pci_probe_reset_bus(vdev->pdev->bus)) |
5768 |
+ return; |
5769 |
+ |
5770 |
+- if (vfio_pci_for_each_slot_or_bus(vdev->pdev, |
5771 |
+- vfio_pci_test_bus_reset, |
5772 |
+- &needs_reset, slot) || !needs_reset) |
5773 |
++ if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs, |
5774 |
++ &i, slot) || !i) |
5775 |
+ return; |
5776 |
+ |
5777 |
+- if (slot) |
5778 |
+- ret = pci_try_reset_slot(vdev->pdev->slot); |
5779 |
+- else |
5780 |
+- ret = pci_try_reset_bus(vdev->pdev->bus); |
5781 |
+- |
5782 |
+- if (ret) |
5783 |
++ devs.max_index = i; |
5784 |
++ devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL); |
5785 |
++ if (!devs.devices) |
5786 |
+ return; |
5787 |
+ |
5788 |
+- vfio_pci_for_each_slot_or_bus(vdev->pdev, |
5789 |
+- vfio_pci_clear_needs_reset, NULL, slot); |
5790 |
++ if (vfio_pci_for_each_slot_or_bus(vdev->pdev, |
5791 |
++ vfio_pci_get_devs, &devs, slot)) |
5792 |
++ goto put_devs; |
5793 |
++ |
5794 |
++ for (i = 0; i < devs.cur_index; i++) { |
5795 |
++ tmp = vfio_device_data(devs.devices[i]); |
5796 |
++ if (tmp->needs_reset) |
5797 |
++ needs_reset = true; |
5798 |
++ if (tmp->refcnt) |
5799 |
++ goto put_devs; |
5800 |
++ } |
5801 |
++ |
5802 |
++ if (needs_reset) |
5803 |
++ ret = slot ? pci_try_reset_slot(vdev->pdev->slot) : |
5804 |
++ pci_try_reset_bus(vdev->pdev->bus); |
5805 |
++ |
5806 |
++put_devs: |
5807 |
++ for (i = 0; i < devs.cur_index; i++) { |
5808 |
++ if (!ret) { |
5809 |
++ tmp = vfio_device_data(devs.devices[i]); |
5810 |
++ tmp->needs_reset = false; |
5811 |
++ } |
5812 |
++ vfio_device_put(devs.devices[i]); |
5813 |
++ } |
5814 |
++ |
5815 |
++ kfree(devs.devices); |
5816 |
+ } |
5817 |
+ |
5818 |
+ static void __exit vfio_pci_cleanup(void) |
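
The rewritten vfio_pci_try_bus_reset() above replaces the two walk callbacks with a collect-then-check shape: count the devices behind the slot/bus, pin each one in a vfio_devices array via vfio_pci_get_devs(), verify needs_reset/refcnt over the pinned set, and only then attempt the reset, dropping references on the way out. Below is a minimal userspace model of that shape with stub types; it is not the kernel API, and pci_try_reset_bus() is reduced to a placeholder.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct dev { bool needs_reset; int refcnt; };
struct dev_set { struct dev **devices; int cur_index, max_index; };

/* model of vfio_pci_get_devs(): take every device or fail the walk */
static int collect(struct dev_set *s, struct dev *d)
{
	if (s->cur_index == s->max_index)
		return -1;			/* -ENOSPC in the kernel */
	s->devices[s->cur_index++] = d;		/* kernel also grabs a reference */
	return 0;
}

static void try_bus_reset(struct dev *all, int n)
{
	struct dev_set s = { .cur_index = 0, .max_index = n };
	bool needs_reset = false;
	int i, ret = -1;

	s.devices = calloc(n, sizeof(*s.devices));
	if (!s.devices)
		return;
	for (i = 0; i < n; i++)
		if (collect(&s, &all[i]))
			goto put;
	for (i = 0; i < s.cur_index; i++) {
		if (s.devices[i]->needs_reset)
			needs_reset = true;
		if (s.devices[i]->refcnt)	/* still open somewhere: abort */
			goto put;
	}
	if (needs_reset)
		ret = 0;			/* stands in for pci_try_reset_bus() */
put:
	for (i = 0; i < s.cur_index; i++)
		if (!ret)
			s.devices[i]->needs_reset = false;
	/* the kernel drops the per-device references here */
	free(s.devices);
	printf("%s\n", ret ? "no reset" : "bus reset done");
}

int main(void)
{
	struct dev d[2] = { { true, 0 }, { false, 0 } };
	try_bus_reset(d, 2);
	return 0;
}
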
5819 |
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
5820 |
+index 25ebe8eecdb7..c3eb93fc9261 100644 |
5821 |
+--- a/drivers/virtio/virtio_balloon.c |
5822 |
++++ b/drivers/virtio/virtio_balloon.c |
5823 |
+@@ -163,8 +163,8 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) |
5824 |
+ /* Find pfns pointing at start of each page, get pages and free them. */ |
5825 |
+ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { |
5826 |
+ struct page *page = balloon_pfn_to_page(pfns[i]); |
5827 |
+- balloon_page_free(page); |
5828 |
+ adjust_managed_page_count(page, 1); |
5829 |
++ put_page(page); /* balloon reference */ |
5830 |
+ } |
5831 |
+ } |
5832 |
+ |
5833 |
+@@ -395,6 +395,8 @@ static int virtballoon_migratepage(struct address_space *mapping, |
5834 |
+ if (!mutex_trylock(&vb->balloon_lock)) |
5835 |
+ return -EAGAIN; |
5836 |
+ |
5837 |
++ get_page(newpage); /* balloon reference */ |
5838 |
++ |
5839 |
+ /* balloon's page migration 1st step -- inflate "newpage" */ |
5840 |
+ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); |
5841 |
+ balloon_page_insert(newpage, mapping, &vb_dev_info->pages); |
5842 |
+@@ -404,12 +406,7 @@ static int virtballoon_migratepage(struct address_space *mapping, |
5843 |
+ set_page_pfns(vb->pfns, newpage); |
5844 |
+ tell_host(vb, vb->inflate_vq); |
5845 |
+ |
5846 |
+- /* |
5847 |
+- * balloon's page migration 2nd step -- deflate "page" |
5848 |
+- * |
5849 |
+- * It's safe to delete page->lru here because this page is at |
5850 |
+- * an isolated migration list, and this step is expected to happen here |
5851 |
+- */ |
5852 |
++ /* balloon's page migration 2nd step -- deflate "page" */ |
5853 |
+ balloon_page_delete(page); |
5854 |
+ vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; |
5855 |
+ set_page_pfns(vb->pfns, page); |
5856 |
+@@ -417,7 +414,9 @@ static int virtballoon_migratepage(struct address_space *mapping, |
5857 |
+ |
5858 |
+ mutex_unlock(&vb->balloon_lock); |
5859 |
+ |
5860 |
+- return MIGRATEPAGE_BALLOON_SUCCESS; |
5861 |
++ put_page(page); /* balloon reference */ |
5862 |
++ |
5863 |
++ return MIGRATEPAGE_SUCCESS; |
5864 |
+ } |
5865 |
+ |
5866 |
+ /* define the balloon_mapping->a_ops callback to allow balloon page migration */ |
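
The virtio_balloon change above pins newpage with get_page() before publishing it on the balloon list, drops the old page's balloon reference with put_page() only after the host has been told, and returns plain MIGRATEPAGE_SUCCESS. The ordering (take the new reference before the old one can go away) is the point; a toy refcount sketch, not the real struct page API:

#include <assert.h>

struct toy_page { int refcount; };

static void get_page(struct toy_page *p) { p->refcount++; }

static void put_page(struct toy_page *p)
{
	p->refcount--;
	assert(p->refcount >= 0);	/* never drop below zero */
}

/* migratepage order: pin newpage first, unpin the old page last */
static int migrate(struct toy_page *old, struct toy_page *newp)
{
	get_page(newp);		/* balloon reference on the incoming page */
	/* ... inflate newpage, tell host, deflate old page ... */
	put_page(old);		/* balloon reference released only at the end */
	return 0;		/* MIGRATEPAGE_SUCCESS */
}

int main(void)
{
	struct toy_page oldp = { .refcount = 1 }, newp = { .refcount = 0 };
	return migrate(&oldp, &newp);
}
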
5867 |
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c |
5868 |
+index eea26e1b2fda..d738ff8ab81c 100644 |
5869 |
+--- a/fs/btrfs/dev-replace.c |
5870 |
++++ b/fs/btrfs/dev-replace.c |
5871 |
+@@ -567,6 +567,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, |
5872 |
+ btrfs_kobj_rm_device(fs_info, src_device); |
5873 |
+ btrfs_kobj_add_device(fs_info, tgt_device); |
5874 |
+ |
5875 |
++ btrfs_dev_replace_unlock(dev_replace); |
5876 |
++ |
5877 |
+ btrfs_rm_dev_replace_blocked(fs_info); |
5878 |
+ |
5879 |
+ btrfs_rm_dev_replace_srcdev(fs_info, src_device); |
5880 |
+@@ -580,7 +582,6 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, |
5881 |
+ * superblock is scratched out so that it is no longer marked to |
5882 |
+ * belong to this filesystem. |
5883 |
+ */ |
5884 |
+- btrfs_dev_replace_unlock(dev_replace); |
5885 |
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); |
5886 |
+ mutex_unlock(&root->fs_info->chunk_mutex); |
5887 |
+ |
5888 |
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
5889 |
+index 3efe1c3877bf..98042c1a48b4 100644 |
5890 |
+--- a/fs/btrfs/extent-tree.c |
5891 |
++++ b/fs/btrfs/extent-tree.c |
5892 |
+@@ -4502,7 +4502,13 @@ again: |
5893 |
+ space_info->flush = 1; |
5894 |
+ } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { |
5895 |
+ used += orig_bytes; |
5896 |
+- if (need_do_async_reclaim(space_info, root->fs_info, used) && |
5897 |
++ /* |
5898 |
++ * We will do the space reservation dance during log replay, |
5899 |
++ * which means we won't have fs_info->fs_root set, so don't do |
5900 |
++ * the async reclaim as we will panic. |
5901 |
++ */ |
5902 |
++ if (!root->fs_info->log_root_recovering && |
5903 |
++ need_do_async_reclaim(space_info, root->fs_info, used) && |
5904 |
+ !work_busy(&root->fs_info->async_reclaim_work)) |
5905 |
+ queue_work(system_unbound_wq, |
5906 |
+ &root->fs_info->async_reclaim_work); |
5907 |
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
5908 |
+index ff1cc0399b9a..68dd92cd7d54 100644 |
5909 |
+--- a/fs/btrfs/file.c |
5910 |
++++ b/fs/btrfs/file.c |
5911 |
+@@ -2621,23 +2621,28 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) |
5912 |
+ struct btrfs_root *root = BTRFS_I(inode)->root; |
5913 |
+ struct extent_map *em = NULL; |
5914 |
+ struct extent_state *cached_state = NULL; |
5915 |
+- u64 lockstart = *offset; |
5916 |
+- u64 lockend = i_size_read(inode); |
5917 |
+- u64 start = *offset; |
5918 |
+- u64 len = i_size_read(inode); |
5919 |
++ u64 lockstart; |
5920 |
++ u64 lockend; |
5921 |
++ u64 start; |
5922 |
++ u64 len; |
5923 |
+ int ret = 0; |
5924 |
+ |
5925 |
+- lockend = max_t(u64, root->sectorsize, lockend); |
5926 |
++ if (inode->i_size == 0) |
5927 |
++ return -ENXIO; |
5928 |
++ |
5929 |
++ /* |
5930 |
++ * *offset can be negative, in this case we start finding DATA/HOLE from |
5931 |
++ * the very start of the file. |
5932 |
++ */ |
5933 |
++ start = max_t(loff_t, 0, *offset); |
5934 |
++ |
5935 |
++ lockstart = round_down(start, root->sectorsize); |
5936 |
++ lockend = round_up(i_size_read(inode), root->sectorsize); |
5937 |
+ if (lockend <= lockstart) |
5938 |
+ lockend = lockstart + root->sectorsize; |
5939 |
+- |
5940 |
+ lockend--; |
5941 |
+ len = lockend - lockstart + 1; |
5942 |
+ |
5943 |
+- len = max_t(u64, len, root->sectorsize); |
5944 |
+- if (inode->i_size == 0) |
5945 |
+- return -ENXIO; |
5946 |
+- |
5947 |
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, |
5948 |
+ &cached_state); |
5949 |
+ |
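
The find_desired_extent() fix above clamps a possibly negative *offset to 0 and derives the lock range by rounding start down and i_size up to the sector size. A standalone sketch of just that arithmetic, using the usual power-of-two round_down/round_up definitions (assumption: sectorsize is a power of two, as in btrfs):

#include <stdint.h>
#include <stdio.h>

#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))
#define round_up(x, y)   round_down((x) + (y) - 1, y)

int main(void)
{
	int64_t offset = -100;		/* SEEK_DATA with a negative offset */
	uint64_t sectorsize = 4096, isize = 10000;
	uint64_t start = offset < 0 ? 0 : (uint64_t)offset;
	uint64_t lockstart = round_down(start, sectorsize);
	uint64_t lockend = round_up(isize, sectorsize);

	if (lockend <= lockstart)	/* degenerate range: widen by one sector */
		lockend = lockstart + sectorsize;
	lockend--;
	printf("lock [%llu, %llu]\n",
	       (unsigned long long)lockstart, (unsigned long long)lockend);
	return 0;
}
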
5950 |
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
5951 |
+index 016c403bfe7e..886d8d42640d 100644 |
5952 |
+--- a/fs/btrfs/inode.c |
5953 |
++++ b/fs/btrfs/inode.c |
5954 |
+@@ -3662,7 +3662,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, |
5955 |
+ * without delay |
5956 |
+ */ |
5957 |
+ if (!btrfs_is_free_space_inode(inode) |
5958 |
+- && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { |
5959 |
++ && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
5960 |
++ && !root->fs_info->log_root_recovering) { |
5961 |
+ btrfs_update_root_times(trans, root); |
5962 |
+ |
5963 |
+ ret = btrfs_delayed_update_inode(trans, root, inode); |
5964 |
+@@ -5202,42 +5203,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) |
5965 |
+ iput(inode); |
5966 |
+ inode = ERR_PTR(ret); |
5967 |
+ } |
5968 |
+- /* |
5969 |
+- * If orphan cleanup did remove any orphans, it means the tree |
5970 |
+- * was modified and therefore the commit root is not the same as |
5971 |
+- * the current root anymore. This is a problem, because send |
5972 |
+- * uses the commit root and therefore can see inode items that |
5973 |
+- * don't exist in the current root anymore, and for example make |
5974 |
+- * calls to btrfs_iget, which will do tree lookups based on the |
5975 |
+- * current root and not on the commit root. Those lookups will |
5976 |
+- * fail, returning a -ESTALE error, and making send fail with |
5977 |
+- * that error. So make sure a send does not see any orphans we |
5978 |
+- * have just removed, and that it will see the same inodes |
5979 |
+- * regardless of whether a transaction commit happened before |
5980 |
+- * it started (meaning that the commit root will be the same as |
5981 |
+- * the current root) or not. |
5982 |
+- */ |
5983 |
+- if (sub_root->node != sub_root->commit_root) { |
5984 |
+- u64 sub_flags = btrfs_root_flags(&sub_root->root_item); |
5985 |
+- |
5986 |
+- if (sub_flags & BTRFS_ROOT_SUBVOL_RDONLY) { |
5987 |
+- struct extent_buffer *eb; |
5988 |
+- |
5989 |
+- /* |
5990 |
+- * Assert we can't have races between dentry |
5991 |
+- * lookup called through the snapshot creation |
5992 |
+- * ioctl and the VFS. |
5993 |
+- */ |
5994 |
+- ASSERT(mutex_is_locked(&dir->i_mutex)); |
5995 |
+- |
5996 |
+- down_write(&root->fs_info->commit_root_sem); |
5997 |
+- eb = sub_root->commit_root; |
5998 |
+- sub_root->commit_root = |
5999 |
+- btrfs_root_node(sub_root); |
6000 |
+- up_write(&root->fs_info->commit_root_sem); |
6001 |
+- free_extent_buffer(eb); |
6002 |
+- } |
6003 |
+- } |
6004 |
+ } |
6005 |
+ |
6006 |
+ return inode; |
6007 |
+@@ -6191,21 +6156,60 @@ out_fail_inode: |
6008 |
+ goto out_fail; |
6009 |
+ } |
6010 |
+ |
6011 |
++/* Find next extent map of a given extent map, caller needs to ensure locks */ |
6012 |
++static struct extent_map *next_extent_map(struct extent_map *em) |
6013 |
++{ |
6014 |
++ struct rb_node *next; |
6015 |
++ |
6016 |
++ next = rb_next(&em->rb_node); |
6017 |
++ if (!next) |
6018 |
++ return NULL; |
6019 |
++ return container_of(next, struct extent_map, rb_node); |
6020 |
++} |
6021 |
++ |
6022 |
++static struct extent_map *prev_extent_map(struct extent_map *em) |
6023 |
++{ |
6024 |
++ struct rb_node *prev; |
6025 |
++ |
6026 |
++ prev = rb_prev(&em->rb_node); |
6027 |
++ if (!prev) |
6028 |
++ return NULL; |
6029 |
++ return container_of(prev, struct extent_map, rb_node); |
6030 |
++} |
6031 |
++ |
6032 |
+ /* helper for btfs_get_extent. Given an existing extent in the tree, |
6033 |
++ * the existing extent is the nearest extent to map_start, |
6034 |
+ * and an extent that you want to insert, deal with overlap and insert |
6035 |
+- * the new extent into the tree. |
6036 |
++ * the best fitted new extent into the tree. |
6037 |
+ */ |
6038 |
+ static int merge_extent_mapping(struct extent_map_tree *em_tree, |
6039 |
+ struct extent_map *existing, |
6040 |
+ struct extent_map *em, |
6041 |
+ u64 map_start) |
6042 |
+ { |
6043 |
++ struct extent_map *prev; |
6044 |
++ struct extent_map *next; |
6045 |
++ u64 start; |
6046 |
++ u64 end; |
6047 |
+ u64 start_diff; |
6048 |
+ |
6049 |
+ BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); |
6050 |
+- start_diff = map_start - em->start; |
6051 |
+- em->start = map_start; |
6052 |
+- em->len = existing->start - em->start; |
6053 |
++ |
6054 |
++ if (existing->start > map_start) { |
6055 |
++ next = existing; |
6056 |
++ prev = prev_extent_map(next); |
6057 |
++ } else { |
6058 |
++ prev = existing; |
6059 |
++ next = next_extent_map(prev); |
6060 |
++ } |
6061 |
++ |
6062 |
++ start = prev ? extent_map_end(prev) : em->start; |
6063 |
++ start = max_t(u64, start, em->start); |
6064 |
++ end = next ? next->start : extent_map_end(em); |
6065 |
++ end = min_t(u64, end, extent_map_end(em)); |
6066 |
++ start_diff = start - em->start; |
6067 |
++ em->start = start; |
6068 |
++ em->len = end - start; |
6069 |
+ if (em->block_start < EXTENT_MAP_LAST_BYTE && |
6070 |
+ !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { |
6071 |
+ em->block_start += start_diff; |
6072 |
+@@ -6482,25 +6486,21 @@ insert: |
6073 |
+ |
6074 |
+ ret = 0; |
6075 |
+ |
6076 |
+- existing = lookup_extent_mapping(em_tree, start, len); |
6077 |
+- if (existing && (existing->start > start || |
6078 |
+- existing->start + existing->len <= start)) { |
6079 |
++ existing = search_extent_mapping(em_tree, start, len); |
6080 |
++ /* |
6081 |
++ * existing will always be non-NULL, since there must be an
6082 |
++ * extent causing the -EEXIST. |
6083 |
++ */ |
6084 |
++ if (start >= extent_map_end(existing) || |
6085 |
++ start <= existing->start) { |
6086 |
++ /* |
6087 |
++ * The existing extent map is the one nearest to |
6088 |
++ * the [start, start + len) range which overlaps |
6089 |
++ */ |
6090 |
++ err = merge_extent_mapping(em_tree, existing, |
6091 |
++ em, start); |
6092 |
+ free_extent_map(existing); |
6093 |
+- existing = NULL; |
6094 |
+- } |
6095 |
+- if (!existing) { |
6096 |
+- existing = lookup_extent_mapping(em_tree, em->start, |
6097 |
+- em->len); |
6098 |
+- if (existing) { |
6099 |
+- err = merge_extent_mapping(em_tree, existing, |
6100 |
+- em, start); |
6101 |
+- free_extent_map(existing); |
6102 |
+- if (err) { |
6103 |
+- free_extent_map(em); |
6104 |
+- em = NULL; |
6105 |
+- } |
6106 |
+- } else { |
6107 |
+- err = -EIO; |
6108 |
++ if (err) { |
6109 |
+ free_extent_map(em); |
6110 |
+ em = NULL; |
6111 |
+ } |
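
merge_extent_mapping() above now takes the nearest existing extent from search_extent_mapping(), finds its neighbors with prev_extent_map()/next_extent_map(), and clips the new map to the free gap [end(prev), start(next)) intersected with the new map's own range. The interval math in isolation, with stub range structs rather than real extent maps:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, len; };

static uint64_t range_end(const struct range *r) { return r->start + r->len; }
static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* clip em to the gap between prev and next (either neighbor may be absent) */
static void clip(struct range *em, const struct range *prev,
		 const struct range *next)
{
	uint64_t start = max64(prev ? range_end(prev) : em->start, em->start);
	uint64_t end = min64(next ? next->start : range_end(em), range_end(em));

	em->start = start;
	em->len = end - start;
}

int main(void)
{
	struct range prev = { 0, 4096 }, next = { 16384, 4096 };
	struct range em = { 0, 32768 };

	clip(&em, &prev, &next);
	printf("em [%llu, +%llu)\n", (unsigned long long)em.start,
	       (unsigned long long)em.len);	/* [4096, +12288) */
	return 0;
}
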
6112 |
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c |
6113 |
+index 8a8e29878c34..b765d412cbb6 100644 |
6114 |
+--- a/fs/btrfs/ioctl.c |
6115 |
++++ b/fs/btrfs/ioctl.c |
6116 |
+@@ -332,6 +332,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) |
6117 |
+ goto out_drop; |
6118 |
+ |
6119 |
+ } else { |
6120 |
++ ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0); |
6121 |
++ if (ret && ret != -ENODATA) |
6122 |
++ goto out_drop; |
6123 |
+ ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); |
6124 |
+ } |
6125 |
+ |
6126 |
+@@ -711,6 +714,39 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, |
6127 |
+ if (ret) |
6128 |
+ goto fail; |
6129 |
+ |
6130 |
++ ret = btrfs_orphan_cleanup(pending_snapshot->snap); |
6131 |
++ if (ret) |
6132 |
++ goto fail; |
6133 |
++ |
6134 |
++ /* |
6135 |
++ * If orphan cleanup did remove any orphans, it means the tree was |
6136 |
++ * modified and therefore the commit root is not the same as the |
6137 |
++ * current root anymore. This is a problem, because send uses the |
6138 |
++ * commit root and therefore can see inode items that don't exist |
6139 |
++ * in the current root anymore, and for example make calls to |
6140 |
++ * btrfs_iget, which will do tree lookups based on the current root |
6141 |
++ * and not on the commit root. Those lookups will fail, returning a |
6142 |
++ * -ESTALE error, and making send fail with that error. So make sure |
6143 |
++ * a send does not see any orphans we have just removed, and that it |
6144 |
++ * will see the same inodes regardless of whether a transaction |
6145 |
++ * commit happened before it started (meaning that the commit root |
6146 |
++ * will be the same as the current root) or not. |
6147 |
++ */ |
6148 |
++ if (readonly && pending_snapshot->snap->node != |
6149 |
++ pending_snapshot->snap->commit_root) { |
6150 |
++ trans = btrfs_join_transaction(pending_snapshot->snap); |
6151 |
++ if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) { |
6152 |
++ ret = PTR_ERR(trans); |
6153 |
++ goto fail; |
6154 |
++ } |
6155 |
++ if (!IS_ERR(trans)) { |
6156 |
++ ret = btrfs_commit_transaction(trans, |
6157 |
++ pending_snapshot->snap); |
6158 |
++ if (ret) |
6159 |
++ goto fail; |
6160 |
++ } |
6161 |
++ } |
6162 |
++ |
6163 |
+ inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); |
6164 |
+ if (IS_ERR(inode)) { |
6165 |
+ ret = PTR_ERR(inode); |
6166 |
+@@ -5283,6 +5319,12 @@ long btrfs_ioctl(struct file *file, unsigned int |
6167 |
+ if (ret) |
6168 |
+ return ret; |
6169 |
+ ret = btrfs_sync_fs(file->f_dentry->d_sb, 1); |
6170 |
++ /* |
6171 |
++ * The transaction thread may want to do more work, |
6172 |
++ * namely it pokes the cleaner kthread that will start

6173 |
++ * processing uncleaned subvols. |
6174 |
++ */ |
6175 |
++ wake_up_process(root->fs_info->transaction_kthread); |
6176 |
+ return ret; |
6177 |
+ } |
6178 |
+ case BTRFS_IOC_START_SYNC: |
6179 |
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c |
6180 |
+index ded5c601d916..d094534c3b53 100644 |
6181 |
+--- a/fs/btrfs/qgroup.c |
6182 |
++++ b/fs/btrfs/qgroup.c |
6183 |
+@@ -551,9 +551,15 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans, |
6184 |
+ key.type = BTRFS_QGROUP_INFO_KEY; |
6185 |
+ key.offset = qgroupid; |
6186 |
+ |
6187 |
++ /* |
6188 |
++ * Avoid a transaction abort by catching -EEXIST here. In that |
6189 |
++ * case, we proceed by re-initializing the existing structure |
6190 |
++ * on disk. |
6191 |
++ */ |
6192 |
++ |
6193 |
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, |
6194 |
+ sizeof(*qgroup_info)); |
6195 |
+- if (ret) |
6196 |
++ if (ret && ret != -EEXIST) |
6197 |
+ goto out; |
6198 |
+ |
6199 |
+ leaf = path->nodes[0]; |
6200 |
+@@ -572,7 +578,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans, |
6201 |
+ key.type = BTRFS_QGROUP_LIMIT_KEY; |
6202 |
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, |
6203 |
+ sizeof(*qgroup_limit)); |
6204 |
+- if (ret) |
6205 |
++ if (ret && ret != -EEXIST) |
6206 |
+ goto out; |
6207 |
+ |
6208 |
+ leaf = path->nodes[0]; |
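
The add_qgroup_item() change above downgrades "the item is already on disk" from a transaction-aborting failure to a signal to reinitialize in place: -EEXIST is allowed through and the existing leaf slot is simply rewritten. The pattern, with a stand-in for btrfs_insert_empty_item():

#include <errno.h>
#include <stdio.h>

/* stand-in insert: reports -EEXIST when the key is already present */
static int insert_item(int key, int *exists)
{
	if (*exists)
		return -EEXIST;
	*exists = 1;
	return 0;
}

static int add_or_reinit(int key, int *exists)
{
	int ret = insert_item(key, exists);

	if (ret && ret != -EEXIST)	/* only real errors abort */
		return ret;
	/* ret == 0 or -EEXIST: the slot now points at the item; rewrite it */
	printf("item %d (re)initialized\n", key);
	return 0;
}

int main(void)
{
	int exists = 0;

	add_or_reinit(7, &exists);	/* fresh insert */
	add_or_reinit(7, &exists);	/* -EEXIST path: reinitialize in place */
	return 0;
}
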
6209 |
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c |
6210 |
+index 65245a07275b..56fe6ec409ac 100644 |
6211 |
+--- a/fs/btrfs/relocation.c |
6212 |
++++ b/fs/btrfs/relocation.c |
6213 |
+@@ -736,7 +736,8 @@ again: |
6214 |
+ err = ret; |
6215 |
+ goto out; |
6216 |
+ } |
6217 |
+- BUG_ON(!ret || !path1->slots[0]); |
6218 |
++ ASSERT(ret); |
6219 |
++ ASSERT(path1->slots[0]); |
6220 |
+ |
6221 |
+ path1->slots[0]--; |
6222 |
+ |
6223 |
+@@ -746,10 +747,10 @@ again: |
6224 |
+ * the backref was added previously when processing |
6225 |
+ * backref of type BTRFS_TREE_BLOCK_REF_KEY |
6226 |
+ */ |
6227 |
+- BUG_ON(!list_is_singular(&cur->upper)); |
6228 |
++ ASSERT(list_is_singular(&cur->upper)); |
6229 |
+ edge = list_entry(cur->upper.next, struct backref_edge, |
6230 |
+ list[LOWER]); |
6231 |
+- BUG_ON(!list_empty(&edge->list[UPPER])); |
6232 |
++ ASSERT(list_empty(&edge->list[UPPER])); |
6233 |
+ exist = edge->node[UPPER]; |
6234 |
+ /* |
6235 |
+ * add the upper level block to pending list if we need |
6236 |
+@@ -831,7 +832,7 @@ again: |
6237 |
+ cur->cowonly = 1; |
6238 |
+ } |
6239 |
+ #else |
6240 |
+- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); |
6241 |
++ ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY); |
6242 |
+ if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { |
6243 |
+ #endif |
6244 |
+ if (key.objectid == key.offset) { |
6245 |
+@@ -840,7 +841,7 @@ again: |
6246 |
+ * backref of this type. |
6247 |
+ */ |
6248 |
+ root = find_reloc_root(rc, cur->bytenr); |
6249 |
+- BUG_ON(!root); |
6250 |
++ ASSERT(root); |
6251 |
+ cur->root = root; |
6252 |
+ break; |
6253 |
+ } |
6254 |
+@@ -868,7 +869,7 @@ again: |
6255 |
+ } else { |
6256 |
+ upper = rb_entry(rb_node, struct backref_node, |
6257 |
+ rb_node); |
6258 |
+- BUG_ON(!upper->checked); |
6259 |
++ ASSERT(upper->checked); |
6260 |
+ INIT_LIST_HEAD(&edge->list[UPPER]); |
6261 |
+ } |
6262 |
+ list_add_tail(&edge->list[LOWER], &cur->upper); |
6263 |
+@@ -892,7 +893,7 @@ again: |
6264 |
+ |
6265 |
+ if (btrfs_root_level(&root->root_item) == cur->level) { |
6266 |
+ /* tree root */ |
6267 |
+- BUG_ON(btrfs_root_bytenr(&root->root_item) != |
6268 |
++ ASSERT(btrfs_root_bytenr(&root->root_item) == |
6269 |
+ cur->bytenr); |
6270 |
+ if (should_ignore_root(root)) |
6271 |
+ list_add(&cur->list, &useless); |
6272 |
+@@ -927,7 +928,7 @@ again: |
6273 |
+ need_check = true; |
6274 |
+ for (; level < BTRFS_MAX_LEVEL; level++) { |
6275 |
+ if (!path2->nodes[level]) { |
6276 |
+- BUG_ON(btrfs_root_bytenr(&root->root_item) != |
6277 |
++ ASSERT(btrfs_root_bytenr(&root->root_item) == |
6278 |
+ lower->bytenr); |
6279 |
+ if (should_ignore_root(root)) |
6280 |
+ list_add(&lower->list, &useless); |
6281 |
+@@ -977,12 +978,15 @@ again: |
6282 |
+ need_check = false; |
6283 |
+ list_add_tail(&edge->list[UPPER], |
6284 |
+ &list); |
6285 |
+- } else |
6286 |
++ } else { |
6287 |
++ if (upper->checked) |
6288 |
++ need_check = true; |
6289 |
+ INIT_LIST_HEAD(&edge->list[UPPER]); |
6290 |
++ } |
6291 |
+ } else { |
6292 |
+ upper = rb_entry(rb_node, struct backref_node, |
6293 |
+ rb_node); |
6294 |
+- BUG_ON(!upper->checked); |
6295 |
++ ASSERT(upper->checked); |
6296 |
+ INIT_LIST_HEAD(&edge->list[UPPER]); |
6297 |
+ if (!upper->owner) |
6298 |
+ upper->owner = btrfs_header_owner(eb); |
6299 |
+@@ -1026,7 +1030,7 @@ next: |
6300 |
+ * everything goes well, connect backref nodes and insert backref nodes |
6301 |
+ * into the cache. |
6302 |
+ */ |
6303 |
+- BUG_ON(!node->checked); |
6304 |
++ ASSERT(node->checked); |
6305 |
+ cowonly = node->cowonly; |
6306 |
+ if (!cowonly) { |
6307 |
+ rb_node = tree_insert(&cache->rb_root, node->bytenr, |
6308 |
+@@ -1062,8 +1066,21 @@ next: |
6309 |
+ continue; |
6310 |
+ } |
6311 |
+ |
6312 |
+- BUG_ON(!upper->checked); |
6313 |
+- BUG_ON(cowonly != upper->cowonly); |
6314 |
++ if (!upper->checked) { |
6315 |
++ /* |
6316 |
++ * Still want to blow up for developers since this is a |
6317 |
++ * logic bug. |
6318 |
++ */ |
6319 |
++ ASSERT(0); |
6320 |
++ err = -EINVAL; |
6321 |
++ goto out; |
6322 |
++ } |
6323 |
++ if (cowonly != upper->cowonly) { |
6324 |
++ ASSERT(0); |
6325 |
++ err = -EINVAL; |
6326 |
++ goto out; |
6327 |
++ } |
6328 |
++ |
6329 |
+ if (!cowonly) { |
6330 |
+ rb_node = tree_insert(&cache->rb_root, upper->bytenr, |
6331 |
+ &upper->rb_node); |
6332 |
+@@ -1086,7 +1103,7 @@ next: |
6333 |
+ while (!list_empty(&useless)) { |
6334 |
+ upper = list_entry(useless.next, struct backref_node, list); |
6335 |
+ list_del_init(&upper->list); |
6336 |
+- BUG_ON(!list_empty(&upper->upper)); |
6337 |
++ ASSERT(list_empty(&upper->upper)); |
6338 |
+ if (upper == node) |
6339 |
+ node = NULL; |
6340 |
+ if (upper->lowest) { |
6341 |
+@@ -1119,29 +1136,45 @@ out: |
6342 |
+ if (err) { |
6343 |
+ while (!list_empty(&useless)) { |
6344 |
+ lower = list_entry(useless.next, |
6345 |
+- struct backref_node, upper); |
6346 |
+- list_del_init(&lower->upper); |
6347 |
++ struct backref_node, list); |
6348 |
++ list_del_init(&lower->list); |
6349 |
+ } |
6350 |
+- upper = node; |
6351 |
+- INIT_LIST_HEAD(&list); |
6352 |
+- while (upper) { |
6353 |
+- if (RB_EMPTY_NODE(&upper->rb_node)) { |
6354 |
+- list_splice_tail(&upper->upper, &list); |
6355 |
+- free_backref_node(cache, upper); |
6356 |
+- } |
6357 |
+- |
6358 |
+- if (list_empty(&list)) |
6359 |
+- break; |
6360 |
+- |
6361 |
+- edge = list_entry(list.next, struct backref_edge, |
6362 |
+- list[LOWER]); |
6363 |
++ while (!list_empty(&list)) { |
6364 |
++ edge = list_first_entry(&list, struct backref_edge, |
6365 |
++ list[UPPER]); |
6366 |
++ list_del(&edge->list[UPPER]); |
6367 |
+ list_del(&edge->list[LOWER]); |
6368 |
++ lower = edge->node[LOWER]; |
6369 |
+ upper = edge->node[UPPER]; |
6370 |
+ free_backref_edge(cache, edge); |
6371 |
++ |
6372 |
++ /* |
6373 |
++ * Lower is no longer linked to any upper backref nodes |
6374 |
++ * and isn't in the cache, we can free it ourselves. |
6375 |
++ */ |
6376 |
++ if (list_empty(&lower->upper) && |
6377 |
++ RB_EMPTY_NODE(&lower->rb_node)) |
6378 |
++ list_add(&lower->list, &useless); |
6379 |
++ |
6380 |
++ if (!RB_EMPTY_NODE(&upper->rb_node)) |
6381 |
++ continue; |
6382 |
++ |
6383 |
++ /* Add this guy's upper edges to the list to process */
6384 |
++ list_for_each_entry(edge, &upper->upper, list[LOWER]) |
6385 |
++ list_add_tail(&edge->list[UPPER], &list); |
6386 |
++ if (list_empty(&upper->upper)) |
6387 |
++ list_add(&upper->list, &useless); |
6388 |
++ } |
6389 |
++ |
6390 |
++ while (!list_empty(&useless)) { |
6391 |
++ lower = list_entry(useless.next, |
6392 |
++ struct backref_node, list); |
6393 |
++ list_del_init(&lower->list); |
6394 |
++ free_backref_node(cache, lower); |
6395 |
+ } |
6396 |
+ return ERR_PTR(err); |
6397 |
+ } |
6398 |
+- BUG_ON(node && node->detached); |
6399 |
++ ASSERT(!node || !node->detached); |
6400 |
+ return node; |
6401 |
+ } |
6402 |
+ |
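
The relocation changes above convert BUG_ON() calls to ASSERT() plus an error return, so debug builds still trap on the logic bug while production builds return -EINVAL and unwind through the teardown path instead of crashing the box. The shape of the conversion, modeled with the userspace assert():

#include <assert.h>
#include <errno.h>

#define ASSERT(x) assert(x)	/* the kernel ASSERT compiles out unless debug */

static int build_tree(int checked, int cowonly, int upper_cowonly)
{
	int err = 0;

	if (!checked) {
		ASSERT(0);	/* still blow up for developers: logic bug */
		err = -EINVAL;
		goto out;
	}
	if (cowonly != upper_cowonly) {
		ASSERT(0);
		err = -EINVAL;
		goto out;
	}
	/* ... normal path ... */
out:
	/* the error path frees edges/nodes instead of leaking them */
	return err;
}

int main(void)
{
	return build_tree(1, 0, 0);
}
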
6403 |
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c |
6404 |
+index d89c6d3542ca..98a25df1c430 100644 |
6405 |
+--- a/fs/btrfs/transaction.c |
6406 |
++++ b/fs/btrfs/transaction.c |
6407 |
+@@ -609,7 +609,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) |
6408 |
+ if (transid <= root->fs_info->last_trans_committed) |
6409 |
+ goto out; |
6410 |
+ |
6411 |
+- ret = -EINVAL; |
6412 |
+ /* find specified transaction */ |
6413 |
+ spin_lock(&root->fs_info->trans_lock); |
6414 |
+ list_for_each_entry(t, &root->fs_info->trans_list, list) { |
6415 |
+@@ -625,9 +624,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) |
6416 |
+ } |
6417 |
+ } |
6418 |
+ spin_unlock(&root->fs_info->trans_lock); |
6419 |
+- /* The specified transaction doesn't exist */ |
6420 |
+- if (!cur_trans) |
6421 |
++ |
6422 |
++ /* |
6423 |
++ * The specified transaction doesn't exist, or we |
6424 |
++ * raced with btrfs_commit_transaction |
6425 |
++ */ |
6426 |
++ if (!cur_trans) { |
6427 |
++ if (transid > root->fs_info->last_trans_committed) |
6428 |
++ ret = -EINVAL; |
6429 |
+ goto out; |
6430 |
++ } |
6431 |
+ } else { |
6432 |
+ /* find newest transaction that is committing | committed */ |
6433 |
+ spin_lock(&root->fs_info->trans_lock); |
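
The btrfs_wait_for_commit() fix above only reports -EINVAL when the requested transid is genuinely in the future: if the scan of the live list misses the transaction because it committed during the scan, that is success. A rough model of the double check (the second read of last_trans_committed is what resolves the race):

#include <errno.h>

/* 0 if transid is committed or found; -EINVAL only if it never existed */
static int wait_for_commit(unsigned long transid,
			   const unsigned long *last_committed, int found)
{
	if (transid <= *last_committed)
		return 0;
	/* scan the live transaction list; a commit may race with the scan */
	if (!found) {
		/* re-read: the transaction may have committed meanwhile */
		if (transid > *last_committed)
			return -EINVAL;		/* genuinely never existed */
		return 0;			/* raced with commit: success */
	}
	return 0;	/* found: wait for it, then report success */
}

int main(void)
{
	unsigned long last = 5;

	return wait_for_commit(7, &last, 0) == -EINVAL ? 0 : 1;
}
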
6434 |
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c |
6435 |
+index d4a9431ec73c..57ee4c53b4f8 100644 |
6436 |
+--- a/fs/ecryptfs/inode.c |
6437 |
++++ b/fs/ecryptfs/inode.c |
6438 |
+@@ -1039,7 +1039,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, |
6439 |
+ } |
6440 |
+ |
6441 |
+ rc = vfs_setxattr(lower_dentry, name, value, size, flags); |
6442 |
+- if (!rc) |
6443 |
++ if (!rc && dentry->d_inode) |
6444 |
+ fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode); |
6445 |
+ out: |
6446 |
+ return rc; |
6447 |
+diff --git a/fs/namei.c b/fs/namei.c |
6448 |
+index a7b05bf82d31..3ddb044f3702 100644 |
6449 |
+--- a/fs/namei.c |
6450 |
++++ b/fs/namei.c |
6451 |
+@@ -3074,7 +3074,7 @@ opened: |
6452 |
+ error = open_check_o_direct(file); |
6453 |
+ if (error) |
6454 |
+ goto exit_fput; |
6455 |
+- error = ima_file_check(file, op->acc_mode); |
6456 |
++ error = ima_file_check(file, op->acc_mode, *opened); |
6457 |
+ if (error) |
6458 |
+ goto exit_fput; |
6459 |
+ |
6460 |
+diff --git a/fs/namespace.c b/fs/namespace.c |
6461 |
+index ef42d9bee212..7f67b463a5b4 100644 |
6462 |
+--- a/fs/namespace.c |
6463 |
++++ b/fs/namespace.c |
6464 |
+@@ -1356,6 +1356,8 @@ static int do_umount(struct mount *mnt, int flags) |
6465 |
+ * Special case for "unmounting" root ... |
6466 |
+ * we just try to remount it readonly. |
6467 |
+ */ |
6468 |
++ if (!capable(CAP_SYS_ADMIN)) |
6469 |
++ return -EPERM; |
6470 |
+ down_write(&sb->s_umount); |
6471 |
+ if (!(sb->s_flags & MS_RDONLY)) |
6472 |
+ retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); |
6473 |
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c |
6474 |
+index 6a4f3666e273..94088517039f 100644 |
6475 |
+--- a/fs/nfs/client.c |
6476 |
++++ b/fs/nfs/client.c |
6477 |
+@@ -1318,7 +1318,7 @@ static int nfs_server_list_show(struct seq_file *m, void *v) |
6478 |
+ */ |
6479 |
+ static int nfs_volume_list_open(struct inode *inode, struct file *file) |
6480 |
+ { |
6481 |
+- return seq_open_net(inode, file, &nfs_server_list_ops, |
6482 |
++ return seq_open_net(inode, file, &nfs_volume_list_ops, |
6483 |
+ sizeof(struct seq_net_private)); |
6484 |
+ } |
6485 |
+ |
6486 |
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c |
6487 |
+index 90978075f730..f59713e091a8 100644 |
6488 |
+--- a/fs/nfs/filelayout/filelayout.c |
6489 |
++++ b/fs/nfs/filelayout/filelayout.c |
6490 |
+@@ -1031,7 +1031,7 @@ filelayout_clear_request_commit(struct nfs_page *req, |
6491 |
+ } |
6492 |
+ out: |
6493 |
+ nfs_request_remove_commit_list(req, cinfo); |
6494 |
+- pnfs_put_lseg_async(freeme); |
6495 |
++ pnfs_put_lseg_locked(freeme); |
6496 |
+ } |
6497 |
+ |
6498 |
+ static void |
6499 |
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
6500 |
+index 6ca0c8e7a945..0422d77b73c7 100644 |
6501 |
+--- a/fs/nfs/nfs4proc.c |
6502 |
++++ b/fs/nfs/nfs4proc.c |
6503 |
+@@ -7353,7 +7353,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr |
6504 |
+ int ret = 0; |
6505 |
+ |
6506 |
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) |
6507 |
+- return 0; |
6508 |
++ return -EAGAIN; |
6509 |
+ task = _nfs41_proc_sequence(clp, cred, false); |
6510 |
+ if (IS_ERR(task)) |
6511 |
+ ret = PTR_ERR(task); |
6512 |
+diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c |
6513 |
+index 1720d32ffa54..e1ba58c3d1ad 100644 |
6514 |
+--- a/fs/nfs/nfs4renewd.c |
6515 |
++++ b/fs/nfs/nfs4renewd.c |
6516 |
+@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work) |
6517 |
+ } |
6518 |
+ nfs_expire_all_delegations(clp); |
6519 |
+ } else { |
6520 |
++ int ret; |
6521 |
++ |
6522 |
+ /* Queue an asynchronous RENEW. */ |
6523 |
+- ops->sched_state_renewal(clp, cred, renew_flags); |
6524 |
++ ret = ops->sched_state_renewal(clp, cred, renew_flags); |
6525 |
+ put_rpccred(cred); |
6526 |
+- goto out_exp; |
6527 |
++ switch (ret) { |
6528 |
++ default: |
6529 |
++ goto out_exp; |
6530 |
++ case -EAGAIN: |
6531 |
++ case -ENOMEM: |
6532 |
++ break; |
6533 |
++ } |
6534 |
+ } |
6535 |
+ } else { |
6536 |
+ dprintk("%s: failed to call renewd. Reason: lease not expired \n", |
6537 |
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
6538 |
+index 22fe35104c0c..5194933ed419 100644 |
6539 |
+--- a/fs/nfs/nfs4state.c |
6540 |
++++ b/fs/nfs/nfs4state.c |
6541 |
+@@ -1705,7 +1705,8 @@ restart: |
6542 |
+ if (status < 0) { |
6543 |
+ set_bit(ops->owner_flag_bit, &sp->so_flags); |
6544 |
+ nfs4_put_state_owner(sp); |
6545 |
+- return nfs4_recovery_handle_error(clp, status); |
6546 |
++ status = nfs4_recovery_handle_error(clp, status); |
6547 |
++ return (status != 0) ? status : -EAGAIN; |
6548 |
+ } |
6549 |
+ |
6550 |
+ nfs4_put_state_owner(sp); |
6551 |
+@@ -1714,7 +1715,7 @@ restart: |
6552 |
+ spin_unlock(&clp->cl_lock); |
6553 |
+ } |
6554 |
+ rcu_read_unlock(); |
6555 |
+- return status; |
6556 |
++ return 0; |
6557 |
+ } |
6558 |
+ |
6559 |
+ static int nfs4_check_lease(struct nfs_client *clp) |
6560 |
+@@ -1761,7 +1762,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) |
6561 |
+ break; |
6562 |
+ case -NFS4ERR_STALE_CLIENTID: |
6563 |
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); |
6564 |
+- nfs4_state_clear_reclaim_reboot(clp); |
6565 |
+ nfs4_state_start_reclaim_reboot(clp); |
6566 |
+ break; |
6567 |
+ case -NFS4ERR_CLID_INUSE: |
6568 |
+@@ -2345,6 +2345,7 @@ static void nfs4_state_manager(struct nfs_client *clp) |
6569 |
+ status = nfs4_check_lease(clp); |
6570 |
+ if (status < 0) |
6571 |
+ goto out_error; |
6572 |
++ continue; |
6573 |
+ } |
6574 |
+ |
6575 |
+ if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) { |
6576 |
+@@ -2366,14 +2367,11 @@ static void nfs4_state_manager(struct nfs_client *clp) |
6577 |
+ section = "reclaim reboot"; |
6578 |
+ status = nfs4_do_reclaim(clp, |
6579 |
+ clp->cl_mvops->reboot_recovery_ops); |
6580 |
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || |
6581 |
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) |
6582 |
+- continue; |
6583 |
+- nfs4_state_end_reclaim_reboot(clp); |
6584 |
+- if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) |
6585 |
++ if (status == -EAGAIN) |
6586 |
+ continue; |
6587 |
+ if (status < 0) |
6588 |
+ goto out_error; |
6589 |
++ nfs4_state_end_reclaim_reboot(clp); |
6590 |
+ } |
6591 |
+ |
6592 |
+ /* Now recover expired state... */ |
6593 |
+@@ -2381,9 +2379,7 @@ static void nfs4_state_manager(struct nfs_client *clp) |
6594 |
+ section = "reclaim nograce"; |
6595 |
+ status = nfs4_do_reclaim(clp, |
6596 |
+ clp->cl_mvops->nograce_recovery_ops); |
6597 |
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || |
6598 |
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) || |
6599 |
+- test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) |
6600 |
++ if (status == -EAGAIN) |
6601 |
+ continue; |
6602 |
+ if (status < 0) |
6603 |
+ goto out_error; |
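
The NFSv4 state-manager changes above make transient outcomes explicit: sched_state_renewal() and nfs4_do_reclaim() now return -EAGAIN (or -ENOMEM) for "state changed, run the loop again" instead of overloading 0 and other negatives, and the caller dispatches on that. A small sketch of the dispatch loop, with a stub standing in for the reclaim ops:

#include <errno.h>
#include <stdio.h>

static int do_reclaim(int transient)
{
	return transient ? -EAGAIN : 0;	/* -EAGAIN: state changed, rerun loop */
}

static void state_manager(void)
{
	for (int pass = 0; pass < 2; pass++) {
		int status = do_reclaim(pass == 0);

		if (status == -EAGAIN)	/* transient: take another pass */
			continue;
		if (status < 0) {
			printf("hard error %d\n", status);
			return;
		}
		printf("reclaim finished cleanly\n");
	}
}

int main(void)
{
	state_manager();
	return 0;
}
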
6604 |
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c |
6605 |
+index be7cbce6e4c7..9229d4780f87 100644 |
6606 |
+--- a/fs/nfs/pagelist.c |
6607 |
++++ b/fs/nfs/pagelist.c |
6608 |
+@@ -518,7 +518,8 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free); |
6609 |
+ */ |
6610 |
+ void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr) |
6611 |
+ { |
6612 |
+- put_nfs_open_context(hdr->args.context); |
6613 |
++ if (hdr->args.context) |
6614 |
++ put_nfs_open_context(hdr->args.context); |
6615 |
+ if (hdr->page_array.pagevec != hdr->page_array.page_array) |
6616 |
+ kfree(hdr->page_array.pagevec); |
6617 |
+ } |
6618 |
+@@ -743,12 +744,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, |
6619 |
+ nfs_list_remove_request(req); |
6620 |
+ nfs_list_add_request(req, &hdr->pages); |
6621 |
+ |
6622 |
+- if (WARN_ON_ONCE(pageused >= pagecount)) |
6623 |
+- return nfs_pgio_error(desc, hdr); |
6624 |
+- |
6625 |
+ if (!last_page || last_page != req->wb_page) { |
6626 |
+- *pages++ = last_page = req->wb_page; |
6627 |
+ pageused++; |
6628 |
++ if (pageused > pagecount) |
6629 |
++ break; |
6630 |
++ *pages++ = last_page = req->wb_page; |
6631 |
+ } |
6632 |
+ } |
6633 |
+ if (WARN_ON_ONCE(pageused != pagecount)) |
6634 |
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c |
6635 |
+index a3851debf8a2..5480720bdc0f 100644 |
6636 |
+--- a/fs/nfs/pnfs.c |
6637 |
++++ b/fs/nfs/pnfs.c |
6638 |
+@@ -361,22 +361,43 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) |
6639 |
+ } |
6640 |
+ EXPORT_SYMBOL_GPL(pnfs_put_lseg); |
6641 |
+ |
6642 |
+-static void pnfs_put_lseg_async_work(struct work_struct *work) |
6643 |
++static void pnfs_free_lseg_async_work(struct work_struct *work) |
6644 |
+ { |
6645 |
+ struct pnfs_layout_segment *lseg; |
6646 |
++ struct pnfs_layout_hdr *lo; |
6647 |
+ |
6648 |
+ lseg = container_of(work, struct pnfs_layout_segment, pls_work); |
6649 |
++ lo = lseg->pls_layout; |
6650 |
+ |
6651 |
+- pnfs_put_lseg(lseg); |
6652 |
++ pnfs_free_lseg(lseg); |
6653 |
++ pnfs_put_layout_hdr(lo); |
6654 |
+ } |
6655 |
+ |
6656 |
+-void |
6657 |
+-pnfs_put_lseg_async(struct pnfs_layout_segment *lseg) |
6658 |
++static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg) |
6659 |
+ { |
6660 |
+- INIT_WORK(&lseg->pls_work, pnfs_put_lseg_async_work); |
6661 |
++ INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work); |
6662 |
+ schedule_work(&lseg->pls_work); |
6663 |
+ } |
6664 |
+-EXPORT_SYMBOL_GPL(pnfs_put_lseg_async); |
6665 |
++ |
6666 |
++void |
6667 |
++pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg) |
6668 |
++{ |
6669 |
++ if (!lseg) |
6670 |
++ return; |
6671 |
++ |
6672 |
++ assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock); |
6673 |
++ |
6674 |
++ dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, |
6675 |
++ atomic_read(&lseg->pls_refcount), |
6676 |
++ test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); |
6677 |
++ if (atomic_dec_and_test(&lseg->pls_refcount)) { |
6678 |
++ struct pnfs_layout_hdr *lo = lseg->pls_layout; |
6679 |
++ pnfs_get_layout_hdr(lo); |
6680 |
++ pnfs_layout_remove_lseg(lo, lseg); |
6681 |
++ pnfs_free_lseg_async(lseg); |
6682 |
++ } |
6683 |
++} |
6684 |
++EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked); |
6685 |
+ |
6686 |
+ static u64 |
6687 |
+ end_offset(u64 start, u64 len) |
6688 |
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h |
6689 |
+index aca3dff5dae6..bc2db1c2a5ee 100644 |
6690 |
+--- a/fs/nfs/pnfs.h |
6691 |
++++ b/fs/nfs/pnfs.h |
6692 |
+@@ -183,7 +183,7 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); |
6693 |
+ /* pnfs.c */ |
6694 |
+ void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo); |
6695 |
+ void pnfs_put_lseg(struct pnfs_layout_segment *lseg); |
6696 |
+-void pnfs_put_lseg_async(struct pnfs_layout_segment *lseg); |
6697 |
++void pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg); |
6698 |
+ |
6699 |
+ void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32); |
6700 |
+ void unset_pnfs_layoutdriver(struct nfs_server *); |
6701 |
+@@ -422,10 +422,6 @@ static inline void pnfs_put_lseg(struct pnfs_layout_segment *lseg) |
6702 |
+ { |
6703 |
+ } |
6704 |
+ |
6705 |
+-static inline void pnfs_put_lseg_async(struct pnfs_layout_segment *lseg) |
6706 |
+-{ |
6707 |
+-} |
6708 |
+- |
6709 |
+ static inline int pnfs_return_layout(struct inode *ino) |
6710 |
+ { |
6711 |
+ return 0; |
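
pnfs_put_lseg_locked() above drops the last reference while the inode's i_lock is held, so the actual free is punted to a workqueue via pnfs_free_lseg_async(), with a pnfs_get_layout_hdr() reference keeping the containing layout alive until the worker runs. A userspace model of that defer-while-locked pattern, using a deferred list in place of the workqueue (the pnfs structures are reduced to a stub):

#include <stdio.h>
#include <stdlib.h>

struct seg { int refcount; struct seg *deferred_next; };

static struct seg *deferred;	/* stands in for the workqueue */

static void put_locked(struct seg *s)
{
	/* caller holds the lock: cannot sleep or do heavy teardown here */
	if (--s->refcount == 0) {
		s->deferred_next = deferred;	/* queue for later */
		deferred = s;
	}
}

static void run_deferred(void)	/* runs later, lock not held */
{
	while (deferred) {
		struct seg *s = deferred;

		deferred = s->deferred_next;
		free(s);	/* the expensive teardown happens here */
	}
}

int main(void)
{
	struct seg *s = calloc(1, sizeof(*s));

	s->refcount = 1;
	/* lock(); */ put_locked(s); /* unlock(); */
	run_deferred();
	puts("freed outside the lock");
	return 0;
}
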
6712 |
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c |
6713 |
+index b01f6e100ee8..353aac85a3e3 100644 |
6714 |
+--- a/fs/nfsd/nfs4xdr.c |
6715 |
++++ b/fs/nfsd/nfs4xdr.c |
6716 |
+@@ -1670,6 +1670,14 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) |
6717 |
+ readbytes += nfsd4_max_reply(argp->rqstp, op); |
6718 |
+ } else |
6719 |
+ max_reply += nfsd4_max_reply(argp->rqstp, op); |
6720 |
++ /* |
6721 |
++ * OP_LOCK may return a conflicting lock. (Special case |
6722 |
++ * because it will just skip encoding this if it runs |
6723 |
++ * out of xdr buffer space, and it is the only operation |
6724 |
++ * that behaves this way.) |
6725 |
++ */ |
6726 |
++ if (op->opnum == OP_LOCK) |
6727 |
++ max_reply += NFS4_OPAQUE_LIMIT; |
6728 |
+ |
6729 |
+ if (op->status) { |
6730 |
+ argp->opcnt = i+1; |
6731 |
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c |
6732 |
+index f501a9b5c9df..6ab077bb897e 100644 |
6733 |
+--- a/fs/nfsd/vfs.c |
6734 |
++++ b/fs/nfsd/vfs.c |
6735 |
+@@ -708,7 +708,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, |
6736 |
+ host_err = PTR_ERR(*filp); |
6737 |
+ *filp = NULL; |
6738 |
+ } else { |
6739 |
+- host_err = ima_file_check(*filp, may_flags); |
6740 |
++ host_err = ima_file_check(*filp, may_flags, 0); |
6741 |
+ |
6742 |
+ if (may_flags & NFSD_MAY_64BIT_COOKIE) |
6743 |
+ (*filp)->f_mode |= FMODE_64BITHASH; |
6744 |
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c |
6745 |
+index b13992a41bd9..c991616acca9 100644 |
6746 |
+--- a/fs/notify/fanotify/fanotify_user.c |
6747 |
++++ b/fs/notify/fanotify/fanotify_user.c |
6748 |
+@@ -78,7 +78,7 @@ static int create_fd(struct fsnotify_group *group, |
6749 |
+ |
6750 |
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event); |
6751 |
+ |
6752 |
+- client_fd = get_unused_fd(); |
6753 |
++ client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); |
6754 |
+ if (client_fd < 0) |
6755 |
+ return client_fd; |
6756 |
+ |
6757 |
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c |
6758 |
+index 08598843288f..c9b4df5810d5 100644 |
6759 |
+--- a/fs/udf/inode.c |
6760 |
++++ b/fs/udf/inode.c |
6761 |
+@@ -1277,7 +1277,7 @@ update_time: |
6762 |
+ */ |
6763 |
+ #define UDF_MAX_ICB_NESTING 1024 |
6764 |
+ |
6765 |
+-static int udf_read_inode(struct inode *inode) |
6766 |
++static int udf_read_inode(struct inode *inode, bool hidden_inode) |
6767 |
+ { |
6768 |
+ struct buffer_head *bh = NULL; |
6769 |
+ struct fileEntry *fe; |
6770 |
+@@ -1436,8 +1436,11 @@ reread: |
6771 |
+ |
6772 |
+ link_count = le16_to_cpu(fe->fileLinkCount); |
6773 |
+ if (!link_count) { |
6774 |
+- ret = -ESTALE; |
6775 |
+- goto out; |
6776 |
++ if (!hidden_inode) { |
6777 |
++ ret = -ESTALE; |
6778 |
++ goto out; |
6779 |
++ } |
6780 |
++ link_count = 1; |
6781 |
+ } |
6782 |
+ set_nlink(inode, link_count); |
6783 |
+ |
6784 |
+@@ -1826,7 +1829,8 @@ out: |
6785 |
+ return err; |
6786 |
+ } |
6787 |
+ |
6788 |
+-struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino) |
6789 |
++struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino, |
6790 |
++ bool hidden_inode) |
6791 |
+ { |
6792 |
+ unsigned long block = udf_get_lb_pblock(sb, ino, 0); |
6793 |
+ struct inode *inode = iget_locked(sb, block); |
6794 |
+@@ -1839,7 +1843,7 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino) |
6795 |
+ return inode; |
6796 |
+ |
6797 |
+ memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); |
6798 |
+- err = udf_read_inode(inode); |
6799 |
++ err = udf_read_inode(inode, hidden_inode); |
6800 |
+ if (err < 0) { |
6801 |
+ iget_failed(inode); |
6802 |
+ return ERR_PTR(err); |
6803 |
+diff --git a/fs/udf/super.c b/fs/udf/super.c |
6804 |
+index 5401fc33f5cc..e229315bbf7a 100644 |
6805 |
+--- a/fs/udf/super.c |
6806 |
++++ b/fs/udf/super.c |
6807 |
+@@ -959,7 +959,7 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb, |
6808 |
+ addr.logicalBlockNum = meta_file_loc; |
6809 |
+ addr.partitionReferenceNum = partition_num; |
6810 |
+ |
6811 |
+- metadata_fe = udf_iget(sb, &addr); |
6812 |
++ metadata_fe = udf_iget_special(sb, &addr); |
6813 |
+ |
6814 |
+ if (IS_ERR(metadata_fe)) { |
6815 |
+ udf_warn(sb, "metadata inode efe not found\n"); |
6816 |
+@@ -1020,7 +1020,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition) |
6817 |
+ udf_debug("Bitmap file location: block = %d part = %d\n", |
6818 |
+ addr.logicalBlockNum, addr.partitionReferenceNum); |
6819 |
+ |
6820 |
+- fe = udf_iget(sb, &addr); |
6821 |
++ fe = udf_iget_special(sb, &addr); |
6822 |
+ if (IS_ERR(fe)) { |
6823 |
+ if (sb->s_flags & MS_RDONLY) |
6824 |
+ udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); |
6825 |
+@@ -1119,7 +1119,7 @@ static int udf_fill_partdesc_info(struct super_block *sb, |
6826 |
+ }; |
6827 |
+ struct inode *inode; |
6828 |
+ |
6829 |
+- inode = udf_iget(sb, &loc); |
6830 |
++ inode = udf_iget_special(sb, &loc); |
6831 |
+ if (IS_ERR(inode)) { |
6832 |
+ udf_debug("cannot load unallocSpaceTable (part %d)\n", |
6833 |
+ p_index); |
6834 |
+@@ -1154,7 +1154,7 @@ static int udf_fill_partdesc_info(struct super_block *sb, |
6835 |
+ }; |
6836 |
+ struct inode *inode; |
6837 |
+ |
6838 |
+- inode = udf_iget(sb, &loc); |
6839 |
++ inode = udf_iget_special(sb, &loc); |
6840 |
+ if (IS_ERR(inode)) { |
6841 |
+ udf_debug("cannot load freedSpaceTable (part %d)\n", |
6842 |
+ p_index); |
6843 |
+@@ -1198,7 +1198,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index, |
6844 |
+ vat_block >= map->s_partition_root && |
6845 |
+ vat_block >= start_block - 3; vat_block--) { |
6846 |
+ ino.logicalBlockNum = vat_block - map->s_partition_root; |
6847 |
+- inode = udf_iget(sb, &ino); |
6848 |
++ inode = udf_iget_special(sb, &ino); |
6849 |
+ if (!IS_ERR(inode)) { |
6850 |
+ sbi->s_vat_inode = inode; |
6851 |
+ break; |
6852 |
+diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h |
6853 |
+index 742557be9936..1cc3c993ebd0 100644 |
6854 |
+--- a/fs/udf/udfdecl.h |
6855 |
++++ b/fs/udf/udfdecl.h |
6856 |
+@@ -138,7 +138,18 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, |
6857 |
+ /* file.c */ |
6858 |
+ extern long udf_ioctl(struct file *, unsigned int, unsigned long); |
6859 |
+ /* inode.c */ |
6860 |
+-extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); |
6861 |
++extern struct inode *__udf_iget(struct super_block *, struct kernel_lb_addr *, |
6862 |
++ bool hidden_inode); |
6863 |
++static inline struct inode *udf_iget_special(struct super_block *sb, |
6864 |
++ struct kernel_lb_addr *ino) |
6865 |
++{ |
6866 |
++ return __udf_iget(sb, ino, true); |
6867 |
++} |
6868 |
++static inline struct inode *udf_iget(struct super_block *sb, |
6869 |
++ struct kernel_lb_addr *ino) |
6870 |
++{ |
6871 |
++ return __udf_iget(sb, ino, false); |
6872 |
++} |
6873 |
+ extern int udf_expand_file_adinicb(struct inode *); |
6874 |
+ extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *); |
6875 |
+ extern struct buffer_head *udf_bread(struct inode *, int, int, int *); |
6876 |
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c |
6877 |
+index b984647c24db..2f502537a39c 100644 |
6878 |
+--- a/fs/xfs/xfs_aops.c |
6879 |
++++ b/fs/xfs/xfs_aops.c |
6880 |
+@@ -434,10 +434,22 @@ xfs_start_page_writeback( |
6881 |
+ { |
6882 |
+ ASSERT(PageLocked(page)); |
6883 |
+ ASSERT(!PageWriteback(page)); |
6884 |
+- if (clear_dirty) |
6885 |
++ |
6886 |
++ /* |
6887 |
++ * if the page was not fully cleaned, we need to ensure that the higher |
6888 |
++ * layers come back to it correctly. That means we need to keep the page |
6889 |
++ * dirty, and for WB_SYNC_ALL writeback we need to ensure the |
6890 |
++ * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to |
6891 |
++ * write this page in this writeback sweep will be made. |
6892 |
++ */ |
6893 |
++ if (clear_dirty) { |
6894 |
+ clear_page_dirty_for_io(page); |
6895 |
+- set_page_writeback(page); |
6896 |
++ set_page_writeback(page); |
6897 |
++ } else |
6898 |
++ set_page_writeback_keepwrite(page); |
6899 |
++ |
6900 |
+ unlock_page(page); |
6901 |
++ |
6902 |
+ /* If no buffers on the page are to be written, finish it here */ |
6903 |
+ if (!buffers) |
6904 |
+ end_page_writeback(page); |
6905 |
+diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c |
6906 |
+index f71be9c68017..f1deb961a296 100644 |
6907 |
+--- a/fs/xfs/xfs_itable.c |
6908 |
++++ b/fs/xfs/xfs_itable.c |
6909 |
+@@ -639,7 +639,8 @@ next_ag: |
6910 |
+ xfs_buf_relse(agbp); |
6911 |
+ agbp = NULL; |
6912 |
+ agino = 0; |
6913 |
+- } while (++agno < mp->m_sb.sb_agcount); |
6914 |
++ agno++; |
6915 |
++ } while (agno < mp->m_sb.sb_agcount); |
6916 |
+ |
6917 |
+ if (!error) { |
6918 |
+ if (bufidx) { |
6919 |
+diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h |
6920 |
+index 089743ade734..38aa07d5b81c 100644 |
6921 |
+--- a/include/linux/balloon_compaction.h |
6922 |
++++ b/include/linux/balloon_compaction.h |
6923 |
+@@ -27,10 +27,13 @@ |
6924 |
+ * counter raised only while it is under our special handling; |
6925 |
+ * |
6926 |
+ * iii. after the lockless scan step have selected a potential balloon page for |
6927 |
+- * isolation, re-test the page->mapping flags and the page ref counter |
6928 |
++ * isolation, re-test the PageBalloon mark and the PagePrivate flag |
6929 |
+ * under the proper page lock, to ensure isolating a valid balloon page |
6930 |
+ * (not yet isolated, nor under release procedure) |
6931 |
+ * |
6932 |
++ * iv. isolation or dequeueing procedure must clear PagePrivate flag under |
6933 |
++ * page lock together with removing page from balloon device page list. |
6934 |
++ * |
6935 |
+ * The functions provided by this interface are placed to help on coping with |
6936 |
+ * the aforementioned balloon page corner case, as well as to ensure the simple |
6937 |
+ * set of exposed rules are satisfied while we are dealing with balloon pages |
6938 |
+@@ -71,28 +74,6 @@ static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) |
6939 |
+ kfree(b_dev_info); |
6940 |
+ } |
6941 |
+ |
6942 |
+-/* |
6943 |
+- * balloon_page_free - release a balloon page back to the page free lists |
6944 |
+- * @page: ballooned page to be set free |
6945 |
+- * |
6946 |
+- * This function must be used to properly set free an isolated/dequeued balloon |
6947 |
+- * page at the end of a sucessful page migration, or at the balloon driver's |
6948 |
+- * page release procedure. |
6949 |
+- */ |
6950 |
+-static inline void balloon_page_free(struct page *page) |
6951 |
+-{ |
6952 |
+- /* |
6953 |
+- * Balloon pages always get an extra refcount before being isolated |
6954 |
+- * and before being dequeued to help on sorting out fortuite colisions |
6955 |
+- * between a thread attempting to isolate and another thread attempting |
6956 |
+- * to release the very same balloon page. |
6957 |
+- * |
6958 |
+- * Before we handle the page back to Buddy, lets drop its extra refcnt. |
6959 |
+- */ |
6960 |
+- put_page(page); |
6961 |
+- __free_page(page); |
6962 |
+-} |
6963 |
+- |
6964 |
+ #ifdef CONFIG_BALLOON_COMPACTION |
6965 |
+ extern bool balloon_page_isolate(struct page *page); |
6966 |
+ extern void balloon_page_putback(struct page *page); |
6967 |
+@@ -108,74 +89,33 @@ static inline void balloon_mapping_free(struct address_space *balloon_mapping) |
6968 |
+ } |
6969 |
+ |
6970 |
+ /* |
6971 |
+- * page_flags_cleared - helper to perform balloon @page ->flags tests. |
6972 |
+- * |
6973 |
+- * As balloon pages are obtained from buddy and we do not play with page->flags |
6974 |
+- * at driver level (exception made when we get the page lock for compaction), |
6975 |
+- * we can safely identify a ballooned page by checking if the |
6976 |
+- * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also |
6977 |
+- * helps us skip ballooned pages that are locked for compaction or release, thus |
6978 |
+- * mitigating their racy check at balloon_page_movable() |
6979 |
+- */ |
6980 |
+-static inline bool page_flags_cleared(struct page *page) |
6981 |
+-{ |
6982 |
+- return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP); |
6983 |
+-} |
6984 |
+- |
6985 |
+-/* |
6986 |
+- * __is_movable_balloon_page - helper to perform @page mapping->flags tests |
6987 |
++ * __is_movable_balloon_page - helper to perform @page PageBalloon tests |
6988 |
+ */ |
6989 |
+ static inline bool __is_movable_balloon_page(struct page *page) |
6990 |
+ { |
6991 |
+- struct address_space *mapping = page->mapping; |
6992 |
+- return mapping_balloon(mapping); |
6993 |
++ return PageBalloon(page); |
6994 |
+ } |
6995 |
+ |
6996 |
+ /* |
6997 |
+- * balloon_page_movable - test page->mapping->flags to identify balloon pages |
6998 |
+- * that can be moved by compaction/migration. |
6999 |
+- * |
7000 |
+- * This function is used at core compaction's page isolation scheme, therefore |
7001 |
+- * most pages exposed to it are not enlisted as balloon pages and so, to avoid |
7002 |
+- * undesired side effects like racing against __free_pages(), we cannot afford |
7003 |
+- * holding the page locked while testing page->mapping->flags here. |
7004 |
++ * balloon_page_movable - test PageBalloon to identify balloon pages |
7005 |
++ * and PagePrivate to check that the page is not |
7006 |
++ * isolated and can be moved by compaction/migration. |
7007 |
+ * |
7008 |
+ * As we might return false positives in the case of a balloon page being just |
7009 |
+- * released under us, the page->mapping->flags need to be re-tested later, |
7010 |
+- * under the proper page lock, at the functions that will be coping with the |
7011 |
+- * balloon page case. |
7012 |
++ * released under us, this need to be re-tested later, under the page lock. |
7013 |
+ */ |
7014 |
+ static inline bool balloon_page_movable(struct page *page) |
7015 |
+ { |
7016 |
+- /* |
7017 |
+- * Before dereferencing and testing mapping->flags, let's make sure |
7018 |
+- * this is not a page that uses ->mapping in a different way |
7019 |
+- */ |
7020 |
+- if (page_flags_cleared(page) && !page_mapped(page) && |
7021 |
+- page_count(page) == 1) |
7022 |
+- return __is_movable_balloon_page(page); |
7023 |
+- |
7024 |
+- return false; |
7025 |
++ return PageBalloon(page) && PagePrivate(page); |
7026 |
+ } |
7027 |
+ |
7028 |
+ /* |
7029 |
+ * isolated_balloon_page - identify an isolated balloon page on private |
7030 |
+ * compaction/migration page lists. |
7031 |
+- * |
7032 |
+- * After a compaction thread isolates a balloon page for migration, it raises |
7033 |
+- * the page refcount to prevent concurrent compaction threads from re-isolating |
7034 |
+- * the same page. For that reason putback_movable_pages(), or other routines |
7035 |
+- * that need to identify isolated balloon pages on private pagelists, cannot |
7036 |
+- * rely on balloon_page_movable() to accomplish the task. |
7037 |
+ */ |
7038 |
+ static inline bool isolated_balloon_page(struct page *page) |
7039 |
+ { |
7040 |
+- /* Already isolated balloon pages, by default, have a raised refcount */ |
7041 |
+- if (page_flags_cleared(page) && !page_mapped(page) && |
7042 |
+- page_count(page) >= 2) |
7043 |
+- return __is_movable_balloon_page(page); |
7044 |
+- |
7045 |
+- return false; |
7046 |
++ return PageBalloon(page); |
7047 |
+ } |
7048 |
+ |
7049 |
+ /* |
7050 |
+@@ -192,6 +132,8 @@ static inline void balloon_page_insert(struct page *page, |
7051 |
+ struct address_space *mapping, |
7052 |
+ struct list_head *head) |
7053 |
+ { |
7054 |
++ __SetPageBalloon(page); |
7055 |
++ SetPagePrivate(page); |
7056 |
+ page->mapping = mapping; |
7057 |
+ list_add(&page->lru, head); |
7058 |
+ } |
7059 |
+@@ -206,8 +148,12 @@ static inline void balloon_page_insert(struct page *page, |
7060 |
+ */ |
7061 |
+ static inline void balloon_page_delete(struct page *page) |
7062 |
+ { |
7063 |
++ __ClearPageBalloon(page); |
7064 |
+ page->mapping = NULL; |
7065 |
+- list_del(&page->lru); |
7066 |
++ if (PagePrivate(page)) { |
7067 |
++ ClearPagePrivate(page); |
7068 |
++ list_del(&page->lru); |
7069 |
++ } |
7070 |
+ } |
7071 |
+ |
7072 |
+ /* |
7073 |
+@@ -258,6 +204,11 @@ static inline void balloon_page_delete(struct page *page) |
7074 |
+ list_del(&page->lru); |
7075 |
+ } |
7076 |
+ |
7077 |
++static inline bool __is_movable_balloon_page(struct page *page) |
7078 |
++{ |
7079 |
++ return false; |
7080 |
++} |
7081 |
++ |
7082 |
+ static inline bool balloon_page_movable(struct page *page) |
7083 |
+ { |
7084 |
+ return false; |
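
The balloon_compaction.h rework above replaces the fragile "all prep flags clear plus refcount heuristics" with two explicit bits: PageBalloon marks a balloon page for its whole life, and PagePrivate is set only while the page sits on the balloon device's list (isolation and dequeue clear it under the page lock). The predicates then reduce to flag tests, modeled here with plain bitflags rather than real page flags:

#include <stdbool.h>
#include <stdio.h>

enum { PG_balloon = 1 << 0, PG_private = 1 << 1 };
struct page { unsigned flags; };

static bool balloon_page_movable(struct page *p)
{
	/* enlisted on the balloon's page list and not yet isolated */
	return (p->flags & PG_balloon) && (p->flags & PG_private);
}

static bool isolated_balloon_page(struct page *p)
{
	/* on compaction's private lists, balloon membership is enough */
	return p->flags & PG_balloon;
}

int main(void)
{
	struct page p = { .flags = PG_balloon | PG_private };

	printf("movable=%d isolated=%d\n",
	       balloon_page_movable(&p), isolated_balloon_page(&p));
	p.flags &= ~PG_private;		/* isolation clears PagePrivate */
	printf("movable=%d isolated=%d\n",
	       balloon_page_movable(&p), isolated_balloon_page(&p));
	return 0;
}
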
7085 |
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+new file mode 100644
+index 000000000000..cdd1cc202d51
+--- /dev/null
++++ b/include/linux/compiler-gcc5.h
+@@ -0,0 +1,66 @@
++#ifndef __LINUX_COMPILER_H
++#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
++#endif
++
++#define __used __attribute__((__used__))
++#define __must_check __attribute__((warn_unused_result))
++#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
++
++/* Mark functions as cold. gcc will assume any path leading to a call
++ to them will be unlikely. This means a lot of manual unlikely()s
++ are unnecessary now for any paths leading to the usual suspects
++ like BUG(), printk(), panic() etc. [but let's keep them for now for
++ older compilers]
++
++ Early snapshots of gcc 4.3 don't support this and we can't detect this
++ in the preprocessor, but we can live with this because they're unreleased.
++ Maketime probing would be overkill here.
++
++ gcc also has a __attribute__((__hot__)) to move hot functions into
++ a special section, but I don't see any sense in this right now in
++ the kernel context */
++#define __cold __attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++
++/*
++ * Mark a position in code as unreachable. This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased. Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone __attribute__((__noclone__))
++
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible __attribute__((externally_visible))
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#define __HAVE_BUILTIN_BSWAP16__
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git a/include/linux/ima.h b/include/linux/ima.h
+index 7cf5e9b32550..120ccc53fcb7 100644
+--- a/include/linux/ima.h
++++ b/include/linux/ima.h
+@@ -15,7 +15,7 @@ struct linux_binprm;
+
+ #ifdef CONFIG_IMA
+ extern int ima_bprm_check(struct linux_binprm *bprm);
+-extern int ima_file_check(struct file *file, int mask);
++extern int ima_file_check(struct file *file, int mask, int opened);
+ extern void ima_file_free(struct file *file);
+ extern int ima_file_mmap(struct file *file, unsigned long prot);
+ extern int ima_module_check(struct file *file);
+@@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
+ return 0;
+ }
+
+-static inline int ima_file_check(struct file *file, int mask)
++static inline int ima_file_check(struct file *file, int mask, int opened)
+ {
+ return 0;
+ }
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index a2901c414664..b33347f4e4b7 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private);
+ * Return values from addresss_space_operations.migratepage():
+ * - negative errno on page migration failure;
+ * - zero on page migration success;
+- *
+- * The balloon page migration introduces this special case where a 'distinct'
+- * return code is used to flag a successful page migration to unmap_and_move().
+- * This approach is necessary because page migration can race against balloon
+- * deflation procedure, and for such case we could introduce a nasty page leak
+- * if a successfully migrated balloon page gets released concurrently with
+- * migration's unmap_and_move() wrap-up steps.
+ */
+ #define MIGRATEPAGE_SUCCESS 0
+-#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page
+- * sucessful migration case.
+- */
++
+ enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 8981cc882ed2..16e6f1effef8 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -553,6 +553,25 @@ static inline void __ClearPageBuddy(struct page *page)
+ atomic_set(&page->_mapcount, -1);
+ }
+
++#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
++
++static inline int PageBalloon(struct page *page)
++{
++ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
++}
++
++static inline void __SetPageBalloon(struct page *page)
++{
++ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
++ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
++}
++
++static inline void __ClearPageBalloon(struct page *page)
++{
++ VM_BUG_ON_PAGE(!PageBalloon(page), page);
++ atomic_set(&page->_mapcount, -1);
++}
++
+ void put_page(struct page *page);
+ void put_pages_list(struct list_head *pages);
+
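The PageBalloon() helpers above reuse page->_mapcount, which is -1 for any page without mappings, as a type tag: a reserved negative value marks the page as ballooned, mirroring the existing PageBuddy() convention visible in the hunk's context. A standalone model of the sentinel trick (illustrative only; the demo_* names are invented for this sketch):

#include <assert.h>

#define DEMO_BALLOON_MAPCOUNT_VALUE (-256)

struct demo_page { int mapcount; };	/* -1 plays the role of _mapcount == -1 */

static int demo_PageBalloon(const struct demo_page *p)
{
	return p->mapcount == DEMO_BALLOON_MAPCOUNT_VALUE;
}

static void demo_SetPageBalloon(struct demo_page *p)
{
	assert(p->mapcount == -1);	/* mirrors the VM_BUG_ON_PAGE check */
	p->mapcount = DEMO_BALLOON_MAPCOUNT_VALUE;
}

static void demo_ClearPageBalloon(struct demo_page *p)
{
	assert(demo_PageBalloon(p));
	p->mapcount = -1;
}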
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 6ed0bb73a864..4e82195b1695 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2557,6 +2557,7 @@
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
+ #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+ #define PCI_DEVICE_ID_INTEL_I960 0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index b867a4dab38a..2b1d9e974382 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1934,11 +1934,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+
+-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
++/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
++ * __GFP_FS is also cleared as it implies __GFP_IO.
++ */
+ static inline gfp_t memalloc_noio_flags(gfp_t flags)
+ {
+ if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+- flags &= ~__GFP_IO;
++ flags &= ~(__GFP_IO | __GFP_FS);
+ return flags;
+ }
+
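The sched.h hunk widens the mask cleared under PF_MEMALLOC_NOIO from __GFP_IO alone to __GFP_IO | __GFP_FS, on the reasoning stated in the new comment that filesystem activity implies I/O. A standalone model with made-up flag values (illustrative only, not the real GFP bit definitions):

#define DEMO_GFP_IO 0x40u
#define DEMO_GFP_FS 0x80u

static unsigned int demo_memalloc_noio_flags(unsigned int flags, int noio_set)
{
	if (noio_set)	/* models PF_MEMALLOC_NOIO being set in current->flags */
		flags &= ~(DEMO_GFP_IO | DEMO_GFP_FS);
	return flags;
}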
+diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
+index 78e4a86030dd..0a8e6badb29b 100644
+--- a/include/uapi/linux/hyperv.h
++++ b/include/uapi/linux/hyperv.h
+@@ -137,7 +137,7 @@ struct hv_do_fcopy {
+ __u64 offset;
+ __u32 size;
+ __u8 data[DATA_FRAGMENT];
+-};
++} __attribute__((packed));
+
+ /*
+ * An implementation of HyperV key value pair (KVP) functionality for Linux.
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 815d7af2ffe8..f3a3a071283c 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -343,6 +343,8 @@ static void get_futex_key_refs(union futex_key *key)
+ case FUT_OFF_MMSHARED:
+ futex_get_mm(key); /* implies MB (B) */
+ break;
++ default:
++ smp_mb(); /* explicit MB (B) */
+ }
+ }
+
+diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
+index 8563081e8da3..a1c387f6afba 100644
+--- a/lib/lzo/lzo1x_decompress_safe.c
++++ b/lib/lzo/lzo1x_decompress_safe.c
+@@ -19,31 +19,21 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+
+-#define HAVE_IP(t, x) \
+- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
++#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
++#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
++#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
++#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
++#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+
+-#define HAVE_OP(t, x) \
+- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
+-
+-#define NEED_IP(t, x) \
+- do { \
+- if (!HAVE_IP(t, x)) \
+- goto input_overrun; \
+- } while (0)
+-
+-#define NEED_OP(t, x) \
+- do { \
+- if (!HAVE_OP(t, x)) \
+- goto output_overrun; \
+- } while (0)
+-
+-#define TEST_LB(m_pos) \
+- do { \
+- if ((m_pos) < out) \
+- goto lookbehind_overrun; \
+- } while (0)
++/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
++ * count without overflowing an integer. The multiply will overflow when
++ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
++ * depending on the base count. Since the base count is taken from a u8
++ * and a few bits, it is safe to assume that it will always be lower than
++ * or equal to 2*255, thus we can always prevent any overflow by accepting
++ * two less 255 steps. See Documentation/lzo.txt for more information.
++ */
++#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
+
+ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 15 + *ip++;
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 15 + *ip++;
+ }
+ t += 3;
+ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
++ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+@@ -101,8 +98,8 @@ copy_literal_run:
+ } else
+ #endif
+ {
+- NEED_OP(t, 0);
+- NEED_IP(t, 3);
++ NEED_OP(t);
++ NEED_IP(t + 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+@@ -115,7 +112,7 @@ copy_literal_run:
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+- NEED_OP(2, 0);
++ NEED_OP(2);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -136,13 +133,20 @@ copy_literal_run:
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 31 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 31 + *ip++;
++ NEED_IP(2);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+@@ -154,13 +158,20 @@ copy_literal_run:
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 7 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 7 + *ip++;
++ NEED_IP(2);
+ }
+ next = get_unaligned_le16(ip);
+ ip += 2;
+@@ -174,7 +185,7 @@ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+- if (likely(HAVE_OP(t, 15))) {
++ if (likely(HAVE_OP(t + 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+@@ -184,7 +195,7 @@ copy_literal_run:
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+- if (HAVE_IP(6, 0)) {
++ if (HAVE_IP(6)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+@@ -192,7 +203,7 @@ copy_literal_run:
+ continue;
+ }
+ } else {
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+@@ -201,7 +212,7 @@ copy_literal_run:
+ #endif
+ {
+ unsigned char *oe = op + t;
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -214,15 +225,15 @@ match_next:
+ state = next;
+ t = next;
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
++ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+ #endif
+ {
+- NEED_IP(t, 3);
+- NEED_OP(t, 0);
++ NEED_IP(t + 3);
++ NEED_OP(t);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
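The decompressor change above fixes a run-length decoding overflow: the old loop added 255 per zero byte, so a long enough run of zeros could wrap the counter and defeat the NEED_IP/NEED_OP bounds checks. The new code counts the zero bytes first and rejects counts above MAX_255_COUNT before multiplying, where (offset << 8) - offset equals offset * 255. A standalone model of the safe decode step (illustrative only; it omits the per-byte input bounds check the real code keeps, and demo_* names are invented):

#include <stddef.h>

#define DEMO_MAX_255_COUNT ((((size_t)~0) / 255) - 2)

/* Returns base plus the decoded run length, or 0 on overflow. */
static size_t demo_decode_run(const unsigned char **ipp, size_t base)
{
	const unsigned char *ip = *ipp;
	const unsigned char *ip_last = ip;
	size_t zeros;

	while (*ip == 0)	/* the real code re-checks bounds each byte */
		ip++;
	zeros = ip - ip_last;
	if (zeros > DEMO_MAX_255_COUNT)
		return 0;	/* zeros * 255 would overflow size_t */
	*ipp = ip + 1;
	/* (zeros << 8) - zeros == zeros * 255 */
	return (zeros << 8) - zeros + base + ip[0];
}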
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index 6e45a5074bf0..52abeeb3cb9d 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -93,17 +93,12 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ * to be released by the balloon driver.
+ */
+ if (trylock_page(page)) {
++ if (!PagePrivate(page)) {
++ /* raced with isolation */
++ unlock_page(page);
++ continue;
++ }
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+- /*
+- * Raise the page refcount here to prevent any wrong
+- * attempt to isolate this page, in case of coliding
+- * with balloon_page_isolate() just after we release
+- * the page lock.
+- *
+- * balloon_page_free() will take care of dropping
+- * this extra refcount later.
+- */
+- get_page(page);
+ balloon_page_delete(page);
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ unlock_page(page);
+@@ -187,7 +182,9 @@ static inline void __isolate_balloon_page(struct page *page)
+ {
+ struct balloon_dev_info *b_dev_info = page->mapping->private_data;
+ unsigned long flags;
++
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
++ ClearPagePrivate(page);
+ list_del(&page->lru);
+ b_dev_info->isolated_pages++;
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+@@ -197,7 +194,9 @@ static inline void __putback_balloon_page(struct page *page)
+ {
+ struct balloon_dev_info *b_dev_info = page->mapping->private_data;
+ unsigned long flags;
++
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
++ SetPagePrivate(page);
+ list_add(&page->lru, &b_dev_info->pages);
+ b_dev_info->isolated_pages--;
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+@@ -235,12 +234,11 @@ bool balloon_page_isolate(struct page *page)
+ */
+ if (likely(trylock_page(page))) {
+ /*
+- * A ballooned page, by default, has just one refcount.
++ * A ballooned page, by default, has PagePrivate set.
+ * Prevent concurrent compaction threads from isolating
+- * an already isolated balloon page by refcount check.
++ * an already isolated balloon page by clearing it.
+ */
+- if (__is_movable_balloon_page(page) &&
+- page_count(page) == 2) {
++ if (balloon_page_movable(page)) {
+ __isolate_balloon_page(page);
+ unlock_page(page);
+ return true;
+diff --git a/mm/cma.c b/mm/cma.c
+index c17751c0dcaf..0ab564623ea8 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -57,7 +57,9 @@ unsigned long cma_get_size(struct cma *cma)
+
+ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+ {
+- return (1UL << (align_order >> cma->order_per_bit)) - 1;
++ if (align_order <= cma->order_per_bit)
++ return 0;
++ return (1UL << (align_order - cma->order_per_bit)) - 1;
+ }
+
+ static unsigned long cma_bitmap_maxno(struct cma *cma)
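The mm/cma.c fix above corrects an operator: the alignment mask must span 1 << (align_order - order_per_bit) bitmap slots, not 1 << (align_order >> order_per_bit). For example, with align_order = 9 and order_per_bit = 1 the correct mask is (1 << 8) - 1 = 0xff, while the old expression gave (1 << 4) - 1 = 0xf. A standalone model of the fixed helper (illustrative; the demo_ name is invented):

static unsigned long demo_bitmap_aligned_mask(int align_order, int order_per_bit)
{
	if (align_order <= order_per_bit)
		return 0;	/* alignment already finer than one bitmap slot */
	return (1UL << (align_order - order_per_bit)) - 1;
}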
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 21bf292b642a..0653f5f73bfa 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -597,7 +597,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ */
+ if (!PageLRU(page)) {
+ if (unlikely(balloon_page_movable(page))) {
+- if (locked && balloon_page_isolate(page)) {
++ if (balloon_page_isolate(page)) {
+ /* Successfully isolated */
+ goto isolate_success;
+ }
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 2740360cd216..01439953abf5 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -876,7 +876,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+ }
+ }
+
+- if (unlikely(balloon_page_movable(page))) {
++ if (unlikely(isolated_balloon_page(page))) {
+ /*
+ * A ballooned page does not need any special attention from
+ * physical to virtual reverse mapping procedures.
+@@ -955,17 +955,6 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
+
+ rc = __unmap_and_move(page, newpage, force, mode);
+
+- if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+- /*
+- * A ballooned page has been migrated already.
+- * Now, it's the time to wrap-up counters,
+- * handle the page back to Buddy and return.
+- */
+- dec_zone_page_state(page, NR_ISOLATED_ANON +
+- page_is_file_cache(page));
+- balloon_page_free(page);
+- return MIGRATEPAGE_SUCCESS;
+- }
+ out:
+ if (rc != -EAGAIN) {
+ /*
+@@ -988,6 +977,9 @@ out:
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+ ClearPageSwapBacked(newpage);
+ put_new_page(newpage, private);
++ } else if (unlikely(__is_movable_balloon_page(newpage))) {
++ /* drop our reference, page already in the balloon */
++ put_page(newpage);
+ } else
+ putback_lru_page(newpage);
+
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 206b65ccd5b8..075f20d050d6 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -39,6 +39,7 @@ static struct dentry *lowpan_control_debugfs;
+
+ struct skb_cb {
+ struct in6_addr addr;
++ struct in6_addr gw;
+ struct l2cap_chan *chan;
+ int status;
+ };
+@@ -158,6 +159,54 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
+ return NULL;
+ }
+
++static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
++ struct in6_addr *daddr,
++ struct sk_buff *skb)
++{
++ struct lowpan_peer *peer, *tmp;
++ struct in6_addr *nexthop;
++ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
++ int count = atomic_read(&dev->peer_count);
++
++ BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);
++
++ /* If we have multiple 6lowpan peers, then check where we should
++ * send the packet. If only one peer exists, then we can send the
++ * packet right away.
++ */
++ if (count == 1)
++ return list_first_entry(&dev->peers, struct lowpan_peer,
++ list);
++
++ if (!rt) {
++ nexthop = &lowpan_cb(skb)->gw;
++
++ if (ipv6_addr_any(nexthop))
++ return NULL;
++ } else {
++ nexthop = rt6_nexthop(rt);
++
++ /* We need to remember the address because it is needed
++ * by bt_xmit() when sending the packet. In bt_xmit(), the
++ * destination routing info is not set.
++ */
++ memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
++ }
++
++ BT_DBG("gw %pI6c", nexthop);
++
++ list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
++ BT_DBG("dst addr %pMR dst type %d ip %pI6c",
++ &peer->chan->dst, peer->chan->dst_type,
++ &peer->peer_addr);
++
++ if (!ipv6_addr_cmp(&peer->peer_addr, nexthop))
++ return peer;
++ }
++
++ return NULL;
++}
++
+ static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
+ {
+ struct lowpan_dev *entry, *tmp;
+@@ -415,8 +464,18 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ if (!peer) {
+- BT_DBG("no such peer %pMR found", &addr);
+- return -ENOENT;
++ /* The packet might be sent to 6lowpan interface
++ * because of routing (either via default route
++ * or user set route) so get peer according to
++ * the destination address.
++ */
++ read_lock_irqsave(&devices_lock, flags);
++ peer = peer_lookup_dst(dev, &hdr->daddr, skb);
++ read_unlock_irqrestore(&devices_lock, flags);
++ if (!peer) {
++ BT_DBG("no such peer %pMR found", &addr);
++ return -ENOENT;
++ }
+ }
+
+ daddr = peer->eui64_addr;
+@@ -520,6 +579,8 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_ba(dev, &addr, addr_type);
++ if (!peer)
++ peer = peer_lookup_dst(dev, &lowpan_cb(skb)->addr, skb);
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p",
+@@ -671,6 +732,14 @@ static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
+ return chan;
+ }
+
++static void set_ip_addr_bits(u8 addr_type, u8 *addr)
++{
++ if (addr_type == BDADDR_LE_PUBLIC)
++ *addr |= 0x02;
++ else
++ *addr &= ~0x02;
++}
++
+ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
+ struct lowpan_dev *dev)
+ {
+@@ -693,6 +762,11 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
+ memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
+ EUI64_ADDR_LEN);
+
++ /* IPv6 address needs to have the U/L bit set properly so toggle
++ * it back here.
++ */
++ set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);
++
+ write_lock_irqsave(&devices_lock, flags);
+ INIT_LIST_HEAD(&peer->list);
+ peer_add(dev, peer);
+@@ -890,7 +964,7 @@ static void chan_resume_cb(struct l2cap_chan *chan)
+
+ static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
+ {
+- return msecs_to_jiffies(1000);
++ return L2CAP_CONN_TIMEOUT;
+ }
+
+ static const struct l2cap_ops bt_6lowpan_chan_ops = {
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 46547b920f88..14ca8ae7cfbe 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2418,12 +2418,8 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
+
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+
+- pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
+-
+- pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+-
+ sdu_len = len;
+- pdu_len -= L2CAP_SDULEN_SIZE;
++ pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
+
+ while (len > 0) {
+ if (len <= pdu_len)
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index fd3294300803..7f0509e1d3bb 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -442,8 +442,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+ }
+
+ /* Not Just Works/Confirm results in MITM Authentication */
+- if (method != JUST_CFM)
++ if (method != JUST_CFM) {
+ set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
++ if (hcon->pending_sec_level < BT_SECURITY_HIGH)
++ hcon->pending_sec_level = BT_SECURITY_HIGH;
++ }
+
+ /* If both devices have Keyoard-Display I/O, the master
+ * Confirms and the slave Enters the passkey.
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 57da4bd7ba0c..0fb456c20eda 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -177,7 +177,7 @@ void ima_delete_rules(void);
+ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+- int xattr_len);
++ int xattr_len, int opened);
+ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
+ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+ enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+@@ -193,7 +193,7 @@ static inline int ima_appraise_measurement(int func,
+ struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+- int xattr_len)
++ int xattr_len, int opened)
+ {
+ return INTEGRITY_UNKNOWN;
+ }
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 86bfd5c5df85..225fd944a4ef 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -183,7 +183,7 @@ int ima_read_xattr(struct dentry *dentry,
+ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+- int xattr_len)
++ int xattr_len, int opened)
+ {
+ static const char op[] = "appraise_data";
+ char *cause = "unknown";
+@@ -202,8 +202,11 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ goto out;
+
+ cause = "missing-hash";
+- status =
+- (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
++ status = INTEGRITY_NOLABEL;
++ if (opened & FILE_CREATED) {
++ iint->flags |= IMA_NEW_FILE;
++ status = INTEGRITY_PASS;
++ }
+ goto out;
+ }
+
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 0bd732843fe7..f7aac3cf19ae 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -80,19 +80,19 @@ static int ima_kernel_read(struct file *file, loff_t offset,
+ {
+ mm_segment_t old_fs;
+ char __user *buf = addr;
+- ssize_t ret;
++ ssize_t ret = -EINVAL;
+
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+- if (!file->f_op->read && !file->f_op->aio_read)
+- return -EINVAL;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ if (file->f_op->read)
+ ret = file->f_op->read(file, buf, count, &offset);
+- else
++ else if (file->f_op->aio_read)
+ ret = do_sync_read(file, buf, count, &offset);
++ else if (file->f_op->read_iter)
++ ret = new_sync_read(file, buf, count, &offset);
+ set_fs(old_fs);
+ return ret;
+ }
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 2917f980bf30..f82cf9b8e92b 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -124,11 +124,13 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
+ return;
+
+ mutex_lock(&inode->i_mutex);
+- if (atomic_read(&inode->i_writecount) == 1 &&
+- iint->version != inode->i_version) {
+- iint->flags &= ~IMA_DONE_MASK;
+- if (iint->flags & IMA_APPRAISE)
+- ima_update_xattr(iint, file);
++ if (atomic_read(&inode->i_writecount) == 1) {
++ if ((iint->version != inode->i_version) ||
++ (iint->flags & IMA_NEW_FILE)) {
++ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
++ if (iint->flags & IMA_APPRAISE)
++ ima_update_xattr(iint, file);
++ }
+ }
+ mutex_unlock(&inode->i_mutex);
+ }
+@@ -155,7 +157,7 @@ void ima_file_free(struct file *file)
+ }
+
+ static int process_measurement(struct file *file, const char *filename,
+- int mask, int function)
++ int mask, int function, int opened)
+ {
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint;
+@@ -224,7 +226,7 @@ static int process_measurement(struct file *file, const char *filename,
+ xattr_value, xattr_len);
+ if (action & IMA_APPRAISE_SUBMASK)
+ rc = ima_appraise_measurement(_func, iint, file, pathname,
+- xattr_value, xattr_len);
++ xattr_value, xattr_len, opened);
+ if (action & IMA_AUDIT)
+ ima_audit_measurement(iint, pathname);
+ kfree(pathbuf);
+@@ -253,7 +255,7 @@ out:
+ int ima_file_mmap(struct file *file, unsigned long prot)
+ {
+ if (file && (prot & PROT_EXEC))
+- return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK);
++ return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK, 0);
+ return 0;
+ }
+
+@@ -275,7 +277,7 @@ int ima_bprm_check(struct linux_binprm *bprm)
+ return process_measurement(bprm->file,
+ (strcmp(bprm->filename, bprm->interp) == 0) ?
+ bprm->filename : bprm->interp,
+- MAY_EXEC, BPRM_CHECK);
++ MAY_EXEC, BPRM_CHECK, 0);
+ }
+
+ /**
+@@ -288,12 +290,12 @@ int ima_bprm_check(struct linux_binprm *bprm)
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+-int ima_file_check(struct file *file, int mask)
++int ima_file_check(struct file *file, int mask, int opened)
+ {
+ ima_rdwr_violation_check(file);
+ return process_measurement(file, NULL,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+- FILE_CHECK);
++ FILE_CHECK, opened);
+ }
+ EXPORT_SYMBOL_GPL(ima_file_check);
+
+@@ -316,7 +318,7 @@ int ima_module_check(struct file *file)
+ #endif
+ return 0; /* We rely on module signature checking */
+ }
+- return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK);
++ return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK, 0);
+ }
+
+ int ima_fw_from_file(struct file *file, char *buf, size_t size)
+@@ -327,7 +329,7 @@ int ima_fw_from_file(struct file *file, char *buf, size_t size)
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ return 0;
+ }
+- return process_measurement(file, NULL, MAY_EXEC, FIRMWARE_CHECK);
++ return process_measurement(file, NULL, MAY_EXEC, FIRMWARE_CHECK, 0);
+ }
+
+ static int __init init_ima(void)
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index 19b8e314ca96..904e68abd49e 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -31,6 +31,7 @@
+ #define IMA_DIGSIG 0x01000000
+ #define IMA_DIGSIG_REQUIRED 0x02000000
+ #define IMA_PERMIT_DIRECTIO 0x04000000
++#define IMA_NEW_FILE 0x08000000
+
+ #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_APPRAISE_SUBMASK)
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 8cd2f930ad0b..a95356f45606 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3193,7 +3193,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
+
+ #ifndef ARCH_HAS_DMA_MMAP_COHERENT
+ /* This should be defined / handled globally! */
+-#ifdef CONFIG_ARM
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+ #endif
+ #endif
+diff --git a/sound/firewire/bebob/bebob_terratec.c b/sound/firewire/bebob/bebob_terratec.c
+index eef8ea7d9b97..0e4c0bfc463b 100644
+--- a/sound/firewire/bebob/bebob_terratec.c
++++ b/sound/firewire/bebob/bebob_terratec.c
+@@ -17,10 +17,10 @@ phase88_rack_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
+ unsigned int enable_ext, enable_word;
+ int err;
+
+- err = avc_audio_get_selector(bebob->unit, 0, 0, &enable_ext);
++ err = avc_audio_get_selector(bebob->unit, 0, 9, &enable_ext);
+ if (err < 0)
+ goto end;
+- err = avc_audio_get_selector(bebob->unit, 0, 0, &enable_word);
++ err = avc_audio_get_selector(bebob->unit, 0, 8, &enable_word);
+ if (err < 0)
+ goto end;
+
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index 3f3ef38d9b6e..874cd76c7b7f 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
+ * get more voice for pcm
+ *
+ * terminate most inactive voice and give it as a pcm voice.
++ *
++ * voice_lock is already held.
+ */
+ int
+ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ struct snd_emux *emu;
+ struct snd_emux_voice *vp;
+ struct best_voice best[V_END];
+- unsigned long flags;
+ int i;
+
+ emu = hw->synth;
+
+- spin_lock_irqsave(&emu->voice_lock, flags);
+ lookup_voices(emu, hw, best, 1); /* no OFF voices */
+ for (i = 0; i < V_END; i++) {
+ if (best[i].voice >= 0) {
+@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ vp->emu->num_voices--;
+ vp->ch = -1;
+ vp->state = SNDRV_EMUX_ST_OFF;
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+ return ch;
+ }
+ }
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+
+ /* not found */
+ return -ENOMEM;
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 364bb413e02a..bb989ab316e8 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -425,7 +425,7 @@ struct snd_hda_pin_quirk {
+ .subvendor = _subvendor,\
+ .name = _name,\
+ .value = _value,\
+- .pins = (const struct hda_pintbl[]) { _pins } \
++ .pins = (const struct hda_pintbl[]) { _pins, {0, 0}} \
+ }
+ #else
+
+@@ -433,7 +433,7 @@ struct snd_hda_pin_quirk {
+ { .codec = _codec,\
+ .subvendor = _subvendor,\
+ .value = _value,\
+- .pins = (const struct hda_pintbl[]) { _pins } \
++ .pins = (const struct hda_pintbl[]) { _pins, {0, 0}} \
+ }
+
+ #endif
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 99d7d7fecaad..c3658df2359c 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1577,19 +1577,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ }
+ }
+
+- if (pin_eld->eld_valid && !eld->eld_valid) {
+- update_eld = true;
++ if (pin_eld->eld_valid != eld->eld_valid)
+ eld_changed = true;
+- }
++
++ if (pin_eld->eld_valid && !eld->eld_valid)
++ update_eld = true;
++
+ if (update_eld) {
+ bool old_eld_valid = pin_eld->eld_valid;
+ pin_eld->eld_valid = eld->eld_valid;
+- eld_changed = pin_eld->eld_size != eld->eld_size ||
++ if (pin_eld->eld_size != eld->eld_size ||
+ memcmp(pin_eld->eld_buffer, eld->eld_buffer,
+- eld->eld_size) != 0;
+- if (eld_changed)
++ eld->eld_size) != 0) {
+ memcpy(pin_eld->eld_buffer, eld->eld_buffer,
+ eld->eld_size);
++ eld_changed = true;
++ }
+ pin_eld->eld_size = eld->eld_size;
+ pin_eld->info = eld->info;
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1ba22fb527c2..b7b293cc710e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3125,6 +3125,9 @@ static void alc283_shutup(struct hda_codec *codec)
+
+ alc_write_coef_idx(codec, 0x43, 0x9004);
+
++ /*depop hp during suspend*/
++ alc_write_coef_idx(codec, 0x06, 0x2100);
++
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+@@ -5783,9 +5786,9 @@ static void alc662_led_gpio1_mute_hook(void *private_data, int enabled)
+ unsigned int oldval = spec->gpio_led;
+
+ if (enabled)
+- spec->gpio_led &= ~0x01;
+- else
+ spec->gpio_led |= 0x01;
++ else
++ spec->gpio_led &= ~0x01;
+ if (spec->gpio_led != oldval)
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ spec->gpio_led);
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 223c47b33ba3..c657752a420c 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -385,6 +385,36 @@ YAMAHA_DEVICE(0x105d, NULL),
+ }
+ },
+ {
++ USB_DEVICE(0x0499, 0x1509),
++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ /* .vendor_name = "Yamaha", */
++ /* .product_name = "Steinberg UR22", */
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 3,
++ .type = QUIRK_MIDI_YAMAHA
++ },
++ {
++ .ifnum = 4,
++ .type = QUIRK_IGNORE_INTERFACE
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
++{
+ USB_DEVICE(0x0499, 0x150a),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Yamaha", */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 95519bc959ed..6a3f29bd43d7 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -52,6 +52,7 @@
+
+ #include <asm/processor.h>
+ #include <asm/io.h>
++#include <asm/ioctl.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+
+@@ -95,8 +96,6 @@ static int hardware_enable_all(void);
+ static void hardware_disable_all(void);
+
+ static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+-static void update_memslots(struct kvm_memslots *slots,
+- struct kvm_memory_slot *new, u64 last_generation);
+
+ static void kvm_release_pfn_dirty(pfn_t pfn);
+ static void mark_page_dirty_in_slot(struct kvm *kvm,
+@@ -476,6 +475,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!kvm->memslots)
+ goto out_err_no_srcu;
++
++ /*
++ * Init kvm generation close to the maximum to easily test the
++ * code of handling generation number wrap-around.
++ */
++ kvm->memslots->generation = -150;
++
+ kvm_init_memslots_id(kvm);
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err_no_srcu;
+@@ -687,8 +693,7 @@ static void sort_memslots(struct kvm_memslots *slots)
+ }
+
+ static void update_memslots(struct kvm_memslots *slots,
+- struct kvm_memory_slot *new,
+- u64 last_generation)
++ struct kvm_memory_slot *new)
+ {
+ if (new) {
+ int id = new->id;
+@@ -699,8 +704,6 @@ static void update_memslots(struct kvm_memslots *slots,
+ if (new->npages != npages)
+ sort_memslots(slots);
+ }
+-
+- slots->generation = last_generation + 1;
+ }
+
+ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
+@@ -722,10 +725,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
+ {
+ struct kvm_memslots *old_memslots = kvm->memslots;
+
+- update_memslots(slots, new, kvm->memslots->generation);
++ /*
++ * Set the low bit in the generation, which disables SPTE caching
++ * until the end of synchronize_srcu_expedited.
++ */
++ WARN_ON(old_memslots->generation & 1);
++ slots->generation = old_memslots->generation + 1;
++
++ update_memslots(slots, new);
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+
++ /*
++ * Increment the new memslot generation a second time. This prevents
++ * vm exits that race with memslot updates from caching a memslot
++ * generation that will (potentially) be valid forever.
++ */
++ slots->generation++;
++
+ kvm_arch_memslots_updated(kvm);
+
+ return old_memslots;
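The kvm_main.c hunks above implement an odd/even generation protocol for the memslot array: the generation is bumped to an odd value before the new slots are published (odd meaning an update is in flight, so readers must not cache translations keyed on it), and bumped again to an even value only after synchronize_srcu_expedited() has drained readers, so a racing VM exit can never cache a generation that will later become valid. A standalone model (illustrative only; the demo_* names are invented and the RCU publish/drain steps are elided):

struct demo_slots { unsigned long long generation; };

static void demo_install_new_slots(const struct demo_slots *old,
				   struct demo_slots *fresh)
{
	/* the old generation must be even (stable) when an update starts */
	fresh->generation = old->generation + 1;	/* odd: caching disabled */

	/* ... publish "fresh" and wait for all readers to drain ... */

	fresh->generation++;				/* even again: stable */
}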
+@@ -1975,6 +1992,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
+ if (vcpu->kvm->mm != current->mm)
+ return -EIO;
+
++ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
++ return -EINVAL;
++
+ #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
+ /*
+ * Special cases: vcpu ioctls that are asynchronous to vcpu execution,