commit: e566389761770017c40d9055be61770fbe8da1a9
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon Mar 27 14:02:08 2017 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon Mar 27 14:02:08 2017 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=e5663897

grsecurity-3.1-4.9.18-201703261106

 4.9.16/1015_linux-4.9.16.patch                    | 1623 ------
 {4.9.16 => 4.9.18}/0000_README                    |   10 +-
 4.9.18/1016_linux-4.9.17.patch                    | 6091 ++++++++++++++++++++
 4.9.18/1017_linux-4.9.18.patch                    |  876 +++
 .../4420_grsecurity-3.1-4.9.18-201703261106.patch |  322 +-
 {4.9.16 => 4.9.18}/4425_grsec_remove_EI_PAX.patch |    0
 .../4426_default_XATTR_PAX_FLAGS.patch            |    0
 .../4427_force_XATTR_PAX_tmpfs.patch              |    0
 .../4430_grsec-remove-localversion-grsec.patch    |    0
 {4.9.16 => 4.9.18}/4435_grsec-mute-warnings.patch |    0
 .../4440_grsec-remove-protected-paths.patch       |    0
 .../4450_grsec-kconfig-default-gids.patch         |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch      |    0
 {4.9.16 => 4.9.18}/4470_disable-compat_vdso.patch |    0
 {4.9.16 => 4.9.18}/4475_emutramp_default_on.patch |    0
 15 files changed, 7135 insertions(+), 1787 deletions(-)

diff --git a/4.9.16/1015_linux-4.9.16.patch b/4.9.16/1015_linux-4.9.16.patch
deleted file mode 100644
index 7ac2f77..0000000
--- a/4.9.16/1015_linux-4.9.16.patch
+++ /dev/null
@@ -1,1623 +0,0 @@
-diff --git a/Makefile b/Makefile
-index 03df4fc..4e0f962 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 9
--SUBLEVEL = 15
-+SUBLEVEL = 16
- EXTRAVERSION =
- NAME = Roaring Lionus
-
-diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
-index 5d83ff7..ec8e968 100644
---- a/arch/mips/configs/ip22_defconfig
-+++ b/arch/mips/configs/ip22_defconfig
-@@ -67,8 +67,8 @@ CONFIG_NETFILTER_NETLINK_QUEUE=m
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
-index 2b74aee..e582069 100644
---- a/arch/mips/configs/ip27_defconfig
-+++ b/arch/mips/configs/ip27_defconfig
-@@ -133,7 +133,7 @@ CONFIG_LIBFC=m
- CONFIG_SCSI_QLOGIC_1280=y
- CONFIG_SCSI_PMCRAID=m
- CONFIG_SCSI_BFA_FC=m
--CONFIG_SCSI_DH=m
-+CONFIG_SCSI_DH=y
- CONFIG_SCSI_DH_RDAC=m
- CONFIG_SCSI_DH_HP_SW=m
- CONFIG_SCSI_DH_EMC=m
-@@ -205,7 +205,6 @@ CONFIG_MLX4_EN=m
- # CONFIG_MLX4_DEBUG is not set
- CONFIG_TEHUTI=m
- CONFIG_BNX2X=m
--CONFIG_QLGE=m
- CONFIG_SFC=m
- CONFIG_BE2NET=m
- CONFIG_LIBERTAS_THINFIRM=m
-diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
-index 5da76e0..0cdb431 100644
---- a/arch/mips/configs/lemote2f_defconfig
-+++ b/arch/mips/configs/lemote2f_defconfig
-@@ -39,7 +39,7 @@ CONFIG_HIBERNATION=y
- CONFIG_PM_STD_PARTITION="/dev/hda3"
- CONFIG_CPU_FREQ=y
- CONFIG_CPU_FREQ_DEBUG=y
--CONFIG_CPU_FREQ_STAT=m
-+CONFIG_CPU_FREQ_STAT=y
- CONFIG_CPU_FREQ_STAT_DETAILS=y
- CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
- CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
-index 58d43f3..078ecac 100644
---- a/arch/mips/configs/malta_defconfig
-+++ b/arch/mips/configs/malta_defconfig
-@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
-index c8f7e28..e233f87 100644
---- a/arch/mips/configs/malta_kvm_defconfig
-+++ b/arch/mips/configs/malta_kvm_defconfig
-@@ -60,8 +60,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
-index d2f54e5..fbe085c 100644
---- a/arch/mips/configs/malta_kvm_guest_defconfig
-+++ b/arch/mips/configs/malta_kvm_guest_defconfig
-@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
-index 3d0d9cb..2942610 100644
---- a/arch/mips/configs/maltaup_xpa_defconfig
-+++ b/arch/mips/configs/maltaup_xpa_defconfig
-@@ -61,8 +61,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
-index b496c25..07d0182 100644
---- a/arch/mips/configs/nlm_xlp_defconfig
-+++ b/arch/mips/configs/nlm_xlp_defconfig
-@@ -110,7 +110,7 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
-index 8e99ad8..f59969a 100644
---- a/arch/mips/configs/nlm_xlr_defconfig
-+++ b/arch/mips/configs/nlm_xlr_defconfig
-@@ -90,7 +90,7 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
-index 4775a11..24d5e31 100644
---- a/arch/mips/include/asm/mach-ip27/spaces.h
-+++ b/arch/mips/include/asm/mach-ip27/spaces.h
-@@ -12,14 +12,16 @@
-
- /*
-  * IP27 uses the R10000's uncached attribute feature. Attribute 3 selects
-- * uncached memory addressing.
-+ * uncached memory addressing. Hide the definitions on 32-bit compilation
-+ * of the compat-vdso code.
-  */
--
-+#ifdef CONFIG_64BIT
- #define HSPEC_BASE 0x9000000000000000
- #define IO_BASE 0x9200000000000000
- #define MSPEC_BASE 0x9400000000000000
- #define UNCAC_BASE 0x9600000000000000
- #define CAC_BASE 0xa800000000000000
-+#endif
-
- #define TO_MSPEC(x) (MSPEC_BASE | ((x) & TO_PHYS_MASK))
- #define TO_HSPEC(x) (HSPEC_BASE | ((x) & TO_PHYS_MASK))
-diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
-index 5a73c5e..23198c9 100644
---- a/arch/mips/ralink/prom.c
-+++ b/arch/mips/ralink/prom.c
-@@ -30,8 +30,10 @@ const char *get_system_type(void)
- 	return soc_info.sys_type;
- }
-
--static __init void prom_init_cmdline(int argc, char **argv)
-+static __init void prom_init_cmdline(void)
- {
-+	int argc;
-+	char **argv;
- 	int i;
-
- 	pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
-@@ -60,14 +62,11 @@ static __init void prom_init_cmdline(int argc, char **argv)
-
- void __init prom_init(void)
- {
--	int argc;
--	char **argv;
--
- 	prom_soc_init(&soc_info);
-
- 	pr_info("SoC Type: %s\n", get_system_type());
-
--	prom_init_cmdline(argc, argv);
-+	prom_init_cmdline();
- }
-
- void __init prom_free_prom_memory(void)
-diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
-index 285796e..2b76e36 100644
---- a/arch/mips/ralink/rt288x.c
-+++ b/arch/mips/ralink/rt288x.c
-@@ -40,16 +40,6 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
- 	{ 0 }
- };
-
--static void rt288x_wdt_reset(void)
--{
--	u32 t;
--
--	/* enable WDT reset output on pin SRAM_CS_N */
--	t = rt_sysc_r32(SYSC_REG_CLKCFG);
--	t |= CLKCFG_SRAM_CS_N_WDT;
--	rt_sysc_w32(t, SYSC_REG_CLKCFG);
--}
--
- void __init ralink_clk_init(void)
- {
- 	unsigned long cpu_rate, wmac_rate = 40000000;
-diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
-index c8a28c4b..e778e0b 100644
---- a/arch/mips/ralink/rt305x.c
-+++ b/arch/mips/ralink/rt305x.c
-@@ -89,17 +89,6 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = {
- 	{ 0 }
- };
-
--static void rt305x_wdt_reset(void)
--{
--	u32 t;
--
--	/* enable WDT reset output on pin SRAM_CS_N */
--	t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
--	t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
--		RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
--	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
--}
--
- static unsigned long rt5350_get_mem_size(void)
- {
- 	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
-diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
-index 4cef916..3e0aa09 100644
---- a/arch/mips/ralink/rt3883.c
-+++ b/arch/mips/ralink/rt3883.c
-@@ -63,16 +63,6 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
- 	{ 0 }
- };
-
--static void rt3883_wdt_reset(void)
--{
--	u32 t;
--
--	/* enable WDT reset output on GPIO 2 */
--	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
--	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
--	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
--}
--
- void __init ralink_clk_init(void)
- {
- 	unsigned long cpu_rate, sys_rate;
-diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
-index 8077ff3..d4469b2 100644
---- a/arch/mips/ralink/timer.c
-+++ b/arch/mips/ralink/timer.c
-@@ -71,11 +71,6 @@ static int rt_timer_request(struct rt_timer *rt)
- 	return err;
- }
-
--static void rt_timer_free(struct rt_timer *rt)
--{
--	free_irq(rt->irq, rt);
--}
--
- static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
- {
- 	if (rt->timer_freq < divisor)
-@@ -101,15 +96,6 @@ static int rt_timer_enable(struct rt_timer *rt)
- 	return 0;
- }
-
--static void rt_timer_disable(struct rt_timer *rt)
--{
--	u32 t;
--
--	t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
--	t &= ~TMR0CTL_ENABLE;
--	rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
--}
--
- static int rt_timer_probe(struct platform_device *pdev)
- {
- 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
-index b7a4b7e..e8f6b3a 100644
---- a/arch/mips/sgi-ip22/Platform
-+++ b/arch/mips/sgi-ip22/Platform
-@@ -25,7 +25,7 @@ endif
- # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
- #
- ifdef CONFIG_SGI_IP28
--  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
-+  ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
-       $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
-   endif
- endif
-diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
-index 3362299..6ca3b90 100644
---- a/arch/powerpc/lib/sstep.c
-+++ b/arch/powerpc/lib/sstep.c
-@@ -1807,8 +1807,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- 		goto instr_done;
-
- 	case LARX:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		if (op.ea & (size - 1))
- 			break;		/* can't handle misaligned */
- 		err = -EFAULT;
-@@ -1832,8 +1830,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- 		goto ldst_done;
-
- 	case STCX:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		if (op.ea & (size - 1))
- 			break;		/* can't handle misaligned */
- 		err = -EFAULT;
-@@ -1859,8 +1855,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- 		goto ldst_done;
-
- 	case LOAD:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
- 		if (!err) {
- 			if (op.type & SIGNEXT)
-@@ -1872,8 +1866,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
-
- #ifdef CONFIG_PPC_FPU
- 	case LOAD_FP:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		if (size == 4)
- 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
- 		else
-@@ -1882,15 +1874,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- #endif
- #ifdef CONFIG_ALTIVEC
- 	case LOAD_VMX:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
- 		goto ldst_done;
- #endif
- #ifdef CONFIG_VSX
- 	case LOAD_VSX:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
- 		goto ldst_done;
- #endif
-@@ -1913,8 +1901,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- 		goto instr_done;
-
- 	case STORE:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		if ((op.type & UPDATE) && size == sizeof(long) &&
- 		    op.reg == 1 && op.update_reg == 1 &&
- 		    !(regs->msr & MSR_PR) &&
-@@ -1927,8 +1913,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
-
- #ifdef CONFIG_PPC_FPU
- 	case STORE_FP:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		if (size == 4)
- 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
- 		else
-@@ -1937,15 +1921,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- #endif
- #ifdef CONFIG_ALTIVEC
- 	case STORE_VMX:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
- 		goto ldst_done;
- #endif
- #ifdef CONFIG_VSX
- 	case STORE_VSX:
--		if (regs->msr & MSR_LE)
--			return 0;
- 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
- 		goto ldst_done;
- #endif
-diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
-index c96c0cb..32c46b4 100644
---- a/arch/powerpc/sysdev/xics/icp-opal.c
-+++ b/arch/powerpc/sysdev/xics/icp-opal.c
-@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
-
- static void icp_opal_set_cpu_priority(unsigned char cppr)
- {
-+	/*
-+	 * Here be dragons. The caller has asked to allow only IPI's and not
-+	 * external interrupts. But OPAL XIVE doesn't support that. So instead
-+	 * of allowing no interrupts allow all. That's still not right, but
-+	 * currently the only caller who does this is xics_migrate_irqs_away()
-+	 * and it works in that case.
-+	 */
-+	if (cppr >= DEFAULT_PRIORITY)
-+		cppr = LOWEST_PRIORITY;
-+
- 	xics_set_base_cppr(cppr);
- 	opal_int_set_cppr(cppr);
- 	iosync();
-diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
-index 69d858e..23efe4e 100644
---- a/arch/powerpc/sysdev/xics/xics-common.c
-+++ b/arch/powerpc/sysdev/xics/xics-common.c
-@@ -20,6 +20,7 @@
- #include <linux/of.h>
- #include <linux/slab.h>
- #include <linux/spinlock.h>
-+#include <linux/delay.h>
-
- #include <asm/prom.h>
- #include <asm/io.h>
-@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
- 	/* Remove ourselves from the global interrupt queue */
- 	xics_set_cpu_giq(xics_default_distrib_server, 0);
-
--	/* Allow IPIs again... */
--	icp_ops->set_priority(DEFAULT_PRIORITY);
--
- 	for_each_irq_desc(virq, desc) {
- 		struct irq_chip *chip;
- 		long server;
-@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
- unlock:
- 		raw_spin_unlock_irqrestore(&desc->lock, flags);
- 	}
-+
-+	/* Allow "sufficient" time to drop any inflight IRQ's */
-+	mdelay(5);
-+
-+	/*
-+	 * Allow IPIs again. This is done at the very end, after migrating all
-+	 * interrupts, the expectation is that we'll only get woken up by an IPI
-+	 * interrupt beyond this point, but leave externals masked just to be
-+	 * safe. If we're using icp-opal this may actually allow all
-+	 * interrupts anyway, but that should be OK.
-+	 */
-+	icp_ops->set_priority(DEFAULT_PRIORITY);
-+
- }
- #endif /* CONFIG_HOTPLUG_CPU */
-
-diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
-index d56ef26..7678f79 100644
---- a/arch/s390/mm/pgtable.c
-+++ b/arch/s390/mm/pgtable.c
-@@ -606,12 +606,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
- {
- 	spinlock_t *ptl;
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
- 	pgste_t pgste;
- 	pte_t *ptep;
- 	pte_t pte;
- 	bool dirty;
-
--	ptep = get_locked_pte(mm, addr, &ptl);
-+	pgd = pgd_offset(mm, addr);
-+	pud = pud_alloc(mm, pgd, addr);
-+	if (!pud)
-+		return false;
-+	pmd = pmd_alloc(mm, pud, addr);
-+	if (!pmd)
-+		return false;
-+	/* We can't run guests backed by huge pages, but userspace can
-+	 * still set them up and then try to migrate them without any
-+	 * migration support.
-+	 */
-+	if (pmd_large(*pmd))
-+		return true;
-+
-+	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- 	if (unlikely(!ptep))
- 		return false;
-
-diff --git a/crypto/Makefile b/crypto/Makefile
-index bd6a029..9e52b3c 100644
---- a/crypto/Makefile
-+++ b/crypto/Makefile
-@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
- obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
- obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
- obj-$(CONFIG_CRYPTO_WP512) += wp512.o
-+CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
- obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
- obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
- obj-$(CONFIG_CRYPTO_ECB) += ecb.o
-@@ -94,6 +95,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
- obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
- obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
- obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
-+CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
- obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
- obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
- obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
-diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
-index 7c75a8d..6bdf39e 100644
---- a/drivers/firmware/efi/arm-runtime.c
-+++ b/drivers/firmware/efi/arm-runtime.c
-@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
- 	bool systab_found;
-
- 	efi_mm.pgd = pgd_alloc(&efi_mm);
-+	mm_init_cpumask(&efi_mm);
- 	init_new_context(NULL, &efi_mm);
-
- 	systab_found = false;
-diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
-index 83768e8..2178266 100644
---- a/drivers/i2c/i2c-mux.c
-+++ b/drivers/i2c/i2c-mux.c
-@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
- 	while (muxc->num_adapters) {
- 		struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
- 		struct i2c_mux_priv *priv = adap->algo_data;
-+		struct device_node *np = adap->dev.of_node;
-
- 		muxc->adapter[muxc->num_adapters] = NULL;
-
-@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
-
- 		sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
- 		i2c_del_adapter(adap);
-+		of_node_put(np);
- 		kfree(priv);
- 	}
- }
-diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
-index 4cab29e..11bfa27 100644
---- a/drivers/infiniband/hw/mlx5/main.c
-+++ b/drivers/infiniband/hw/mlx5/main.c
-@@ -3141,9 +3141,11 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
- 	if (err)
- 		goto err_rsrc;
-
--	err = mlx5_ib_alloc_q_counters(dev);
--	if (err)
--		goto err_odp;
-+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
-+		err = mlx5_ib_alloc_q_counters(dev);
-+		if (err)
-+			goto err_odp;
-+	}
-
- 	err = ib_register_device(&dev->ib_dev, NULL);
- 	if (err)
-@@ -3171,7 +3173,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
- 	ib_unregister_device(&dev->ib_dev);
-
- err_q_cnt:
--	mlx5_ib_dealloc_q_counters(dev);
-+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-+		mlx5_ib_dealloc_q_counters(dev);
-
- err_odp:
- 	mlx5_ib_odp_remove_one(dev);
-@@ -3201,7 +3204,8 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
-
- 	mlx5_remove_roce_notifier(dev);
- 	ib_unregister_device(&dev->ib_dev);
--	mlx5_ib_dealloc_q_counters(dev);
-+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-+		mlx5_ib_dealloc_q_counters(dev);
- 	destroy_umrc_res(dev);
- 	mlx5_ib_odp_remove_one(dev);
- 	destroy_dev_resources(&dev->devr);
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index ef7bf1d..628ba00 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -972,10 +972,61 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
- }
- EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
-
-+/*
-+ * Flush current->bio_list when the target map method blocks.
-+ * This fixes deadlocks in snapshot and possibly in other targets.
-+ */
-+struct dm_offload {
-+	struct blk_plug plug;
-+	struct blk_plug_cb cb;
-+};
-+
-+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
-+{
-+	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
-+	struct bio_list list;
-+	struct bio *bio;
-+
-+	INIT_LIST_HEAD(&o->cb.list);
-+
-+	if (unlikely(!current->bio_list))
-+		return;
-+
-+	list = *current->bio_list;
-+	bio_list_init(current->bio_list);
-+
-+	while ((bio = bio_list_pop(&list))) {
-+		struct bio_set *bs = bio->bi_pool;
-+		if (unlikely(!bs) || bs == fs_bio_set) {
-+			bio_list_add(current->bio_list, bio);
-+			continue;
-+		}
-+
-+		spin_lock(&bs->rescue_lock);
-+		bio_list_add(&bs->rescue_list, bio);
-+		queue_work(bs->rescue_workqueue, &bs->rescue_work);
-+		spin_unlock(&bs->rescue_lock);
-+	}
-+}
-+
-+static void dm_offload_start(struct dm_offload *o)
-+{
-+	blk_start_plug(&o->plug);
-+	o->cb.callback = flush_current_bio_list;
-+	list_add(&o->cb.list, &current->plug->cb_list);
-+}
-+
-+static void dm_offload_end(struct dm_offload *o)
-+{
-+	list_del(&o->cb.list);
-+	blk_finish_plug(&o->plug);
-+}
-+
- static void __map_bio(struct dm_target_io *tio)
- {
- 	int r;
- 	sector_t sector;
-+	struct dm_offload o;
- 	struct bio *clone = &tio->clone;
- 	struct dm_target *ti = tio->ti;
-
-@@ -988,7 +1039,11 @@ static void __map_bio(struct dm_target_io *tio)
- 	 */
- 	atomic_inc(&tio->io->io_count);
- 	sector = clone->bi_iter.bi_sector;
-+
-+	dm_offload_start(&o);
- 	r = ti->type->map(ti, clone);
-+	dm_offload_end(&o);
-+
- 	if (r == DM_MAPIO_REMAPPED) {
- 		/* the bio has been remapped so dispatch it */
-
-diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
-index d9c1f2f..aba7735 100644
---- a/drivers/media/rc/rc-main.c
-+++ b/drivers/media/rc/rc-main.c
-@@ -1411,6 +1411,7 @@ int rc_register_device(struct rc_dev *dev)
- 	int attr = 0;
- 	int minor;
- 	int rc;
-+	u64 rc_type;
-
- 	if (!dev || !dev->map_name)
- 		return -EINVAL;
-@@ -1496,14 +1497,18 @@ int rc_register_device(struct rc_dev *dev)
- 			goto out_input;
- 	}
-
-+	rc_type = BIT_ULL(rc_map->rc_type);
-+
- 	if (dev->change_protocol) {
--		u64 rc_type = (1ll << rc_map->rc_type);
- 		rc = dev->change_protocol(dev, &rc_type);
- 		if (rc < 0)
- 			goto out_raw;
- 		dev->enabled_protocols = rc_type;
- 	}
-
-+	if (dev->driver_type == RC_DRIVER_IR_RAW)
-+		ir_raw_load_modules(&rc_type);
-+
- 	/* Allow the RC sysfs nodes to be accessible */
- 	atomic_set(&dev->initialized, 1);
-
-diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
-index 2c720cb..c3e6734 100644
---- a/drivers/media/usb/dvb-usb/dw2102.c
-+++ b/drivers/media/usb/dvb-usb/dw2102.c
-@@ -68,6 +68,7 @@
- struct dw2102_state {
- 	u8 initialized;
- 	u8 last_lock;
-+	u8 data[MAX_XFER_SIZE + 4];
- 	struct i2c_client *i2c_client_demod;
- 	struct i2c_client *i2c_client_tuner;
-
-@@ -662,62 +663,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- 				int num)
- {
- 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
--	u8 obuf[0x40], ibuf[0x40];
-+	struct dw2102_state *state;
-
- 	if (!d)
- 		return -ENODEV;
-+
-+	state = d->priv;
-+
- 	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- 		return -EAGAIN;
-+	if (mutex_lock_interruptible(&d->data_mutex) < 0) {
-+		mutex_unlock(&d->i2c_mutex);
-+		return -EAGAIN;
-+	}
-
- 	switch (num) {
- 	case 1:
- 		switch (msg[0].addr) {
- 		case SU3000_STREAM_CTRL:
--			obuf[0] = msg[0].buf[0] + 0x36;
--			obuf[1] = 3;
--			obuf[2] = 0;
--			if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
-+			state->data[0] = msg[0].buf[0] + 0x36;
-+			state->data[1] = 3;
-+			state->data[2] = 0;
-+			if (dvb_usb_generic_rw(d, state->data, 3,
-+					state->data, 0, 0) < 0)
- 				err("i2c transfer failed.");
- 			break;
- 		case DW2102_RC_QUERY:
--			obuf[0] = 0x10;
--			if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
-+			state->data[0] = 0x10;
-+			if (dvb_usb_generic_rw(d, state->data, 1,
-+					state->data, 2, 0) < 0)
- 				err("i2c transfer failed.");
--			msg[0].buf[1] = ibuf[0];
--			msg[0].buf[0] = ibuf[1];
-+			msg[0].buf[1] = state->data[0];
-+			msg[0].buf[0] = state->data[1];
- 			break;
- 		default:
- 			/* always i2c write*/
--			obuf[0] = 0x08;
--			obuf[1] = msg[0].addr;
--			obuf[2] = msg[0].len;
-+			state->data[0] = 0x08;
-+			state->data[1] = msg[0].addr;
-+			state->data[2] = msg[0].len;
-
--			memcpy(&obuf[3], msg[0].buf, msg[0].len);
-+			memcpy(&state->data[3], msg[0].buf, msg[0].len);
-
--			if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
--						ibuf, 1, 0) < 0)
-+			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
-+						state->data, 1, 0) < 0)
- 				err("i2c transfer failed.");
-
- 		}
- 		break;
- 	case 2:
- 		/* always i2c read */
--		obuf[0] = 0x09;
--		obuf[1] = msg[0].len;
--		obuf[2] = msg[1].len;
--		obuf[3] = msg[0].addr;
--		memcpy(&obuf[4], msg[0].buf, msg[0].len);
--
--		if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
--					ibuf, msg[1].len + 1, 0) < 0)
-+		state->data[0] = 0x09;
-+		state->data[1] = msg[0].len;
-+		state->data[2] = msg[1].len;
-+		state->data[3] = msg[0].addr;
-+		memcpy(&state->data[4], msg[0].buf, msg[0].len);
-+
-+		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
-+					state->data, msg[1].len + 1, 0) < 0)
- 			err("i2c transfer failed.");
-
--		memcpy(msg[1].buf, &ibuf[1], msg[1].len);
-+		memcpy(msg[1].buf, &state->data[1], msg[1].len);
- 		break;
- 	default:
- 		warn("more than 2 i2c messages at a time is not handled yet.");
- 		break;
- 	}
-+	mutex_unlock(&d->data_mutex);
- 	mutex_unlock(&d->i2c_mutex);
- 	return num;
- }
-@@ -845,17 +856,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
- static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
- {
- 	struct dw2102_state *state = (struct dw2102_state *)d->priv;
--	u8 obuf[] = {0xde, 0};
-+	int ret = 0;
-
- 	info("%s: %d, initialized %d", __func__, i, state->initialized);
-
- 	if (i && !state->initialized) {
-+		mutex_lock(&d->data_mutex);
-+
-+		state->data[0] = 0xde;
-+		state->data[1] = 0;
-+
- 		state->initialized = 1;
- 		/* reset board */
--		return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
-+		ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
-+		mutex_unlock(&d->data_mutex);
- 	}
-
--	return 0;
-+	return ret;
- }
-
- static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
-@@ -1310,49 +1327,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
- 	return 0;
- }
-
--static int su3000_frontend_attach(struct dvb_usb_adapter *d)
-+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
- {
--	u8 obuf[3] = { 0xe, 0x80, 0 };
--	u8 ibuf[] = { 0 };
-+	struct dvb_usb_device *d = adap->dev;
-+	struct dw2102_state *state = d->priv;
-+
-+	mutex_lock(&d->data_mutex);
-+
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x80;
-+	state->data[2] = 0;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x02;
--	obuf[2] = 1;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x02;
-+	state->data[2] = 1;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
- 	msleep(300);
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x83;
--	obuf[2] = 0;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x83;
-+	state->data[2] = 0;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x83;
--	obuf[2] = 1;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x83;
-+	state->data[2] = 1;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0x51;
-+	state->data[0] = 0x51;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- 		err("command 0x51 transfer failed.");
-
--	d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
--					&d->dev->i2c_adap);
--	if (d->fe_adap[0].fe == NULL)
-+	mutex_unlock(&d->data_mutex);
-+
-+	adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-+					&d->i2c_adap);
-+	if (adap->fe_adap[0].fe == NULL)
- 		return -EIO;
-
--	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
-+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
- 				&dw2104_ts2020_config,
--				&d->dev->i2c_adap)) {
-+				&d->i2c_adap)) {
- 		info("Attached DS3000/TS2020!");
- 		return 0;
- 	}
-@@ -1361,47 +1386,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
- 	return -EIO;
- }
-
--static int t220_frontend_attach(struct dvb_usb_adapter *d)
-+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
- {
--	u8 obuf[3] = { 0xe, 0x87, 0 };
--	u8 ibuf[] = { 0 };
-+	struct dvb_usb_device *d = adap->dev;
-+	struct dw2102_state *state = d->priv;
-+
-+	mutex_lock(&d->data_mutex);
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x87;
-+	state->data[2] = 0x0;
-+
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x86;
--	obuf[2] = 1;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x86;
-+	state->data[2] = 1;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x80;
--	obuf[2] = 0;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x80;
-+	state->data[2] = 0;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
- 	msleep(50);
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x80;
--	obuf[2] = 1;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x80;
-+	state->data[2] = 1;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0x51;
-+	state->data[0] = 0x51;
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- 		err("command 0x51 transfer failed.");
-
--	d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
--					&d->dev->i2c_adap, NULL);
--	if (d->fe_adap[0].fe != NULL) {
--		if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
--					&d->dev->i2c_adap, &tda18271_config)) {
-+	mutex_unlock(&d->data_mutex);
-+
-+	adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-+					&d->i2c_adap, NULL);
-+	if (adap->fe_adap[0].fe != NULL) {
-+		if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
-+					&d->i2c_adap, &tda18271_config)) {
- 			info("Attached TDA18271HD/CXD2820R!");
- 			return 0;
- 		}
-@@ -1411,23 +1444,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
- 	return -EIO;
- }
-
--static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
-+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
- {
--	u8 obuf[] = { 0x51 };
--	u8 ibuf[] = { 0 };
-+	struct dvb_usb_device *d = adap->dev;
-+	struct dw2102_state *state = d->priv;
-+
-+	mutex_lock(&d->data_mutex);
-
--	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
-+	state->data[0] = 0x51;
-+
-+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- 		err("command 0x51 transfer failed.");
-
--	d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
--					&d->dev->i2c_adap);
-+	mutex_unlock(&d->data_mutex);
-
--	if (d->fe_adap[0].fe == NULL)
-+	adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
-+					&s421_m88rs2000_config,
-+					&d->i2c_adap);
-+
-+	if (adap->fe_adap[0].fe == NULL)
- 		return -EIO;
-
--	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
-+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
- 				&dw2104_ts2020_config,
--				&d->dev->i2c_adap)) {
-+				&d->i2c_adap)) {
- 		info("Attached RS2000/TS2020!");
- 		return 0;
- 	}
-@@ -1440,44 +1480,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
- {
- 	struct dvb_usb_device *d = adap->dev;
- 	struct dw2102_state *state = d->priv;
--	u8 obuf[3] = { 0xe, 0x80, 0 };
--	u8 ibuf[] = { 0 };
- 	struct i2c_adapter *i2c_adapter;
- 	struct i2c_client *client;
- 	struct i2c_board_info board_info;
- 	struct m88ds3103_platform_data m88ds3103_pdata = {};
- 	struct ts2020_config ts2020_config = {};
-
--	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+	mutex_lock(&d->data_mutex);
-+
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x80;
-+	state->data[2] = 0x0;
-+
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x02;
--	obuf[2] = 1;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x02;
-+	state->data[2] = 1;
-
--	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
- 	msleep(300);
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x83;
--	obuf[2] = 0;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x83;
-+	state->data[2] = 0;
-
--	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0xe;
--	obuf[1] = 0x83;
--	obuf[2] = 1;
-+	state->data[0] = 0xe;
-+	state->data[1] = 0x83;
-+	state->data[2] = 1;
-
--	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- 		err("command 0x0e transfer failed.");
-
--	obuf[0] = 0x51;
-+	state->data[0] = 0x51;
-
--	if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
-+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- 		err("command 0x51 transfer failed.");
-
-+	mutex_unlock(&d->data_mutex);
-+
- 	/* attach demod */
- 	m88ds3103_pdata.clk = 27000000;
- 	m88ds3103_pdata.i2c_wr_max = 33;
-diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
-index f9fa3fa..2051f28 100644
---- a/drivers/mtd/maps/pmcmsp-flash.c
-+++ b/drivers/mtd/maps/pmcmsp-flash.c
-@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
- 		}
-
- 		msp_maps[i].bankwidth = 1;
--		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
-+		msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
- 		if (!msp_maps[i].name) {
- 			iounmap(msp_maps[i].virt);
- 			kfree(msp_parts[i]);
- 			goto cleanup_loop;
- 		}
-
--		msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
--
- 		for (j = 0; j < pcnt; j++) {
- 			part_name[5] = '0' + i;
- 			part_name[7] = '0' + j;
-diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
-index 5370909..08d91ef 100644
---- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
-+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
-@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
- 		priv->old_link = 0;
- 		priv->old_duplex = -1;
- 		priv->old_pause = -1;
-+	} else {
-+		phydev = NULL;
- 	}
-
- 	/* mask all interrupts and request them */
-@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
- 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
- 			 ENETDMAC_IRMASK, priv->tx_chan);
-
--	if (priv->has_phy)
-+	if (phydev)
- 		phy_start(phydev);
- 	else
- 		bcm_enet_adjust_link(dev);
-@@ -1126,7 +1128,7 @@ static int bcm_enet_open(struct net_device *dev)
- 	free_irq(dev->irq, dev);
-
- out_phy_disconnect:
--	if (priv->has_phy)
-+	if (phydev)
- 		phy_disconnect(phydev);
-
- 	return ret;
-diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
-index 28097be..5127b7e 100644
---- a/drivers/net/ethernet/ti/cpmac.c
-+++ b/drivers/net/ethernet/ti/cpmac.c
-@@ -1211,7 +1211,7 @@ int cpmac_init(void)
- 		goto fail_alloc;
- 	}
-
--#warning FIXME: unhardcode gpio&reset bits
-+	/* FIXME: unhardcode gpio&reset bits */
- 	ar7_gpio_disable(26);
- 	ar7_gpio_disable(27);
- 	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
-diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index 3a035e07..087a218 100644
---- a/drivers/pci/quirks.c
-+++ b/drivers/pci/quirks.c
-@@ -2173,6 +2173,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
- 		quirk_blacklist_vpd);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
-
- /*
-  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
-diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
-index f44615f..3e2ef4f 100644
---- a/drivers/tty/serial/samsung.c
-+++ b/drivers/tty/serial/samsung.c
-@@ -1036,8 +1036,10 @@ static int s3c64xx_serial_startup(struct uart_port *port)
- 	if (ourport->dma) {
- 		ret = s3c24xx_serial_request_dma(ourport);
- 		if (ret < 0) {
--			dev_warn(port->dev, "DMA request failed\n");
--			return ret;
-+			dev_warn(port->dev,
-+				 "DMA request failed, DMA will not be used\n");
-+			devm_kfree(port->dev, ourport->dma);
-+			ourport->dma = NULL;
- 		}
- 	}
-
-diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
-index 29e80cc..5dd1832 100644
---- a/drivers/usb/dwc3/dwc3-omap.c
-+++ b/drivers/usb/dwc3/dwc3-omap.c
-@@ -249,6 +249,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
- 		val = dwc3_omap_read_utmi_ctrl(omap);
- 		val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
- 		dwc3_omap_write_utmi_ctrl(omap, val);
-+		break;
-
- 	case OMAP_DWC3_VBUS_OFF:
- 		val = dwc3_omap_read_utmi_ctrl(omap);
-diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
-index e4a1d97..39459b7 100644
---- a/drivers/usb/dwc3/gadget.h
-+++ b/drivers/usb/dwc3/gadget.h
-@@ -28,23 +28,23 @@ struct dwc3;
- #define gadget_to_dwc(g)	(container_of(g, struct dwc3, gadget))
-
- /* DEPCFG parameter 1 */
--#define DWC3_DEPCFG_INT_NUM(n)		((n) << 0)
-+#define DWC3_DEPCFG_INT_NUM(n)		(((n) & 0x1f) << 0)
- #define DWC3_DEPCFG_XFER_COMPLETE_EN	(1 << 8)
- #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN	(1 << 9)
- #define DWC3_DEPCFG_XFER_NOT_READY_EN	(1 << 10)
- #define DWC3_DEPCFG_FIFO_ERROR_EN	(1 << 11)
- #define DWC3_DEPCFG_STREAM_EVENT_EN	(1 << 13)
--#define DWC3_DEPCFG_BINTERVAL_M1(n)	((n) << 16)
-+#define DWC3_DEPCFG_BINTERVAL_M1(n)	(((n) & 0xff) << 16)
- #define DWC3_DEPCFG_STREAM_CAPABLE	(1 << 24)
--#define DWC3_DEPCFG_EP_NUMBER(n)	((n) << 25)
-+#define DWC3_DEPCFG_EP_NUMBER(n)	(((n) & 0x1f) << 25)
- #define DWC3_DEPCFG_BULK_BASED		(1 << 30)
- #define DWC3_DEPCFG_FIFO_BASED		(1 << 31)
-
- /* DEPCFG parameter 0 */
--#define DWC3_DEPCFG_EP_TYPE(n)		((n) << 1)
--#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	((n) << 3)
--#define DWC3_DEPCFG_FIFO_NUMBER(n)	((n) << 17)
--#define DWC3_DEPCFG_BURST_SIZE(n)	((n) << 22)
-+#define DWC3_DEPCFG_EP_TYPE(n)		(((n) & 0x3) << 1)
-+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	(((n) & 0x7ff) << 3)
-+#define DWC3_DEPCFG_FIFO_NUMBER(n)	(((n) & 0x1f) << 17)
-+#define DWC3_DEPCFG_BURST_SIZE(n)	(((n) & 0xf) << 22)
- #define DWC3_DEPCFG_DATA_SEQ_NUM(n)	((n) << 26)
- /* This applies for core versions earlier than 1.94a */
- #define DWC3_DEPCFG_IGN_SEQ_NUM		(1 << 31)
-diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 8d412d8..89081b8 100644
---- a/drivers/usb/gadget/function/f_fs.c
-+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1833,11 +1833,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
- 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
- 	do {
- 		struct usb_endpoint_descriptor *ds;
-+		struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
-+		int needs_comp_desc = false;
- 		int desc_idx;
-
--		if (ffs->gadget->speed == USB_SPEED_SUPER)
-+		if (ffs->gadget->speed == USB_SPEED_SUPER) {
- 			desc_idx = 2;
--		else if (ffs->gadget->speed == USB_SPEED_HIGH)
-+			needs_comp_desc = true;
-+		} else if (ffs->gadget->speed == USB_SPEED_HIGH)
- 			desc_idx = 1;
- 		else
- 			desc_idx = 0;
-@@ -1854,6 +1857,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
-
- 		ep->ep->driver_data = ep;
- 		ep->ep->desc = ds;
-+
-+		comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
-+				USB_DT_ENDPOINT_SIZE);
-+		ep->ep->maxburst = comp_desc->bMaxBurst + 1;
-+
-+		if (needs_comp_desc)
-+			ep->ep->comp_desc = comp_desc;
-+
- 		ret = usb_ep_enable(ep->ep);
- 		if (likely(!ret)) {
- 			epfile->ep = ep;
-diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
-index 27ed51b..29b41b5 100644
---- a/drivers/usb/gadget/function/f_uvc.c
-+++ b/drivers/usb/gadget/function/f_uvc.c
-@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
- 	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
- 	v4l2_event_queue(&uvc->vdev, &v4l2_event);
-
--	/* Pass additional setup data to userspace */
--	if (uvc->event_setup_out && uvc->event_length) {
--		uvc->control_req->length = uvc->event_length;
--		return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
--			uvc->control_req, GFP_ATOMIC);
--	}
--
- 	return 0;
- }
-
-diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
-index a81d9ab..4fa5de2 100644
---- a/drivers/usb/gadget/udc/dummy_hcd.c
-+++ b/drivers/usb/gadget/udc/dummy_hcd.c
-@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev)
- 	int rc;
-
- 	dum = *((void **)dev_get_platdata(&pdev->dev));
-+	/* Clear usb_gadget region for new registration to udc-core */
-+	memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
- 	dum->gadget.name = gadget_name;
- 	dum->gadget.ops = &dummy_ops;
- 	dum->gadget.max_speed = USB_SPEED_SUPER;
-diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
-index b38a228..af0566d 100644
---- a/drivers/usb/host/ohci-at91.c
-+++ b/drivers/usb/host/ohci-at91.c
-@@ -361,7 +361,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-
- 		case USB_PORT_FEAT_SUSPEND:
- 			dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
--			if (valid_port(wIndex)) {
-+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
- 				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
- 						       1);
- 				return 0;
-@@ -404,7 +404,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-
- 		case USB_PORT_FEAT_SUSPEND:
- 			dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
--			if (valid_port(wIndex)) {
-+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
- 				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
- 						       0);
- 				return 0;
-diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
-index 74c42f7..3425154 100644
---- a/drivers/usb/host/xhci-dbg.c
-+++ b/drivers/usb/host/xhci-dbg.c
-@@ -111,7 +111,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
- 	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
-
- 	/* xhci 1.1 controllers have the HCCPARAMS2 register */
--	if (hci_version > 100) {
-+	if (hci_version > 0x100) {
- 		temp = readl(&xhci->cap_regs->hcc_params2);
- 		xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
- 		xhci_dbg(xhci, "  HC %s Force save context capability",
-diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
-index abe3606..5895e84 100644
---- a/drivers/usb/host/xhci-plat.c
-+++ b/drivers/usb/host/xhci-plat.c
-@@ -274,6 +274,8 @@ static int xhci_plat_remove(struct platform_device *dev)
- 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
- 	struct clk *clk = xhci->clk;
-
-+	xhci->xhc_state |= XHCI_STATE_REMOVING;
-+
- 	usb_remove_hcd(xhci->shared_hcd);
- 	usb_phy_shutdown(hcd->usb_phy);
-
-diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
-index 095778f..37c63cb 100644
---- a/drivers/usb/misc/iowarrior.c
-+++ b/drivers/usb/misc/iowarrior.c
-@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface,
- 	iface_desc = interface->cur_altsetting;
- 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
-
--	if (iface_desc->desc.bNumEndpoints < 1) {
--		dev_err(&interface->dev, "Invalid number of endpoints\n");
--		retval = -EINVAL;
--		goto error;
--	}
--
- 	/* set up the endpoint information */
- 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- 		endpoint = &iface_desc->endpoint[i].desc;
-@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface,
- 			/* this one will match for the IOWarrior56 only */
- 			dev->int_out_endpoint = endpoint;
- 	}
-+
-+	if (!dev->int_in_endpoint) {
-+		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
-+		retval = -ENODEV;
-+		goto error;
-+	}
-+
-+	if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
-+		if (!dev->int_out_endpoint) {
-+			dev_err(&interface->dev, "no interrupt-out endpoint found\n");
-+			retval = -ENODEV;
-+			goto error;
-+		}
-+	}
-+
- 	/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
- 	dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
- 	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
-diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
-index 6a1df9e..30bf0f5 100644
---- a/drivers/usb/serial/digi_acceleport.c
-+++ b/drivers/usb/serial/digi_acceleport.c
-@@ -1482,16 +1482,20 @@ static int digi_read_oob_callback(struct urb *urb)
- 	struct usb_serial *serial = port->serial;
- 	struct tty_struct *tty;
- 	struct digi_port *priv = usb_get_serial_port_data(port);
-+	unsigned char *buf = urb->transfer_buffer;
- 	int opcode, line, status, val;
- 	int i;
- 	unsigned int rts;
-
-+	if (urb->actual_length < 4)
-+		return -1;
-+
- 	/* handle each oob command */
--	for (i = 0; i < urb->actual_length - 3;) {
--		opcode = ((unsigned char *)urb->transfer_buffer)[i++];
--		line = ((unsigned char *)urb->transfer_buffer)[i++];
--		status = ((unsigned char *)urb->transfer_buffer)[i++];
--		val = ((unsigned char *)urb->transfer_buffer)[i++];
-+	for (i = 0; i < urb->actual_length - 3; i += 4) {
-+		opcode = buf[i];
-+		line = buf[i + 1];
-+		status = buf[i + 2];
-+		val = buf[i + 3];
-
- 		dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
- 			opcode, line, status, val);
-diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
-index c02808a..f1a8fdc 100644
---- a/drivers/usb/serial/io_ti.c
-+++ b/drivers/usb/serial/io_ti.c
-@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb)
- 	function    = TIUMP_GET_FUNC_FROM_CODE(data[0]);
- 	dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
- 		port_number, function, data[1]);
-+
-+	if (port_number >= edge_serial->serial->num_ports) {
-+		dev_err(dev, "bad port number %d\n", port_number);
-+		goto exit;
-+	}
-+
- 	port = edge_serial->serial->port[port_number];
- 	edge_port = usb_get_serial_port_data(port);
- 	if (!edge_port) {
-@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb)
-
- 	port_number = edge_port->port->port_number;
-
--	if (edge_port->lsr_event) {
-+	if (urb->actual_length > 0 && edge_port->lsr_event) {
- 		edge_port->lsr_event = 0;
- 		dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
- 			__func__, port_number, edge_port->lsr_mask, *data);
-diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
-index a180b17..76564b3 100644
---- a/drivers/usb/serial/omninet.c
-+++ b/drivers/usb/serial/omninet.c
-@@ -142,12 +142,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
-
- static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
- {
--	struct usb_serial *serial = port->serial;
--	struct usb_serial_port *wport;
--
--	wport = serial->port[1];
--	tty_port_tty_set(&wport->port, tty);
1501 |
-- |
1502 |
- return usb_serial_generic_open(tty, port); |
1503 |
- } |
1504 |
- |
1505 |
-diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c |
1506 |
-index 93c6c9b..8a069aa 100644 |
1507 |
---- a/drivers/usb/serial/safe_serial.c |
1508 |
-+++ b/drivers/usb/serial/safe_serial.c |
1509 |
-@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb) |
1510 |
- if (!safe) |
1511 |
- goto out; |
1512 |
- |
1513 |
-+ if (length < 2) { |
1514 |
-+ dev_err(&port->dev, "malformed packet\n"); |
1515 |
-+ return; |
1516 |
-+ } |
1517 |
-+ |
1518 |
- fcs = fcs_compute10(data, length, CRC10_INITFCS); |
1519 |
- if (fcs) { |
1520 |
- dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); |
1521 |
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
1522 |
-index 1d4f5fa..dc9d64a 100644 |
1523 |
---- a/fs/ext4/inode.c |
1524 |
-+++ b/fs/ext4/inode.c |
1525 |
-@@ -3824,6 +3824,10 @@ static int ext4_block_truncate_page(handle_t *handle, |
1526 |
- unsigned blocksize; |
1527 |
- struct inode *inode = mapping->host; |
1528 |
- |
1529 |
-+ /* If we are processing an encrypted inode during orphan list handling */ |
1530 |
-+ if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode)) |
1531 |
-+ return 0; |
1532 |
-+ |
1533 |
- blocksize = inode->i_sb->s_blocksize; |
1534 |
- length = blocksize - (offset & (blocksize - 1)); |
1535 |
- |
1536 |
-diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h |
1537 |
-index eb209d4..dc79773 100644 |
1538 |
---- a/include/linux/user_namespace.h |
1539 |
-+++ b/include/linux/user_namespace.h |
1540 |
-@@ -65,7 +65,7 @@ struct ucounts { |
1541 |
- struct hlist_node node; |
1542 |
- struct user_namespace *ns; |
1543 |
- kuid_t uid; |
1544 |
-- atomic_t count; |
1545 |
-+ int count; |
1546 |
- atomic_t ucount[UCOUNT_COUNTS]; |
1547 |
- }; |
1548 |
- |
1549 |
-diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h |
1550 |
-index 14e49c7..b35533b 100644 |
1551 |
---- a/include/trace/events/syscalls.h |
1552 |
-+++ b/include/trace/events/syscalls.h |
1553 |
-@@ -1,5 +1,6 @@ |
1554 |
- #undef TRACE_SYSTEM |
1555 |
- #define TRACE_SYSTEM raw_syscalls |
1556 |
-+#undef TRACE_INCLUDE_FILE |
1557 |
- #define TRACE_INCLUDE_FILE syscalls |
1558 |
- |
1559 |
- #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) |
1560 |
-diff --git a/kernel/ucount.c b/kernel/ucount.c |
1561 |
-index 4bbd38e..f4ac185 100644 |
1562 |
---- a/kernel/ucount.c |
1563 |
-+++ b/kernel/ucount.c |
1564 |
-@@ -139,7 +139,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) |
1565 |
- |
1566 |
- new->ns = ns; |
1567 |
- new->uid = uid; |
1568 |
-- atomic_set(&new->count, 0); |
1569 |
-+ new->count = 0; |
1570 |
- |
1571 |
- spin_lock_irq(&ucounts_lock); |
1572 |
- ucounts = find_ucounts(ns, uid, hashent); |
1573 |
-@@ -150,8 +150,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) |
1574 |
- ucounts = new; |
1575 |
- } |
1576 |
- } |
1577 |
-- if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) |
1578 |
-+ if (ucounts->count == INT_MAX) |
1579 |
- ucounts = NULL; |
1580 |
-+ else |
1581 |
-+ ucounts->count += 1; |
1582 |
- spin_unlock_irq(&ucounts_lock); |
1583 |
- return ucounts; |
1584 |
- } |
1585 |
-@@ -160,13 +162,15 @@ static void put_ucounts(struct ucounts *ucounts) |
1586 |
- { |
1587 |
- unsigned long flags; |
1588 |
- |
1589 |
-- if (atomic_dec_and_test(&ucounts->count)) { |
1590 |
-- spin_lock_irqsave(&ucounts_lock, flags); |
1591 |
-+ spin_lock_irqsave(&ucounts_lock, flags); |
1592 |
-+ ucounts->count -= 1; |
1593 |
-+ if (!ucounts->count) |
1594 |
- hlist_del_init(&ucounts->node); |
1595 |
-- spin_unlock_irqrestore(&ucounts_lock, flags); |
1596 |
-+ else |
1597 |
-+ ucounts = NULL; |
1598 |
-+ spin_unlock_irqrestore(&ucounts_lock, flags); |
1599 |
- |
1600 |
-- kfree(ucounts); |
1601 |
-- } |
1602 |
-+ kfree(ucounts); |
1603 |
- } |
1604 |
- |
1605 |
- static inline bool atomic_inc_below(atomic_t *v, int u) |
1606 |
-diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c |
1607 |
-index ebe1b9f..85814d1 100644 |
1608 |
---- a/virt/kvm/arm/vgic/vgic-mmio.c |
1609 |
-+++ b/virt/kvm/arm/vgic/vgic-mmio.c |
1610 |
-@@ -187,21 +187,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, |
1611 |
- static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, |
1612 |
- bool new_active_state) |
1613 |
- { |
1614 |
-+ struct kvm_vcpu *requester_vcpu; |
1615 |
- spin_lock(&irq->irq_lock); |
1616 |
-+ |
1617 |
-+ /* |
1618 |
-+ * The vcpu parameter here can mean multiple things depending on how |
1619 |
-+ * this function is called; when handling a trap from the kernel it |
1620 |
-+ * depends on the GIC version, and these functions are also called as |
1621 |
-+ * part of save/restore from userspace. |
1622 |
-+ * |
1623 |
-+ * Therefore, we have to figure out the requester in a reliable way. |
1624 |
-+ * |
1625 |
-+ * When accessing VGIC state from user space, the requester_vcpu is |
1626 |
-+ * NULL, which is fine, because we guarantee that no VCPUs are running |
1627 |
-+ * when accessing VGIC state from user space so irq->vcpu->cpu is |
1628 |
-+ * always -1. |
1629 |
-+ */ |
1630 |
-+ requester_vcpu = kvm_arm_get_running_vcpu(); |
1631 |
-+ |
1632 |
- /* |
1633 |
- * If this virtual IRQ was written into a list register, we |
1634 |
- * have to make sure the CPU that runs the VCPU thread has |
1635 |
-- * synced back LR state to the struct vgic_irq. We can only |
1636 |
-- * know this for sure, when either this irq is not assigned to |
1637 |
-- * anyone's AP list anymore, or the VCPU thread is not |
1638 |
-- * running on any CPUs. |
1639 |
-+ * synced back the LR state to the struct vgic_irq. |
1640 |
- * |
1641 |
-- * In the opposite case, we know the VCPU thread may be on its |
1642 |
-- * way back from the guest and still has to sync back this |
1643 |
-- * IRQ, so we release and re-acquire the spin_lock to let the |
1644 |
-- * other thread sync back the IRQ. |
1645 |
-+ * As long as the conditions below are true, we know the VCPU thread |
1646 |
-+ * may be on its way back from the guest (we kicked the VCPU thread in |
1647 |
-+ * vgic_change_active_prepare) and still has to sync back this IRQ, |
1648 |
-+ * so we release and re-acquire the spin_lock to let the other thread |
1649 |
-+ * sync back the IRQ. |
1650 |
- */ |
1651 |
- while (irq->vcpu && /* IRQ may have state in an LR somewhere */ |
1652 |
-+ irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */ |
1653 |
- irq->vcpu->cpu != -1) /* VCPU thread is running */ |
1654 |
- cond_resched_lock(&irq->irq_lock); |
1655 |
- |
1656 |
|
1657 |
diff --git a/4.9.16/0000_README b/4.9.18/0000_README
similarity index 91%
rename from 4.9.16/0000_README
rename to 4.9.18/0000_README
index 5b280f3..8c12f63 100644
--- a/4.9.16/0000_README
+++ b/4.9.18/0000_README
@@ -2,11 +2,15 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch: 1015_linux-4.9.16.patch
+Patch: 1016_linux-4.9.17.patch
 From: http://www.kernel.org
-Desc: Linux 4.9.16
+Desc: Linux 4.9.17
 
-Patch: 4420_grsecurity-3.1-4.9.16-201703180820.patch
+Patch: 1017_linux-4.9.18.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.18
+
+Patch: 4420_grsecurity-3.1-4.9.18-201703261106.patch
 From: http://www.grsecurity.net
 Desc: hardened-sources base patch from upstream grsecurity
 

diff --git a/4.9.18/1016_linux-4.9.17.patch b/4.9.18/1016_linux-4.9.17.patch
new file mode 100644
index 0000000..1a83496
--- /dev/null
+++ b/4.9.18/1016_linux-4.9.17.patch
@@ -0,0 +1,6091 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index 405da11..d11af52 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -42,24 +42,26 @@ file acts as a registry of software workarounds in the Linux Kernel and
+ will be updated when new workarounds are committed and backported to
+ stable kernels.
+ 
+-| Implementor    | Component       | Erratum ID      | Kconfig                 |
+-+----------------+-----------------+-----------------+-------------------------+
+-| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319    |
+-| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319    |
+-| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069    |
+-| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472    |
+-| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719    |
+-| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419    |
+-| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
+-| ARM            | Cortex-A57      | #852523         | N/A                     |
+-| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+-| ARM            | Cortex-A72      | #853709         | N/A                     |
+-| ARM            | MMU-500         | #841119,#826419 | N/A                     |
+-|                |                 |                 |                         |
+-| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+-| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
+-| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
+-| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
+-| Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
+-|                |                 |                 |                         |
+-| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585     |
++| Implementor    | Component       | Erratum ID      | Kconfig                     |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
++| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
++| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069        |
++| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472        |
++| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719        |
++| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419        |
++| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075        |
++| ARM            | Cortex-A57      | #852523         | N/A                         |
++| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
++| ARM            | Cortex-A72      | #853709         | N/A                         |
++| ARM            | MMU-500         | #841119,#826419 | N/A                         |
++|                |                 |                 |                             |
++| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
++| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
++| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
++| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
++| Cavium         | ThunderX SMMUv2 | #27704          | N/A                         |
++|                |                 |                 |                             |
++| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
++|                |                 |                 |                             |
++| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+diff --git a/Makefile b/Makefile
+index 4e0f962..004f90a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 969ef88..cf57a77 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -474,6 +474,16 @@ config CAVIUM_ERRATUM_27456
+ 
+ If unsure, say Y.
+ 
++config QCOM_QDF2400_ERRATUM_0065
++ bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
++ default y
++ help
++ On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
++ ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
++ been indicated as 16Bytes (0xf), not 8Bytes (0x7).
++
++ If unsure, say Y.
++
+ endmenu
+ 
+ 
+diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
+index 88e2f2b..55889d0 100644
+--- a/arch/arm64/kvm/hyp/tlb.c
++++ b/arch/arm64/kvm/hyp/tlb.c
+@@ -17,14 +17,62 @@
+ 
+ #include <asm/kvm_hyp.h>
+ 
++static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
++{
++ u64 val;
++
++ /*
++ * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
++ * most TLB operations target EL2/EL0. In order to affect the
++ * guest TLBs (EL1/EL0), we need to change one of these two
++ * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
++ * let's flip TGE before executing the TLB operation.
++ */
++ write_sysreg(kvm->arch.vttbr, vttbr_el2);
++ val = read_sysreg(hcr_el2);
++ val &= ~HCR_TGE;
++ write_sysreg(val, hcr_el2);
++ isb();
++}
++
++static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
++{
++ write_sysreg(kvm->arch.vttbr, vttbr_el2);
++ isb();
++}
++
++static hyp_alternate_select(__tlb_switch_to_guest,
++ __tlb_switch_to_guest_nvhe,
++ __tlb_switch_to_guest_vhe,
++ ARM64_HAS_VIRT_HOST_EXTN);
++
++static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
++{
++ /*
++ * We're done with the TLB operation, let's restore the host's
++ * view of HCR_EL2.
++ */
++ write_sysreg(0, vttbr_el2);
++ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
++}
++
++static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
++{
++ write_sysreg(0, vttbr_el2);
++}
++
++static hyp_alternate_select(__tlb_switch_to_host,
++ __tlb_switch_to_host_nvhe,
++ __tlb_switch_to_host_vhe,
++ ARM64_HAS_VIRT_HOST_EXTN);
++
+ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+ {
+ dsb(ishst);
+ 
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+- isb();
++ __tlb_switch_to_guest()(kvm);
+ 
+ /*
+ * We could do so much better if we had the VA as well.
+@@ -45,7 +93,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+ dsb(ish);
+ isb();
+ 
+- write_sysreg(0, vttbr_el2);
++ __tlb_switch_to_host()(kvm);
+ }
+ 
+ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
+@@ -54,14 +102,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
+ 
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+- isb();
++ __tlb_switch_to_guest()(kvm);
+ 
+ asm volatile("tlbi vmalls12e1is" : : );
+ dsb(ish);
+ isb();
+ 
+- write_sysreg(0, vttbr_el2);
++ __tlb_switch_to_host()(kvm);
+ }
+ 
+ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+@@ -69,14 +116,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+ 
+ /* Switch to requested VMID */
+- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+- isb();
++ __tlb_switch_to_guest()(kvm);
+ 
+ asm volatile("tlbi vmalle1" : : );
+ dsb(nsh);
+ isb();
+ 
+- write_sysreg(0, vttbr_el2);
++ __tlb_switch_to_host()(kvm);
+ }
+ 
+ void __hyp_text __kvm_flush_vm_context(void)
+diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+index 9fa046d..4119945 100644
+--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
++++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
+ {
+ u32 *key = crypto_tfm_ctx(tfm);
+ 
+- *key = 0;
++ *key = ~0;
+ 
+ return 0;
+ }
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 5c45114..b9e3f0a 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
+ struct mm_iommu_table_group_mem_t;
+ 
+ extern int isolate_lru_page(struct page *page); /* from internal.h */
+-extern bool mm_iommu_preregistered(void);
+-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
++extern bool mm_iommu_preregistered(struct mm_struct *mm);
++extern long mm_iommu_get(struct mm_struct *mm,
++ unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem);
+-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
+-extern void mm_iommu_init(mm_context_t *ctx);
+-extern void mm_iommu_cleanup(mm_context_t *ctx);
+-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+- unsigned long size);
+-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
+- unsigned long entries);
++extern long mm_iommu_put(struct mm_struct *mm,
++ struct mm_iommu_table_group_mem_t *mem);
++extern void mm_iommu_init(struct mm_struct *mm);
++extern void mm_iommu_cleanup(struct mm_struct *mm);
++extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
++ unsigned long ua, unsigned long size);
++extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
++ unsigned long ua, unsigned long entries);
+ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+ unsigned long ua, unsigned long *hpa);
+ extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 270ee30..f516ac5 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p)
+ init_mm.context.pte_frag = NULL;
+ #endif
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+- mm_iommu_init(&init_mm.context);
++ mm_iommu_init(&init_mm);
+ #endif
+ irqstack_early_init();
+ exc_lvl_early_init();
+diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
+index b114f8b..73bf6e1 100644
+--- a/arch/powerpc/mm/mmu_context_book3s64.c
++++ b/arch/powerpc/mm/mmu_context_book3s64.c
+@@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ mm->context.pte_frag = NULL;
+ #endif
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+- mm_iommu_init(&mm->context);
++ mm_iommu_init(mm);
+ #endif
+ return 0;
+ }
+@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
+ }
+ #endif
+ 
+-
+ void destroy_context(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+- mm_iommu_cleanup(&mm->context);
++ WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
+ #endif
+-
+ #ifdef CONFIG_PPC_ICSWX
+ drop_cop(mm->context.acop, mm);
+ kfree(mm->context.cop_lockp);
+diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
+index e0f1c33..7de7124 100644
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
+ }
+ 
+ pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
+- current->pid,
++ current ? current->pid : 0,
+ incr ? '+' : '-',
+ npages << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
+@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
+ return ret;
+ }
+ 
+-bool mm_iommu_preregistered(void)
++bool mm_iommu_preregistered(struct mm_struct *mm)
+ {
+- if (!current || !current->mm)
+- return false;
+-
+- return !list_empty(&current->mm->context.iommu_group_mem_list);
++ return !list_empty(&mm->context.iommu_group_mem_list);
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
+ 
+@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
+ return 0;
+ }
+ 
+-long mm_iommu_get(unsigned long ua, unsigned long entries,
++long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem)
+ {
+ struct mm_iommu_table_group_mem_t *mem;
+ long i, j, ret = 0, locked_entries = 0;
+ struct page *page = NULL;
+ 
+- if (!current || !current->mm)
+- return -ESRCH; /* process exited */
+-
+ mutex_lock(&mem_list_mutex);
+ 
+- list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
++ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
+ next) {
+ if ((mem->ua == ua) && (mem->entries == entries)) {
+ ++mem->used;
+@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
+ 
+ }
+ 
+- ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
++ ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+ if (ret)
+ goto unlock_exit;
+ 
+@@ -190,7 +184,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
+ * of the CMA zone if possible. NOTE: faulting in + migration
+ * can be expensive. Batching can be considered later
+ */
+- if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
++ if (is_migrate_cma_page(page)) {
+ if (mm_iommu_move_page_from_cma(page))
+ goto populate;
+ if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
+@@ -215,11 +209,11 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
+ mem->entries = entries;
+ *pmem = mem;
+ 
+- list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
++ list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+ 
+ unlock_exit:
+ if (locked_entries && ret)
+- mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
++ mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+ 
+ mutex_unlock(&mem_list_mutex);
+ 
+@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
+ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
+ {
+ list_del_rcu(&mem->next);
+- mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
+ call_rcu(&mem->rcu, mm_iommu_free);
+ }
+ 
+-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
++long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
+ {
+ long ret = 0;
+ 
+- if (!current || !current->mm)
+- return -ESRCH; /* process exited */
+-
+ mutex_lock(&mem_list_mutex);
+ 
+ if (mem->used == 0) {
+@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+ /* @mapped became 0 so now mappings are disabled, release the region */
+ mm_iommu_release(mem);
+ 
++ mm_iommu_adjust_locked_vm(mm, mem->entries, false);
++
+ unlock_exit:
+ mutex_unlock(&mem_list_mutex);
+ 
+@@ -304,14 +296,12 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_put);
+ 
+-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+- unsigned long size)
++struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
++ unsigned long ua, unsigned long size)
+ {
+ struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+ 
+- list_for_each_entry_rcu(mem,
+- &current->mm->context.iommu_group_mem_list,
+- next) {
++ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ if ((mem->ua <= ua) &&
+ (ua + size <= mem->ua +
+ (mem->entries << PAGE_SHIFT))) {
+@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_lookup);
+ 
+-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
+- unsigned long entries)
++struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
++ unsigned long ua, unsigned long entries)
+ {
+ struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+ 
+- list_for_each_entry_rcu(mem,
+- &current->mm->context.iommu_group_mem_list,
+- next) {
++ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ if ((mem->ua == ua) && (mem->entries == entries)) {
+ ret = mem;
+ break;
+@@ -373,17 +361,7 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
+ 
+-void mm_iommu_init(mm_context_t *ctx)
++void mm_iommu_init(struct mm_struct *mm)
+ {
+- INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
+-}
+-
+-void mm_iommu_cleanup(mm_context_t *ctx)
+-{
+- struct mm_iommu_table_group_mem_t *mem, *tmp;
+-
+- list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
+- list_del_rcu(&mem->next);
+- mm_iommu_do_free(mem);
+- }
++ INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
+ }
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 7fe88bb..38623e2 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2096,8 +2096,8 @@ static int x86_pmu_event_init(struct perf_event *event)
+ 
+ static void refresh_pce(void *ignored)
+ {
+- if (current->mm)
+- load_mm_cr4(current->mm);
++ if (current->active_mm)
++ load_mm_cr4(current->active_mm);
+ }
+ 
+ static void x86_pmu_event_mapped(struct perf_event *event)
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 8f44c5a..f228f74 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -31,6 +31,7 @@
+ #include <asm/apic.h>
+ #include <asm/timer.h>
+ #include <asm/reboot.h>
++#include <asm/nmi.h>
+ 
+ struct ms_hyperv_info ms_hyperv;
+ EXPORT_SYMBOL_GPL(ms_hyperv);
+@@ -158,6 +159,26 @@ static unsigned char hv_get_nmi_reason(void)
+ return 0;
+ }
+ 
++#ifdef CONFIG_X86_LOCAL_APIC
++/*
++ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
++ * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
++ * unknown NMI on the first CPU which gets it.
++ */
++static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
++{
++ static atomic_t nmi_cpu = ATOMIC_INIT(-1);
++
++ if (!unknown_nmi_panic)
++ return NMI_DONE;
++
++ if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
++ return NMI_HANDLED;
++
++ return NMI_DONE;
++}
++#endif
++
+ static void __init ms_hyperv_init_platform(void)
+ {
+ /*
+@@ -183,6 +204,9 @@ static void __init ms_hyperv_init_platform(void)
+ pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+ lapic_timer_frequency);
+ }
++
++ register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
++ "hv_nmi_unknown");
+ #endif
+ 
+ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 54a2372..b5785c1 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@××××.de> SuSE
+ */
+ 
++#define DISABLE_BRANCH_PROFILING
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+ #include <linux/types.h>
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 46b2f41..eea88fe 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1287,6 +1287,8 @@ static int __init init_tsc_clocksource(void)
+ * exporting a reliable TSC.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
++ if (boot_cpu_has(X86_FEATURE_ART))
++ art_related_clocksource = &clocksource_tsc;
+ clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ return 0;
+ }
+diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
+index 0493c17..333362f 100644
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -1,3 +1,4 @@
++#define DISABLE_BRANCH_PROFILING
+ #define pr_fmt(fmt) "kasan: " fmt
+ #include <linux/bootmem.h>
+ #include <linux/kasan.h>
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index bedfab9..a00a6c0 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ return 1;
+ 
+ for_each_pci_msi_entry(msidesc, dev) {
+- __pci_read_msi_msg(msidesc, &msg);
+- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
+- if (msg.data != XEN_PIRQ_MSI_DATA ||
+- xen_irq_from_pirq(pirq) < 0) {
+- pirq = xen_allocate_pirq_msi(dev, msidesc);
+- if (pirq < 0) {
+- irq = -ENODEV;
+- goto error;
+- }
+- xen_msi_compose_msg(dev, pirq, &msg);
+- __pci_write_msi_msg(msidesc, &msg);
+- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+- } else {
+- dev_dbg(&dev->dev,
+- "xen: msi already bound to pirq=%d\n", pirq);
++ pirq = xen_allocate_pirq_msi(dev, msidesc);
++ if (pirq < 0) {
++ irq = -ENODEV;
++ goto error;
+ }
++ xen_msi_compose_msg(dev, pirq, &msg);
++ __pci_write_msi_msg(msidesc, &msg);
++ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
+ (type == PCI_CAP_ID_MSI) ? nvec : 1,
+ (type == PCI_CAP_ID_MSIX) ?
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 0774799..c6fee74 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+ __set_bit(WRITE_16, filter->write_ok);
+ __set_bit(WRITE_LONG, filter->write_ok);
+ __set_bit(WRITE_LONG_2, filter->write_ok);
++ __set_bit(WRITE_SAME, filter->write_ok);
++ __set_bit(WRITE_SAME_16, filter->write_ok);
++ __set_bit(WRITE_SAME_32, filter->write_ok);
+ __set_bit(ERASE, filter->write_ok);
+ __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ __set_bit(MODE_SELECT, filter->write_ok);
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index bdc67ba..4421f7c 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
+ },
+ },
++ {
++ .callback = dmi_enable_rev_override,
++ .ident = "DELL Precision 5520",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
++ },
++ },
++ {
++ .callback = dmi_enable_rev_override,
++ .ident = "DELL Precision 3520",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
++ },
++ },
++ /*
++ * Resolves a quirk with the Dell Latitude 3350 that
++ * causes the ethernet adapter to not function.
++ */
++ {
++ .callback = dmi_enable_rev_override,
++ .ident = "DELL Latitude 3350",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
++ },
++ },
+ #endif
+ {}
+ };
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 3bbd2a5..2acaa77 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1598,7 +1598,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
+ .a2w_reg = A2W_PLLH_AUX,
+ .load_mask = CM_PLLH_LOADAUX,
+ .hold_mask = 0,
+- .fixed_divider = 10),
++ .fixed_divider = 1),
+ [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
+ .name = "pllh_pix",
+ .source_pll = "pllh",
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 015f711..d235fbe 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -691,7 +691,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
+ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+ ioat_chan->completion =
+ dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
+- GFP_KERNEL, &ioat_chan->completion_dma);
++ GFP_NOWAIT, &ioat_chan->completion_dma);
+ if (!ioat_chan->completion)
+ return -ENOMEM;
+ 
+@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+ 
+ order = IOAT_MAX_ORDER;
+- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
++ ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
+ if (!ring)
+ return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild |
2361 |
+index 77a52b5..70f0344 100644 |
2362 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild |
2363 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild |
2364 |
+@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o |
2365 |
+ nvkm-y += nvkm/engine/disp/cursgt215.o |
2366 |
+ nvkm-y += nvkm/engine/disp/cursgf119.o |
2367 |
+ nvkm-y += nvkm/engine/disp/cursgk104.o |
2368 |
++nvkm-y += nvkm/engine/disp/cursgp102.o |
2369 |
+ |
2370 |
+ nvkm-y += nvkm/engine/disp/oimmnv50.o |
2371 |
+ nvkm-y += nvkm/engine/disp/oimmg84.o |
2372 |
+ nvkm-y += nvkm/engine/disp/oimmgt215.o |
2373 |
+ nvkm-y += nvkm/engine/disp/oimmgf119.o |
2374 |
+ nvkm-y += nvkm/engine/disp/oimmgk104.o |
2375 |
++nvkm-y += nvkm/engine/disp/oimmgp102.o |
2376 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c |
2377 |
+index dd2953b..9d90d8b 100644 |
2378 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c |
2379 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c |
2380 |
+@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug) |
2381 |
+ |
2382 |
+ if (mthd->addr) { |
2383 |
+ snprintf(cname_, sizeof(cname_), "%s %d", |
2384 |
+- mthd->name, chan->chid); |
2385 |
++ mthd->name, chan->chid.user); |
2386 |
+ cname = cname_; |
2387 |
+ } |
2388 |
+ |
2389 |
+@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size, |
2390 |
+ if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { |
2391 |
+ notify->size = sizeof(struct nvif_notify_uevent_rep); |
2392 |
+ notify->types = 1; |
2393 |
+- notify->index = chan->chid; |
2394 |
++ notify->index = chan->chid.user; |
2395 |
+ return 0; |
2396 |
+ } |
2397 |
+ |
2398 |
+@@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data) |
2399 |
+ struct nv50_disp_chan *chan = nv50_disp_chan(object); |
2400 |
+ struct nv50_disp *disp = chan->root->disp; |
2401 |
+ struct nvkm_device *device = disp->base.engine.subdev.device; |
2402 |
+- *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr); |
2403 |
++ *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr); |
2404 |
+ return 0; |
2405 |
+ } |
2406 |
+ |
2407 |
+@@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data) |
2408 |
+ struct nv50_disp_chan *chan = nv50_disp_chan(object); |
2409 |
+ struct nv50_disp *disp = chan->root->disp; |
2410 |
+ struct nvkm_device *device = disp->base.engine.subdev.device; |
2411 |
+- nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data); |
2412 |
++ nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data); |
2413 |
+ return 0; |
2414 |
+ } |
2415 |
+ |
2416 |
+@@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size) |
2417 |
+ struct nv50_disp *disp = chan->root->disp; |
2418 |
+ struct nvkm_device *device = disp->base.engine.subdev.device; |
2419 |
+ *addr = device->func->resource_addr(device, 0) + |
2420 |
+- 0x640000 + (chan->chid * 0x1000); |
2421 |
++ 0x640000 + (chan->chid.user * 0x1000); |
2422 |
+ *size = 0x001000; |
2423 |
+ return 0; |
2424 |
+ } |
2425 |
+@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object) |
2426 |
+ { |
2427 |
+ struct nv50_disp_chan *chan = nv50_disp_chan(object); |
2428 |
+ struct nv50_disp *disp = chan->root->disp; |
2429 |
+- if (chan->chid >= 0) |
2430 |
+- disp->chan[chan->chid] = NULL; |
2431 |
++ if (chan->chid.user >= 0) |
2432 |
++ disp->chan[chan->chid.user] = NULL; |
2433 |
+ return chan->func->dtor ? chan->func->dtor(chan) : chan; |
2434 |
+ } |
2435 |
+ |
2436 |
+@@ -263,7 +263,7 @@ nv50_disp_chan = { |
2437 |
+ int |
2438 |
+ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func, |
2439 |
+ const struct nv50_disp_chan_mthd *mthd, |
2440 |
+- struct nv50_disp_root *root, int chid, int head, |
2441 |
++ struct nv50_disp_root *root, int ctrl, int user, int head, |
2442 |
+ const struct nvkm_oclass *oclass, |
2443 |
+ struct nv50_disp_chan *chan) |
2444 |
+ { |
2445 |
+@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func, |
2446 |
+ chan->func = func; |
2447 |
+ chan->mthd = mthd; |
2448 |
+ chan->root = root; |
2449 |
+- chan->chid = chid; |
2450 |
++ chan->chid.ctrl = ctrl; |
2451 |
++ chan->chid.user = user; |
2452 |
+ chan->head = head; |
2453 |
+ |
2454 |
+- if (disp->chan[chan->chid]) { |
2455 |
+- chan->chid = -1; |
2456 |
++ if (disp->chan[chan->chid.user]) { |
2457 |
++ chan->chid.user = -1; |
2458 |
+ return -EBUSY; |
2459 |
+ } |
2460 |
+- disp->chan[chan->chid] = chan; |
2461 |
++ disp->chan[chan->chid.user] = chan; |
2462 |
+ return 0; |
2463 |
+ } |
2464 |
+ |
2465 |
+ int |
2466 |
+ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func, |
2467 |
+ const struct nv50_disp_chan_mthd *mthd, |
2468 |
+- struct nv50_disp_root *root, int chid, int head, |
2469 |
++ struct nv50_disp_root *root, int ctrl, int user, int head, |
2470 |
+ const struct nvkm_oclass *oclass, |
2471 |
+ struct nvkm_object **pobject) |
2472 |
+ { |
2473 |
+@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func, |
2474 |
+ return -ENOMEM; |
2475 |
+ *pobject = &chan->object; |
2476 |
+ |
2477 |
+- return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan); |
2478 |
++ return nv50_disp_chan_ctor(func, mthd, root, ctrl, user, |
2479 |
++ head, oclass, chan); |
2480 |
+ } |
2481 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h |
2482 |
+index f5f683d..737b38f 100644 |
2483 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h |
2484 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h |
2485 |
+@@ -7,7 +7,11 @@ struct nv50_disp_chan { |
2486 |
+ const struct nv50_disp_chan_func *func; |
2487 |
+ const struct nv50_disp_chan_mthd *mthd; |
2488 |
+ struct nv50_disp_root *root; |
2489 |
+- int chid; |
2490 |
++ |
2491 |
++ struct { |
2492 |
++ int ctrl; |
2493 |
++ int user; |
2494 |
++ } chid; |
2495 |
+ int head; |
2496 |
+ |
2497 |
+ struct nvkm_object object; |
2498 |
+@@ -25,11 +29,11 @@ struct nv50_disp_chan_func { |
2499 |
+ |
2500 |
+ int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *, |
2501 |
+ const struct nv50_disp_chan_mthd *, |
2502 |
+- struct nv50_disp_root *, int chid, int head, |
2503 |
++ struct nv50_disp_root *, int ctrl, int user, int head, |
2504 |
+ const struct nvkm_oclass *, struct nv50_disp_chan *); |
2505 |
+ int nv50_disp_chan_new_(const struct nv50_disp_chan_func *, |
2506 |
+ const struct nv50_disp_chan_mthd *, |
2507 |
+- struct nv50_disp_root *, int chid, int head, |
2508 |
++ struct nv50_disp_root *, int ctrl, int user, int head, |
2509 |
+ const struct nvkm_oclass *, struct nvkm_object **); |
2510 |
+ |
2511 |
+ extern const struct nv50_disp_chan_func nv50_disp_pioc_func; |
2512 |
+@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd; |
2513 |
+ struct nv50_disp_pioc_oclass { |
2514 |
+ int (*ctor)(const struct nv50_disp_chan_func *, |
2515 |
+ const struct nv50_disp_chan_mthd *, |
2516 |
+- struct nv50_disp_root *, int chid, |
2517 |
++ struct nv50_disp_root *, int ctrl, int user, |
2518 |
+ const struct nvkm_oclass *, void *data, u32 size, |
2519 |
+ struct nvkm_object **); |
2520 |
+ struct nvkm_sclass base; |
2521 |
+ const struct nv50_disp_chan_func *func; |
2522 |
+ const struct nv50_disp_chan_mthd *mthd; |
2523 |
+- int chid; |
2524 |
++ struct { |
2525 |
++ int ctrl; |
2526 |
++ int user; |
2527 |
++ } chid; |
2528 |
+ }; |
2529 |
+ |
2530 |
+ extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass; |
2531 |
+@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass; |
2532 |
+ extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass; |
2533 |
+ extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass; |
2534 |
+ |
2535 |
++extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass; |
2536 |
++extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass; |
2537 |
+ |
2538 |
+ int nv50_disp_curs_new(const struct nv50_disp_chan_func *, |
2539 |
+ const struct nv50_disp_chan_mthd *, |
2540 |
+- struct nv50_disp_root *, int chid, |
2541 |
++ struct nv50_disp_root *, int ctrl, int user, |
2542 |
+ const struct nvkm_oclass *, void *data, u32 size, |
2543 |
+ struct nvkm_object **); |
2544 |
+ int nv50_disp_oimm_new(const struct nv50_disp_chan_func *, |
2545 |
+ const struct nv50_disp_chan_mthd *, |
2546 |
+- struct nv50_disp_root *, int chid, |
2547 |
++ struct nv50_disp_root *, int ctrl, int user, |
2548 |
+ const struct nvkm_oclass *, void *data, u32 size, |
2549 |
+ struct nvkm_object **); |
2550 |
+ #endif |
2551 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c |
2552 |
+index dd99fc7..fa781b5 100644 |
2553 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c |
2554 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c |
2555 |
+@@ -33,5 +33,5 @@ g84_disp_curs_oclass = { |
2556 |
+ .base.maxver = 0, |
2557 |
+ .ctor = nv50_disp_curs_new, |
2558 |
+ .func = &nv50_disp_pioc_func, |
2559 |
+- .chid = 7, |
2560 |
++ .chid = { 7, 7 }, |
2561 |
+ }; |
2562 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c |
2563 |
+index 2a1574e..2be6fb0 100644 |
2564 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c |
2565 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c |
2566 |
+@@ -33,5 +33,5 @@ gf119_disp_curs_oclass = { |
2567 |
+ .base.maxver = 0, |
2568 |
+ .ctor = nv50_disp_curs_new, |
2569 |
+ .func = &gf119_disp_pioc_func, |
2570 |
+- .chid = 13, |
2571 |
++ .chid = { 13, 13 }, |
2572 |
+ }; |
2573 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c |
2574 |
+index 28e8f06..2a99db4 100644 |
2575 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c |
2576 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c |
2577 |
+@@ -33,5 +33,5 @@ gk104_disp_curs_oclass = { |
2578 |
+ .base.maxver = 0, |
2579 |
+ .ctor = nv50_disp_curs_new, |
2580 |
+ .func = &gf119_disp_pioc_func, |
2581 |
+- .chid = 13, |
2582 |
++ .chid = { 13, 13 }, |
2583 |
+ }; |
2584 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c |
2585 |
+new file mode 100644 |
2586 |
+index 0000000..e958210 |
2587 |
+--- /dev/null |
2588 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c |
2589 |
+@@ -0,0 +1,37 @@ |
2590 |
++/* |
2591 |
++ * Copyright 2016 Red Hat Inc. |
2592 |
++ * |
2593 |
++ * Permission is hereby granted, free of charge, to any person obtaining a |
2594 |
++ * copy of this software and associated documentation files (the "Software"), |
2595 |
++ * to deal in the Software without restriction, including without limitation |
2596 |
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
2597 |
++ * and/or sell copies of the Software, and to permit persons to whom the |
2598 |
++ * Software is furnished to do so, subject to the following conditions: |
2599 |
++ * |
2600 |
++ * The above copyright notice and this permission notice shall be included in |
2601 |
++ * all copies or substantial portions of the Software. |
2602 |
++ * |
2603 |
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
2604 |
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
2605 |
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
2606 |
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
2607 |
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
2608 |
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
2609 |
++ * OTHER DEALINGS IN THE SOFTWARE. |
2610 |
++ * |
2611 |
++ * Authors: Ben Skeggs <bskeggs@××××××.com> |
2612 |
++ */ |
2613 |
++#include "channv50.h" |
2614 |
++#include "rootnv50.h" |
2615 |
++ |
2616 |
++#include <nvif/class.h> |
2617 |
++ |
2618 |
++const struct nv50_disp_pioc_oclass |
2619 |
++gp102_disp_curs_oclass = { |
2620 |
++ .base.oclass = GK104_DISP_CURSOR, |
2621 |
++ .base.minver = 0, |
2622 |
++ .base.maxver = 0, |
2623 |
++ .ctor = nv50_disp_curs_new, |
2624 |
++ .func = &gf119_disp_pioc_func, |
2625 |
++ .chid = { 13, 17 }, |
2626 |
++}; |
2627 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c |
2628 |
+index d8a4b9c..00a7f35 100644 |
2629 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c |
2630 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c |
2631 |
+@@ -33,5 +33,5 @@ gt215_disp_curs_oclass = { |
2632 |
+ .base.maxver = 0, |
2633 |
+ .ctor = nv50_disp_curs_new, |
2634 |
+ .func = &nv50_disp_pioc_func, |
2635 |
+- .chid = 7, |
2636 |
++ .chid = { 7, 7 }, |
2637 |
+ }; |
2638 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c |
2639 |
+index 8b13204..82ff82d 100644 |
2640 |
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c |
2641 |
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c |
2642 |
+@@ -33,7 +33,7 @@ |
2643 |
+ int |
2644 |
+ nv50_disp_curs_new(const struct nv50_disp_chan_func *func, |
2645 |
+ const struct nv50_disp_chan_mthd *mthd, |
2646 |
+- struct nv50_disp_root *root, int chid, |
2647 |
++ struct nv50_disp_root *root, int ctrl, int user, |
2648 |
+ const struct nvkm_oclass *oclass, void *data, u32 size, |
2649 |
+ struct nvkm_object **pobject) |
2650 |
+ { |
2651 |
+@@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func, |
2652 |
+ } else |
2653 |
+ return ret; |
2654 |
+ |
2655 |
+- return nv50_disp_chan_new_(func, mthd, root, chid + head, |
2656 |
++ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, |
2657 |
+ head, oclass, pobject); |
2658 |
+ } |
2659 |
+ |
2660 |
+@@ -65,5 +65,5 @@ nv50_disp_curs_oclass = { |
2661 |
+ .base.maxver = 0, |
2662 |
+ .ctor = nv50_disp_curs_new, |
2663 |
+ .func = &nv50_disp_pioc_func, |
2664 |
+- .chid = 7, |
2665 |
++ .chid = { 7, 7 }, |
2666 |
+ }; |
2667 |
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+index a57f7ce..ce7cd74 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+ struct nvkm_object *object, u32 handle)
+ {
+ return nvkm_ramht_insert(chan->base.root->ramht, object,
+- chan->base.chid, -9, handle,
+- chan->base.chid << 27 | 0x00000001);
++ chan->base.chid.user, -9, handle,
++ chan->base.chid.user << 27 | 0x00000001);
+ }
+
+ void
+@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* deactivate channel */
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d fini: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ }
+
+ /* disable error reporting and completion notification */
+- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
++ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
+ }
+
+ static int
+@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
+
+ /* initialise channel for dma command submission */
+- nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
+- nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
+- nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
++ nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
++ nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
++ nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
++ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
++ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+index ad24c2c..d26d3b4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+@@ -32,26 +32,27 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
+
+ /* initialise channel for dma command submission */
+- nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+- nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+- nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
++ nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
++ nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
++ nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
++ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
++ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+index 9c6645a..0a1381a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+@@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+ chan->func = func;
+
+ ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+- chid, head, oclass, &chan->base);
++ chid, chid, head, oclass, &chan->base);
+ if (ret)
+ return ret;
+
+@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+ struct nvkm_object *object, u32 handle)
+ {
+ return nvkm_ramht_insert(chan->base.root->ramht, object,
+- chan->base.chid, -10, handle,
+- chan->base.chid << 28 |
+- chan->base.chid);
++ chan->base.chid.user, -10, handle,
++ chan->base.chid.user << 28 |
++ chan->base.chid.user);
+ }
+
+ static void
+@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* deactivate channel */
+- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ }
+
+ /* disable error reporting and completion notifications */
+- nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
++ nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
+ }
+
+ static int
+@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
++ nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
+
+ /* initialise channel for dma command submission */
+- nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
+- nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
+- nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
+- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+- nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
++ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
++ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
++ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
++ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
++ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+index 54a4ae8..5ad5d0f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+@@ -33,5 +33,5 @@ g84_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 5,
++ .chid = { 5, 5 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+index c658db5..1f9fd34 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+@@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+- .chid = 9,
++ .chid = { 9, 9 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+index b1fde8c..0c09fe8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+@@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+- .chid = 9,
++ .chid = { 9, 9 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
+new file mode 100644
+index 0000000..abf8236
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2016 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs <bskeggs@××××××.com>
++ */
++#include "channv50.h"
++#include "rootnv50.h"
++
++#include <nvif/class.h>
++
++const struct nv50_disp_pioc_oclass
++gp102_disp_oimm_oclass = {
++ .base.oclass = GK104_DISP_OVERLAY,
++ .base.minver = 0,
++ .base.maxver = 0,
++ .ctor = nv50_disp_oimm_new,
++ .func = &gf119_disp_pioc_func,
++ .chid = { 9, 13 },
++};
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+index f4e7eb3..1281db2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+@@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 5,
++ .chid = { 5, 5 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+index 3940b9c..07540f3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+@@ -33,7 +33,7 @@
+ int
+ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+- struct nv50_disp_root *root, int chid,
++ struct nv50_disp_root *root, int ctrl, int user,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+ {
+@@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+ } else
+ return ret;
+
+- return nv50_disp_chan_new_(func, mthd, root, chid + head,
++ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
+ head, oclass, pobject);
+ }
+
+@@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 5,
++ .chid = { 5, 5 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+index a625a98..0abaa64 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+- nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d fini: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ }
+
+ /* disable error reporting and completion notification */
+- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
++ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
+ }
+
+ static int
+@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
+
+ /* activate channel */
+- nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
++ nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+- u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
++ u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+index 9d2618d..0211e0e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+- nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ }
+ }
+
+@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
++ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
++ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+- u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
++ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+index 8443e04..b053b29 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+@@ -36,8 +36,8 @@ gp104_disp_root = {
+ &gp104_disp_ovly_oclass,
+ },
+ .pioc = {
+- &gk104_disp_oimm_oclass,
+- &gk104_disp_curs_oclass,
++ &gp102_disp_oimm_oclass,
++ &gp102_disp_curs_oclass,
+ },
+ };
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+index 2f9cecd..05c829a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+@@ -207,8 +207,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
+ {
+ const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
+ struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+- return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+- oclass, data, size, pobject);
++ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
++ sclass->chid.user, oclass, data, size, pobject);
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index d544ff9..7aadce1 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -83,8 +83,7 @@ struct vc4_crtc_data {
+ /* Which channel of the HVS this pixelvalve sources from. */
+ int hvs_channel;
+
+- enum vc4_encoder_type encoder0_type;
+- enum vc4_encoder_type encoder1_type;
++ enum vc4_encoder_type encoder_types[4];
+ };
+
+ #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
+@@ -669,6 +668,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
+ CRTC_WRITE(PV_INTEN, 0);
+ }
+
++/* Must be called with the event lock held */
++bool vc4_event_pending(struct drm_crtc *crtc)
++{
++ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
++
++ return !!vc4_crtc->event;
++}
++
+ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
+ {
+ struct drm_crtc *crtc = &vc4_crtc->base;
+@@ -859,20 +866,26 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
+
+ static const struct vc4_crtc_data pv0_data = {
+ .hvs_channel = 0,
+- .encoder0_type = VC4_ENCODER_TYPE_DSI0,
+- .encoder1_type = VC4_ENCODER_TYPE_DPI,
++ .encoder_types = {
++ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
++ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
++ },
+ };
+
+ static const struct vc4_crtc_data pv1_data = {
+ .hvs_channel = 2,
+- .encoder0_type = VC4_ENCODER_TYPE_DSI1,
+- .encoder1_type = VC4_ENCODER_TYPE_SMI,
++ .encoder_types = {
++ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
++ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
++ },
+ };
+
+ static const struct vc4_crtc_data pv2_data = {
+ .hvs_channel = 1,
+- .encoder0_type = VC4_ENCODER_TYPE_VEC,
+- .encoder1_type = VC4_ENCODER_TYPE_HDMI,
++ .encoder_types = {
++ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
++ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
++ },
+ };
+
+ static const struct of_device_id vc4_crtc_dt_match[] = {
+@@ -886,17 +899,20 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
+ struct drm_crtc *crtc)
+ {
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
++ const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
++ const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, drm) {
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+-
+- if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
+- vc4_encoder->clock_select = 0;
+- encoder->possible_crtcs |= drm_crtc_mask(crtc);
+- } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
+- vc4_encoder->clock_select = 1;
+- encoder->possible_crtcs |= drm_crtc_mask(crtc);
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
++ if (vc4_encoder->type == encoder_types[i]) {
++ vc4_encoder->clock_select = i;
++ encoder->possible_crtcs |= drm_crtc_mask(crtc);
++ break;
++ }
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
+index 7c1e4d9..50a55ef 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.h
++++ b/drivers/gpu/drm/vc4/vc4_drv.h
+@@ -194,6 +194,7 @@ to_vc4_plane(struct drm_plane *plane)
+ }
+
+ enum vc4_encoder_type {
++ VC4_ENCODER_TYPE_NONE,
+ VC4_ENCODER_TYPE_HDMI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI0,
+@@ -440,6 +441,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
+ extern struct platform_driver vc4_crtc_driver;
+ int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
+ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
++bool vc4_event_pending(struct drm_crtc *crtc);
+ int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
+index c1f65c6..67af2af 100644
+--- a/drivers/gpu/drm/vc4/vc4_kms.c
++++ b/drivers/gpu/drm/vc4/vc4_kms.c
+@@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev,
+
+ /* Make sure that any outstanding modesets have finished. */
+ if (nonblock) {
+- ret = down_trylock(&vc4->async_modeset);
+- if (ret) {
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *crtc_state;
++ unsigned long flags;
++ bool busy = false;
++
++ /*
++ * If there's an undispatched event to send then we're
++ * obviously still busy.  If there isn't, then we can
++ * unconditionally wait for the semaphore because it
++ * shouldn't be contended (for long).
++ *
++ * This is to prevent a race where queuing a new flip
++ * from userspace immediately on receipt of an event
++ * beats our clean-up and returns EBUSY.
++ */
++ spin_lock_irqsave(&dev->event_lock, flags);
++ for_each_crtc_in_state(state, crtc, crtc_state, i)
++ busy |= vc4_event_pending(crtc);
++ spin_unlock_irqrestore(&dev->event_lock, flags);
++ if (busy) {
+ kfree(c);
+ return -EBUSY;
+ }
+- } else {
+- ret = down_interruptible(&vc4->async_modeset);
+- if (ret) {
+- kfree(c);
+- return ret;
+- }
++ }
++ ret = down_interruptible(&vc4->async_modeset);
++ if (ret) {
++ kfree(c);
++ return ret;
+ }
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
+index 1aa44c2..39f6886 100644
+--- a/drivers/gpu/drm/vc4/vc4_regs.h
++++ b/drivers/gpu/drm/vc4/vc4_regs.h
+@@ -177,8 +177,9 @@
+ # define PV_CONTROL_WAIT_HSTART BIT(12)
+ # define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4)
+ # define PV_CONTROL_PIXEL_REP_SHIFT 4
+-# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
++# define PV_CONTROL_CLK_SELECT_DSI 0
+ # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
++# define PV_CONTROL_CLK_SELECT_VEC 2
+ # define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
+ # define PV_CONTROL_CLK_SELECT_SHIFT 2
+ # define PV_CONTROL_FIFO_CLR BIT(1)
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index c5dee30..acb9d25 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1598,6 +1598,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+ }
+
++static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
++{
++ struct its_node *its = data;
++
++ /* On QDF2400, the size of the ITE is 16Bytes */
++ its->ite_size = 16;
++}
++
+ static const struct gic_quirk its_quirks[] = {
+ #ifdef CONFIG_CAVIUM_ERRATUM_22375
+ {
+@@ -1615,6 +1623,14 @@ static const struct gic_quirk its_quirks[] = {
+ .init = its_enable_quirk_cavium_23144,
+ },
+ #endif
++#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
++ {
++ .desc = "ITS: QDF2400 erratum 0065",
++ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
++ .mask = 0xffffffff,
++ .init = its_enable_quirk_qdf2400_e0065,
++ },
++#endif
+ {
+ }
+ };
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 302e284..cde43b6 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1595,6 +1595,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
+ return buffer;
+ }
+
++static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
++{
++ struct uvc_video_chain *chain;
++
++ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++ if (chain == NULL)
++ return NULL;
++
++ INIT_LIST_HEAD(&chain->entities);
++ mutex_init(&chain->ctrl_mutex);
++ chain->dev = dev;
++ v4l2_prio_init(&chain->prio);
++
++ return chain;
++}
++
++/*
++ * Fallback heuristic for devices that don't connect units and terminals in a
++ * valid chain.
++ *
++ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
++ * to fail, but if we just take the entities we can find and put them together
++ * in the most sensible chain we can think of, turns out they do work anyway.
++ * Note: This heuristic assumes there is a single chain.
++ *
++ * At the time of writing, devices known to have such a broken chain are
++ * - Acer Integrated Camera (5986:055a)
++ * - Realtek rtl157a7 (0bda:57a7)
++ */
++static int uvc_scan_fallback(struct uvc_device *dev)
++{
++ struct uvc_video_chain *chain;
++ struct uvc_entity *iterm = NULL;
++ struct uvc_entity *oterm = NULL;
++ struct uvc_entity *entity;
++ struct uvc_entity *prev;
++
++ /*
++ * Start by locating the input and output terminals. We only support
++ * devices with exactly one of each for now.
++ */
++ list_for_each_entry(entity, &dev->entities, list) {
++ if (UVC_ENTITY_IS_ITERM(entity)) {
++ if (iterm)
++ return -EINVAL;
++ iterm = entity;
++ }
++
++ if (UVC_ENTITY_IS_OTERM(entity)) {
++ if (oterm)
++ return -EINVAL;
++ oterm = entity;
++ }
++ }
++
++ if (iterm == NULL || oterm == NULL)
++ return -EINVAL;
++
++ /* Allocate the chain and fill it. */
++ chain = uvc_alloc_chain(dev);
++ if (chain == NULL)
++ return -ENOMEM;
++
++ if (uvc_scan_chain_entity(chain, oterm) < 0)
++ goto error;
++
++ prev = oterm;
++
++ /*
++ * Add all Processing and Extension Units with two pads. The order
++ * doesn't matter much, use reverse list traversal to connect units in
++ * UVC descriptor order as we build the chain from output to input. This
++ * leads to units appearing in the order meant by the manufacturer for
++ * the cameras known to require this heuristic.
++ */
++ list_for_each_entry_reverse(entity, &dev->entities, list) {
++ if (entity->type != UVC_VC_PROCESSING_UNIT &&
++ entity->type != UVC_VC_EXTENSION_UNIT)
++ continue;
++
++ if (entity->num_pads != 2)
++ continue;
++
++ if (uvc_scan_chain_entity(chain, entity) < 0)
++ goto error;
++
++ prev->baSourceID[0] = entity->id;
++ prev = entity;
++ }
++
++ if (uvc_scan_chain_entity(chain, iterm) < 0)
++ goto error;
++
++ prev->baSourceID[0] = iterm->id;
++
++ list_add_tail(&chain->list, &dev->chains);
++
++ uvc_trace(UVC_TRACE_PROBE,
++ "Found a video chain by fallback heuristic (%s).\n",
++ uvc_print_chain(chain));
++
++ return 0;
++
++error:
++ kfree(chain);
++ return -EINVAL;
++}
++
+ /*
+ * Scan the device for video chains and register video devices.
+ *
+@@ -1617,15 +1725,10 @@ static int uvc_scan_device(struct uvc_device *dev)
+ if (term->chain.next || term->chain.prev)
+ continue;
+
+- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+- INIT_LIST_HEAD(&chain->entities);
+- mutex_init(&chain->ctrl_mutex);
+- chain->dev = dev;
+- v4l2_prio_init(&chain->prio);
+-
+ term->flags |= UVC_ENTITY_FLAG_DEFAULT;
+
+ if (uvc_scan_chain(chain, term) < 0) {
+@@ -1639,6 +1742,9 @@ static int uvc_scan_device(struct uvc_device *dev)
+ list_add_tail(&chain->list, &dev->chains);
+ }
+
++ if (list_empty(&dev->chains))
++ uvc_scan_fallback(dev);
++
+ if (list_empty(&dev->chains)) {
+ uvc_printk(KERN_INFO, "No valid video chain found.\n");
+ return -1;
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index a36022b..03dca73 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1181,7 +1181,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+
+ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+ {
++ struct tcphdr *tcph;
+ int offset = 0;
++ int hdr_len;
+
+ /* only TCP packets will be aggregated */
+ if (skb->protocol == htons(ETH_P_IP)) {
+@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+ /* if mss is not set through Large Packet bit/mss in rx buffer,
+ * expect that the mss will be written to the tcp header checksum.
+ */
++ tcph = (struct tcphdr *)(skb->data + offset);
+ if (lrg_pkt) {
+ skb_shinfo(skb)->gso_size = mss;
+ } else if (offset) {
+- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
+-
+ skb_shinfo(skb)->gso_size = ntohs(tcph->check);
+ tcph->check = 0;
+ }
++
++ if (skb_shinfo(skb)->gso_size) {
++ hdr_len = offset + tcph->doff * 4;
++ skb_shinfo(skb)->gso_segs =
++ DIV_ROUND_UP(skb->len - hdr_len,
++ skb_shinfo(skb)->gso_size);
++ }
+ }
+
+ static int ibmveth_poll(struct napi_struct *napi, int budget)
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index 5b54254..2788a54 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
+ s32 ret_val = 0;
+ u16 phy_id;
+
++ /* ensure PHY page selection to fix misconfigured i210 */
++ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
++ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
++
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index b3067137..d4fa851 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -81,6 +81,7 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+ {
+ priv->params.rq_wq_type = rq_type;
++ priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+@@ -92,6 +93,10 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
++
++ /* Extra room needed for build_skb */
++ priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+@@ -3473,12 +3478,6 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
+
+- priv->params.lro_wqe_sz =
+- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+- /* Extra room needed for build_skb */
+- MLX5_RX_HEADROOM -
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-
+ /* Initialize pflags */
+ MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+@@ -3936,6 +3935,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
+ }
+ }
+
++static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
++{
++ struct mlx5_eswitch *esw = mdev->priv.eswitch;
++ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
++ int vport;
++
++ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
++ return;
++
++ for (vport = 1; vport < total_vfs; vport++)
++ mlx5_eswitch_unregister_vport_rep(esw, vport);
++}
++
+ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -3983,6 +3995,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+ return err;
+ }
+
++ mlx5e_register_vport_rep(mdev);
+ return 0;
+ }
+
+@@ -3994,6 +4007,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+ if (!netif_device_present(netdev))
+ return;
+
++ mlx5e_unregister_vport_rep(mdev);
+ mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_destroy_mdev_resources(mdev);
+ }
+@@ -4012,8 +4026,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
+ if (err)
+ return NULL;
+
+- mlx5e_register_vport_rep(mdev);
+-
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ ppriv = &esw->offloads.vport_reps[0];
+
+@@ -4065,13 +4077,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+
+ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+ {
+- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ struct mlx5e_priv *priv = vpriv;
+- int vport;
+-
+- for (vport = 1; vport < total_vfs; vport++)
+- mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ unregister_netdev(priv->netdev);
+ mlx5e_detach(mdev, vpriv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index e7b2158..796bdf0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -92,19 +92,18 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+ {
+- u16 wqe_cnt_step;
+-
+ cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+ cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
+ cq->title.op_own &= 0xf0;
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+
+- wqe_cnt_step =
+- rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+- mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+- cq->decmprs_wqe_counter =
+- (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
++ cq->decmprs_wqe_counter +=
++ mpwrq_get_cqe_consumed_strides(&cq->title);
++ else
++ cq->decmprs_wqe_counter =
++ (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
+ }
+
+ static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index e83072d..6905630 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -500,30 +500,40 @@ static int
+ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct mlxsw_sp_prefix_usage *req_prefix_usage)
+ {
+- struct mlxsw_sp_lpm_tree *lpm_tree;
++ struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
++ struct mlxsw_sp_lpm_tree *new_tree;
++ int err;
+
+- if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
+- &vr->lpm_tree->prefix_usage))
++ if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
+ return 0;
+
+- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
++ new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+ vr->proto, false);
+- if (IS_ERR(lpm_tree)) {
++ if (IS_ERR(new_tree)) {
+ /* We failed to get a tree according to the required
+ * prefix usage. However, the current tree might be still good
+ * for us if our requirement is subset of the prefixes used
+ * in the tree.
+ */
+ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
+- &vr->lpm_tree->prefix_usage))
++ &lpm_tree->prefix_usage))
+ return 0;
+- return PTR_ERR(lpm_tree);
++ return PTR_ERR(new_tree);
+ }
+
+- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
++ /* Prevent packet loss by overwriting existing binding */
++ vr->lpm_tree = new_tree;
++ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
++ if (err)
++ goto err_tree_bind;
++ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
++
++ return 0;
++
++err_tree_bind:
+ vr->lpm_tree = lpm_tree;
+- return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
++ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
++ return err;
+ }
+
+ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 8b4822a..3c1f89a 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1039,16 +1039,22 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct ip_tunnel_info *info = NULL;
++ int err;
+
+ if (geneve->collect_md)
+ info = skb_tunnel_info(skb);
+
++ rcu_read_lock();
+ #if IS_ENABLED(CONFIG_IPV6)
+ if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
+ (!info && geneve->remote.sa.sa_family == AF_INET6))
+- return geneve6_xmit_skb(skb, dev, info);
++ err = geneve6_xmit_skb(skb, dev, info);
++ else
+ #endif
+- return geneve_xmit_skb(skb, dev, info);
++ err = geneve_xmit_skb(skb, dev, info);
++ rcu_read_unlock();
++
++ return err;
+ }
+
+ static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index f424b86..201ffa5 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -611,14 +611,18 @@ void phy_start_machine(struct phy_device *phydev)
+ * phy_trigger_machine - trigger the state machine to run
+ *
+ * @phydev: the phy_device struct
++ * @sync: indicate whether we should wait for the workqueue cancelation
+ *
+ * Description: There has been a change in state which requires that the
+ * state machine runs.
+ */
+
+-static void phy_trigger_machine(struct phy_device *phydev)
++static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+ {
+- cancel_delayed_work_sync(&phydev->state_queue);
++ if (sync)
++ cancel_delayed_work_sync(&phydev->state_queue);
++ else
++ cancel_delayed_work(&phydev->state_queue);
+ queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+ }
+
+@@ -655,7 +659,7 @@ static void phy_error(struct phy_device *phydev)
+ phydev->state = PHY_HALTED;
+ mutex_unlock(&phydev->lock);
+
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev, false);
+ }
+
+ /**
+@@ -817,7 +821,7 @@ void phy_change(struct work_struct *work)
+ }
+
+ /* reschedule state queue work to run as soon as possible */
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev, true);
+ return;
+
+ ignore:
+@@ -907,7 +911,7 @@ void phy_start(struct phy_device *phydev)
+ if (do_resume)
+ phy_resume(phydev);
+
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev, true);
+ }
+ EXPORT_SYMBOL(phy_start);
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index b31aca8..a931b73 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -819,7 +819,18 @@ static void tun_net_uninit(struct net_device *dev)
+ /* Net device open. */
+ static int tun_net_open(struct net_device *dev)
+ {
++ struct tun_struct *tun = netdev_priv(dev);
++ int i;
++
+ netif_tx_start_all_queues(dev);
++
++ for (i = 0; i < tun->numqueues; i++) {
++ struct tun_file *tfile;
++
++ tfile = rtnl_dereference(tun->tfiles[i]);
++ tfile->socket.sk->sk_write_space(tfile->socket.sk);
++ }
++
+ return 0;
+ }
+
+@@ -1116,9 +1127,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
+ if (!skb_array_empty(&tfile->tx_array))
+ mask |= POLLIN | POLLRDNORM;
+
+- if (sock_writeable(sk) ||
+- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+- sock_writeable(sk)))
++ if (tun->dev->flags & IFF_UP &&
++ (sock_writeable(sk) ||
++ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
++ sock_writeable(sk))))
+ mask |= POLLOUT | POLLWRNORM;
+
+ if (tun->dev->reg_state != NETREG_REGISTERED)
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 95cf1d8..bc744ac 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -346,6 +346,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
+
+ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
++ int len = skb->len;
+ netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+@@ -353,7 +354,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->tx_pkts++;
+- dstats->tx_bytes += skb->len;
++ dstats->tx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+ } else {
+ this_cpu_inc(dev->dstats->tx_drps);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index d4f495b..3c4c2cf 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1942,7 +1942,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ const struct iphdr *old_iph;
+ union vxlan_addr *dst;
+ union vxlan_addr remote_ip, local_ip;
+- union vxlan_addr *src;
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
+ __be16 src_port = 0, dst_port;
+@@ -1956,11 +1955,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+
+ info = skb_tunnel_info(skb);
+
++ rcu_read_lock();
+ if (rdst) {
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+ vni = rdst->remote_vni;
+ dst = &rdst->remote_ip;
+- src = &vxlan->cfg.saddr;
++ local_ip = vxlan->cfg.saddr;
+ dst_cache = &rdst->dst_cache;
+ } else {
+ if (!info) {
+@@ -1979,7 +1979,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
+ }
+ dst = &remote_ip;
+- src = &local_ip;
+ dst_cache = &info->dst_cache;
+ }
+
+@@ -1987,7 +1986,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (did_rsc) {
+ /* short-circuited back to local bridge */
+ vxlan_encap_bypass(skb, vxlan, vxlan);
+- return;
++ goto out_unlock;
+ }
+ goto drop;
+ }
+@@ -2028,7 +2027,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ rt = vxlan_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ dst->sin.sin_addr.s_addr,
+- &src->sin.sin_addr.s_addr,
++ &local_ip.sin.sin_addr.s_addr,
+ dst_cache, info);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n",
+@@ -2056,7 +2055,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+- return;
++ goto out_unlock;
+ }
+
+ if (!info)
+@@ -2071,7 +2070,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (err < 0)
+ goto xmit_tx_error;
+
+- udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
++ udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr,
+ dst->sin.sin_addr.s_addr, tos, ttl, df,
+ src_port, dst_port, xnet, !udp_sum);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -2087,7 +2086,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ ndst = vxlan6_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ label, &dst->sin6.sin6_addr,
+- &src->sin6.sin6_addr,
++ &local_ip.sin6.sin6_addr,
+ dst_cache, info);
+ if (IS_ERR(ndst)) {
+ netdev_dbg(dev, "no route to %pI6\n",
+@@ -2117,7 +2116,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+- return;
++ goto out_unlock;
+ }
+
+ if (!info)
+@@ -2131,15 +2130,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (err < 0) {
+ dst_release(ndst);
+ dev->stats.tx_errors++;
+- return;
++ goto out_unlock;
+ }
+ udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+- &src->sin6.sin6_addr,
++ &local_ip.sin6.sin6_addr,
+ &dst->sin6.sin6_addr, tos, ttl,
+ label, src_port, dst_port, !udp_sum);
+ #endif
+ }
+-
++out_unlock:
++ rcu_read_unlock();
+ return;
+
+ drop:
+@@ -2155,6 +2155,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ dev->stats.tx_errors++;
+ tx_free:
+ dev_kfree_skb(skb);
++ rcu_read_unlock();
+ }
+
+ /* Transmit local packets over Vxlan
+@@ -2637,7 +2638,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+
+ if (data[IFLA_VXLAN_ID]) {
+ __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+- if (id >= VXLAN_VID_MASK)
++ if (id >= VXLAN_N_VID)
+ return -ERANGE;
+ }
+
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c |
4010 |
+index e30f05c..4722782 100644 |
4011 |
+--- a/drivers/pci/iov.c |
4012 |
++++ b/drivers/pci/iov.c |
4013 |
+@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) |
4014 |
+ return rc; |
4015 |
+ } |
4016 |
+ |
4017 |
+- pci_iov_set_numvfs(dev, nr_virtfn); |
4018 |
+- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; |
4019 |
+- pci_cfg_access_lock(dev); |
4020 |
+- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); |
4021 |
+- msleep(100); |
4022 |
+- pci_cfg_access_unlock(dev); |
4023 |
+- |
4024 |
+ iov->initial_VFs = initial; |
4025 |
+ if (nr_virtfn < initial) |
4026 |
+ initial = nr_virtfn; |
4027 |
+@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) |
4028 |
+ goto err_pcibios; |
4029 |
+ } |
4030 |
+ |
4031 |
++ pci_iov_set_numvfs(dev, nr_virtfn); |
4032 |
++ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; |
4033 |
++ pci_cfg_access_lock(dev); |
4034 |
++ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); |
4035 |
++ msleep(100); |
4036 |
++ pci_cfg_access_unlock(dev); |
4037 |
++ |
4038 |
+ for (i = 0; i < initial; i++) { |
4039 |
+ rc = pci_iov_add_virtfn(dev, i, 0); |
4040 |
+ if (rc) |
4041 |
+@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev) |
4042 |
+ } |
4043 |
+ |
4044 |
+ /** |
4045 |
+- * pci_iov_resource_bar - get position of the SR-IOV BAR |
4046 |
++ * pci_iov_update_resource - update a VF BAR |
4047 |
+ * @dev: the PCI device |
4048 |
+ * @resno: the resource number |
4049 |
+ * |
4050 |
+- * Returns position of the BAR encapsulated in the SR-IOV capability. |
4051 |
++ * Update a VF BAR in the SR-IOV capability of a PF. |
4052 |
+ */ |
4053 |
+-int pci_iov_resource_bar(struct pci_dev *dev, int resno) |
4054 |
++void pci_iov_update_resource(struct pci_dev *dev, int resno) |
4055 |
+ { |
4056 |
+- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END) |
4057 |
+- return 0; |
4058 |
++ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL; |
4059 |
++ struct resource *res = dev->resource + resno; |
4060 |
++ int vf_bar = resno - PCI_IOV_RESOURCES; |
4061 |
++ struct pci_bus_region region; |
4062 |
++ u16 cmd; |
4063 |
++ u32 new; |
4064 |
++ int reg; |
4065 |
++ |
4066 |
++ /* |
4067 |
++ * The generic pci_restore_bars() path calls this for all devices, |
4068 |
++ * including VFs and non-SR-IOV devices. If this is not a PF, we |
4069 |
++ * have nothing to do. |
4070 |
++ */ |
4071 |
++ if (!iov) |
4072 |
++ return; |
4073 |
++ |
4074 |
++ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd); |
4075 |
++ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) { |
4076 |
++ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n", |
4077 |
++ vf_bar, res); |
4078 |
++ return; |
4079 |
++ } |
4080 |
++ |
4081 |
++ /* |
4082 |
++ * Ignore unimplemented BARs, unused resource slots for 64-bit |
4083 |
++ * BARs, and non-movable resources, e.g., those described via |
4084 |
++ * Enhanced Allocation. |
4085 |
++ */ |
4086 |
++ if (!res->flags) |
4087 |
++ return; |
4088 |
++ |
4089 |
++ if (res->flags & IORESOURCE_UNSET) |
4090 |
++ return; |
4091 |
++ |
4092 |
++ if (res->flags & IORESOURCE_PCI_FIXED) |
4093 |
++ return; |
4094 |
+ |
4095 |
+- BUG_ON(!dev->is_physfn); |
4096 |
++ pcibios_resource_to_bus(dev->bus, ®ion, res); |
4097 |
++ new = region.start; |
4098 |
++ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; |
4099 |
+ |
4100 |
+- return dev->sriov->pos + PCI_SRIOV_BAR + |
4101 |
+- 4 * (resno - PCI_IOV_RESOURCES); |
4102 |
++ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar; |
4103 |
++ pci_write_config_dword(dev, reg, new); |
4104 |
++ if (res->flags & IORESOURCE_MEM_64) { |
4105 |
++ new = region.start >> 16 >> 16; |
4106 |
++ pci_write_config_dword(dev, reg + 4, new); |
4107 |
++ } |
4108 |
+ } |
4109 |
+ |
4110 |
+ resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev, |
4111 |
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
4112 |
+index eda6a7c..6922964 100644 |
4113 |
+--- a/drivers/pci/pci.c |
4114 |
++++ b/drivers/pci/pci.c |
4115 |
+@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev) |
4116 |
+ { |
4117 |
+ int i; |
4118 |
+ |
4119 |
+- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ |
4120 |
+- if (dev->is_virtfn) |
4121 |
+- return; |
4122 |
+- |
4123 |
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) |
4124 |
+ pci_update_resource(dev, i); |
4125 |
+ } |
4126 |
+@@ -4835,36 +4831,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags) |
4127 |
+ } |
4128 |
+ EXPORT_SYMBOL(pci_select_bars); |
4129 |
+ |
4130 |
+-/** |
4131 |
+- * pci_resource_bar - get position of the BAR associated with a resource |
4132 |
+- * @dev: the PCI device |
4133 |
+- * @resno: the resource number |
4134 |
+- * @type: the BAR type to be filled in |
4135 |
+- * |
4136 |
+- * Returns BAR position in config space, or 0 if the BAR is invalid. |
4137 |
+- */ |
4138 |
+-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) |
4139 |
+-{ |
4140 |
+- int reg; |
4141 |
+- |
4142 |
+- if (resno < PCI_ROM_RESOURCE) { |
4143 |
+- *type = pci_bar_unknown; |
4144 |
+- return PCI_BASE_ADDRESS_0 + 4 * resno; |
4145 |
+- } else if (resno == PCI_ROM_RESOURCE) { |
4146 |
+- *type = pci_bar_mem32; |
4147 |
+- return dev->rom_base_reg; |
4148 |
+- } else if (resno < PCI_BRIDGE_RESOURCES) { |
4149 |
+- /* device specific resource */ |
4150 |
+- *type = pci_bar_unknown; |
4151 |
+- reg = pci_iov_resource_bar(dev, resno); |
4152 |
+- if (reg) |
4153 |
+- return reg; |
4154 |
+- } |
4155 |
+- |
4156 |
+- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); |
4157 |
+- return 0; |
4158 |
+-} |
4159 |
+- |
4160 |
+ /* Some architectures require additional programming to enable VGA */ |
4161 |
+ static arch_set_vga_state_t arch_set_vga_state; |
4162 |
+ |
4163 |
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h |
4164 |
+index 4518562..a5d37f6 100644 |
4165 |
+--- a/drivers/pci/pci.h |
4166 |
++++ b/drivers/pci/pci.h |
4167 |
+@@ -245,7 +245,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, |
4168 |
+ int pci_setup_device(struct pci_dev *dev); |
4169 |
+ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, |
4170 |
+ struct resource *res, unsigned int reg); |
4171 |
+-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); |
4172 |
+ void pci_configure_ari(struct pci_dev *dev); |
4173 |
+ void __pci_bus_size_bridges(struct pci_bus *bus, |
4174 |
+ struct list_head *realloc_head); |
4175 |
+@@ -289,7 +288,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) |
4176 |
+ #ifdef CONFIG_PCI_IOV |
4177 |
+ int pci_iov_init(struct pci_dev *dev); |
4178 |
+ void pci_iov_release(struct pci_dev *dev); |
4179 |
+-int pci_iov_resource_bar(struct pci_dev *dev, int resno); |
4180 |
++void pci_iov_update_resource(struct pci_dev *dev, int resno); |
4181 |
+ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); |
4182 |
+ void pci_restore_iov_state(struct pci_dev *dev); |
4183 |
+ int pci_iov_bus_range(struct pci_bus *bus); |
4184 |
+@@ -303,10 +302,6 @@ static inline void pci_iov_release(struct pci_dev *dev) |
4185 |
+ |
4186 |
+ { |
4187 |
+ } |
4188 |
+-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno) |
4189 |
+-{ |
4190 |
+- return 0; |
4191 |
+-} |
4192 |
+ static inline void pci_restore_iov_state(struct pci_dev *dev) |
4193 |
+ { |
4194 |
+ } |
4195 |
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
4196 |
+index 300770c..d266d80 100644 |
4197 |
+--- a/drivers/pci/probe.c |
4198 |
++++ b/drivers/pci/probe.c |
4199 |
+@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, |
4200 |
+ mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
4201 |
+ } |
4202 |
+ } else { |
4203 |
+- res->flags |= (l & IORESOURCE_ROM_ENABLE); |
4204 |
++ if (l & PCI_ROM_ADDRESS_ENABLE) |
4205 |
++ res->flags |= IORESOURCE_ROM_ENABLE; |
4206 |
+ l64 = l & PCI_ROM_ADDRESS_MASK; |
4207 |
+ sz64 = sz & PCI_ROM_ADDRESS_MASK; |
4208 |
+ mask64 = (u32)PCI_ROM_ADDRESS_MASK; |
4209 |
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c |
4210 |
+index 06663d3..b6edb18 100644 |
4211 |
+--- a/drivers/pci/rom.c |
4212 |
++++ b/drivers/pci/rom.c |
4213 |
+@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev) |
4214 |
+ if (res->flags & IORESOURCE_ROM_SHADOW) |
4215 |
+ return 0; |
4216 |
+ |
4217 |
++ /* |
4218 |
++ * Ideally pci_update_resource() would update the ROM BAR address, |
4219 |
++ * and we would only set the enable bit here. But apparently some |
4220 |
++ * devices have buggy ROM BARs that read as zero when disabled. |
4221 |
++ */ |
4222 |
+ pcibios_resource_to_bus(pdev->bus, ®ion, res); |
4223 |
+ pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
4224 |
+ rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
4225 |
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c |
4226 |
+index 9526e34..4bc589e 100644 |
4227 |
+--- a/drivers/pci/setup-res.c |
4228 |
++++ b/drivers/pci/setup-res.c |
4229 |
+@@ -25,21 +25,18 @@ |
4230 |
+ #include <linux/slab.h> |
4231 |
+ #include "pci.h" |
4232 |
+ |
4233 |
+- |
4234 |
+-void pci_update_resource(struct pci_dev *dev, int resno) |
4235 |
++static void pci_std_update_resource(struct pci_dev *dev, int resno) |
4236 |
+ { |
4237 |
+ struct pci_bus_region region; |
4238 |
+ bool disable; |
4239 |
+ u16 cmd; |
4240 |
+ u32 new, check, mask; |
4241 |
+ int reg; |
4242 |
+- enum pci_bar_type type; |
4243 |
+ struct resource *res = dev->resource + resno; |
4244 |
+ |
4245 |
+- if (dev->is_virtfn) { |
4246 |
+- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno); |
4247 |
++ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ |
4248 |
++ if (dev->is_virtfn) |
4249 |
+ return; |
4250 |
+- } |
4251 |
+ |
4252 |
+ /* |
4253 |
+ * Ignore resources for unimplemented BARs and unused resource slots |
4254 |
+@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno) |
4255 |
+ return; |
4256 |
+ |
4257 |
+ pcibios_resource_to_bus(dev->bus, ®ion, res); |
4258 |
++ new = region.start; |
4259 |
+ |
4260 |
+- new = region.start | (res->flags & PCI_REGION_FLAG_MASK); |
4261 |
+- if (res->flags & IORESOURCE_IO) |
4262 |
++ if (res->flags & IORESOURCE_IO) { |
4263 |
+ mask = (u32)PCI_BASE_ADDRESS_IO_MASK; |
4264 |
+- else |
4265 |
++ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; |
4266 |
++ } else if (resno == PCI_ROM_RESOURCE) { |
4267 |
++ mask = (u32)PCI_ROM_ADDRESS_MASK; |
4268 |
++ } else { |
4269 |
+ mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
4270 |
++ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; |
4271 |
++ } |
4272 |
+ |
4273 |
+- reg = pci_resource_bar(dev, resno, &type); |
4274 |
+- if (!reg) |
4275 |
+- return; |
4276 |
+- if (type != pci_bar_unknown) { |
4277 |
++ if (resno < PCI_ROM_RESOURCE) { |
4278 |
++ reg = PCI_BASE_ADDRESS_0 + 4 * resno; |
4279 |
++ } else if (resno == PCI_ROM_RESOURCE) { |
4280 |
++ |
4281 |
++ /* |
4282 |
++ * Apparently some Matrox devices have ROM BARs that read |
4283 |
++ * as zero when disabled, so don't update ROM BARs unless |
4284 |
++ * they're enabled. See https://lkml.org/lkml/2005/8/30/138. |
4285 |
++ */ |
4286 |
+ if (!(res->flags & IORESOURCE_ROM_ENABLE)) |
4287 |
+ return; |
4288 |
++ |
4289 |
++ reg = dev->rom_base_reg; |
4290 |
+ new |= PCI_ROM_ADDRESS_ENABLE; |
4291 |
+- } |
4292 |
++ } else |
4293 |
++ return; |
4294 |
+ |
4295 |
+ /* |
4296 |
+ * We can't update a 64-bit BAR atomically, so when possible, |
4297 |
+@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno) |
4298 |
+ pci_write_config_word(dev, PCI_COMMAND, cmd); |
4299 |
+ } |
4300 |
+ |
4301 |
++void pci_update_resource(struct pci_dev *dev, int resno) |
4302 |
++{ |
4303 |
++ if (resno <= PCI_ROM_RESOURCE) |
4304 |
++ pci_std_update_resource(dev, resno); |
4305 |
++#ifdef CONFIG_PCI_IOV |
4306 |
++ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) |
4307 |
++ pci_iov_update_resource(dev, resno); |
4308 |
++#endif |
4309 |
++} |
4310 |
++ |
4311 |
+ int pci_claim_resource(struct pci_dev *dev, int resource) |
4312 |
+ { |
4313 |
+ struct resource *res = &dev->resource[resource]; |
4314 |
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c |
4315 |
+index ed92fb0..76b802c 100644 |
4316 |
+--- a/drivers/s390/crypto/ap_bus.c |
4317 |
++++ b/drivers/s390/crypto/ap_bus.c |
4318 |
+@@ -1712,6 +1712,9 @@ static void ap_scan_bus(struct work_struct *unused) |
4319 |
+ ap_dev->queue_depth = queue_depth; |
4320 |
+ ap_dev->raw_hwtype = device_type; |
4321 |
+ ap_dev->device_type = device_type; |
4322 |
++ /* CEX6 toleration: map to CEX5 */ |
4323 |
++ if (device_type == AP_DEVICE_TYPE_CEX6) |
4324 |
++ ap_dev->device_type = AP_DEVICE_TYPE_CEX5; |
4325 |
+ ap_dev->functions = device_functions; |
4326 |
+ spin_lock_init(&ap_dev->lock); |
4327 |
+ INIT_LIST_HEAD(&ap_dev->pendingq); |
4328 |
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h |
4329 |
+index d7fdf5c..fd66d2c 100644 |
4330 |
+--- a/drivers/s390/crypto/ap_bus.h |
4331 |
++++ b/drivers/s390/crypto/ap_bus.h |
4332 |
+@@ -105,6 +105,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) |
4333 |
+ #define AP_DEVICE_TYPE_CEX3C 9 |
4334 |
+ #define AP_DEVICE_TYPE_CEX4 10 |
4335 |
+ #define AP_DEVICE_TYPE_CEX5 11 |
4336 |
++#define AP_DEVICE_TYPE_CEX6 12 |
4337 |
+ |
4338 |
+ /* |
4339 |
+ * Known function facilities |
4340 |
+diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
4341 |
+index 91dfd58..c4fe95a 100644 |
4342 |
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
4343 |
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
4344 |
+@@ -22,7 +22,7 @@ |
4345 |
+ * |
4346 |
+ ****************************************************************************/ |
4347 |
+ |
4348 |
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
4349 |
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
4350 |
+ |
4351 |
+ #include <linux/module.h> |
4352 |
+ #include <linux/kernel.h> |
4353 |
+@@ -82,7 +82,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, |
4354 |
+ } |
4355 |
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { |
4356 |
+ if (se_cmd->data_direction == DMA_TO_DEVICE) { |
4357 |
+- /* residual data from an overflow write */ |
4358 |
++ /* residual data from an overflow write */ |
4359 |
+ rsp->flags = SRP_RSP_FLAG_DOOVER; |
4360 |
+ rsp->data_out_res_cnt = cpu_to_be32(residual_count); |
4361 |
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { |
4362 |
+@@ -102,7 +102,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, |
4363 |
+ * and the function returns TRUE. |
4364 |
+ * |
4365 |
+ * EXECUTION ENVIRONMENT: |
4366 |
+- * Interrupt or Process environment |
4367 |
++ * Interrupt or Process environment |
4368 |
+ */ |
4369 |
+ static bool connection_broken(struct scsi_info *vscsi) |
4370 |
+ { |
4371 |
+@@ -325,7 +325,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, |
4372 |
+ } |
4373 |
+ |
4374 |
+ /** |
4375 |
+- * ibmvscsis_send_init_message() - send initialize message to the client |
4376 |
++ * ibmvscsis_send_init_message() - send initialize message to the client |
4377 |
+ * @vscsi: Pointer to our adapter structure |
4378 |
+ * @format: Which Init Message format to send |
4379 |
+ * |
4380 |
+@@ -383,13 +383,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) |
4381 |
+ vscsi->cmd_q.base_addr); |
4382 |
+ if (crq) { |
4383 |
+ *format = (uint)(crq->format); |
4384 |
+- rc = ERROR; |
4385 |
++ rc = ERROR; |
4386 |
+ crq->valid = INVALIDATE_CMD_RESP_EL; |
4387 |
+ dma_rmb(); |
4388 |
+ } |
4389 |
+ } else { |
4390 |
+ *format = (uint)(crq->format); |
4391 |
+- rc = ERROR; |
4392 |
++ rc = ERROR; |
4393 |
+ crq->valid = INVALIDATE_CMD_RESP_EL; |
4394 |
+ dma_rmb(); |
4395 |
+ } |
4396 |
+@@ -398,166 +398,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) |
4397 |
+ } |
4398 |
+ |
4399 |
+ /** |
4400 |
+- * ibmvscsis_establish_new_q() - Establish new CRQ queue |
4401 |
+- * @vscsi: Pointer to our adapter structure |
4402 |
+- * @new_state: New state being established after resetting the queue |
4403 |
+- * |
4404 |
+- * Must be called with interrupt lock held. |
4405 |
+- */ |
4406 |
+-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state) |
4407 |
+-{ |
4408 |
+- long rc = ADAPT_SUCCESS; |
4409 |
+- uint format; |
4410 |
+- |
4411 |
+- vscsi->flags &= PRESERVE_FLAG_FIELDS; |
4412 |
+- vscsi->rsp_q_timer.timer_pops = 0; |
4413 |
+- vscsi->debit = 0; |
4414 |
+- vscsi->credit = 0; |
4415 |
+- |
4416 |
+- rc = vio_enable_interrupts(vscsi->dma_dev); |
4417 |
+- if (rc) { |
4418 |
+- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n", |
4419 |
+- rc); |
4420 |
+- return rc; |
4421 |
+- } |
4422 |
+- |
4423 |
+- rc = ibmvscsis_check_init_msg(vscsi, &format); |
4424 |
+- if (rc) { |
4425 |
+- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n", |
4426 |
+- rc); |
4427 |
+- return rc; |
4428 |
+- } |
4429 |
+- |
4430 |
+- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) { |
4431 |
+- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); |
4432 |
+- switch (rc) { |
4433 |
+- case H_SUCCESS: |
4434 |
+- case H_DROPPED: |
4435 |
+- case H_CLOSED: |
4436 |
+- rc = ADAPT_SUCCESS; |
4437 |
+- break; |
4438 |
+- |
4439 |
+- case H_PARAMETER: |
4440 |
+- case H_HARDWARE: |
4441 |
+- break; |
4442 |
+- |
4443 |
+- default: |
4444 |
+- vscsi->state = UNDEFINED; |
4445 |
+- rc = H_HARDWARE; |
4446 |
+- break; |
4447 |
+- } |
4448 |
+- } |
4449 |
+- |
4450 |
+- return rc; |
4451 |
+-} |
4452 |
+- |
4453 |
+-/** |
4454 |
+- * ibmvscsis_reset_queue() - Reset CRQ Queue |
4455 |
+- * @vscsi: Pointer to our adapter structure |
4456 |
+- * @new_state: New state to establish after resetting the queue |
4457 |
+- * |
4458 |
+- * This function calls h_free_q and then calls h_reg_q and does all |
4459 |
+- * of the bookkeeping to get us back to where we can communicate. |
4460 |
+- * |
4461 |
+- * Actually, we don't always call h_free_crq. A problem was discovered |
4462 |
+- * where one partition would close and reopen his queue, which would |
4463 |
+- * cause his partner to get a transport event, which would cause him to |
4464 |
+- * close and reopen his queue, which would cause the original partition |
4465 |
+- * to get a transport event, etc., etc. To prevent this, we don't |
4466 |
+- * actually close our queue if the client initiated the reset, (i.e. |
4467 |
+- * either we got a transport event or we have detected that the client's |
4468 |
+- * queue is gone) |
4469 |
+- * |
4470 |
+- * EXECUTION ENVIRONMENT: |
4471 |
+- * Process environment, called with interrupt lock held |
4472 |
+- */ |
4473 |
+-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state) |
4474 |
+-{ |
4475 |
+- int bytes; |
4476 |
+- long rc = ADAPT_SUCCESS; |
4477 |
+- |
4478 |
+- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); |
4479 |
+- |
4480 |
+- /* don't reset, the client did it for us */ |
4481 |
+- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { |
4482 |
+- vscsi->flags &= PRESERVE_FLAG_FIELDS; |
4483 |
+- vscsi->rsp_q_timer.timer_pops = 0; |
4484 |
+- vscsi->debit = 0; |
4485 |
+- vscsi->credit = 0; |
4486 |
+- vscsi->state = new_state; |
4487 |
+- vio_enable_interrupts(vscsi->dma_dev); |
4488 |
+- } else { |
4489 |
+- rc = ibmvscsis_free_command_q(vscsi); |
4490 |
+- if (rc == ADAPT_SUCCESS) { |
4491 |
+- vscsi->state = new_state; |
4492 |
+- |
4493 |
+- bytes = vscsi->cmd_q.size * PAGE_SIZE; |
4494 |
+- rc = h_reg_crq(vscsi->dds.unit_id, |
4495 |
+- vscsi->cmd_q.crq_token, bytes); |
4496 |
+- if (rc == H_CLOSED || rc == H_SUCCESS) { |
4497 |
+- rc = ibmvscsis_establish_new_q(vscsi, |
4498 |
+- new_state); |
4499 |
+- } |
4500 |
+- |
4501 |
+- if (rc != ADAPT_SUCCESS) { |
4502 |
+- pr_debug("reset_queue: reg_crq rc %ld\n", rc); |
4503 |
+- |
4504 |
+- vscsi->state = ERR_DISCONNECTED; |
4505 |
+- vscsi->flags |= RESPONSE_Q_DOWN; |
4506 |
+- ibmvscsis_free_command_q(vscsi); |
4507 |
+- } |
4508 |
+- } else { |
4509 |
+- vscsi->state = ERR_DISCONNECTED; |
4510 |
+- vscsi->flags |= RESPONSE_Q_DOWN; |
4511 |
+- } |
4512 |
+- } |
4513 |
+-} |
4514 |
+- |
4515 |
+-/** |
4516 |
+- * ibmvscsis_free_cmd_resources() - Free command resources |
4517 |
+- * @vscsi: Pointer to our adapter structure |
4518 |
+- * @cmd: Command which is not longer in use |
4519 |
+- * |
4520 |
+- * Must be called with interrupt lock held. |
4521 |
+- */ |
4522 |
+-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, |
4523 |
+- struct ibmvscsis_cmd *cmd) |
4524 |
+-{ |
4525 |
+- struct iu_entry *iue = cmd->iue; |
4526 |
+- |
4527 |
+- switch (cmd->type) { |
4528 |
+- case TASK_MANAGEMENT: |
4529 |
+- case SCSI_CDB: |
4530 |
+- /* |
4531 |
+- * When the queue goes down this value is cleared, so it |
4532 |
+- * cannot be cleared in this general purpose function. |
4533 |
+- */ |
4534 |
+- if (vscsi->debit) |
4535 |
+- vscsi->debit -= 1; |
4536 |
+- break; |
4537 |
+- case ADAPTER_MAD: |
4538 |
+- vscsi->flags &= ~PROCESSING_MAD; |
4539 |
+- break; |
4540 |
+- case UNSET_TYPE: |
4541 |
+- break; |
4542 |
+- default: |
4543 |
+- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", |
4544 |
+- cmd->type); |
4545 |
+- break; |
4546 |
+- } |
4547 |
+- |
4548 |
+- cmd->iue = NULL; |
4549 |
+- list_add_tail(&cmd->list, &vscsi->free_cmd); |
4550 |
+- srp_iu_put(iue); |
4551 |
+- |
4552 |
+- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && |
4553 |
+- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { |
4554 |
+- vscsi->flags &= ~WAIT_FOR_IDLE; |
4555 |
+- complete(&vscsi->wait_idle); |
4556 |
+- } |
4557 |
+-} |
4558 |
+- |
4559 |
+-/** |
4560 |
+ * ibmvscsis_disconnect() - Helper function to disconnect |
4561 |
+ * @work: Pointer to work_struct, gives access to our adapter structure |
4562 |
+ * |
4563 |
+@@ -576,7 +416,6 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
4564 |
+ proc_work); |
4565 |
+ u16 new_state; |
4566 |
+ bool wait_idle = false; |
4567 |
+- long rc = ADAPT_SUCCESS; |
4568 |
+ |
4569 |
+ spin_lock_bh(&vscsi->intr_lock); |
4570 |
+ new_state = vscsi->new_state; |
4571 |
+@@ -590,7 +429,7 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
4572 |
+ * should transitition to the new state |
4573 |
+ */ |
4574 |
+ switch (vscsi->state) { |
4575 |
+- /* Should never be called while in this state. */ |
4576 |
++ /* Should never be called while in this state. */ |
4577 |
+ case NO_QUEUE: |
4578 |
+ /* |
4579 |
+ * Can never transition from this state; |
4580 |
+@@ -629,30 +468,24 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
4581 |
+ vscsi->state = new_state; |
4582 |
+ break; |
4583 |
+ |
4584 |
+- /* |
4585 |
+- * If this is a transition into an error state. |
4586 |
+- * a client is attempting to establish a connection |
4587 |
+- * and has violated the RPA protocol. |
4588 |
+- * There can be nothing pending on the adapter although |
4589 |
+- * there can be requests in the command queue. |
4590 |
+- */ |
4591 |
+ case WAIT_ENABLED: |
4592 |
+- case PART_UP_WAIT_ENAB: |
4593 |
+ switch (new_state) { |
4594 |
+- case ERR_DISCONNECT: |
4595 |
+- vscsi->flags |= RESPONSE_Q_DOWN; |
4596 |
++ case UNCONFIGURING: |
4597 |
+ vscsi->state = new_state; |
4598 |
++ vscsi->flags |= RESPONSE_Q_DOWN; |
4599 |
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | |
4600 |
+ DISCONNECT_SCHEDULED); |
4601 |
+- ibmvscsis_free_command_q(vscsi); |
4602 |
+- break; |
4603 |
+- case ERR_DISCONNECT_RECONNECT: |
4604 |
+- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED); |
4605 |
++ dma_rmb(); |
4606 |
++ if (vscsi->flags & CFG_SLEEPING) { |
4607 |
++ vscsi->flags &= ~CFG_SLEEPING; |
4608 |
++ complete(&vscsi->unconfig); |
4609 |
++ } |
4610 |
+ break; |
4611 |
+ |
4612 |
+ /* should never happen */ |
4613 |
++ case ERR_DISCONNECT: |
4614 |
++ case ERR_DISCONNECT_RECONNECT: |
4615 |
+ case WAIT_IDLE: |
4616 |
+- rc = ERROR; |
4617 |
+ dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", |
4618 |
+ vscsi->state); |
4619 |
+ break; |
4620 |
+@@ -661,6 +494,13 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
4621 |
+ |
4622 |
+ case WAIT_IDLE: |
4623 |
+ switch (new_state) { |
4624 |
++ case UNCONFIGURING: |
4625 |
++ vscsi->flags |= RESPONSE_Q_DOWN; |
4626 |
++ vscsi->state = new_state; |
4627 |
++ vscsi->flags &= ~(SCHEDULE_DISCONNECT | |
4628 |
++ DISCONNECT_SCHEDULED); |
4629 |
++ ibmvscsis_free_command_q(vscsi); |
4630 |
++ break; |
4631 |
+ case ERR_DISCONNECT: |
4632 |
+ case ERR_DISCONNECT_RECONNECT: |
4633 |
+ vscsi->state = new_state; |
4634 |
+@@ -765,45 +605,348 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, |
4635 |
+ else |
4636 |
+ state = vscsi->state; |
4637 |
+ |
4638 |
+- switch (state) { |
4639 |
+- case NO_QUEUE: |
4640 |
+- case UNCONFIGURING: |
4641 |
+- break; |
4642 |
++ switch (state) { |
4643 |
++ case NO_QUEUE: |
4644 |
++ case UNCONFIGURING: |
4645 |
++ break; |
4646 |
++ |
4647 |
++ case ERR_DISCONNECTED: |
4648 |
++ case ERR_DISCONNECT: |
4649 |
++ case UNDEFINED: |
4650 |
++ if (new_state == UNCONFIGURING) |
4651 |
++ vscsi->new_state = new_state; |
4652 |
++ break; |
4653 |
++ |
4654 |
++ case ERR_DISCONNECT_RECONNECT: |
4655 |
++ switch (new_state) { |
4656 |
++ case UNCONFIGURING: |
4657 |
++ case ERR_DISCONNECT: |
4658 |
++ vscsi->new_state = new_state; |
4659 |
++ break; |
4660 |
++ default: |
4661 |
++ break; |
4662 |
++ } |
4663 |
++ break; |
4664 |
++ |
4665 |
++ case WAIT_ENABLED: |
4666 |
++ case WAIT_IDLE: |
4667 |
++ case WAIT_CONNECTION: |
4668 |
++ case CONNECTED: |
4669 |
++ case SRP_PROCESSING: |
4670 |
++ vscsi->new_state = new_state; |
4671 |
++ break; |
4672 |
++ |
4673 |
++ default: |
4674 |
++ break; |
4675 |
++ } |
4676 |
++ } |
4677 |
++ |
4678 |
++ pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", |
4679 |
++ vscsi->flags, vscsi->new_state); |
4680 |
++} |
4681 |
++ |
4682 |
++/** |
4683 |
++ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message |
4684 |
++ * @vscsi: Pointer to our adapter structure |
4685 |
++ * |
4686 |
++ * Must be called with interrupt lock held. |
4687 |
++ */ |
4688 |
++static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) |
4689 |
++{ |
4690 |
++ long rc = ADAPT_SUCCESS; |
4691 |
++ |
4692 |
++ switch (vscsi->state) { |
4693 |
++ case NO_QUEUE: |
4694 |
++ case ERR_DISCONNECT: |
4695 |
++ case ERR_DISCONNECT_RECONNECT: |
4696 |
++ case ERR_DISCONNECTED: |
4697 |
++ case UNCONFIGURING: |
4698 |
++ case UNDEFINED: |
4699 |
++ rc = ERROR; |
4700 |
++ break; |
4701 |
++ |
4702 |
++ case WAIT_CONNECTION: |
4703 |
++ vscsi->state = CONNECTED; |
4704 |
++ break; |
4705 |
++ |
4706 |
++ case WAIT_IDLE: |
4707 |
++ case SRP_PROCESSING: |
4708 |
++ case CONNECTED: |
4709 |
++ case WAIT_ENABLED: |
4710 |
++ default: |
4711 |
++ rc = ERROR; |
4712 |
++ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", |
4713 |
++ vscsi->state); |
4714 |
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
4715 |
++ break; |
4716 |
++ } |
4717 |
++ |
4718 |
++ return rc; |
4719 |
++} |
4720 |
++ |
4721 |
++/** |
4722 |
++ * ibmvscsis_handle_init_msg() - Respond to an Init Message |
4723 |
++ * @vscsi: Pointer to our adapter structure |
4724 |
++ * |
4725 |
++ * Must be called with interrupt lock held. |
4726 |
++ */ |
4727 |
++static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) |
4728 |
++{ |
4729 |
++ long rc = ADAPT_SUCCESS; |
4730 |
++ |
4731 |
++ switch (vscsi->state) { |
4732 |
++ case WAIT_CONNECTION: |
4733 |
++ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); |
4734 |
++ switch (rc) { |
4735 |
++ case H_SUCCESS: |
4736 |
++ vscsi->state = CONNECTED; |
4737 |
++ break; |
4738 |
++ |
4739 |
++ case H_PARAMETER: |
4740 |
++ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
4741 |
++ rc); |
4742 |
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
4743 |
++ break; |
4744 |
++ |
4745 |
++ case H_DROPPED: |
4746 |
++ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
4747 |
++ rc); |
4748 |
++ rc = ERROR; |
4749 |
++ ibmvscsis_post_disconnect(vscsi, |
4750 |
++ ERR_DISCONNECT_RECONNECT, 0); |
4751 |
++ break; |
4752 |
++ |
4753 |
++ case H_CLOSED: |
4754 |
++ pr_warn("init_msg: failed to send, rc %ld\n", rc); |
4755 |
++ rc = 0; |
4756 |
++ break; |
4757 |
++ } |
4758 |
++ break; |
4759 |
++ |
4760 |
++ case UNDEFINED: |
4761 |
++ rc = ERROR; |
4762 |
++ break; |
4763 |
++ |
4764 |
++ case UNCONFIGURING: |
4765 |
++ break; |
4766 |
++ |
4767 |
++ case WAIT_ENABLED: |
4768 |
++ case CONNECTED: |
4769 |
++ case SRP_PROCESSING: |
4770 |
++ case WAIT_IDLE: |
4771 |
++ case NO_QUEUE: |
4772 |
++ case ERR_DISCONNECT: |
4773 |
++ case ERR_DISCONNECT_RECONNECT: |
4774 |
++ case ERR_DISCONNECTED: |
4775 |
++ default: |
4776 |
++ rc = ERROR; |
4777 |
++ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", |
4778 |
++ vscsi->state); |
4779 |
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
4780 |
++ break; |
4781 |
++ } |
4782 |
++ |
4783 |
++ return rc; |
4784 |
++} |
4785 |
++ |
4786 |
++/** |
4787 |
++ * ibmvscsis_init_msg() - Respond to an init message |
4788 |
++ * @vscsi: Pointer to our adapter structure |
4789 |
++ * @crq: Pointer to CRQ element containing the Init Message |
4790 |
++ * |
4791 |
++ * EXECUTION ENVIRONMENT: |
4792 |
++ * Interrupt, interrupt lock held |
4793 |
++ */ |
4794 |
++static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) |
4795 |
++{ |
4796 |
++ long rc = ADAPT_SUCCESS; |
4797 |
++ |
4798 |
++ pr_debug("init_msg: state 0x%hx\n", vscsi->state); |
4799 |
++ |
4800 |
++ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, |
4801 |
++ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, |
4802 |
++ 0); |
4803 |
++ if (rc == H_SUCCESS) { |
4804 |
++ vscsi->client_data.partition_number = |
4805 |
++ be64_to_cpu(*(u64 *)vscsi->map_buf); |
4806 |
++ pr_debug("init_msg, part num %d\n", |
4807 |
++ vscsi->client_data.partition_number); |
4808 |
++ } else { |
4809 |
++ pr_debug("init_msg h_vioctl rc %ld\n", rc); |
4810 |
++ rc = ADAPT_SUCCESS; |
4811 |
++ } |
4812 |
++ |
4813 |
++ if (crq->format == INIT_MSG) { |
4814 |
++ rc = ibmvscsis_handle_init_msg(vscsi); |
4815 |
++ } else if (crq->format == INIT_COMPLETE_MSG) { |
4816 |
++ rc = ibmvscsis_handle_init_compl_msg(vscsi); |
4817 |
++ } else { |
4818 |
++ rc = ERROR; |
4819 |
++ dev_err(&vscsi->dev, "init_msg: invalid format %d\n", |
4820 |
++ (uint)crq->format); |
4821 |
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
4822 |
++ } |
4823 |
++ |
4824 |
++ return rc; |
4825 |
++} |
4826 |
++ |
4827 |
++/** |
4828 |
++ * ibmvscsis_establish_new_q() - Establish new CRQ queue |
4829 |
++ * @vscsi: Pointer to our adapter structure |
4830 |
++ * |
4831 |
++ * Must be called with interrupt lock held. |
4832 |
++ */ |
4833 |
++static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) |
4834 |
++{ |
4835 |
++ long rc = ADAPT_SUCCESS; |
4836 |
++ uint format; |
4837 |
++ |
4838 |
++ vscsi->flags &= PRESERVE_FLAG_FIELDS; |
4839 |
++ vscsi->rsp_q_timer.timer_pops = 0; |
4840 |
++ vscsi->debit = 0; |
4841 |
++ vscsi->credit = 0; |
4842 |
++ |
4843 |
++ rc = vio_enable_interrupts(vscsi->dma_dev); |
4844 |
++ if (rc) { |
4845 |
++ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n", |
4846 |
++ rc); |
4847 |
++ return rc; |
4848 |
++ } |
4849 |
++ |
4850 |
++ rc = ibmvscsis_check_init_msg(vscsi, &format); |
4851 |
++ if (rc) { |
4852 |
++ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n", |
4853 |
++ rc); |
4854 |
++ return rc; |
4855 |
++ } |
4856 |
++ |
4857 |
++ if (format == UNUSED_FORMAT) { |
4858 |
++ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); |
4859 |
++ switch (rc) { |
4860 |
++ case H_SUCCESS: |
4861 |
++ case H_DROPPED: |
4862 |
++ case H_CLOSED: |
4863 |
++ rc = ADAPT_SUCCESS; |
4864 |
++ break; |
4865 |
++ |
4866 |
++ case H_PARAMETER: |
4867 |
++ case H_HARDWARE: |
4868 |
++ break; |
4869 |
++ |
4870 |
++ default: |
4871 |
++ vscsi->state = UNDEFINED; |
4872 |
++ rc = H_HARDWARE; |
4873 |
++ break; |
4874 |
++ } |
4875 |
++ } else if (format == INIT_MSG) { |
4876 |
++ rc = ibmvscsis_handle_init_msg(vscsi); |
4877 |
++ } |
4878 |
++ |
4879 |
++ return rc; |
4880 |
++} |
4881 |
++ |
4882 |
++/** |
4883 |
++ * ibmvscsis_reset_queue() - Reset CRQ Queue |
4884 |
++ * @vscsi: Pointer to our adapter structure |
4885 |
++ * |
4886 |
++ * This function calls h_free_q and then calls h_reg_q and does all |
4887 |
++ * of the bookkeeping to get us back to where we can communicate. |
4888 |
++ * |
4889 |
++ * Actually, we don't always call h_free_crq. A problem was discovered |
4890 |
++ * where one partition would close and reopen his queue, which would |
4891 |
++ * cause his partner to get a transport event, which would cause him to |
4892 |
++ * close and reopen his queue, which would cause the original partition |
4893 |
++ * to get a transport event, etc., etc. To prevent this, we don't |
4894 |
++ * actually close our queue if the client initiated the reset, (i.e. |
4895 |
++ * either we got a transport event or we have detected that the client's |
4896 |
++ * queue is gone) |
4897 |
++ * |
4898 |
++ * EXECUTION ENVIRONMENT: |
4899 |
++ * Process environment, called with interrupt lock held |
4900 |
++ */ |
4901 |
++static void ibmvscsis_reset_queue(struct scsi_info *vscsi) |
4902 |
++{ |
4903 |
++ int bytes; |
4904 |
++ long rc = ADAPT_SUCCESS; |
4905 |
++ |
4906 |
++ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); |
4907 |
++ |
4908 |
++ /* don't reset, the client did it for us */ |
4909 |
++ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { |
4910 |
++ vscsi->flags &= PRESERVE_FLAG_FIELDS; |
4911 |
++ vscsi->rsp_q_timer.timer_pops = 0; |
4912 |
++ vscsi->debit = 0; |
4913 |
++ vscsi->credit = 0; |
4914 |
++ vscsi->state = WAIT_CONNECTION; |
4915 |
++ vio_enable_interrupts(vscsi->dma_dev); |
4916 |
++ } else { |
4917 |
++ rc = ibmvscsis_free_command_q(vscsi); |
4918 |
++ if (rc == ADAPT_SUCCESS) { |
4919 |
++ vscsi->state = WAIT_CONNECTION; |
4920 |
++ |
4921 |
++ bytes = vscsi->cmd_q.size * PAGE_SIZE; |
4922 |
++ rc = h_reg_crq(vscsi->dds.unit_id, |
4923 |
++ vscsi->cmd_q.crq_token, bytes); |
4924 |
++ if (rc == H_CLOSED || rc == H_SUCCESS) { |
4925 |
++ rc = ibmvscsis_establish_new_q(vscsi); |
4926 |
++ } |
4927 |
+ |
4928 |
+- case ERR_DISCONNECTED: |
4929 |
+- case ERR_DISCONNECT: |
4930 |
+- case UNDEFINED: |
4931 |
+- if (new_state == UNCONFIGURING) |
4932 |
+- vscsi->new_state = new_state; |
4933 |
+- break; |
4934 |
++ if (rc != ADAPT_SUCCESS) { |
4935 |
++ pr_debug("reset_queue: reg_crq rc %ld\n", rc); |
4936 |
+ |
4937 |
+- case ERR_DISCONNECT_RECONNECT: |
4938 |
+- switch (new_state) { |
4939 |
+- case UNCONFIGURING: |
4940 |
+- case ERR_DISCONNECT: |
4941 |
+- vscsi->new_state = new_state; |
4942 |
+- break; |
4943 |
+- default: |
4944 |
+- break; |
4945 |
++ vscsi->state = ERR_DISCONNECTED; |
4946 |
++ vscsi->flags |= RESPONSE_Q_DOWN; |
4947 |
++ ibmvscsis_free_command_q(vscsi); |
4948 |
+ } |
4949 |
+- break; |
4950 |
++ } else { |
4951 |
++ vscsi->state = ERR_DISCONNECTED; |
4952 |
++ vscsi->flags |= RESPONSE_Q_DOWN; |
4953 |
++ } |
4954 |
++ } |
4955 |
++} |
4956 |
+ |
4957 |
+- case WAIT_ENABLED: |
4958 |
+- case PART_UP_WAIT_ENAB: |
4959 |
+- case WAIT_IDLE: |
4960 |
+- case WAIT_CONNECTION: |
4961 |
+- case CONNECTED: |
4962 |
+- case SRP_PROCESSING: |
4963 |
+- vscsi->new_state = new_state; |
4964 |
+- break; |
4965 |
++/** |
4966 |
++ * ibmvscsis_free_cmd_resources() - Free command resources |
4967 |
++ * @vscsi: Pointer to our adapter structure |
4968 |
++ * @cmd: Command which is not longer in use |
4969 |
++ * |
4970 |
++ * Must be called with interrupt lock held. |
4971 |
++ */ |
4972 |
++static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, |
4973 |
++ struct ibmvscsis_cmd *cmd) |
4974 |
++{ |
4975 |
++ struct iu_entry *iue = cmd->iue; |
4976 |
+ |
4977 |
+- default: |
4978 |
+- break; |
4979 |
+- } |
4980 |
++ switch (cmd->type) { |
4981 |
++ case TASK_MANAGEMENT: |
4982 |
++ case SCSI_CDB: |
4983 |
++ /* |
4984 |
++ * When the queue goes down this value is cleared, so it |
4985 |
++ * cannot be cleared in this general purpose function. |
4986 |
++ */ |
4987 |
++ if (vscsi->debit) |
4988 |
++ vscsi->debit -= 1; |
4989 |
++ break; |
4990 |
++ case ADAPTER_MAD: |
4991 |
++ vscsi->flags &= ~PROCESSING_MAD; |
4992 |
++ break; |
4993 |
++ case UNSET_TYPE: |
4994 |
++ break; |
4995 |
++ default: |
4996 |
++ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", |
4997 |
++ cmd->type); |
4998 |
++ break; |
4999 |
+ } |
5000 |
+ |
5001 |
+- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", |
5002 |
+- vscsi->flags, vscsi->new_state); |
5003 |
++ cmd->iue = NULL; |
5004 |
++ list_add_tail(&cmd->list, &vscsi->free_cmd); |
5005 |
++ srp_iu_put(iue); |
5006 |
++ |
5007 |
++ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && |
5008 |
++ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { |
5009 |
++ vscsi->flags &= ~WAIT_FOR_IDLE; |
5010 |
++ complete(&vscsi->wait_idle); |
5011 |
++ } |
5012 |
+ } |
5013 |
+ |
5014 |
+ /** |
5015 |
+@@ -864,10 +1007,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, |
5016 |
+ TRANS_EVENT)); |
5017 |
+ break; |
5018 |
+ |
5019 |
+- case PART_UP_WAIT_ENAB: |
5020 |
+- vscsi->state = WAIT_ENABLED; |
5021 |
+- break; |
5022 |
+- |
5023 |
+ case SRP_PROCESSING: |
5024 |
+ if ((vscsi->debit > 0) || |
5025 |
+ !list_empty(&vscsi->schedule_q) || |
5026 |
+@@ -896,7 +1035,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, |
5027 |
+ } |
5028 |
+ } |
5029 |
+ |
5030 |
+- rc = vscsi->flags & SCHEDULE_DISCONNECT; |
5031 |
++ rc = vscsi->flags & SCHEDULE_DISCONNECT; |
5032 |
+ |
5033 |
+ pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", |
5034 |
+ vscsi->flags, vscsi->state, rc); |
5035 |
+@@ -1067,16 +1206,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) |
5036 |
+ free_qs = true; |
5037 |
+ |
5038 |
+ switch (vscsi->state) { |
5039 |
++ case UNCONFIGURING: |
5040 |
++ ibmvscsis_free_command_q(vscsi); |
5041 |
++ dma_rmb(); |
5042 |
++ isync(); |
5043 |
++ if (vscsi->flags & CFG_SLEEPING) { |
5044 |
++ vscsi->flags &= ~CFG_SLEEPING; |
5045 |
++ complete(&vscsi->unconfig); |
5046 |
++ } |
5047 |
++ break; |
5048 |
+ case ERR_DISCONNECT_RECONNECT: |
5049 |
+- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION); |
5050 |
++ ibmvscsis_reset_queue(vscsi); |
5051 |
+ pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); |
5052 |
+ break; |
5053 |
+ |
5054 |
+ case ERR_DISCONNECT: |
5055 |
+ ibmvscsis_free_command_q(vscsi); |
5056 |
+- vscsi->flags &= ~DISCONNECT_SCHEDULED; |
5057 |
++ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); |
5058 |
+ vscsi->flags |= RESPONSE_Q_DOWN; |
5059 |
+- vscsi->state = ERR_DISCONNECTED; |
5060 |
++ if (vscsi->tport.enabled) |
5061 |
++ vscsi->state = ERR_DISCONNECTED; |
5062 |
++ else |
5063 |
++ vscsi->state = WAIT_ENABLED; |
5064 |
+ pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", |
5065 |
+ vscsi->flags, vscsi->state); |
5066 |
+ break; |
5067 |
+@@ -1221,7 +1372,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, |
5068 |
+ * @iue: Information Unit containing the Adapter Info MAD request |
5069 |
+ * |
5070 |
+ * EXECUTION ENVIRONMENT: |
5071 |
+- * Interrupt adpater lock is held |
5072 |
++ * Interrupt adapter lock is held |
5073 |
+ */ |
5074 |
+ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, |
5075 |
+ struct iu_entry *iue) |
5076 |
+@@ -1621,8 +1772,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) |
5077 |
+ be64_to_cpu(msg_hi), |
5078 |
+ be64_to_cpu(cmd->rsp.tag)); |
5079 |
+ |
5080 |
+- pr_debug("send_messages: tag 0x%llx, rc %ld\n", |
5081 |
+- be64_to_cpu(cmd->rsp.tag), rc); |
5082 |
++ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", |
5083 |
++ cmd, be64_to_cpu(cmd->rsp.tag), rc); |
5084 |
+ |
5085 |
+ /* if all ok free up the command element resources */ |
5086 |
+ if (rc == H_SUCCESS) { |
5087 |
+@@ -1692,7 +1843,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, |
5088 |
+ * @crq: Pointer to the CRQ entry containing the MAD request |
5089 |
+ * |
5090 |
+ * EXECUTION ENVIRONMENT: |
5091 |
+- * Interrupt called with adapter lock held |
5092 |
++ * Interrupt, called with adapter lock held |
5093 |
+ */ |
5094 |
+ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) |
5095 |
+ { |
5096 |
+@@ -1746,14 +1897,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) |
5097 |
+ |
5098 |
+ pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); |
5099 |
+ |
5100 |
+- if (be16_to_cpu(mad->length) < 0) { |
5101 |
+- dev_err(&vscsi->dev, "mad: length is < 0\n"); |
5102 |
+- ibmvscsis_post_disconnect(vscsi, |
5103 |
+- ERR_DISCONNECT_RECONNECT, 0); |
5104 |
+- rc = SRP_VIOLATION; |
5105 |
+- } else { |
5106 |
+- rc = ibmvscsis_process_mad(vscsi, iue); |
5107 |
+- } |
5108 |
++ rc = ibmvscsis_process_mad(vscsi, iue); |
5109 |
+ |
5110 |
+ pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), |
5111 |
+ rc); |
5112 |
+@@ -1865,7 +2009,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, |
5113 |
+ break; |
5114 |
+ case H_PERMISSION: |
5115 |
+ if (connection_broken(vscsi)) |
5116 |
+- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; |
5117 |
++ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; |
5118 |
+ dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", |
5119 |
+ rc); |
5120 |
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, |
5121 |
+@@ -2090,248 +2234,98 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) |
5122 |
+ break; |
5123 |
+ |
5124 |
+ case SRP_TSK_MGMT: |
5125 |
+- tsk = &vio_iu(iue)->srp.tsk_mgmt; |
5126 |
+- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, |
5127 |
+- tsk->tag); |
5128 |
+- cmd->rsp.tag = tsk->tag; |
5129 |
+- vscsi->debit += 1; |
5130 |
+- cmd->type = TASK_MANAGEMENT; |
5131 |
+- list_add_tail(&cmd->list, &vscsi->schedule_q); |
5132 |
+- queue_work(vscsi->work_q, &cmd->work); |
5133 |
+- break; |
5134 |
+- |
5135 |
+- case SRP_CMD: |
5136 |
+- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, |
5137 |
+- srp->tag); |
5138 |
+- cmd->rsp.tag = srp->tag; |
5139 |
+- vscsi->debit += 1; |
5140 |
+- cmd->type = SCSI_CDB; |
5141 |
+- /* |
5142 |
+- * We want to keep track of work waiting for |
5143 |
+- * the workqueue. |
5144 |
+- */ |
5145 |
+- list_add_tail(&cmd->list, &vscsi->schedule_q); |
5146 |
+- queue_work(vscsi->work_q, &cmd->work); |
5147 |
+- break; |
5148 |
+- |
5149 |
+- case SRP_I_LOGOUT: |
5150 |
+- rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); |
5151 |
+- break; |
5152 |
+- |
5153 |
+- case SRP_CRED_RSP: |
5154 |
+- case SRP_AER_RSP: |
5155 |
+- default: |
5156 |
+- ibmvscsis_free_cmd_resources(vscsi, cmd); |
5157 |
+- dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", |
5158 |
+- (uint)srp->opcode); |
5159 |
+- ibmvscsis_post_disconnect(vscsi, |
5160 |
+- ERR_DISCONNECT_RECONNECT, 0); |
5161 |
+- break; |
5162 |
+- } |
5163 |
+- } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { |
5164 |
+- rc = ibmvscsis_srp_login(vscsi, cmd, crq); |
5165 |
+- } else { |
5166 |
+- ibmvscsis_free_cmd_resources(vscsi, cmd); |
5167 |
+- dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", |
5168 |
+- vscsi->state); |
5169 |
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
5170 |
+- } |
5171 |
+-} |
5172 |
+- |
5173 |
+-/** |
5174 |
+- * ibmvscsis_ping_response() - Respond to a ping request |
5175 |
+- * @vscsi: Pointer to our adapter structure |
5176 |
+- * |
5177 |
+- * Let the client know that the server is alive and waiting on |
5178 |
+- * its native I/O stack. |
5179 |
+- * If any type of error occurs from the call to queue a ping |
5180 |
+- * response then the client is either not accepting or receiving |
5181 |
+- * interrupts. Disconnect with an error. |
5182 |
+- * |
5183 |
+- * EXECUTION ENVIRONMENT: |
5184 |
+- * Interrupt, interrupt lock held |
5185 |
+- */ |
5186 |
+-static long ibmvscsis_ping_response(struct scsi_info *vscsi) |
5187 |
+-{ |
5188 |
+- struct viosrp_crq *crq; |
5189 |
+- u64 buffer[2] = { 0, 0 }; |
5190 |
+- long rc; |
5191 |
+- |
5192 |
+- crq = (struct viosrp_crq *)&buffer; |
5193 |
+- crq->valid = VALID_CMD_RESP_EL; |
5194 |
+- crq->format = (u8)MESSAGE_IN_CRQ; |
5195 |
+- crq->status = PING_RESPONSE; |
5196 |
+- |
5197 |
+- rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), |
5198 |
+- cpu_to_be64(buffer[MSG_LOW])); |
5199 |
+- |
5200 |
+- switch (rc) { |
5201 |
+- case H_SUCCESS: |
5202 |
+- break; |
5203 |
+- case H_CLOSED: |
5204 |
+- vscsi->flags |= CLIENT_FAILED; |
5205 |
+- case H_DROPPED: |
5206 |
+- vscsi->flags |= RESPONSE_Q_DOWN; |
5207 |
+- case H_REMOTE_PARM: |
5208 |
+- dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", |
5209 |
+- rc); |
5210 |
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
5211 |
+- break; |
5212 |
+- default: |
5213 |
+- dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", |
5214 |
+- rc); |
5215 |
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
5216 |
+- break; |
5217 |
+- } |
5218 |
+- |
5219 |
+- return rc; |
5220 |
+-} |
5221 |
+- |
5222 |
+-/** |
5223 |
+- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message |
5224 |
+- * @vscsi: Pointer to our adapter structure |
5225 |
+- * |
5226 |
+- * Must be called with interrupt lock held. |
5227 |
+- */ |
5228 |
+-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) |
5229 |
+-{ |
5230 |
+- long rc = ADAPT_SUCCESS; |
5231 |
+- |
5232 |
+- switch (vscsi->state) { |
5233 |
+- case NO_QUEUE: |
5234 |
+- case ERR_DISCONNECT: |
5235 |
+- case ERR_DISCONNECT_RECONNECT: |
5236 |
+- case ERR_DISCONNECTED: |
5237 |
+- case UNCONFIGURING: |
5238 |
+- case UNDEFINED: |
5239 |
+- rc = ERROR; |
5240 |
+- break; |
5241 |
+- |
5242 |
+- case WAIT_CONNECTION: |
5243 |
+- vscsi->state = CONNECTED; |
5244 |
+- break; |
5245 |
+- |
5246 |
+- case WAIT_IDLE: |
5247 |
+- case SRP_PROCESSING: |
5248 |
+- case CONNECTED: |
5249 |
+- case WAIT_ENABLED: |
5250 |
+- case PART_UP_WAIT_ENAB: |
5251 |
+- default: |
5252 |
+- rc = ERROR; |
5253 |
+- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", |
5254 |
+- vscsi->state); |
5255 |
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
5256 |
+- break; |
5257 |
+- } |
5258 |
+- |
5259 |
+- return rc; |
5260 |
+-} |
5261 |
+- |
5262 |
+-/** |
5263 |
+- * ibmvscsis_handle_init_msg() - Respond to an Init Message |
5264 |
+- * @vscsi: Pointer to our adapter structure |
5265 |
+- * |
5266 |
+- * Must be called with interrupt lock held. |
5267 |
+- */ |
5268 |
+-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) |
5269 |
+-{ |
5270 |
+- long rc = ADAPT_SUCCESS; |
5271 |
+- |
5272 |
+- switch (vscsi->state) { |
5273 |
+- case WAIT_ENABLED: |
5274 |
+- vscsi->state = PART_UP_WAIT_ENAB; |
5275 |
+- break; |
5276 |
++ tsk = &vio_iu(iue)->srp.tsk_mgmt; |
5277 |
++ pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, |
5278 |
++ tsk->tag); |
5279 |
++ cmd->rsp.tag = tsk->tag; |
5280 |
++ vscsi->debit += 1; |
5281 |
++ cmd->type = TASK_MANAGEMENT; |
5282 |
++ list_add_tail(&cmd->list, &vscsi->schedule_q); |
5283 |
++ queue_work(vscsi->work_q, &cmd->work); |
5284 |
++ break; |
5285 |
+ |
5286 |
+- case WAIT_CONNECTION: |
5287 |
+- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); |
5288 |
+- switch (rc) { |
5289 |
+- case H_SUCCESS: |
5290 |
+- vscsi->state = CONNECTED; |
5291 |
++ case SRP_CMD: |
5292 |
++ pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, |
5293 |
++ srp->tag); |
5294 |
++ cmd->rsp.tag = srp->tag; |
5295 |
++ vscsi->debit += 1; |
5296 |
++ cmd->type = SCSI_CDB; |
5297 |
++ /* |
5298 |
++ * We want to keep track of work waiting for |
5299 |
++ * the workqueue. |
5300 |
++ */ |
5301 |
++ list_add_tail(&cmd->list, &vscsi->schedule_q); |
5302 |
++ queue_work(vscsi->work_q, &cmd->work); |
5303 |
+ break; |
5304 |
+ |
5305 |
+- case H_PARAMETER: |
5306 |
+- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
5307 |
+- rc); |
5308 |
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
5309 |
++ case SRP_I_LOGOUT: |
5310 |
++ rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); |
5311 |
+ break; |
5312 |
+ |
5313 |
+- case H_DROPPED: |
5314 |
+- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
5315 |
+- rc); |
5316 |
+- rc = ERROR; |
5317 |
++ case SRP_CRED_RSP: |
5318 |
++ case SRP_AER_RSP: |
5319 |
++ default: |
5320 |
++ ibmvscsis_free_cmd_resources(vscsi, cmd); |
5321 |
++ dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", |
5322 |
++ (uint)srp->opcode); |
5323 |
+ ibmvscsis_post_disconnect(vscsi, |
5324 |
+ ERR_DISCONNECT_RECONNECT, 0); |
5325 |
+ break; |
5326 |
+- |
5327 |
+- case H_CLOSED: |
5328 |
+- pr_warn("init_msg: failed to send, rc %ld\n", rc); |
5329 |
+- rc = 0; |
5330 |
+- break; |
5331 |
+ } |
5332 |
+- break; |
5333 |
+- |
5334 |
+- case UNDEFINED: |
5335 |
+- rc = ERROR; |
5336 |
+- break; |
5337 |
+- |
5338 |
+- case UNCONFIGURING: |
5339 |
+- break; |
5340 |
+- |
5341 |
+- case PART_UP_WAIT_ENAB: |
5342 |
+- case CONNECTED: |
5343 |
+- case SRP_PROCESSING: |
5344 |
+- case WAIT_IDLE: |
5345 |
+- case NO_QUEUE: |
5346 |
+- case ERR_DISCONNECT: |
5347 |
+- case ERR_DISCONNECT_RECONNECT: |
5348 |
+- case ERR_DISCONNECTED: |
5349 |
+- default: |
5350 |
+- rc = ERROR; |
5351 |
+- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", |
5352 |
++ } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { |
5353 |
++ rc = ibmvscsis_srp_login(vscsi, cmd, crq); |
5354 |
++ } else { |
5355 |
++ ibmvscsis_free_cmd_resources(vscsi, cmd); |
5356 |
++ dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", |
5357 |
+ vscsi->state); |
5358 |
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
5359 |
+- break; |
5360 |
+ } |
5361 |
+- |
5362 |
+- return rc; |
5363 |
+ } |
5364 |
+ |
5365 |
+ /** |
5366 |
+- * ibmvscsis_init_msg() - Respond to an init message |
5367 |
++ * ibmvscsis_ping_response() - Respond to a ping request |
5368 |
+ * @vscsi: Pointer to our adapter structure |
5369 |
+- * @crq: Pointer to CRQ element containing the Init Message |
5370 |
++ * |
5371 |
++ * Let the client know that the server is alive and waiting on |
5372 |
++ * its native I/O stack. |
5373 |
++ * If any type of error occurs from the call to queue a ping |
5374 |
++ * response then the client is either not accepting or receiving |
5375 |
++ * interrupts. Disconnect with an error. |
5376 |
+ * |
5377 |
+ * EXECUTION ENVIRONMENT: |
5378 |
+ * Interrupt, interrupt lock held |
5379 |
+ */ |
5380 |
+-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) |
5381 |
++static long ibmvscsis_ping_response(struct scsi_info *vscsi) |
5382 |
+ { |
5383 |
+- long rc = ADAPT_SUCCESS; |
5384 |
++ struct viosrp_crq *crq; |
5385 |
++ u64 buffer[2] = { 0, 0 }; |
5386 |
++ long rc; |
5387 |
+ |
5388 |
+- pr_debug("init_msg: state 0x%hx\n", vscsi->state); |
5389 |
++ crq = (struct viosrp_crq *)&buffer; |
5390 |
++ crq->valid = VALID_CMD_RESP_EL; |
5391 |
++ crq->format = (u8)MESSAGE_IN_CRQ; |
5392 |
++ crq->status = PING_RESPONSE; |
5393 |
+ |
5394 |
+- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, |
5395 |
+- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, |
5396 |
+- 0); |
5397 |
+- if (rc == H_SUCCESS) { |
5398 |
+- vscsi->client_data.partition_number = |
5399 |
+- be64_to_cpu(*(u64 *)vscsi->map_buf); |
5400 |
+- pr_debug("init_msg, part num %d\n", |
5401 |
+- vscsi->client_data.partition_number); |
5402 |
+- } else { |
5403 |
+- pr_debug("init_msg h_vioctl rc %ld\n", rc); |
5404 |
+- rc = ADAPT_SUCCESS; |
5405 |
+- } |
5406 |
++ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), |
5407 |
++ cpu_to_be64(buffer[MSG_LOW])); |
5408 |
+ |
5409 |
+- if (crq->format == INIT_MSG) { |
5410 |
+- rc = ibmvscsis_handle_init_msg(vscsi); |
5411 |
+- } else if (crq->format == INIT_COMPLETE_MSG) { |
5412 |
+- rc = ibmvscsis_handle_init_compl_msg(vscsi); |
5413 |
+- } else { |
5414 |
+- rc = ERROR; |
5415 |
+- dev_err(&vscsi->dev, "init_msg: invalid format %d\n", |
5416 |
+- (uint)crq->format); |
5417 |
++ switch (rc) { |
5418 |
++ case H_SUCCESS: |
5419 |
++ break; |
5420 |
++ case H_CLOSED: |
5421 |
++ vscsi->flags |= CLIENT_FAILED; |
5422 |
++ case H_DROPPED: |
5423 |
++ vscsi->flags |= RESPONSE_Q_DOWN; |
5424 |
++ case H_REMOTE_PARM: |
5425 |
++ dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", |
5426 |
++ rc); |
5427 |
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
5428 |
++ break; |
5429 |
++ default: |
5430 |
++ dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", |
5431 |
++ rc); |
5432 |
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
5433 |
++ break; |
5434 |
+ } |
5435 |
+ |
5436 |
+ return rc; |
5437 |
+@@ -2392,7 +2386,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi, |
5438 |
+ break; |
5439 |
+ |
5440 |
+ case VALID_TRANS_EVENT: |
5441 |
+- rc = ibmvscsis_trans_event(vscsi, crq); |
5442 |
++ rc = ibmvscsis_trans_event(vscsi, crq); |
5443 |
+ break; |
5444 |
+ |
5445 |
+ case VALID_INIT_MSG: |
5446 |
+@@ -2523,7 +2517,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, |
5447 |
+ dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", |
5448 |
+ srp->tag); |
5449 |
+ goto fail; |
5450 |
+- return; |
5451 |
+ } |
5452 |
+ |
5453 |
+ cmd->rsp.sol_not = srp->sol_not; |
5454 |
+@@ -2560,6 +2553,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, |
5455 |
+ data_len, attr, dir, 0); |
5456 |
+ if (rc) { |
5457 |
+ dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); |
5458 |
++ spin_lock_bh(&vscsi->intr_lock); |
5459 |
++ list_del(&cmd->list); |
5460 |
++ ibmvscsis_free_cmd_resources(vscsi, cmd); |
5461 |
++ spin_unlock_bh(&vscsi->intr_lock); |
5462 |
+ goto fail; |
5463 |
+ } |
5464 |
+ return; |
5465 |
+@@ -2639,6 +2636,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi, |
5466 |
+ if (rc) { |
5467 |
+ dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", |
5468 |
+ rc); |
5469 |
++ spin_lock_bh(&vscsi->intr_lock); |
5470 |
++ list_del(&cmd->list); |
5471 |
++ spin_unlock_bh(&vscsi->intr_lock); |
5472 |
+ cmd->se_cmd.se_tmr_req->response = |
5473 |
+ TMR_FUNCTION_REJECTED; |
5474 |
+ } |
5475 |
+@@ -2787,36 +2787,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) |
5476 |
+ } |
5477 |
+ |
5478 |
+ /** |
5479 |
+- * ibmvscsis_check_q() - Helper function to Check Init Message Valid |
5480 |
+- * @vscsi: Pointer to our adapter structure |
5481 |
+- * |
5482 |
+- * Checks if a initialize message was queued by the initiatior |
5483 |
+- * while the timing window was open. This function is called from |
5484 |
+- * probe after the CRQ is created and interrupts are enabled. |
5485 |
+- * It would only be used by adapters who wait for some event before |
5486 |
+- * completing the init handshake with the client. For ibmvscsi, this |
5487 |
+- * event is waiting for the port to be enabled. |
5488 |
+- * |
5489 |
+- * EXECUTION ENVIRONMENT: |
5490 |
+- * Process level only, interrupt lock held |
5491 |
+- */ |
5492 |
+-static long ibmvscsis_check_q(struct scsi_info *vscsi) |
5493 |
+-{ |
5494 |
+- uint format; |
5495 |
+- long rc; |
5496 |
+- |
5497 |
+- rc = ibmvscsis_check_init_msg(vscsi, &format); |
5498 |
+- if (rc) |
5499 |
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
5500 |
+- else if (format == UNUSED_FORMAT) |
5501 |
+- vscsi->state = WAIT_ENABLED; |
5502 |
+- else |
5503 |
+- vscsi->state = PART_UP_WAIT_ENAB; |
5504 |
+- |
5505 |
+- return rc; |
5506 |
+-} |
5507 |
+- |
5508 |
+-/** |
5509 |
+ * ibmvscsis_enable_change_state() - Set new state based on enabled status |
5510 |
+ * @vscsi: Pointer to our adapter structure |
5511 |
+ * |
5512 |
+@@ -2827,77 +2797,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi) |
5513 |
+ */ |
5514 |
+ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) |
5515 |
+ { |
5516 |
++ int bytes; |
5517 |
+ long rc = ADAPT_SUCCESS; |
5518 |
+ |
5519 |
+-handle_state_change: |
5520 |
+- switch (vscsi->state) { |
5521 |
+- case WAIT_ENABLED: |
5522 |
+- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); |
5523 |
+- switch (rc) { |
5524 |
+- case H_SUCCESS: |
5525 |
+- case H_DROPPED: |
5526 |
+- case H_CLOSED: |
5527 |
+- vscsi->state = WAIT_CONNECTION; |
5528 |
+- rc = ADAPT_SUCCESS; |
5529 |
+- break; |
5530 |
+- |
5531 |
+- case H_PARAMETER: |
5532 |
+- break; |
5533 |
+- |
5534 |
+- case H_HARDWARE: |
5535 |
+- break; |
5536 |
+- |
5537 |
+- default: |
5538 |
+- vscsi->state = UNDEFINED; |
5539 |
+- rc = H_HARDWARE; |
5540 |
+- break; |
5541 |
+- } |
5542 |
+- break; |
5543 |
+- case PART_UP_WAIT_ENAB: |
5544 |
+- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); |
5545 |
+- switch (rc) { |
5546 |
+- case H_SUCCESS: |
5547 |
+- vscsi->state = CONNECTED; |
5548 |
+- rc = ADAPT_SUCCESS; |
5549 |
+- break; |
5550 |
+- |
5551 |
+- case H_DROPPED: |
5552 |
+- case H_CLOSED: |
5553 |
+- vscsi->state = WAIT_ENABLED; |
5554 |
+- goto handle_state_change; |
5555 |
+- |
5556 |
+- case H_PARAMETER: |
5557 |
+- break; |
5558 |
+- |
5559 |
+- case H_HARDWARE: |
5560 |
+- break; |
5561 |
+- |
5562 |
+- default: |
5563 |
+- rc = H_HARDWARE; |
5564 |
+- break; |
5565 |
+- } |
5566 |
+- break; |
5567 |
+- |
5568 |
+- case WAIT_CONNECTION: |
5569 |
+- case WAIT_IDLE: |
5570 |
+- case SRP_PROCESSING: |
5571 |
+- case CONNECTED: |
5572 |
+- rc = ADAPT_SUCCESS; |
5573 |
+- break; |
5574 |
+- /* should not be able to get here */ |
5575 |
+- case UNCONFIGURING: |
5576 |
+- rc = ERROR; |
5577 |
+- vscsi->state = UNDEFINED; |
5578 |
+- break; |
5579 |
++ bytes = vscsi->cmd_q.size * PAGE_SIZE; |
5580 |
++ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes); |
5581 |
++ if (rc == H_CLOSED || rc == H_SUCCESS) { |
5582 |
++ vscsi->state = WAIT_CONNECTION; |
5583 |
++ rc = ibmvscsis_establish_new_q(vscsi); |
5584 |
++ } |
5585 |
+ |
5586 |
+- /* driver should never allow this to happen */ |
5587 |
+- case ERR_DISCONNECT: |
5588 |
+- case ERR_DISCONNECT_RECONNECT: |
5589 |
+- default: |
5590 |
+- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n", |
5591 |
+- vscsi->state); |
5592 |
+- rc = ADAPT_SUCCESS; |
5593 |
+- break; |
5594 |
++ if (rc != ADAPT_SUCCESS) { |
5595 |
++ vscsi->state = ERR_DISCONNECTED; |
5596 |
++ vscsi->flags |= RESPONSE_Q_DOWN; |
5597 |
+ } |
5598 |
+ |
5599 |
+ return rc; |
5600 |
+@@ -2917,7 +2829,6 @@ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) |
5601 |
+ */ |
5602 |
+ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) |
5603 |
+ { |
5604 |
+- long rc = 0; |
5605 |
+ int pages; |
5606 |
+ struct vio_dev *vdev = vscsi->dma_dev; |
5607 |
+ |
5608 |
+@@ -2941,22 +2852,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) |
5609 |
+ return -ENOMEM; |
5610 |
+ } |
5611 |
+ |
5612 |
+- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE); |
5613 |
+- if (rc) { |
5614 |
+- if (rc == H_CLOSED) { |
5615 |
+- vscsi->state = WAIT_ENABLED; |
5616 |
+- rc = 0; |
5617 |
+- } else { |
5618 |
+- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token, |
5619 |
+- PAGE_SIZE, DMA_BIDIRECTIONAL); |
5620 |
+- free_page((unsigned long)vscsi->cmd_q.base_addr); |
5621 |
+- rc = -ENODEV; |
5622 |
+- } |
5623 |
+- } else { |
5624 |
+- vscsi->state = WAIT_ENABLED; |
5625 |
+- } |
5626 |
+- |
5627 |
+- return rc; |
5628 |
++ return 0; |
5629 |
+ } |
5630 |
+ |
5631 |
+ /** |
5632 |
+@@ -3271,7 +3167,7 @@ static void ibmvscsis_handle_crq(unsigned long data) |
5633 |
+ /* |
5634 |
+ * if we are in a path where we are waiting for all pending commands |
5635 |
+ * to complete because we received a transport event and anything in |
5636 |
+- * the command queue is for a new connection, do nothing |
5637 |
++ * the command queue is for a new connection, do nothing |
5638 |
+ */ |
5639 |
+ if (TARGET_STOP(vscsi)) { |
5640 |
+ vio_enable_interrupts(vscsi->dma_dev); |
5641 |
+@@ -3315,7 +3211,7 @@ static void ibmvscsis_handle_crq(unsigned long data) |
5642 |
+ * everything but transport events on the queue |
5643 |
+ * |
5644 |
+ * need to decrement the queue index so we can |
5645 |
+- * look at the elment again |
5646 |
++ * look at the element again |
5647 |
+ */ |
5648 |
+ if (vscsi->cmd_q.index) |
5649 |
+ vscsi->cmd_q.index -= 1; |
5650 |
+@@ -3379,7 +3275,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
5651 |
+ INIT_LIST_HEAD(&vscsi->waiting_rsp); |
5652 |
+ INIT_LIST_HEAD(&vscsi->active_q); |
5653 |
+ |
5654 |
+- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev)); |
5655 |
++ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", |
5656 |
++ dev_name(&vdev->dev)); |
5657 |
+ |
5658 |
+ pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); |
5659 |
+ |
5660 |
+@@ -3394,6 +3291,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
5661 |
+ strncat(vscsi->eye, vdev->name, MAX_EYE); |
5662 |
+ |
5663 |
+ vscsi->dds.unit_id = vdev->unit_address; |
5664 |
++ strncpy(vscsi->dds.partition_name, partition_name, |
5665 |
++ sizeof(vscsi->dds.partition_name)); |
5666 |
++ vscsi->dds.partition_num = partition_number; |
5667 |
+ |
5668 |
+ spin_lock_bh(&ibmvscsis_dev_lock); |
5669 |
+ list_add_tail(&vscsi->list, &ibmvscsis_dev_list); |
5670 |
+@@ -3470,6 +3370,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
5671 |
+ (unsigned long)vscsi); |
5672 |
+ |
5673 |
+ init_completion(&vscsi->wait_idle); |
5674 |
++ init_completion(&vscsi->unconfig); |
5675 |
+ |
5676 |
+ snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); |
5677 |
+ vscsi->work_q = create_workqueue(wq_name); |
5678 |
+@@ -3486,31 +3387,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
5679 |
+ goto destroy_WQ; |
5680 |
+ } |
5681 |
+ |
5682 |
+- spin_lock_bh(&vscsi->intr_lock); |
5683 |
+- vio_enable_interrupts(vdev); |
5684 |
+- if (rc) { |
5685 |
+- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc); |
5686 |
+- rc = -ENODEV; |
5687 |
+- spin_unlock_bh(&vscsi->intr_lock); |
5688 |
+- goto free_irq; |
5689 |
+- } |
5690 |
+- |
5691 |
+- if (ibmvscsis_check_q(vscsi)) { |
5692 |
+- rc = ERROR; |
5693 |
+- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc); |
5694 |
+- spin_unlock_bh(&vscsi->intr_lock); |
5695 |
+- goto disable_interrupt; |
5696 |
+- } |
5697 |
+- spin_unlock_bh(&vscsi->intr_lock); |
5698 |
++ vscsi->state = WAIT_ENABLED; |
5699 |
+ |
5700 |
+ dev_set_drvdata(&vdev->dev, vscsi); |
5701 |
+ |
5702 |
+ return 0; |
5703 |
+ |
5704 |
+-disable_interrupt: |
5705 |
+- vio_disable_interrupts(vdev); |
5706 |
+-free_irq: |
5707 |
+- free_irq(vdev->irq, vscsi); |
5708 |
+ destroy_WQ: |
5709 |
+ destroy_workqueue(vscsi->work_q); |
5710 |
+ unmap_buf: |
5711 |
+@@ -3544,10 +3426,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev) |
5712 |
+ |
5713 |
+ pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); |
5714 |
+ |
5715 |
+- /* |
5716 |
+- * TBD: Need to handle if there are commands on the waiting_rsp q |
5717 |
+- * Actually, can there still be cmds outstanding to tcm? |
5718 |
+- */ |
5719 |
++ spin_lock_bh(&vscsi->intr_lock); |
5720 |
++ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); |
5721 |
++ vscsi->flags |= CFG_SLEEPING; |
5722 |
++ spin_unlock_bh(&vscsi->intr_lock); |
5723 |
++ wait_for_completion(&vscsi->unconfig); |
5724 |
+ |
5725 |
+ vio_disable_interrupts(vdev); |
5726 |
+ free_irq(vdev->irq, vscsi); |
5727 |
+@@ -3556,7 +3439,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev) |
5728 |
+ DMA_BIDIRECTIONAL); |
5729 |
+ kfree(vscsi->map_buf); |
5730 |
+ tasklet_kill(&vscsi->work_task); |
5731 |
+- ibmvscsis_unregister_command_q(vscsi); |
5732 |
+ ibmvscsis_destroy_command_q(vscsi); |
5733 |
+ ibmvscsis_freetimer(vscsi); |
5734 |
+ ibmvscsis_free_cmds(vscsi); |
5735 |
+@@ -3610,7 +3492,7 @@ static int ibmvscsis_get_system_info(void) |
5736 |
+ |
5737 |
+ num = of_get_property(rootdn, "ibm,partition-no", NULL); |
5738 |
+ if (num) |
5739 |
+- partition_number = *num; |
5740 |
++ partition_number = of_read_number(num, 1); |
5741 |
+ |
5742 |
+ of_node_put(rootdn); |
5743 |
+ |
5744 |
+@@ -3904,18 +3786,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, |
5745 |
+ } |
5746 |
+ |
5747 |
+ if (tmp) { |
5748 |
+- tport->enabled = true; |
5749 |
+ spin_lock_bh(&vscsi->intr_lock); |
5750 |
++ tport->enabled = true; |
5751 |
+ lrc = ibmvscsis_enable_change_state(vscsi); |
5752 |
+ if (lrc) |
5753 |
+ pr_err("enable_change_state failed, rc %ld state %d\n", |
5754 |
+ lrc, vscsi->state); |
5755 |
+ spin_unlock_bh(&vscsi->intr_lock); |
5756 |
+ } else { |
5757 |
++ spin_lock_bh(&vscsi->intr_lock); |
5758 |
+ tport->enabled = false; |
5759 |
++ /* This simulates the server going down */ |
5760 |
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
5761 |
++ spin_unlock_bh(&vscsi->intr_lock); |
5762 |
+ } |
5763 |
+ |
5764 |
+- pr_debug("tpg_enable_store, state %d\n", vscsi->state); |
5765 |
++ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state); |
5766 |
+ |
5767 |
+ return count; |
5768 |
+ } |
5769 |
+@@ -3985,10 +3871,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = { |
5770 |
+ ATTRIBUTE_GROUPS(ibmvscsis_dev); |
5771 |
+ |
5772 |
+ static struct class ibmvscsis_class = { |
5773 |
+- .name = "ibmvscsis", |
5774 |
+- .dev_release = ibmvscsis_dev_release, |
5775 |
+- .class_attrs = ibmvscsis_class_attrs, |
5776 |
+- .dev_groups = ibmvscsis_dev_groups, |
5777 |
++ .name = "ibmvscsis", |
5778 |
++ .dev_release = ibmvscsis_dev_release, |
5779 |
++ .class_attrs = ibmvscsis_class_attrs, |
5780 |
++ .dev_groups = ibmvscsis_dev_groups, |
5781 |
+ }; |
5782 |
+ |
5783 |
+ static struct vio_device_id ibmvscsis_device_table[] = { |
5784 |
+diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
5785 |
+index 981a0c9..98b0ca7 100644 |
5786 |
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
5787 |
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
5788 |
+@@ -204,8 +204,6 @@ struct scsi_info { |
5789 |
+ struct list_head waiting_rsp; |
5790 |
+ #define NO_QUEUE 0x00 |
5791 |
+ #define WAIT_ENABLED 0X01 |
5792 |
+- /* driver has received an initialize command */ |
5793 |
+-#define PART_UP_WAIT_ENAB 0x02 |
5794 |
+ #define WAIT_CONNECTION 0x04 |
5795 |
+ /* have established a connection */ |
5796 |
+ #define CONNECTED 0x08 |
5797 |
+@@ -259,6 +257,8 @@ struct scsi_info { |
5798 |
+ #define SCHEDULE_DISCONNECT 0x00400 |
5799 |
+ /* disconnect handler is scheduled */ |
5800 |
+ #define DISCONNECT_SCHEDULED 0x00800 |
5801 |
++ /* remove function is sleeping */ |
5802 |
++#define CFG_SLEEPING 0x01000 |
5803 |
+ u32 flags; |
5804 |
+ /* adapter lock */ |
5805 |
+ spinlock_t intr_lock; |
5806 |
+@@ -287,6 +287,7 @@ struct scsi_info { |
5807 |
+ |
5808 |
+ struct workqueue_struct *work_q; |
5809 |
+ struct completion wait_idle; |
5810 |
++ struct completion unconfig; |
5811 |
+ struct device dev; |
5812 |
+ struct vio_dev *dma_dev; |
5813 |
+ struct srp_target target; |
5814 |
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c |
5815 |
+index 4d09bd4..6e3e636 100644 |
5816 |
+--- a/drivers/tty/serial/8250/8250_pci.c |
5817 |
++++ b/drivers/tty/serial/8250/8250_pci.c |
5818 |
+@@ -52,6 +52,7 @@ struct serial_private { |
5819 |
+ struct pci_dev *dev; |
5820 |
+ unsigned int nr; |
5821 |
+ struct pci_serial_quirk *quirk; |
5822 |
++ const struct pciserial_board *board; |
5823 |
+ int line[0]; |
5824 |
+ }; |
5825 |
+ |
5826 |
+@@ -3871,6 +3872,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) |
5827 |
+ } |
5828 |
+ } |
5829 |
+ priv->nr = i; |
5830 |
++ priv->board = board; |
5831 |
+ return priv; |
5832 |
+ |
5833 |
+ err_deinit: |
5834 |
+@@ -3881,7 +3883,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) |
5835 |
+ } |
5836 |
+ EXPORT_SYMBOL_GPL(pciserial_init_ports); |
5837 |
+ |
5838 |
+-void pciserial_remove_ports(struct serial_private *priv) |
5839 |
++void pciserial_detach_ports(struct serial_private *priv) |
5840 |
+ { |
5841 |
+ struct pci_serial_quirk *quirk; |
5842 |
+ int i; |
5843 |
+@@ -3895,7 +3897,11 @@ void pciserial_remove_ports(struct serial_private *priv) |
5844 |
+ quirk = find_quirk(priv->dev); |
5845 |
+ if (quirk->exit) |
5846 |
+ quirk->exit(priv->dev); |
5847 |
++} |
5848 |
+ |
5849 |
++void pciserial_remove_ports(struct serial_private *priv) |
5850 |
++{ |
5851 |
++ pciserial_detach_ports(priv); |
5852 |
+ kfree(priv); |
5853 |
+ } |
5854 |
+ EXPORT_SYMBOL_GPL(pciserial_remove_ports); |
5855 |
+@@ -5590,7 +5596,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, |
5856 |
+ return PCI_ERS_RESULT_DISCONNECT; |
5857 |
+ |
5858 |
+ if (priv) |
5859 |
+- pciserial_suspend_ports(priv); |
5860 |
++ pciserial_detach_ports(priv); |
5861 |
+ |
5862 |
+ pci_disable_device(dev); |
5863 |
+ |
5864 |
+@@ -5615,9 +5621,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) |
5865 |
+ static void serial8250_io_resume(struct pci_dev *dev) |
5866 |
+ { |
5867 |
+ struct serial_private *priv = pci_get_drvdata(dev); |
5868 |
++ const struct pciserial_board *board; |
5869 |
+ |
5870 |
+- if (priv) |
5871 |
+- pciserial_resume_ports(priv); |
5872 |
++ if (!priv) |
5873 |
++ return; |
5874 |
++ |
5875 |
++ board = priv->board; |
5876 |
++ kfree(priv); |
5877 |
++ priv = pciserial_init_ports(dev, board); |
5878 |
++ |
5879 |
++ if (!IS_ERR(priv)) { |
5880 |
++ pci_set_drvdata(dev, priv); |
5881 |
++ } |
5882 |
+ } |
5883 |
+ |
5884 |
+ static const struct pci_error_handlers serial8250_err_handler = { |
5885 |
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c |
5886 |
+index 45bc997..a95b3e7 100644 |
5887 |
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c |
5888 |
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c |
5889 |
+@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, |
5890 |
+ dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); |
5891 |
+ goto err; |
5892 |
+ } |
5893 |
+- ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); |
5894 |
++ sprintf(ep->name, "ep%d", ep->index); |
5895 |
++ ep->ep.name = ep->name; |
5896 |
+ |
5897 |
+ ep->ep_regs = udc->regs + USBA_EPT_BASE(i); |
5898 |
+ ep->dma_regs = udc->regs + USBA_DMA_BASE(i); |
5899 |
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h |
5900 |
+index 3e1c9d5..b03b2eb 100644 |
5901 |
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.h |
5902 |
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.h |
5903 |
+@@ -280,6 +280,7 @@ struct usba_ep { |
5904 |
+ void __iomem *ep_regs; |
5905 |
+ void __iomem *dma_regs; |
5906 |
+ void __iomem *fifo; |
5907 |
++ char name[8]; |
5908 |
+ struct usb_ep ep; |
5909 |
+ struct usba_udc *udc; |
5910 |
+ |
5911 |
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c |
5912 |
+index 80378dd..c882357 100644 |
5913 |
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c |
5914 |
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c |
5915 |
+@@ -31,49 +31,49 @@ |
5916 |
+ static void tce_iommu_detach_group(void *iommu_data, |
5917 |
+ struct iommu_group *iommu_group); |
5918 |
+ |
5919 |
+-static long try_increment_locked_vm(long npages) |
5920 |
++static long try_increment_locked_vm(struct mm_struct *mm, long npages) |
5921 |
+ { |
5922 |
+ long ret = 0, locked, lock_limit; |
5923 |
+ |
5924 |
+- if (!current || !current->mm) |
5925 |
+- return -ESRCH; /* process exited */ |
5926 |
++ if (WARN_ON_ONCE(!mm)) |
5927 |
++ return -EPERM; |
5928 |
+ |
5929 |
+ if (!npages) |
5930 |
+ return 0; |
5931 |
+ |
5932 |
+- down_write(¤t->mm->mmap_sem); |
5933 |
+- locked = current->mm->locked_vm + npages; |
5934 |
++ down_write(&mm->mmap_sem); |
5935 |
++ locked = mm->locked_vm + npages; |
5936 |
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
5937 |
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) |
5938 |
+ ret = -ENOMEM; |
5939 |
+ else |
5940 |
+- current->mm->locked_vm += npages; |
5941 |
++ mm->locked_vm += npages; |
5942 |
+ |
5943 |
+ pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, |
5944 |
+ npages << PAGE_SHIFT, |
5945 |
+- current->mm->locked_vm << PAGE_SHIFT, |
5946 |
++ mm->locked_vm << PAGE_SHIFT, |
5947 |
+ rlimit(RLIMIT_MEMLOCK), |
5948 |
+ ret ? " - exceeded" : ""); |
5949 |
+ |
5950 |
+- up_write(¤t->mm->mmap_sem); |
5951 |
++ up_write(&mm->mmap_sem); |
5952 |
+ |
5953 |
+ return ret; |
5954 |
+ } |
5955 |
+ |
5956 |
+-static void decrement_locked_vm(long npages) |
5957 |
++static void decrement_locked_vm(struct mm_struct *mm, long npages) |
5958 |
+ { |
5959 |
+- if (!current || !current->mm || !npages) |
5960 |
+- return; /* process exited */ |
5961 |
++ if (!mm || !npages) |
5962 |
++ return; |
5963 |
+ |
5964 |
+- down_write(¤t->mm->mmap_sem); |
5965 |
+- if (WARN_ON_ONCE(npages > current->mm->locked_vm)) |
5966 |
+- npages = current->mm->locked_vm; |
5967 |
+- current->mm->locked_vm -= npages; |
5968 |
++ down_write(&mm->mmap_sem); |
5969 |
++ if (WARN_ON_ONCE(npages > mm->locked_vm)) |
5970 |
++ npages = mm->locked_vm; |
5971 |
++ mm->locked_vm -= npages; |
5972 |
+ pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, |
5973 |
+ npages << PAGE_SHIFT, |
5974 |
+- current->mm->locked_vm << PAGE_SHIFT, |
5975 |
++ mm->locked_vm << PAGE_SHIFT, |
5976 |
+ rlimit(RLIMIT_MEMLOCK)); |
5977 |
+- up_write(¤t->mm->mmap_sem); |
5978 |
++ up_write(&mm->mmap_sem); |
5979 |
+ } |
5980 |
+ |
5981 |
+ /* |
5982 |
+@@ -89,6 +89,15 @@ struct tce_iommu_group { |
5983 |
+ }; |
5984 |
+ |
5985 |
+ /* |
5986 |
++ * A container needs to remember which preregistered region it has |
5987 |
++ * referenced to do proper cleanup at the userspace process exit. |
5988 |
++ */ |
5989 |
++struct tce_iommu_prereg { |
5990 |
++ struct list_head next; |
5991 |
++ struct mm_iommu_table_group_mem_t *mem; |
5992 |
++}; |
5993 |
++ |
5994 |
++/* |
5995 |
+ * The container descriptor supports only a single group per container. |
5996 |
+ * Required by the API as the container is not supplied with the IOMMU group |
5997 |
+ * at the moment of initialization. |
5998 |
+@@ -97,24 +106,68 @@ struct tce_container { |
5999 |
+ struct mutex lock; |
6000 |
+ bool enabled; |
6001 |
+ bool v2; |
6002 |
++ bool def_window_pending; |
6003 |
+ unsigned long locked_pages; |
6004 |
++ struct mm_struct *mm; |
6005 |
+ struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; |
6006 |
+ struct list_head group_list; |
6007 |
++ struct list_head prereg_list; |
6008 |
+ }; |
6009 |
+ |
6010 |
++static long tce_iommu_mm_set(struct tce_container *container) |
6011 |
++{ |
6012 |
++ if (container->mm) { |
6013 |
++ if (container->mm == current->mm) |
6014 |
++ return 0; |
6015 |
++ return -EPERM; |
6016 |
++ } |
6017 |
++ BUG_ON(!current->mm); |
6018 |
++ container->mm = current->mm; |
6019 |
++ atomic_inc(&container->mm->mm_count); |
6020 |
++ |
6021 |
++ return 0; |
6022 |
++} |
6023 |
++ |
6024 |
++static long tce_iommu_prereg_free(struct tce_container *container, |
6025 |
++ struct tce_iommu_prereg *tcemem) |
6026 |
++{ |
6027 |
++ long ret; |
6028 |
++ |
6029 |
++ ret = mm_iommu_put(container->mm, tcemem->mem); |
6030 |
++ if (ret) |
6031 |
++ return ret; |
6032 |
++ |
6033 |
++ list_del(&tcemem->next); |
6034 |
++ kfree(tcemem); |
6035 |
++ |
6036 |
++ return 0; |
6037 |
++} |
6038 |
++ |
6039 |
+ static long tce_iommu_unregister_pages(struct tce_container *container, |
6040 |
+ __u64 vaddr, __u64 size) |
6041 |
+ { |
6042 |
+ struct mm_iommu_table_group_mem_t *mem; |
6043 |
++ struct tce_iommu_prereg *tcemem; |
6044 |
++ bool found = false; |
6045 |
+ |
6046 |
+ if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) |
6047 |
+ return -EINVAL; |
6048 |
+ |
6049 |
+- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT); |
6050 |
++ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); |
6051 |
+ if (!mem) |
6052 |
+ return -ENOENT; |
6053 |
+ |
6054 |
+- return mm_iommu_put(mem); |
6055 |
++ list_for_each_entry(tcemem, &container->prereg_list, next) { |
6056 |
++ if (tcemem->mem == mem) { |
6057 |
++ found = true; |
6058 |
++ break; |
6059 |
++ } |
6060 |
++ } |
6061 |
++ |
6062 |
++ if (!found) |
6063 |
++ return -ENOENT; |
6064 |
++ |
6065 |
++ return tce_iommu_prereg_free(container, tcemem); |
6066 |
+ } |
6067 |
+ |
6068 |
+ static long tce_iommu_register_pages(struct tce_container *container, |
6069 |
+@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container, |
6070 |
+ { |
6071 |
+ long ret = 0; |
6072 |
+ struct mm_iommu_table_group_mem_t *mem = NULL; |
6073 |
++ struct tce_iommu_prereg *tcemem; |
6074 |
+ unsigned long entries = size >> PAGE_SHIFT; |
6075 |
+ |
6076 |
+ if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || |
6077 |
+ ((vaddr + size) < vaddr)) |
6078 |
+ return -EINVAL; |
6079 |
+ |
6080 |
+- ret = mm_iommu_get(vaddr, entries, &mem); |
6081 |
++ mem = mm_iommu_find(container->mm, vaddr, entries); |
6082 |
++ if (mem) { |
6083 |
++ list_for_each_entry(tcemem, &container->prereg_list, next) { |
6084 |
++ if (tcemem->mem == mem) |
6085 |
++ return -EBUSY; |
6086 |
++ } |
6087 |
++ } |
6088 |
++ |
6089 |
++ ret = mm_iommu_get(container->mm, vaddr, entries, &mem); |
6090 |
+ if (ret) |
6091 |
+ return ret; |
6092 |
+ |
6093 |
++ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); |
6094 |
++ tcemem->mem = mem; |
6095 |
++ list_add(&tcemem->next, &container->prereg_list); |
6096 |
++ |
6097 |
+ container->enabled = true; |
6098 |
+ |
6099 |
+ return 0; |
6100 |
+ } |
6101 |
+ |
6102 |
+-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) |
6103 |
++static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl, |
6104 |
++ struct mm_struct *mm) |
6105 |
+ { |
6106 |
+ unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * |
6107 |
+ tbl->it_size, PAGE_SIZE); |
6108 |
+@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) |
6109 |
+ |
6110 |
+ BUG_ON(tbl->it_userspace); |
6111 |
+ |
6112 |
+- ret = try_increment_locked_vm(cb >> PAGE_SHIFT); |
6113 |
++ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT); |
6114 |
+ if (ret) |
6115 |
+ return ret; |
6116 |
+ |
6117 |
+ uas = vzalloc(cb); |
6118 |
+ if (!uas) { |
6119 |
+- decrement_locked_vm(cb >> PAGE_SHIFT); |
6120 |
++ decrement_locked_vm(mm, cb >> PAGE_SHIFT); |
6121 |
+ return -ENOMEM; |
6122 |
+ } |
6123 |
+ tbl->it_userspace = uas; |
6124 |
+@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) |
6125 |
+ return 0; |
6126 |
+ } |
6127 |
+ |
6128 |
+-static void tce_iommu_userspace_view_free(struct iommu_table *tbl) |
6129 |
++static void tce_iommu_userspace_view_free(struct iommu_table *tbl, |
6130 |
++ struct mm_struct *mm) |
6131 |
+ { |
6132 |
+ unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * |
6133 |
+ tbl->it_size, PAGE_SIZE); |
6134 |
+@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) |
6135 |
+ |
6136 |
+ vfree(tbl->it_userspace); |
6137 |
+ tbl->it_userspace = NULL; |
6138 |
+- decrement_locked_vm(cb >> PAGE_SHIFT); |
6139 |
++ decrement_locked_vm(mm, cb >> PAGE_SHIFT); |
6140 |
+ } |
6141 |
+ |
6142 |
+ static bool tce_page_is_contained(struct page *page, unsigned page_shift) |
6143 |
+@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container) |
6144 |
+ struct iommu_table_group *table_group; |
6145 |
+ struct tce_iommu_group *tcegrp; |
6146 |
+ |
6147 |
+- if (!current->mm) |
6148 |
+- return -ESRCH; /* process exited */ |
6149 |
+- |
6150 |
+ if (container->enabled) |
6151 |
+ return -EBUSY; |
6152 |
+ |
6153 |
+@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container) |
6154 |
+ if (!table_group->tce32_size) |
6155 |
+ return -EPERM; |
6156 |
+ |
6157 |
++ ret = tce_iommu_mm_set(container); |
6158 |
++ if (ret) |
6159 |
++ return ret; |
6160 |
++ |
6161 |
+ locked = table_group->tce32_size >> PAGE_SHIFT; |
6162 |
+- ret = try_increment_locked_vm(locked); |
6163 |
++ ret = try_increment_locked_vm(container->mm, locked); |
6164 |
+ if (ret) |
6165 |
+ return ret; |
6166 |
+ |
6167 |
+@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container) |
6168 |
+ |
6169 |
+ container->enabled = false; |
6170 |
+ |
6171 |
+- if (!current->mm) |
6172 |
+- return; |
6173 |
+- |
6174 |
+- decrement_locked_vm(container->locked_pages); |
6175 |
++ BUG_ON(!container->mm); |
6176 |
++ decrement_locked_vm(container->mm, container->locked_pages); |
6177 |
+ } |
6178 |
+ |
6179 |
+ static void *tce_iommu_open(unsigned long arg) |
6180 |
+@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg) |
6181 |
+ |
6182 |
+ mutex_init(&container->lock); |
6183 |
+ INIT_LIST_HEAD_RCU(&container->group_list); |
6184 |
++ INIT_LIST_HEAD_RCU(&container->prereg_list); |
6185 |
+ |
6186 |
+ container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; |
6187 |
+ |
6188 |
+@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg) |
6189 |
+ static int tce_iommu_clear(struct tce_container *container, |
6190 |
+ struct iommu_table *tbl, |
6191 |
+ unsigned long entry, unsigned long pages); |
6192 |
+-static void tce_iommu_free_table(struct iommu_table *tbl); |
6193 |
++static void tce_iommu_free_table(struct tce_container *container, |
6194 |
++ struct iommu_table *tbl); |
6195 |
+ |
6196 |
+ static void tce_iommu_release(void *iommu_data) |
6197 |
+ { |
6198 |
+@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data) |
6199 |
+ continue; |
6200 |
+ |
6201 |
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); |
6202 |
+- tce_iommu_free_table(tbl); |
6203 |
++ tce_iommu_free_table(container, tbl); |
6204 |
++ } |
6205 |
++ |
6206 |
++ while (!list_empty(&container->prereg_list)) { |
6207 |
++ struct tce_iommu_prereg *tcemem; |
6208 |
++ |
6209 |
++ tcemem = list_first_entry(&container->prereg_list, |
6210 |
++ struct tce_iommu_prereg, next); |
6211 |
++ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); |
6212 |
+ } |
6213 |
+ |
6214 |
+ tce_iommu_disable(container); |
6215 |
++ if (container->mm) |
6216 |
++ mmdrop(container->mm); |
6217 |
+ mutex_destroy(&container->lock); |
6218 |
+ |
6219 |
+ kfree(container); |
6220 |
+@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container, |
6221 |
+ put_page(page); |
6222 |
+ } |
6223 |
+ |
6224 |
+-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, |
6225 |
++static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, |
6226 |
++ unsigned long tce, unsigned long size, |
6227 |
+ unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) |
6228 |
+ { |
6229 |
+ long ret = 0; |
6230 |
+ struct mm_iommu_table_group_mem_t *mem; |
6231 |
+ |
6232 |
+- mem = mm_iommu_lookup(tce, size); |
6233 |
++ mem = mm_iommu_lookup(container->mm, tce, size); |
6234 |
+ if (!mem) |
6235 |
+ return -EINVAL; |
6236 |
+ |
6237 |
+@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, |
6238 |
+ return 0; |
6239 |
+ } |
6240 |
+ |
6241 |
+-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl, |
6242 |
+- unsigned long entry) |
6243 |
++static void tce_iommu_unuse_page_v2(struct tce_container *container, |
6244 |
++ struct iommu_table *tbl, unsigned long entry) |
6245 |
+ { |
6246 |
+ struct mm_iommu_table_group_mem_t *mem = NULL; |
6247 |
+ int ret; |
6248 |
+ unsigned long hpa = 0; |
6249 |
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); |
6250 |
+ |
6251 |
+- if (!pua || !current || !current->mm) |
6252 |
++ if (!pua) |
6253 |
+ return; |
6254 |
+ |
6255 |
+- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl), |
6256 |
++ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), |
6257 |
+ &hpa, &mem); |
6258 |
+ if (ret) |
6259 |
+ pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", |
6260 |
+@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container, |
6261 |
+ continue; |
6262 |
+ |
6263 |
+ if (container->v2) { |
6264 |
+- tce_iommu_unuse_page_v2(tbl, entry); |
6265 |
++ tce_iommu_unuse_page_v2(container, tbl, entry); |
6266 |
+ continue; |
6267 |
+ } |
6268 |
+ |
6269 |
+@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container, |
6270 |
+ unsigned long hpa; |
6271 |
+ enum dma_data_direction dirtmp; |
6272 |
+ |
6273 |
++ if (!tbl->it_userspace) { |
6274 |
++ ret = tce_iommu_userspace_view_alloc(tbl, container->mm); |
6275 |
++ if (ret) |
6276 |
++ return ret; |
6277 |
++ } |
6278 |
++ |
6279 |
+ for (i = 0; i < pages; ++i) { |
6280 |
+ struct mm_iommu_table_group_mem_t *mem = NULL; |
6281 |
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, |
6282 |
+ entry + i); |
6283 |
+ |
6284 |
+- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl), |
6285 |
+- &hpa, &mem); |
6286 |
++ ret = tce_iommu_prereg_ua_to_hpa(container, |
6287 |
++ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); |
6288 |
+ if (ret) |
6289 |
+ break; |
6290 |
+ |
6291 |
+@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container, |
6292 |
+ ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); |
6293 |
+ if (ret) { |
6294 |
+ /* dirtmp cannot be DMA_NONE here */ |
6295 |
+- tce_iommu_unuse_page_v2(tbl, entry + i); |
6296 |
++ tce_iommu_unuse_page_v2(container, tbl, entry + i); |
6297 |
+ pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", |
6298 |
+ __func__, entry << tbl->it_page_shift, |
6299 |
+ tce, ret); |
6300 |
+@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container, |
6301 |
+ } |
6302 |
+ |
6303 |
+ if (dirtmp != DMA_NONE) |
6304 |
+- tce_iommu_unuse_page_v2(tbl, entry + i); |
6305 |
++ tce_iommu_unuse_page_v2(container, tbl, entry + i); |
6306 |
+ |
6307 |
+ *pua = tce; |
6308 |
+ |
6309 |
+@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container, |
6310 |
+ if (!table_size) |
6311 |
+ return -EINVAL; |
6312 |
+ |
6313 |
+- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT); |
6314 |
++ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); |
6315 |
+ if (ret) |
6316 |
+ return ret; |
6317 |
+ |
6318 |
+@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container, |
6319 |
+ WARN_ON(!ret && !(*ptbl)->it_ops->free); |
6320 |
+ WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); |
6321 |
+ |
6322 |
+- if (!ret && container->v2) { |
6323 |
+- ret = tce_iommu_userspace_view_alloc(*ptbl); |
6324 |
+- if (ret) |
6325 |
+- (*ptbl)->it_ops->free(*ptbl); |
6326 |
+- } |
6327 |
+- |
6328 |
+- if (ret) |
6329 |
+- decrement_locked_vm(table_size >> PAGE_SHIFT); |
6330 |
+- |
6331 |
+ return ret; |
6332 |
+ } |
6333 |
+ |
6334 |
+-static void tce_iommu_free_table(struct iommu_table *tbl) |
6335 |
++static void tce_iommu_free_table(struct tce_container *container, |
6336 |
++ struct iommu_table *tbl) |
6337 |
+ { |
6338 |
+ unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; |
6339 |
+ |
6340 |
+- tce_iommu_userspace_view_free(tbl); |
6341 |
++ tce_iommu_userspace_view_free(tbl, container->mm); |
6342 |
+ tbl->it_ops->free(tbl); |
6343 |
+- decrement_locked_vm(pages); |
6344 |
++ decrement_locked_vm(container->mm, pages); |
6345 |
+ } |
6346 |
+ |
6347 |
+ static long tce_iommu_create_window(struct tce_container *container, |
6348 |
+@@ -663,7 +741,7 @@ static long tce_iommu_create_window(struct tce_container *container, |
6349 |
+ table_group = iommu_group_get_iommudata(tcegrp->grp); |
6350 |
+ table_group->ops->unset_window(table_group, num); |
6351 |
+ } |
6352 |
+- tce_iommu_free_table(tbl); |
6353 |
++ tce_iommu_free_table(container, tbl); |
6354 |
+ |
6355 |
+ return ret; |
6356 |
+ } |
6357 |
+@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container, |
6358 |
+ |
6359 |
+ /* Free table */ |
6360 |
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); |
6361 |
+- tce_iommu_free_table(tbl); |
6362 |
++ tce_iommu_free_table(container, tbl); |
6363 |
+ container->tables[num] = NULL; |
6364 |
+ |
6365 |
+ return 0; |
6366 |
+ } |
6367 |
+ |
6368 |
++static long tce_iommu_create_default_window(struct tce_container *container) |
6369 |
++{ |
6370 |
++ long ret; |
6371 |
++ __u64 start_addr = 0; |
6372 |
++ struct tce_iommu_group *tcegrp; |
6373 |
++ struct iommu_table_group *table_group; |
6374 |
++ |
6375 |
++ if (!container->def_window_pending) |
6376 |
++ return 0; |
6377 |
++ |
6378 |
++ if (!tce_groups_attached(container)) |
6379 |
++ return -ENODEV; |
6380 |
++ |
6381 |
++ tcegrp = list_first_entry(&container->group_list, |
6382 |
++ struct tce_iommu_group, next); |
6383 |
++ table_group = iommu_group_get_iommudata(tcegrp->grp); |
6384 |
++ if (!table_group) |
6385 |
++ return -ENODEV; |
6386 |
++ |
6387 |
++ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, |
6388 |
++ table_group->tce32_size, 1, &start_addr); |
6389 |
++ WARN_ON_ONCE(!ret && start_addr); |
6390 |
++ |
6391 |
++ if (!ret) |
6392 |
++ container->def_window_pending = false; |
6393 |
++ |
6394 |
++ return ret; |
6395 |
++} |
6396 |
++ |
6397 |
+ static long tce_iommu_ioctl(void *iommu_data, |
6398 |
+ unsigned int cmd, unsigned long arg) |
6399 |
+ { |
6400 |
+@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data, |
6401 |
+ } |
6402 |
+ |
6403 |
+ return (ret < 0) ? 0 : ret; |
6404 |
++ } |
6405 |
++ |
6406 |
++ /* |
6407 |
++ * Sanity check to prevent one userspace from manipulating |
6408 |
++ * another userspace mm. |
6409 |
++ */ |
6410 |
++ BUG_ON(!container); |
6411 |
++ if (container->mm && container->mm != current->mm) |
6412 |
++ return -EPERM; |
6413 |
+ |
6414 |
++ switch (cmd) { |
6415 |
+ case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { |
6416 |
+ struct vfio_iommu_spapr_tce_info info; |
6417 |
+ struct tce_iommu_group *tcegrp; |
6418 |
+@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
6419 |
+ VFIO_DMA_MAP_FLAG_WRITE)) |
6420 |
+ return -EINVAL; |
6421 |
+ |
6422 |
++ ret = tce_iommu_create_default_window(container); |
6423 |
++ if (ret) |
6424 |
++ return ret; |
6425 |
++ |
6426 |
+ num = tce_iommu_find_table(container, param.iova, &tbl); |
6427 |
+ if (num < 0) |
6428 |
+ return -ENXIO; |
6429 |
+@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
6430 |
+ if (param.flags) |
6431 |
+ return -EINVAL; |
6432 |
+ |
6433 |
++ ret = tce_iommu_create_default_window(container); |
6434 |
++ if (ret) |
6435 |
++ return ret; |
6436 |
++ |
6437 |
+ num = tce_iommu_find_table(container, param.iova, &tbl); |
6438 |
+ if (num < 0) |
6439 |
+ return -ENXIO; |
6440 |
+@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
6441 |
+ minsz = offsetofend(struct vfio_iommu_spapr_register_memory, |
6442 |
+ size); |
6443 |
+ |
6444 |
++ ret = tce_iommu_mm_set(container); |
6445 |
++ if (ret) |
6446 |
++ return ret; |
6447 |
++ |
6448 |
+ if (copy_from_user(¶m, (void __user *)arg, minsz)) |
6449 |
+ return -EFAULT; |
6450 |
+ |
6451 |
+@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data, |
6452 |
+ if (!container->v2) |
6453 |
+ break; |
6454 |
+ |
6455 |
++ if (!container->mm) |
6456 |
++ return -EPERM; |
6457 |
++ |
6458 |
+ minsz = offsetofend(struct vfio_iommu_spapr_register_memory, |
6459 |
+ size); |
6460 |
+ |
6461 |
+@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
6462 |
+ if (!container->v2) |
6463 |
+ break; |
6464 |
+ |
6465 |
++ ret = tce_iommu_mm_set(container); |
6466 |
++ if (ret) |
6467 |
++ return ret; |
6468 |
++ |
6469 |
+ if (!tce_groups_attached(container)) |
6470 |
+ return -ENXIO; |
6471 |
+ |
6472 |
+@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
6473 |
+ |
6474 |
+ mutex_lock(&container->lock); |
6475 |
+ |
6476 |
++ ret = tce_iommu_create_default_window(container); |
6477 |
++ if (ret) |
6478 |
++ return ret; |
6479 |
++ |
6480 |
+ ret = tce_iommu_create_window(container, create.page_shift, |
6481 |
+ create.window_size, create.levels, |
6482 |
+ &create.start_addr); |
6483 |
+@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
6484 |
+ if (!container->v2) |
6485 |
+ break; |
6486 |
+ |
6487 |
++ ret = tce_iommu_mm_set(container); |
6488 |
++ if (ret) |
6489 |
++ return ret; |
6490 |
++ |
6491 |
+ if (!tce_groups_attached(container)) |
6492 |
+ return -ENXIO; |
6493 |
+ |
6494 |
+@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data, |
6495 |
+ if (remove.flags) |
6496 |
+ return -EINVAL; |
6497 |
+ |
6498 |
++ if (container->def_window_pending && !remove.start_addr) { |
6499 |
++ container->def_window_pending = false; |
6500 |
++ return 0; |
6501 |
++ } |
6502 |
++ |
6503 |
+ mutex_lock(&container->lock); |
6504 |
+ |
6505 |
+ ret = tce_iommu_remove_window(container, remove.start_addr); |
6506 |
+@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container, |
6507 |
+ continue; |
6508 |
+ |
6509 |
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); |
6510 |
+- tce_iommu_userspace_view_free(tbl); |
6511 |
++ tce_iommu_userspace_view_free(tbl, container->mm); |
6512 |
+ if (tbl->it_map) |
6513 |
+ iommu_release_ownership(tbl); |
6514 |
+ |
6515 |
+@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container, |
6516 |
+ if (!tbl || !tbl->it_map) |
6517 |
+ continue; |
6518 |
+ |
6519 |
+- rc = tce_iommu_userspace_view_alloc(tbl); |
6520 |
+- if (!rc) |
6521 |
+- rc = iommu_take_ownership(tbl); |
6522 |
+- |
6523 |
++ rc = iommu_take_ownership(tbl); |
6524 |
+ if (rc) { |
6525 |
+ for (j = 0; j < i; ++j) |
6526 |
+ iommu_release_ownership( |
6527 |
+@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, |
6528 |
+ static long tce_iommu_take_ownership_ddw(struct tce_container *container, |
6529 |
+ struct iommu_table_group *table_group) |
6530 |
+ { |
6531 |
+- long i, ret = 0; |
6532 |
+- struct iommu_table *tbl = NULL; |
6533 |
+- |
6534 |
+ if (!table_group->ops->create_table || !table_group->ops->set_window || |
6535 |
+ !table_group->ops->release_ownership) { |
6536 |
+ WARN_ON_ONCE(1); |
6537 |
+@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, |
6538 |
+ |
6539 |
+ table_group->ops->take_ownership(table_group); |
6540 |
+ |
6541 |
+- /* |
6542 |
+- * If it the first group attached, check if there is |
6543 |
+- * a default DMA window and create one if none as |
6544 |
+- * the userspace expects it to exist. |
6545 |
+- */ |
6546 |
+- if (!tce_groups_attached(container) && !container->tables[0]) { |
6547 |
+- ret = tce_iommu_create_table(container, |
6548 |
+- table_group, |
6549 |
+- 0, /* window number */ |
6550 |
+- IOMMU_PAGE_SHIFT_4K, |
6551 |
+- table_group->tce32_size, |
6552 |
+- 1, /* default levels */ |
6553 |
+- &tbl); |
6554 |
+- if (ret) |
6555 |
+- goto release_exit; |
6556 |
+- else |
6557 |
+- container->tables[0] = tbl; |
6558 |
+- } |
6559 |
+- |
6560 |
+- /* Set all windows to the new group */ |
6561 |
+- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { |
6562 |
+- tbl = container->tables[i]; |
6563 |
+- |
6564 |
+- if (!tbl) |
6565 |
+- continue; |
6566 |
+- |
6567 |
+- /* Set the default window to a new group */ |
6568 |
+- ret = table_group->ops->set_window(table_group, i, tbl); |
6569 |
+- if (ret) |
6570 |
+- goto release_exit; |
6571 |
+- } |
6572 |
+- |
6573 |
+ return 0; |
6574 |
+- |
6575 |
+-release_exit: |
6576 |
+- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) |
6577 |
+- table_group->ops->unset_window(table_group, i); |
6578 |
+- |
6579 |
+- table_group->ops->release_ownership(table_group); |
6580 |
+- |
6581 |
+- return ret; |
6582 |
+ } |
6583 |
+ |
6584 |
+ static int tce_iommu_attach_group(void *iommu_data, |
6585 |
+@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data, |
6586 |
+ } |
6587 |
+ |
6588 |
+ if (!table_group->ops || !table_group->ops->take_ownership || |
6589 |
+- !table_group->ops->release_ownership) |
6590 |
++ !table_group->ops->release_ownership) { |
6591 |
+ ret = tce_iommu_take_ownership(container, table_group); |
6592 |
+- else |
6593 |
++ } else { |
6594 |
+ ret = tce_iommu_take_ownership_ddw(container, table_group); |
6595 |
++ if (!tce_groups_attached(container) && !container->tables[0]) |
6596 |
++ container->def_window_pending = true; |
6597 |
++ } |
6598 |
+ |
6599 |
+ if (!ret) { |
6600 |
+ tcegrp->grp = iommu_group; |
6601 |
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h |
6602 |
+index 6aaf425..a13b031 100644 |
6603 |
+--- a/include/linux/bpf_verifier.h |
6604 |
++++ b/include/linux/bpf_verifier.h |
6605 |
+@@ -18,19 +18,12 @@ |
6606 |
+ |
6607 |
+ struct bpf_reg_state { |
6608 |
+ enum bpf_reg_type type; |
6609 |
+- /* |
6610 |
+- * Used to determine if any memory access using this register will |
6611 |
+- * result in a bad access. |
6612 |
+- */ |
6613 |
+- s64 min_value; |
6614 |
+- u64 max_value; |
6615 |
+ union { |
6616 |
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ |
6617 |
+ s64 imm; |
6618 |
+ |
6619 |
+ /* valid when type == PTR_TO_PACKET* */ |
6620 |
+ struct { |
6621 |
+- u32 id; |
6622 |
+ u16 off; |
6623 |
+ u16 range; |
6624 |
+ }; |
6625 |
+@@ -40,6 +33,13 @@ struct bpf_reg_state { |
6626 |
+ */ |
6627 |
+ struct bpf_map *map_ptr; |
6628 |
+ }; |
6629 |
++ u32 id; |
6630 |
++ /* Used to determine if any memory access using this register will |
6631 |
++ * result in a bad access. These two fields must be last. |
6632 |
++ * See states_equal() |
6633 |
++ */ |
6634 |
++ s64 min_value; |
6635 |
++ u64 max_value; |
6636 |
+ }; |
6637 |
+ |
6638 |
+ enum bpf_stack_slot_type { |
6639 |
+diff --git a/include/linux/dccp.h b/include/linux/dccp.h |
6640 |
+index 61d042b..6844929 100644 |
6641 |
+--- a/include/linux/dccp.h |
6642 |
++++ b/include/linux/dccp.h |
6643 |
+@@ -163,6 +163,7 @@ struct dccp_request_sock { |
6644 |
+ __u64 dreq_isr; |
6645 |
+ __u64 dreq_gsr; |
6646 |
+ __be32 dreq_service; |
6647 |
++ spinlock_t dreq_lock; |
6648 |
+ struct list_head dreq_featneg; |
6649 |
+ __u32 dreq_timestamp_echo; |
6650 |
+ __u32 dreq_timestamp_time; |
6651 |
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h |
6652 |
+index 192eef2f..d596a07 100644 |
6653 |
+--- a/include/linux/hyperv.h |
6654 |
++++ b/include/linux/hyperv.h |
6655 |
+@@ -1548,31 +1548,23 @@ static inline struct vmpacket_descriptor * |
6656 |
+ get_next_pkt_raw(struct vmbus_channel *channel) |
6657 |
+ { |
6658 |
+ struct hv_ring_buffer_info *ring_info = &channel->inbound; |
6659 |
+- u32 read_loc = ring_info->priv_read_index; |
6660 |
++ u32 priv_read_loc = ring_info->priv_read_index; |
6661 |
+ void *ring_buffer = hv_get_ring_buffer(ring_info); |
6662 |
+- struct vmpacket_descriptor *cur_desc; |
6663 |
+- u32 packetlen; |
6664 |
+ u32 dsize = ring_info->ring_datasize; |
6665 |
+- u32 delta = read_loc - ring_info->ring_buffer->read_index; |
6666 |
++ /* |
6667 |
++ * delta is the difference between what is available to read and |
6668 |
++ * what was already consumed in place. We commit read index after |
6669 |
++ * the whole batch is processed. |
6670 |
++ */ |
6671 |
++ u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? |
6672 |
++ priv_read_loc - ring_info->ring_buffer->read_index : |
6673 |
++ (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; |
6674 |
+ u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); |
6675 |
+ |
6676 |
+ if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) |
6677 |
+ return NULL; |
6678 |
+ |
6679 |
+- if ((read_loc + sizeof(*cur_desc)) > dsize) |
6680 |
+- return NULL; |
6681 |
+- |
6682 |
+- cur_desc = ring_buffer + read_loc; |
6683 |
+- packetlen = cur_desc->len8 << 3; |
6684 |
+- |
6685 |
+- /* |
6686 |
+- * If the packet under consideration is wrapping around, |
6687 |
+- * return failure. |
6688 |
+- */ |
6689 |
+- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) |
6690 |
+- return NULL; |
6691 |
+- |
6692 |
+- return cur_desc; |
6693 |
++ return ring_buffer + priv_read_loc; |
6694 |
+ } |
6695 |
+ |
6696 |
+ /* |
6697 |
+@@ -1584,16 +1576,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, |
6698 |
+ struct vmpacket_descriptor *desc) |
6699 |
+ { |
6700 |
+ struct hv_ring_buffer_info *ring_info = &channel->inbound; |
6701 |
+- u32 read_loc = ring_info->priv_read_index; |
6702 |
+ u32 packetlen = desc->len8 << 3; |
6703 |
+ u32 dsize = ring_info->ring_datasize; |
6704 |
+ |
6705 |
+- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) |
6706 |
+- BUG(); |
6707 |
+ /* |
6708 |
+ * Include the packet trailer. |
6709 |
+ */ |
6710 |
+ ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; |
6711 |
++ ring_info->priv_read_index %= dsize; |
6712 |
+ } |
6713 |
+ |
6714 |
+ /* |
6715 |
+diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h |
6716 |
+index d08c63f..0c5d5dd 100644 |
6717 |
+--- a/include/uapi/linux/packet_diag.h |
6718 |
++++ b/include/uapi/linux/packet_diag.h |
6719 |
+@@ -64,7 +64,7 @@ struct packet_diag_mclist { |
6720 |
+ __u32 pdmc_count; |
6721 |
+ __u16 pdmc_type; |
6722 |
+ __u16 pdmc_alen; |
6723 |
+- __u8 pdmc_addr[MAX_ADDR_LEN]; |
6724 |
++ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ |
6725 |
+ }; |
6726 |
+ |
6727 |
+ struct packet_diag_ring { |
6728 |
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
6729 |
+index 8199821..85d1c94 100644 |
6730 |
+--- a/kernel/bpf/verifier.c |
6731 |
++++ b/kernel/bpf/verifier.c |
6732 |
+@@ -212,9 +212,10 @@ static void print_verifier_state(struct bpf_verifier_state *state) |
6733 |
+ else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || |
6734 |
+ t == PTR_TO_MAP_VALUE_OR_NULL || |
6735 |
+ t == PTR_TO_MAP_VALUE_ADJ) |
6736 |
+- verbose("(ks=%d,vs=%d)", |
6737 |
++ verbose("(ks=%d,vs=%d,id=%u)", |
6738 |
+ reg->map_ptr->key_size, |
6739 |
+- reg->map_ptr->value_size); |
6740 |
++ reg->map_ptr->value_size, |
6741 |
++ reg->id); |
6742 |
+ if (reg->min_value != BPF_REGISTER_MIN_RANGE) |
6743 |
+ verbose(",min_value=%lld", |
6744 |
+ (long long)reg->min_value); |
6745 |
+@@ -443,13 +444,19 @@ static void init_reg_state(struct bpf_reg_state *regs) |
6746 |
+ regs[BPF_REG_1].type = PTR_TO_CTX; |
6747 |
+ } |
6748 |
+ |
6749 |
+-static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
6750 |
++static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
6751 |
+ { |
6752 |
+- BUG_ON(regno >= MAX_BPF_REG); |
6753 |
+ regs[regno].type = UNKNOWN_VALUE; |
6754 |
++ regs[regno].id = 0; |
6755 |
+ regs[regno].imm = 0; |
6756 |
+ } |
6757 |
+ |
6758 |
++static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
6759 |
++{ |
6760 |
++ BUG_ON(regno >= MAX_BPF_REG); |
6761 |
++ __mark_reg_unknown_value(regs, regno); |
6762 |
++} |
6763 |
++ |
6764 |
+ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) |
6765 |
+ { |
6766 |
+ regs[regno].min_value = BPF_REGISTER_MIN_RANGE; |
6767 |
+@@ -1252,6 +1259,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id) |
6768 |
+ return -EINVAL; |
6769 |
+ } |
6770 |
+ regs[BPF_REG_0].map_ptr = meta.map_ptr; |
6771 |
++ regs[BPF_REG_0].id = ++env->id_gen; |
6772 |
+ } else { |
6773 |
+ verbose("unknown return type %d of func %d\n", |
6774 |
+ fn->ret_type, func_id); |
6775 |
+@@ -1668,8 +1676,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) |
6776 |
+ insn->src_reg); |
6777 |
+ return -EACCES; |
6778 |
+ } |
6779 |
+- regs[insn->dst_reg].type = UNKNOWN_VALUE; |
6780 |
+- regs[insn->dst_reg].map_ptr = NULL; |
6781 |
++ mark_reg_unknown_value(regs, insn->dst_reg); |
6782 |
+ } |
6783 |
+ } else { |
6784 |
+ /* case: R = imm |
6785 |
+@@ -1931,6 +1938,43 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, |
6786 |
+ check_reg_overflow(true_reg); |
6787 |
+ } |
6788 |
+ |
6789 |
++static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, |
6790 |
++ enum bpf_reg_type type) |
6791 |
++{ |
6792 |
++ struct bpf_reg_state *reg = ®s[regno]; |
6793 |
++ |
6794 |
++ if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { |
6795 |
++ reg->type = type; |
6796 |
++ /* We don't need id from this point onwards anymore, thus we |
6797 |
++ * should better reset it, so that state pruning has chances |
6798 |
++ * to take effect. |
6799 |
++ */ |
6800 |
++ reg->id = 0; |
6801 |
++ if (type == UNKNOWN_VALUE) |
6802 |
++ __mark_reg_unknown_value(regs, regno); |
6803 |
++ } |
6804 |
++} |
6805 |
++ |
6806 |
++/* The logic is similar to find_good_pkt_pointers(), both could eventually |
6807 |
++ * be folded together at some point. |
6808 |
++ */ |
6809 |
++static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, |
6810 |
++ enum bpf_reg_type type) |
6811 |
++{ |
6812 |
++ struct bpf_reg_state *regs = state->regs; |
6813 |
++ u32 id = regs[regno].id; |
6814 |
++ int i; |
6815 |
++ |
6816 |
++ for (i = 0; i < MAX_BPF_REG; i++) |
6817 |
++ mark_map_reg(regs, i, id, type); |
6818 |
++ |
6819 |
++ for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
6820 |
++ if (state->stack_slot_type[i] != STACK_SPILL) |
6821 |
++ continue; |
6822 |
++ mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type); |
6823 |
++ } |
6824 |
++} |
6825 |
++ |
6826 |
+ static int check_cond_jmp_op(struct bpf_verifier_env *env, |
6827 |
+ struct bpf_insn *insn, int *insn_idx) |
6828 |
+ { |
6829 |
+@@ -2018,18 +2062,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, |
6830 |
+ if (BPF_SRC(insn->code) == BPF_K && |
6831 |
+ insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && |
6832 |
+ dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { |
6833 |
+- if (opcode == BPF_JEQ) { |
6834 |
+- /* next fallthrough insn can access memory via |
6835 |
+- * this register |
6836 |
+- */ |
6837 |
+- regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; |
6838 |
+- /* branch targer cannot access it, since reg == 0 */ |
6839 |
+- mark_reg_unknown_value(other_branch->regs, |
6840 |
+- insn->dst_reg); |
6841 |
+- } else { |
6842 |
+- other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; |
6843 |
+- mark_reg_unknown_value(regs, insn->dst_reg); |
6844 |
+- } |
6845 |
++ /* Mark all identical map registers in each branch as either |
6846 |
++ * safe or unknown depending R == 0 or R != 0 conditional. |
6847 |
++ */ |
6848 |
++ mark_map_regs(this_branch, insn->dst_reg, |
6849 |
++ opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); |
6850 |
++ mark_map_regs(other_branch, insn->dst_reg, |
6851 |
++ opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE); |
6852 |
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && |
6853 |
+ dst_reg->type == PTR_TO_PACKET && |
6854 |
+ regs[insn->src_reg].type == PTR_TO_PACKET_END) { |
6855 |
+@@ -2469,7 +2508,7 @@ static bool states_equal(struct bpf_verifier_env *env, |
6856 |
+ * we didn't do a variable access into a map then we are a-ok. |
6857 |
+ */ |
6858 |
+ if (!varlen_map_access && |
6859 |
+- rold->type == rcur->type && rold->imm == rcur->imm) |
6860 |
++ memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0) |
6861 |
+ continue; |
6862 |
+ |
6863 |
+ /* If we didn't map access then again we don't care about the |
6864 |
+diff --git a/kernel/futex.c b/kernel/futex.c |
6865 |
+index 38b68c2..4c6b6e6 100644 |
6866 |
+--- a/kernel/futex.c |
6867 |
++++ b/kernel/futex.c |
6868 |
+@@ -2813,7 +2813,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
6869 |
+ { |
6870 |
+ struct hrtimer_sleeper timeout, *to = NULL; |
6871 |
+ struct rt_mutex_waiter rt_waiter; |
6872 |
+- struct rt_mutex *pi_mutex = NULL; |
6873 |
+ struct futex_hash_bucket *hb; |
6874 |
+ union futex_key key2 = FUTEX_KEY_INIT; |
6875 |
+ struct futex_q q = futex_q_init; |
6876 |
+@@ -2897,6 +2896,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
6877 |
+ if (q.pi_state && (q.pi_state->owner != current)) { |
6878 |
+ spin_lock(q.lock_ptr); |
6879 |
+ ret = fixup_pi_state_owner(uaddr2, &q, current); |
6880 |
++ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) |
6881 |
++ rt_mutex_unlock(&q.pi_state->pi_mutex); |
6882 |
+ /* |
6883 |
+ * Drop the reference to the pi state which |
6884 |
+ * the requeue_pi() code acquired for us. |
6885 |
+@@ -2905,6 +2906,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
6886 |
+ spin_unlock(q.lock_ptr); |
6887 |
+ } |
6888 |
+ } else { |
6889 |
++ struct rt_mutex *pi_mutex; |
6890 |
++ |
6891 |
+ /* |
6892 |
+ * We have been woken up by futex_unlock_pi(), a timeout, or a |
6893 |
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor |
6894 |
+@@ -2928,18 +2931,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
6895 |
+ if (res) |
6896 |
+ ret = (res < 0) ? res : 0; |
6897 |
+ |
6898 |
++ /* |
6899 |
++ * If fixup_pi_state_owner() faulted and was unable to handle |
6900 |
++ * the fault, unlock the rt_mutex and return the fault to |
6901 |
++ * userspace. |
6902 |
++ */ |
6903 |
++ if (ret && rt_mutex_owner(pi_mutex) == current) |
6904 |
++ rt_mutex_unlock(pi_mutex); |
6905 |
++ |
6906 |
+ /* Unqueue and drop the lock. */ |
6907 |
+ unqueue_me_pi(&q); |
6908 |
+ } |
6909 |
+ |
6910 |
+- /* |
6911 |
+- * If fixup_pi_state_owner() faulted and was unable to handle the |
6912 |
+- * fault, unlock the rt_mutex and return the fault to userspace. |
6913 |
+- */ |
6914 |
+- if (ret == -EFAULT) { |
6915 |
+- if (pi_mutex && rt_mutex_owner(pi_mutex) == current) |
6916 |
+- rt_mutex_unlock(pi_mutex); |
6917 |
+- } else if (ret == -EINTR) { |
6918 |
++ if (ret == -EINTR) { |
6919 |
+ /* |
6920 |
+ * We've already been requeued, but cannot restart by calling |
6921 |
+ * futex_lock_pi() directly. We could restart this syscall, but |
6922 |
+diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c |
6923 |
+index 1591f6b..2bef4ab 100644 |
6924 |
+--- a/kernel/locking/rwsem-spinlock.c |
6925 |
++++ b/kernel/locking/rwsem-spinlock.c |
6926 |
+@@ -216,10 +216,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) |
6927 |
+ */ |
6928 |
+ if (sem->count == 0) |
6929 |
+ break; |
6930 |
+- if (signal_pending_state(state, current)) { |
6931 |
+- ret = -EINTR; |
6932 |
+- goto out; |
6933 |
+- } |
6934 |
++ if (signal_pending_state(state, current)) |
6935 |
++ goto out_nolock; |
6936 |
+ set_task_state(tsk, state); |
6937 |
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
6938 |
+ schedule(); |
6939 |
+@@ -227,12 +225,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) |
6940 |
+ } |
6941 |
+ /* got the lock */ |
6942 |
+ sem->count = -1; |
6943 |
+-out: |
6944 |
+ list_del(&waiter.list); |
6945 |
+ |
6946 |
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
6947 |
+ |
6948 |
+ return ret; |
6949 |
++ |
6950 |
++out_nolock: |
6951 |
++ list_del(&waiter.list); |
6952 |
++ if (!list_empty(&sem->wait_list)) |
6953 |
++ __rwsem_do_wake(sem, 1); |
6954 |
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
6955 |
++ |
6956 |
++ return -EINTR; |
6957 |
+ } |
6958 |
+ |
6959 |
+ void __sched __down_write(struct rw_semaphore *sem) |
6960 |
+diff --git a/mm/slab.c b/mm/slab.c |
6961 |
+index bd878f0..1f82d16 100644 |
6962 |
+--- a/mm/slab.c |
6963 |
++++ b/mm/slab.c |
6964 |
+@@ -2332,7 +2332,7 @@ static int drain_freelist(struct kmem_cache *cache, |
6965 |
+ return nr_freed; |
6966 |
+ } |
6967 |
+ |
6968 |
+-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) |
6969 |
++int __kmem_cache_shrink(struct kmem_cache *cachep) |
6970 |
+ { |
6971 |
+ int ret = 0; |
6972 |
+ int node; |
6973 |
+@@ -2352,7 +2352,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) |
6974 |
+ |
6975 |
+ int __kmem_cache_shutdown(struct kmem_cache *cachep) |
6976 |
+ { |
6977 |
+- return __kmem_cache_shrink(cachep, false); |
6978 |
++ return __kmem_cache_shrink(cachep); |
6979 |
+ } |
6980 |
+ |
6981 |
+ void __kmem_cache_release(struct kmem_cache *cachep) |
6982 |
+diff --git a/mm/slab.h b/mm/slab.h |
6983 |
+index bc05fdc..ceb7d70 100644 |
6984 |
+--- a/mm/slab.h |
6985 |
++++ b/mm/slab.h |
6986 |
+@@ -146,7 +146,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, |
6987 |
+ |
6988 |
+ int __kmem_cache_shutdown(struct kmem_cache *); |
6989 |
+ void __kmem_cache_release(struct kmem_cache *); |
6990 |
+-int __kmem_cache_shrink(struct kmem_cache *, bool); |
6991 |
++int __kmem_cache_shrink(struct kmem_cache *); |
6992 |
+ void slab_kmem_cache_release(struct kmem_cache *); |
6993 |
+ |
6994 |
+ struct seq_file; |
6995 |
+diff --git a/mm/slab_common.c b/mm/slab_common.c |
6996 |
+index 329b038..5d2f24f 100644 |
6997 |
+--- a/mm/slab_common.c |
6998 |
++++ b/mm/slab_common.c |
6999 |
+@@ -573,6 +573,29 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) |
7000 |
+ get_online_cpus(); |
7001 |
+ get_online_mems(); |
7002 |
+ |
7003 |
++#ifdef CONFIG_SLUB |
7004 |
++ /* |
7005 |
++ * In case of SLUB, we need to disable empty slab caching to |
7006 |
++ * avoid pinning the offline memory cgroup by freeable kmem |
7007 |
++ * pages charged to it. SLAB doesn't need this, as it |
7008 |
++ * periodically purges unused slabs. |
7009 |
++ */ |
7010 |
++ mutex_lock(&slab_mutex); |
7011 |
++ list_for_each_entry(s, &slab_caches, list) { |
7012 |
++ c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL; |
7013 |
++ if (c) { |
7014 |
++ c->cpu_partial = 0; |
7015 |
++ c->min_partial = 0; |
7016 |
++ } |
7017 |
++ } |
7018 |
++ mutex_unlock(&slab_mutex); |
7019 |
++ /* |
7020 |
++ * kmem_cache->cpu_partial is checked locklessly (see |
7021 |
++ * put_cpu_partial()). Make sure the change is visible. |
7022 |
++ */ |
7023 |
++ synchronize_sched(); |
7024 |
++#endif |
7025 |
++ |
7026 |
+ mutex_lock(&slab_mutex); |
7027 |
+ list_for_each_entry(s, &slab_caches, list) { |
7028 |
+ if (!is_root_cache(s)) |
7029 |
+@@ -584,7 +607,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) |
7030 |
+ if (!c) |
7031 |
+ continue; |
7032 |
+ |
7033 |
+- __kmem_cache_shrink(c, true); |
7034 |
++ __kmem_cache_shrink(c); |
7035 |
+ arr->entries[idx] = NULL; |
7036 |
+ } |
7037 |
+ mutex_unlock(&slab_mutex); |
7038 |
+@@ -755,7 +778,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep) |
7039 |
+ get_online_cpus(); |
7040 |
+ get_online_mems(); |
7041 |
+ kasan_cache_shrink(cachep); |
7042 |
+- ret = __kmem_cache_shrink(cachep, false); |
7043 |
++ ret = __kmem_cache_shrink(cachep); |
7044 |
+ put_online_mems(); |
7045 |
+ put_online_cpus(); |
7046 |
+ return ret; |
7047 |
+diff --git a/mm/slob.c b/mm/slob.c |
7048 |
+index 5ec1580..eac04d43 100644 |
7049 |
+--- a/mm/slob.c |
7050 |
++++ b/mm/slob.c |
7051 |
+@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c) |
7052 |
+ { |
7053 |
+ } |
7054 |
+ |
7055 |
+-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate) |
7056 |
++int __kmem_cache_shrink(struct kmem_cache *d) |
7057 |
+ { |
7058 |
+ return 0; |
7059 |
+ } |
7060 |
+diff --git a/mm/slub.c b/mm/slub.c |
7061 |
+index 7aa0e97..58c7526 100644 |
7062 |
+--- a/mm/slub.c |
7063 |
++++ b/mm/slub.c |
7064 |
+@@ -3887,7 +3887,7 @@ EXPORT_SYMBOL(kfree); |
7065 |
+ * being allocated from last increasing the chance that the last objects |
7066 |
+ * are freed in them. |
7067 |
+ */ |
7068 |
+-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) |
7069 |
++int __kmem_cache_shrink(struct kmem_cache *s) |
7070 |
+ { |
7071 |
+ int node; |
7072 |
+ int i; |
7073 |
+@@ -3899,21 +3899,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) |
7074 |
+ unsigned long flags; |
7075 |
+ int ret = 0; |
7076 |
+ |
7077 |
+- if (deactivate) { |
7078 |
+- /* |
7079 |
+- * Disable empty slabs caching. Used to avoid pinning offline |
7080 |
+- * memory cgroups by kmem pages that can be freed. |
7081 |
+- */ |
7082 |
+- s->cpu_partial = 0; |
7083 |
+- s->min_partial = 0; |
7084 |
+- |
7085 |
+- /* |
7086 |
+- * s->cpu_partial is checked locklessly (see put_cpu_partial), |
7087 |
+- * so we have to make sure the change is visible. |
7088 |
+- */ |
7089 |
+- synchronize_sched(); |
7090 |
+- } |
7091 |
+- |
7092 |
+ flush_all(s); |
7093 |
+ for_each_kmem_cache_node(s, node, n) { |
7094 |
+ INIT_LIST_HEAD(&discard); |
7095 |
+@@ -3970,7 +3955,7 @@ static int slab_mem_going_offline_callback(void *arg) |
7096 |
+ |
7097 |
+ mutex_lock(&slab_mutex); |
7098 |
+ list_for_each_entry(s, &slab_caches, list) |
7099 |
+- __kmem_cache_shrink(s, false); |
7100 |
++ __kmem_cache_shrink(s); |
7101 |
+ mutex_unlock(&slab_mutex); |
7102 |
+ |
7103 |
+ return 0; |
7104 |
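Taken together, the slab hunks above move the "deactivate" logic out of __kmem_cache_shrink() and into memcg_deactivate_kmem_caches(): the tuning knobs cpu_partial/min_partial are zeroed under slab_mutex, and synchronize_sched() then waits out any reader that sampled the old values locklessly. A minimal C11 analogue of that publish-then-wait pattern, with an explicit reader count standing in for synchronize_sched() (an approximation, not the kernel mechanism; names are illustrative):

#include <sched.h>
#include <stdatomic.h>

struct cache {
    atomic_int cpu_partial;   /* sampled locklessly on the hot path */
    atomic_int readers;       /* readers currently inside the hot path */
};

int hot_path(struct cache *c)
{
    int n;

    atomic_fetch_add(&c->readers, 1);
    n = atomic_load(&c->cpu_partial);   /* may still see the old value */
    atomic_fetch_sub(&c->readers, 1);
    return n;
}

void deactivate(struct cache *c)
{
    atomic_store(&c->cpu_partial, 0);   /* publish the new setting */
    while (atomic_load(&c->readers))    /* wait out in-flight readers, */
        sched_yield();                  /* the role synchronize_sched() plays */
}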
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c |
7105 |
+index 7cb41ae..8498e35 100644 |
7106 |
+--- a/net/bridge/br_forward.c |
7107 |
++++ b/net/bridge/br_forward.c |
7108 |
+@@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, |
7109 |
+ /* Do not flood unicast traffic to ports that turn it off */ |
7110 |
+ if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) |
7111 |
+ continue; |
7112 |
++ /* Do not flood if mc off, except for traffic we originate */ |
7113 |
+ if (pkt_type == BR_PKT_MULTICAST && |
7114 |
+- !(p->flags & BR_MCAST_FLOOD)) |
7115 |
++ !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) |
7116 |
+ continue; |
7117 |
+ |
7118 |
+ /* Do not flood to ports that enable proxy ARP */ |
7119 |
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c |
7120 |
+index 855b72f..267b46a 100644 |
7121 |
+--- a/net/bridge/br_input.c |
7122 |
++++ b/net/bridge/br_input.c |
7123 |
+@@ -29,6 +29,7 @@ EXPORT_SYMBOL(br_should_route_hook); |
7124 |
+ static int |
7125 |
+ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) |
7126 |
+ { |
7127 |
++ br_drop_fake_rtable(skb); |
7128 |
+ return netif_receive_skb(skb); |
7129 |
+ } |
7130 |
+ |
7131 |
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c |
7132 |
+index 7fbdbae..aa1df1a 100644 |
7133 |
+--- a/net/bridge/br_netfilter_hooks.c |
7134 |
++++ b/net/bridge/br_netfilter_hooks.c |
7135 |
+@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv, |
7136 |
+ } |
7137 |
+ |
7138 |
+ |
7139 |
+-/* PF_BRIDGE/LOCAL_IN ************************************************/ |
7140 |
+-/* The packet is locally destined, which requires a real |
7141 |
+- * dst_entry, so detach the fake one. On the way up, the |
7142 |
+- * packet would pass through PRE_ROUTING again (which already |
7143 |
+- * took place when the packet entered the bridge), but we |
7144 |
+- * register an IPv4 PRE_ROUTING 'sabotage' hook that will |
7145 |
+- * prevent this from happening. */ |
7146 |
+-static unsigned int br_nf_local_in(void *priv, |
7147 |
+- struct sk_buff *skb, |
7148 |
+- const struct nf_hook_state *state) |
7149 |
+-{ |
7150 |
+- br_drop_fake_rtable(skb); |
7151 |
+- return NF_ACCEPT; |
7152 |
+-} |
7153 |
+- |
7154 |
+ /* PF_BRIDGE/FORWARD *************************************************/ |
7155 |
+ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
7156 |
+ { |
7157 |
+@@ -906,12 +891,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = { |
7158 |
+ .priority = NF_BR_PRI_BRNF, |
7159 |
+ }, |
7160 |
+ { |
7161 |
+- .hook = br_nf_local_in, |
7162 |
+- .pf = NFPROTO_BRIDGE, |
7163 |
+- .hooknum = NF_BR_LOCAL_IN, |
7164 |
+- .priority = NF_BR_PRI_BRNF, |
7165 |
+- }, |
7166 |
+- { |
7167 |
+ .hook = br_nf_forward_ip, |
7168 |
+ .pf = NFPROTO_BRIDGE, |
7169 |
+ .hooknum = NF_BR_FORWARD, |
7170 |
+diff --git a/net/core/dev.c b/net/core/dev.c |
7171 |
+index 60b0a604..2e04fd1 100644 |
7172 |
+--- a/net/core/dev.c |
7173 |
++++ b/net/core/dev.c |
7174 |
+@@ -1697,27 +1697,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); |
7175 |
+ static struct static_key netstamp_needed __read_mostly; |
7176 |
+ #ifdef HAVE_JUMP_LABEL |
7177 |
+ static atomic_t netstamp_needed_deferred; |
7178 |
++static atomic_t netstamp_wanted; |
7179 |
+ static void netstamp_clear(struct work_struct *work) |
7180 |
+ { |
7181 |
+ int deferred = atomic_xchg(&netstamp_needed_deferred, 0); |
7182 |
++ int wanted; |
7183 |
+ |
7184 |
+- while (deferred--) |
7185 |
+- static_key_slow_dec(&netstamp_needed); |
7186 |
++ wanted = atomic_add_return(deferred, &netstamp_wanted); |
7187 |
++ if (wanted > 0) |
7188 |
++ static_key_enable(&netstamp_needed); |
7189 |
++ else |
7190 |
++ static_key_disable(&netstamp_needed); |
7191 |
+ } |
7192 |
+ static DECLARE_WORK(netstamp_work, netstamp_clear); |
7193 |
+ #endif |
7194 |
+ |
7195 |
+ void net_enable_timestamp(void) |
7196 |
+ { |
7197 |
++#ifdef HAVE_JUMP_LABEL |
7198 |
++ int wanted; |
7199 |
++ |
7200 |
++ while (1) { |
7201 |
++ wanted = atomic_read(&netstamp_wanted); |
7202 |
++ if (wanted <= 0) |
7203 |
++ break; |
7204 |
++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) |
7205 |
++ return; |
7206 |
++ } |
7207 |
++ atomic_inc(&netstamp_needed_deferred); |
7208 |
++ schedule_work(&netstamp_work); |
7209 |
++#else |
7210 |
+ static_key_slow_inc(&netstamp_needed); |
7211 |
++#endif |
7212 |
+ } |
7213 |
+ EXPORT_SYMBOL(net_enable_timestamp); |
7214 |
+ |
7215 |
+ void net_disable_timestamp(void) |
7216 |
+ { |
7217 |
+ #ifdef HAVE_JUMP_LABEL |
7218 |
+- /* net_disable_timestamp() can be called from non process context */ |
7219 |
+- atomic_inc(&netstamp_needed_deferred); |
7220 |
++ int wanted; |
7221 |
++ |
7222 |
++ while (1) { |
7223 |
++ wanted = atomic_read(&netstamp_wanted); |
7224 |
++ if (wanted <= 1) |
7225 |
++ break; |
7226 |
++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) |
7227 |
++ return; |
7228 |
++ } |
7229 |
++ atomic_dec(&netstamp_needed_deferred); |
7230 |
+ schedule_work(&netstamp_work); |
7231 |
+ #else |
7232 |
+ static_key_slow_dec(&netstamp_needed); |
7233 |
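The net/core/dev.c hunk replaces a bare static_key_slow_inc() with a counter that can be bumped from any context: the fast path only increments netstamp_wanted while it is already positive, and the zero-to-nonzero transitions are pushed to a workqueue because toggling the static key may sleep. The heart of it is the increment-if-positive loop; a C11 rendition of just that idiom (illustrative, not the kernel code itself):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int wanted;

/* Returns true if the count was raised on the fast path; on false the
 * caller must defer to process context (the kernel schedules a work
 * item that flips the static key there). */
bool enable_fast_path(void)
{
    int cur = atomic_load(&wanted);

    while (cur > 0) {
        /* compare_exchange reloads 'cur' on failure, so this retries
         * cleanly against concurrent increments and decrements */
        if (atomic_compare_exchange_weak(&wanted, &cur, cur + 1))
            return true;
    }
    return false;   /* 0 -> 1 transition: slow path only */
}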
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
7234 |
+index 1e3e008..f0f462c 100644 |
7235 |
+--- a/net/core/skbuff.c |
7236 |
++++ b/net/core/skbuff.c |
7237 |
+@@ -3814,13 +3814,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, |
7238 |
+ if (!skb_may_tx_timestamp(sk, false)) |
7239 |
+ return; |
7240 |
+ |
7241 |
+- /* take a reference to prevent skb_orphan() from freeing the socket */ |
7242 |
+- sock_hold(sk); |
7243 |
+- |
7244 |
+- *skb_hwtstamps(skb) = *hwtstamps; |
7245 |
+- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); |
7246 |
+- |
7247 |
+- sock_put(sk); |
7248 |
++ /* Take a reference to prevent skb_orphan() from freeing the socket, |
7249 |
++ * but only if the socket refcount is not zero. |
7250 |
++ */ |
7251 |
++ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { |
7252 |
++ *skb_hwtstamps(skb) = *hwtstamps; |
7253 |
++ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); |
7254 |
++ sock_put(sk); |
7255 |
++ } |
7256 |
+ } |
7257 |
+ EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
7258 |
+ |
7259 |
+@@ -3871,7 +3872,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
7260 |
+ { |
7261 |
+ struct sock *sk = skb->sk; |
7262 |
+ struct sock_exterr_skb *serr; |
7263 |
+- int err; |
7264 |
++ int err = 1; |
7265 |
+ |
7266 |
+ skb->wifi_acked_valid = 1; |
7267 |
+ skb->wifi_acked = acked; |
7268 |
+@@ -3881,14 +3882,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
7269 |
+ serr->ee.ee_errno = ENOMSG; |
7270 |
+ serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
7271 |
+ |
7272 |
+- /* take a reference to prevent skb_orphan() from freeing the socket */ |
7273 |
+- sock_hold(sk); |
7274 |
+- |
7275 |
+- err = sock_queue_err_skb(sk, skb); |
7276 |
++ /* Take a reference to prevent skb_orphan() from freeing the socket, |
7277 |
++ * but only if the socket refcount is not zero. |
7278 |
++ */ |
7279 |
++ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { |
7280 |
++ err = sock_queue_err_skb(sk, skb); |
7281 |
++ sock_put(sk); |
7282 |
++ } |
7283 |
+ if (err) |
7284 |
+ kfree_skb(skb); |
7285 |
+- |
7286 |
+- sock_put(sk); |
7287 |
+ } |
7288 |
+ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
7289 |
+ |
7290 |
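Both skbuff.c fixes above swap an unconditional sock_hold() for atomic_inc_not_zero(&sk->sk_refcnt): a timestamp completion can race with the last sock_put(), and taking a reference on a zero refcount would resurrect a socket that is already being freed. The primitive in userspace C11 terms (a sketch; atomic_inc_not_zero() is the kernel's real interface):

#include <stdatomic.h>
#include <stdbool.h>

bool get_ref_not_zero(atomic_int *refcnt)
{
    int cur = atomic_load(refcnt);

    while (cur != 0) {
        if (atomic_compare_exchange_weak(refcnt, &cur, cur + 1))
            return true;    /* safe: we now hold a reference */
    }
    return false;           /* object is mid-teardown; do not touch it */
}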
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c |
7291 |
+index f053198..5e3a730 100644 |
7292 |
+--- a/net/dccp/ccids/ccid2.c |
7293 |
++++ b/net/dccp/ccids/ccid2.c |
7294 |
+@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) |
7295 |
+ for (i = 0; i < hc->tx_seqbufc; i++) |
7296 |
+ kfree(hc->tx_seqbuf[i]); |
7297 |
+ hc->tx_seqbufc = 0; |
7298 |
++ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); |
7299 |
+ } |
7300 |
+ |
7301 |
+ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
7302 |
+diff --git a/net/dccp/input.c b/net/dccp/input.c |
7303 |
+index 8fedc2d..4a05d78 100644 |
7304 |
+--- a/net/dccp/input.c |
7305 |
++++ b/net/dccp/input.c |
7306 |
+@@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
7307 |
+ struct dccp_sock *dp = dccp_sk(sk); |
7308 |
+ struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); |
7309 |
+ const int old_state = sk->sk_state; |
7310 |
++ bool acceptable; |
7311 |
+ int queued = 0; |
7312 |
+ |
7313 |
+ /* |
7314 |
+@@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
7315 |
+ */ |
7316 |
+ if (sk->sk_state == DCCP_LISTEN) { |
7317 |
+ if (dh->dccph_type == DCCP_PKT_REQUEST) { |
7318 |
+- if (inet_csk(sk)->icsk_af_ops->conn_request(sk, |
7319 |
+- skb) < 0) |
7320 |
++ /* It is possible that we process SYN packets from backlog, |
7321 |
++ * so we need to make sure to disable BH right there. |
7322 |
++ */ |
7323 |
++ local_bh_disable(); |
7324 |
++ acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; |
7325 |
++ local_bh_enable(); |
7326 |
++ if (!acceptable) |
7327 |
+ return 1; |
7328 |
+ consume_skb(skb); |
7329 |
+ return 0; |
7330 |
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c |
7331 |
+index edbe59d..86b0933 100644 |
7332 |
+--- a/net/dccp/ipv4.c |
7333 |
++++ b/net/dccp/ipv4.c |
7334 |
+@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) |
7335 |
+ |
7336 |
+ switch (type) { |
7337 |
+ case ICMP_REDIRECT: |
7338 |
+- dccp_do_redirect(skb, sk); |
7339 |
++ if (!sock_owned_by_user(sk)) |
7340 |
++ dccp_do_redirect(skb, sk); |
7341 |
+ goto out; |
7342 |
+ case ICMP_SOURCE_QUENCH: |
7343 |
+ /* Just silently ignore these. */ |
7344 |
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c |
7345 |
+index 7506c03..237d62c 100644 |
7346 |
+--- a/net/dccp/ipv6.c |
7347 |
++++ b/net/dccp/ipv6.c |
7348 |
+@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
7349 |
+ np = inet6_sk(sk); |
7350 |
+ |
7351 |
+ if (type == NDISC_REDIRECT) { |
7352 |
+- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
7353 |
++ if (!sock_owned_by_user(sk)) { |
7354 |
++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
7355 |
+ |
7356 |
+- if (dst) |
7357 |
+- dst->ops->redirect(dst, sk, skb); |
7358 |
++ if (dst) |
7359 |
++ dst->ops->redirect(dst, sk, skb); |
7360 |
++ } |
7361 |
+ goto out; |
7362 |
+ } |
7363 |
+ |
7364 |
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c |
7365 |
+index 53eddf9..39e7e2b 100644 |
7366 |
+--- a/net/dccp/minisocks.c |
7367 |
++++ b/net/dccp/minisocks.c |
7368 |
+@@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, |
7369 |
+ /* It is still raw copy of parent, so invalidate |
7370 |
+ * destructor and make plain sk_free() */ |
7371 |
+ newsk->sk_destruct = NULL; |
7372 |
++ bh_unlock_sock(newsk); |
7373 |
+ sk_free(newsk); |
7374 |
+ return NULL; |
7375 |
+ } |
7376 |
+@@ -145,6 +146,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, |
7377 |
+ struct dccp_request_sock *dreq = dccp_rsk(req); |
7378 |
+ bool own_req; |
7379 |
+ |
7380 |
++ /* TCP/DCCP listeners became lockless. |
7381 |
++ * DCCP stores complex state in its request_sock, so we need |
7382 |
++ * a protection for them, now this code runs without being protected |
7383 |
++ * by the parent (listener) lock. |
7384 |
++ */ |
7385 |
++ spin_lock_bh(&dreq->dreq_lock); |
7386 |
++ |
7387 |
+ /* Check for retransmitted REQUEST */ |
7388 |
+ if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { |
7389 |
+ |
7390 |
+@@ -159,7 +167,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, |
7391 |
+ inet_rtx_syn_ack(sk, req); |
7392 |
+ } |
7393 |
+ /* Network Duplicate, discard packet */ |
7394 |
+- return NULL; |
7395 |
++ goto out; |
7396 |
+ } |
7397 |
+ |
7398 |
+ DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; |
7399 |
+@@ -185,20 +193,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, |
7400 |
+ |
7401 |
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, |
7402 |
+ req, &own_req); |
7403 |
+- if (!child) |
7404 |
+- goto listen_overflow; |
7405 |
+- |
7406 |
+- return inet_csk_complete_hashdance(sk, child, req, own_req); |
7407 |
++ if (child) { |
7408 |
++ child = inet_csk_complete_hashdance(sk, child, req, own_req); |
7409 |
++ goto out; |
7410 |
++ } |
7411 |
+ |
7412 |
+-listen_overflow: |
7413 |
+- dccp_pr_debug("listen_overflow!\n"); |
7414 |
+ DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; |
7415 |
+ drop: |
7416 |
+ if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) |
7417 |
+ req->rsk_ops->send_reset(sk, skb); |
7418 |
+ |
7419 |
+ inet_csk_reqsk_queue_drop(sk, req); |
7420 |
+- return NULL; |
7421 |
++out: |
7422 |
++ spin_unlock_bh(&dreq->dreq_lock); |
7423 |
++ return child; |
7424 |
+ } |
7425 |
+ |
7426 |
+ EXPORT_SYMBOL_GPL(dccp_check_req); |
7427 |
+@@ -249,6 +257,7 @@ int dccp_reqsk_init(struct request_sock *req, |
7428 |
+ { |
7429 |
+ struct dccp_request_sock *dreq = dccp_rsk(req); |
7430 |
+ |
7431 |
++ spin_lock_init(&dreq->dreq_lock); |
7432 |
+ inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; |
7433 |
+ inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); |
7434 |
+ inet_rsk(req)->acked = 0; |
7435 |
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
7436 |
+index 21514324..971b947 100644 |
7437 |
+--- a/net/ipv4/af_inet.c |
7438 |
++++ b/net/ipv4/af_inet.c |
7439 |
+@@ -1460,8 +1460,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) |
7440 |
+ int proto = iph->protocol; |
7441 |
+ int err = -ENOSYS; |
7442 |
+ |
7443 |
+- if (skb->encapsulation) |
7444 |
++ if (skb->encapsulation) { |
7445 |
++ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP)); |
7446 |
+ skb_set_inner_network_header(skb, nhoff); |
7447 |
++ } |
7448 |
+ |
7449 |
+ csum_replace2(&iph->check, iph->tot_len, newlen); |
7450 |
+ iph->tot_len = newlen; |
7451 |
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
7452 |
+index d851cae..17e6fbf 100644 |
7453 |
+--- a/net/ipv4/route.c |
7454 |
++++ b/net/ipv4/route.c |
7455 |
+@@ -1968,6 +1968,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
7456 |
+ { |
7457 |
+ int res; |
7458 |
+ |
7459 |
++ tos &= IPTOS_RT_MASK; |
7460 |
+ rcu_read_lock(); |
7461 |
+ |
7462 |
+ /* Multicast recognition logic is moved from route cache to here. |
7463 |
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
7464 |
+index c71d49c..ce42ded 100644 |
7465 |
+--- a/net/ipv4/tcp_input.c |
7466 |
++++ b/net/ipv4/tcp_input.c |
7467 |
+@@ -5916,9 +5916,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) |
7468 |
+ if (th->syn) { |
7469 |
+ if (th->fin) |
7470 |
+ goto discard; |
7471 |
+- if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) |
7472 |
+- return 1; |
7473 |
++ /* It is possible that we process SYN packets from backlog, |
7474 |
++ * so we need to make sure to disable BH right there. |
7475 |
++ */ |
7476 |
++ local_bh_disable(); |
7477 |
++ acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; |
7478 |
++ local_bh_enable(); |
7479 |
+ |
7480 |
++ if (!acceptable) |
7481 |
++ return 1; |
7482 |
+ consume_skb(skb); |
7483 |
+ return 0; |
7484 |
+ } |
7485 |
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
7486 |
+index 2259114..6988566 100644 |
7487 |
+--- a/net/ipv4/tcp_ipv4.c |
7488 |
++++ b/net/ipv4/tcp_ipv4.c |
7489 |
+@@ -269,10 +269,13 @@ EXPORT_SYMBOL(tcp_v4_connect); |
7490 |
+ */ |
7491 |
+ void tcp_v4_mtu_reduced(struct sock *sk) |
7492 |
+ { |
7493 |
+- struct dst_entry *dst; |
7494 |
+ struct inet_sock *inet = inet_sk(sk); |
7495 |
+- u32 mtu = tcp_sk(sk)->mtu_info; |
7496 |
++ struct dst_entry *dst; |
7497 |
++ u32 mtu; |
7498 |
+ |
7499 |
++ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) |
7500 |
++ return; |
7501 |
++ mtu = tcp_sk(sk)->mtu_info; |
7502 |
+ dst = inet_csk_update_pmtu(sk, mtu); |
7503 |
+ if (!dst) |
7504 |
+ return; |
7505 |
+@@ -418,7 +421,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) |
7506 |
+ |
7507 |
+ switch (type) { |
7508 |
+ case ICMP_REDIRECT: |
7509 |
+- do_redirect(icmp_skb, sk); |
7510 |
++ if (!sock_owned_by_user(sk)) |
7511 |
++ do_redirect(icmp_skb, sk); |
7512 |
+ goto out; |
7513 |
+ case ICMP_SOURCE_QUENCH: |
7514 |
+ /* Just silently ignore these. */ |
7515 |
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c |
7516 |
+index 3ea1cf8..b1e65b3 100644 |
7517 |
+--- a/net/ipv4/tcp_timer.c |
7518 |
++++ b/net/ipv4/tcp_timer.c |
7519 |
+@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk) |
7520 |
+ |
7521 |
+ sk_mem_reclaim_partial(sk); |
7522 |
+ |
7523 |
+- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) |
7524 |
++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || |
7525 |
++ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) |
7526 |
+ goto out; |
7527 |
+ |
7528 |
+ if (time_after(icsk->icsk_ack.timeout, jiffies)) { |
7529 |
+@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk) |
7530 |
+ struct inet_connection_sock *icsk = inet_csk(sk); |
7531 |
+ int event; |
7532 |
+ |
7533 |
+- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) |
7534 |
++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || |
7535 |
++ !icsk->icsk_pending) |
7536 |
+ goto out; |
7537 |
+ |
7538 |
+ if (time_after(icsk->icsk_timeout, jiffies)) { |
7539 |
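Several of the TCP hunks above replace a test like sk->sk_state == TCP_CLOSE with (1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN): encoding each small-integer state as a bit lets one shift-and-AND check membership in a whole set of states. The idiom in isolation (state values here are made up for the example, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };
#define STF(s) (1u << (s))

static bool state_in(int state, unsigned int mask)
{
    return (1u << state) & mask;
}

int main(void)
{
    int state = ST_LISTEN;

    if (state_in(state, STF(ST_CLOSE) | STF(ST_LISTEN)))
        puts("skip: no timer/PMTU work for this state");
    return 0;
}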
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c |
7540 |
+index ef54852..8c88a37 100644 |
7541 |
+--- a/net/ipv6/ip6_fib.c |
7542 |
++++ b/net/ipv6/ip6_fib.c |
7543 |
+@@ -908,6 +908,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, |
7544 |
+ ins = &rt->dst.rt6_next; |
7545 |
+ iter = *ins; |
7546 |
+ while (iter) { |
7547 |
++ if (iter->rt6i_metric > rt->rt6i_metric) |
7548 |
++ break; |
7549 |
+ if (rt6_qualify_for_ecmp(iter)) { |
7550 |
+ *ins = iter->dst.rt6_next; |
7551 |
+ fib6_purge_rt(iter, fn, info->nl_net); |
7552 |
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c |
7553 |
+index fc7b401..33b04ec 100644 |
7554 |
+--- a/net/ipv6/ip6_offload.c |
7555 |
++++ b/net/ipv6/ip6_offload.c |
7556 |
+@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) |
7557 |
+ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); |
7558 |
+ int err = -ENOSYS; |
7559 |
+ |
7560 |
+- if (skb->encapsulation) |
7561 |
++ if (skb->encapsulation) { |
7562 |
++ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6)); |
7563 |
+ skb_set_inner_network_header(skb, nhoff); |
7564 |
++ } |
7565 |
+ |
7566 |
+ iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); |
7567 |
+ |
7568 |
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
7569 |
+index 9a87bfb..e27b8fd 100644 |
7570 |
+--- a/net/ipv6/ip6_output.c |
7571 |
++++ b/net/ipv6/ip6_output.c |
7572 |
+@@ -757,13 +757,14 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, |
7573 |
+ * Fragment the datagram. |
7574 |
+ */ |
7575 |
+ |
7576 |
+- *prevhdr = NEXTHDR_FRAGMENT; |
7577 |
+ troom = rt->dst.dev->needed_tailroom; |
7578 |
+ |
7579 |
+ /* |
7580 |
+ * Keep copying data until we run out. |
7581 |
+ */ |
7582 |
+ while (left > 0) { |
7583 |
++ u8 *fragnexthdr_offset; |
7584 |
++ |
7585 |
+ len = left; |
7586 |
+ /* IF: it doesn't fit, use 'mtu' - the data space left */ |
7587 |
+ if (len > mtu) |
7588 |
+@@ -808,6 +809,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, |
7589 |
+ */ |
7590 |
+ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); |
7591 |
+ |
7592 |
++ fragnexthdr_offset = skb_network_header(frag); |
7593 |
++ fragnexthdr_offset += prevhdr - skb_network_header(skb); |
7594 |
++ *fragnexthdr_offset = NEXTHDR_FRAGMENT; |
7595 |
++ |
7596 |
+ /* |
7597 |
+ * Build fragment header. |
7598 |
+ */ |
7599 |
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c |
7600 |
+index c299c1e..66c2b4b 100644 |
7601 |
+--- a/net/ipv6/ip6_vti.c |
7602 |
++++ b/net/ipv6/ip6_vti.c |
7603 |
+@@ -691,6 +691,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p) |
7604 |
+ u->link = p->link; |
7605 |
+ u->i_key = p->i_key; |
7606 |
+ u->o_key = p->o_key; |
7607 |
++ if (u->i_key) |
7608 |
++ u->i_flags |= GRE_KEY; |
7609 |
++ if (u->o_key) |
7610 |
++ u->o_flags |= GRE_KEY; |
7611 |
+ u->proto = p->proto; |
7612 |
+ |
7613 |
+ memcpy(u->name, p->name, sizeof(u->name)); |
7614 |
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c |
7615 |
+index 9948b5c..986d4ca 100644 |
7616 |
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c |
7617 |
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c |
7618 |
+@@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) |
7619 |
+ hdr = ipv6_hdr(skb); |
7620 |
+ fhdr = (struct frag_hdr *)skb_transport_header(skb); |
7621 |
+ |
7622 |
++ skb_orphan(skb); |
7623 |
+ fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, |
7624 |
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); |
7625 |
+ if (fq == NULL) { |
7626 |
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
7627 |
+index 6673965..b2e61a0 100644 |
7628 |
+--- a/net/ipv6/tcp_ipv6.c |
7629 |
++++ b/net/ipv6/tcp_ipv6.c |
7630 |
+@@ -375,10 +375,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
7631 |
+ np = inet6_sk(sk); |
7632 |
+ |
7633 |
+ if (type == NDISC_REDIRECT) { |
7634 |
+- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
7635 |
++ if (!sock_owned_by_user(sk)) { |
7636 |
++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
7637 |
+ |
7638 |
+- if (dst) |
7639 |
+- dst->ops->redirect(dst, sk, skb); |
7640 |
++ if (dst) |
7641 |
++ dst->ops->redirect(dst, sk, skb); |
7642 |
++ } |
7643 |
+ goto out; |
7644 |
+ } |
7645 |
+ |
7646 |
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c |
7647 |
+index c0f0750..ff750bb 100644 |
7648 |
+--- a/net/l2tp/l2tp_ip.c |
7649 |
++++ b/net/l2tp/l2tp_ip.c |
7650 |
+@@ -388,7 +388,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) |
7651 |
+ drop: |
7652 |
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); |
7653 |
+ kfree_skb(skb); |
7654 |
+- return -1; |
7655 |
++ return 0; |
7656 |
+ } |
7657 |
+ |
7658 |
+ /* Userspace will call sendmsg() on the tunnel socket to send L2TP |
7659 |
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c |
7660 |
+index 5b77377..1309e2c 100644 |
7661 |
+--- a/net/mpls/af_mpls.c |
7662 |
++++ b/net/mpls/af_mpls.c |
7663 |
+@@ -956,7 +956,8 @@ static void mpls_ifdown(struct net_device *dev, int event) |
7664 |
+ /* fall through */ |
7665 |
+ case NETDEV_CHANGE: |
7666 |
+ nh->nh_flags |= RTNH_F_LINKDOWN; |
7667 |
+- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; |
7668 |
++ if (event != NETDEV_UNREGISTER) |
7669 |
++ ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; |
7670 |
+ break; |
7671 |
+ } |
7672 |
+ if (event == NETDEV_UNREGISTER) |
7673 |
+@@ -1696,6 +1697,7 @@ static void mpls_net_exit(struct net *net) |
7674 |
+ for (index = 0; index < platform_labels; index++) { |
7675 |
+ struct mpls_route *rt = rtnl_dereference(platform_label[index]); |
7676 |
+ RCU_INIT_POINTER(platform_label[index], NULL); |
7677 |
++ mpls_notify_route(net, index, rt, NULL, NULL); |
7678 |
+ mpls_rt_free(rt); |
7679 |
+ } |
7680 |
+ rtnl_unlock(); |
7681 |
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c |
7682 |
+index eab210b..48386bf 100644 |
7683 |
+--- a/net/openvswitch/conntrack.c |
7684 |
++++ b/net/openvswitch/conntrack.c |
7685 |
+@@ -367,7 +367,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, |
7686 |
+ } else if (key->eth.type == htons(ETH_P_IPV6)) { |
7687 |
+ enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; |
7688 |
+ |
7689 |
+- skb_orphan(skb); |
7690 |
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); |
7691 |
+ err = nf_ct_frag6_gather(net, skb, user); |
7692 |
+ if (err) { |
7693 |
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
7694 |
+index 34de326..f2b04a7 100644 |
7695 |
+--- a/net/packet/af_packet.c |
7696 |
++++ b/net/packet/af_packet.c |
7697 |
+@@ -3140,7 +3140,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
7698 |
+ int addr_len) |
7699 |
+ { |
7700 |
+ struct sock *sk = sock->sk; |
7701 |
+- char name[15]; |
7702 |
++ char name[sizeof(uaddr->sa_data) + 1]; |
7703 |
+ |
7704 |
+ /* |
7705 |
+ * Check legality |
7706 |
+@@ -3148,7 +3148,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
7707 |
+ |
7708 |
+ if (addr_len != sizeof(struct sockaddr)) |
7709 |
+ return -EINVAL; |
7710 |
+- strlcpy(name, uaddr->sa_data, sizeof(name)); |
7711 |
++ /* uaddr->sa_data comes from the userspace, it's not guaranteed to be |
7712 |
++ * zero-terminated. |
7713 |
++ */ |
7714 |
++ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); |
7715 |
++ name[sizeof(uaddr->sa_data)] = 0; |
7716 |
+ |
7717 |
+ return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); |
7718 |
+ } |
7719 |
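The af_packet fix above is a buffer-safety pattern worth isolating: sockaddr.sa_data is a fixed 14-byte field that userspace need not NUL-terminate, so strlcpy(), which keeps reading until it finds a NUL, could run past the buffer. Copying the raw bytes and terminating explicitly is the safe form (minimal sketch against the standard sockaddr type; the helper name is made up):

#include <string.h>
#include <sys/socket.h>

/* 'name' must have room for sizeof(sa->sa_data) + 1 bytes. */
static void copy_bound_name(char *name, const struct sockaddr *sa)
{
    memcpy(name, sa->sa_data, sizeof(sa->sa_data)); /* no NUL assumed */
    name[sizeof(sa->sa_data)] = '\0';               /* terminate ourselves */
}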
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c |
7720 |
+index c6c2a93..c651cfc 100644 |
7721 |
+--- a/net/sched/act_api.c |
7722 |
++++ b/net/sched/act_api.c |
7723 |
+@@ -820,10 +820,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, |
7724 |
+ goto out_module_put; |
7725 |
+ |
7726 |
+ err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops); |
7727 |
+- if (err < 0) |
7728 |
++ if (err <= 0) |
7729 |
+ goto out_module_put; |
7730 |
+- if (err == 0) |
7731 |
+- goto noflush_out; |
7732 |
+ |
7733 |
+ nla_nest_end(skb, nest); |
7734 |
+ |
7735 |
+@@ -840,7 +838,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, |
7736 |
+ out_module_put: |
7737 |
+ module_put(ops->owner); |
7738 |
+ err_out: |
7739 |
+-noflush_out: |
7740 |
+ kfree_skb(skb); |
7741 |
+ return err; |
7742 |
+ } |
7743 |
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c |
7744 |
+index eae07a2..1191179 100644 |
7745 |
+--- a/net/sched/act_connmark.c |
7746 |
++++ b/net/sched/act_connmark.c |
7747 |
+@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, |
7748 |
+ if (ret < 0) |
7749 |
+ return ret; |
7750 |
+ |
7751 |
++ if (!tb[TCA_CONNMARK_PARMS]) |
7752 |
++ return -EINVAL; |
7753 |
++ |
7754 |
+ parm = nla_data(tb[TCA_CONNMARK_PARMS]); |
7755 |
+ |
7756 |
+ if (!tcf_hash_check(tn, parm->index, a, bind)) { |
7757 |
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c |
7758 |
+index e7d9638..f85313d 100644 |
7759 |
+--- a/net/sched/act_skbmod.c |
7760 |
++++ b/net/sched/act_skbmod.c |
7761 |
+@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, |
7762 |
+ |
7763 |
+ return skb->len; |
7764 |
+ nla_put_failure: |
7765 |
+- rcu_read_unlock(); |
7766 |
+ nlmsg_trim(skb, b); |
7767 |
+ return -1; |
7768 |
+ } |
7769 |
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c |
7770 |
+index 41adf36..b5c279b 100644 |
7771 |
+--- a/net/strparser/strparser.c |
7772 |
++++ b/net/strparser/strparser.c |
7773 |
+@@ -504,6 +504,7 @@ static int __init strp_mod_init(void) |
7774 |
+ |
7775 |
+ static void __exit strp_mod_exit(void) |
7776 |
+ { |
7777 |
++ destroy_workqueue(strp_wq); |
7778 |
+ } |
7779 |
+ module_init(strp_mod_init); |
7780 |
+ module_exit(strp_mod_exit); |
7781 |
|
7782 |
diff --git a/4.9.18/1017_linux-4.9.18.patch b/4.9.18/1017_linux-4.9.18.patch |
7783 |
new file mode 100644 |
7784 |
index 0000000..3f957a2 |
7785 |
--- /dev/null |
7786 |
+++ b/4.9.18/1017_linux-4.9.18.patch |
7787 |
@@ -0,0 +1,876 @@ |
7788 |
+diff --git a/Makefile b/Makefile |
7789 |
+index 004f90a..c10d0e6 100644 |
7790 |
+--- a/Makefile |
7791 |
++++ b/Makefile |
7792 |
+@@ -1,6 +1,6 @@ |
7793 |
+ VERSION = 4 |
7794 |
+ PATCHLEVEL = 9 |
7795 |
+-SUBLEVEL = 17 |
7796 |
++SUBLEVEL = 18 |
7797 |
+ EXTRAVERSION = |
7798 |
+ NAME = Roaring Lionus |
7799 |
+ |
7800 |
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h |
7801 |
+index 7bd69bd..1d8c24d 100644 |
7802 |
+--- a/arch/parisc/include/asm/cacheflush.h |
7803 |
++++ b/arch/parisc/include/asm/cacheflush.h |
7804 |
+@@ -45,28 +45,9 @@ static inline void flush_kernel_dcache_page(struct page *page) |
7805 |
+ |
7806 |
+ #define flush_kernel_dcache_range(start,size) \ |
7807 |
+ flush_kernel_dcache_range_asm((start), (start)+(size)); |
7808 |
+-/* vmap range flushes and invalidates. Architecturally, we don't need |
7809 |
+- * the invalidate, because the CPU should refuse to speculate once an |
7810 |
+- * area has been flushed, so invalidate is left empty */ |
7811 |
+-static inline void flush_kernel_vmap_range(void *vaddr, int size) |
7812 |
+-{ |
7813 |
+- unsigned long start = (unsigned long)vaddr; |
7814 |
+- |
7815 |
+- flush_kernel_dcache_range_asm(start, start + size); |
7816 |
+-} |
7817 |
+-static inline void invalidate_kernel_vmap_range(void *vaddr, int size) |
7818 |
+-{ |
7819 |
+- unsigned long start = (unsigned long)vaddr; |
7820 |
+- void *cursor = vaddr; |
7821 |
+ |
7822 |
+- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { |
7823 |
+- struct page *page = vmalloc_to_page(cursor); |
7824 |
+- |
7825 |
+- if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) |
7826 |
+- flush_kernel_dcache_page(page); |
7827 |
+- } |
7828 |
+- flush_kernel_dcache_range_asm(start, start + size); |
7829 |
+-} |
7830 |
++void flush_kernel_vmap_range(void *vaddr, int size); |
7831 |
++void invalidate_kernel_vmap_range(void *vaddr, int size); |
7832 |
+ |
7833 |
+ #define flush_cache_vmap(start, end) flush_cache_all() |
7834 |
+ #define flush_cache_vunmap(start, end) flush_cache_all() |
7835 |
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c |
7836 |
+index 977f0a4f..53ec75f 100644 |
7837 |
+--- a/arch/parisc/kernel/cache.c |
7838 |
++++ b/arch/parisc/kernel/cache.c |
7839 |
+@@ -633,3 +633,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long |
7840 |
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
7841 |
+ } |
7842 |
+ } |
7843 |
++ |
7844 |
++void flush_kernel_vmap_range(void *vaddr, int size) |
7845 |
++{ |
7846 |
++ unsigned long start = (unsigned long)vaddr; |
7847 |
++ |
7848 |
++ if ((unsigned long)size > parisc_cache_flush_threshold) |
7849 |
++ flush_data_cache(); |
7850 |
++ else |
7851 |
++ flush_kernel_dcache_range_asm(start, start + size); |
7852 |
++} |
7853 |
++EXPORT_SYMBOL(flush_kernel_vmap_range); |
7854 |
++ |
7855 |
++void invalidate_kernel_vmap_range(void *vaddr, int size) |
7856 |
++{ |
7857 |
++ unsigned long start = (unsigned long)vaddr; |
7858 |
++ |
7859 |
++ if ((unsigned long)size > parisc_cache_flush_threshold) |
7860 |
++ flush_data_cache(); |
7861 |
++ else |
7862 |
++ flush_kernel_dcache_range_asm(start, start + size); |
7863 |
++} |
7864 |
++EXPORT_SYMBOL(invalidate_kernel_vmap_range); |
7865 |
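The parisc change above un-inlines the vmap flush helpers so they can consult parisc_cache_flush_threshold: past some size it is cheaper to flush the whole data cache than to walk the range line by line. The shape of the heuristic, with stubbed-out platform hooks and an illustrative crossover value (the kernel calibrates the real threshold at boot):

#include <stdint.h>

static void flush_whole_data_cache(void)                 { /* platform op, stubbed */ }
static void flush_dcache_range(uintptr_t s, uintptr_t e) { (void)s; (void)e; }

static const unsigned long cache_flush_threshold = 256 * 1024; /* illustrative */

void flush_vmap_range(void *vaddr, unsigned long size)
{
    uintptr_t start = (uintptr_t)vaddr;

    if (size > cache_flush_threshold)
        flush_whole_data_cache();       /* O(cache) beats O(size) here */
    else
        flush_dcache_range(start, start + size);
}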
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c |
7866 |
+index 4063943..e81afc37 100644 |
7867 |
+--- a/arch/parisc/kernel/process.c |
7868 |
++++ b/arch/parisc/kernel/process.c |
7869 |
+@@ -139,6 +139,8 @@ void machine_power_off(void) |
7870 |
+ |
7871 |
+ printk(KERN_EMERG "System shut down completed.\n" |
7872 |
+ "Please power this system off now."); |
7873 |
++ |
7874 |
++ for (;;); |
7875 |
+ } |
7876 |
+ |
7877 |
+ void (*pm_power_off)(void) = machine_power_off; |
7878 |
+diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S |
7879 |
+index 861e721..f080abf 100644 |
7880 |
+--- a/arch/powerpc/boot/zImage.lds.S |
7881 |
++++ b/arch/powerpc/boot/zImage.lds.S |
7882 |
+@@ -68,6 +68,7 @@ SECTIONS |
7883 |
+ } |
7884 |
+ |
7885 |
+ #ifdef CONFIG_PPC64_BOOT_WRAPPER |
7886 |
++ . = ALIGN(256); |
7887 |
+ .got : |
7888 |
+ { |
7889 |
+ __toc_start = .; |
7890 |
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
7891 |
+index 6e6c1fb..272608f 100644 |
7892 |
+--- a/drivers/cpufreq/cpufreq.c |
7893 |
++++ b/drivers/cpufreq/cpufreq.c |
7894 |
+@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, |
7895 |
+ char *buf) |
7896 |
+ { |
7897 |
+ unsigned int cur_freq = __cpufreq_get(policy); |
7898 |
+- if (!cur_freq) |
7899 |
+- return sprintf(buf, "<unknown>"); |
7900 |
+- return sprintf(buf, "%u\n", cur_freq); |
7901 |
++ |
7902 |
++ if (cur_freq) |
7903 |
++ return sprintf(buf, "%u\n", cur_freq); |
7904 |
++ |
7905 |
++ return sprintf(buf, "<unknown>\n"); |
7906 |
+ } |
7907 |
+ |
7908 |
+ /** |
7909 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c |
7910 |
+index b447a01..09e6a73 100644 |
7911 |
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c |
7912 |
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c |
7913 |
+@@ -3506,6 +3506,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, |
7914 |
+ max_sclk = 75000; |
7915 |
+ max_mclk = 80000; |
7916 |
+ } |
7917 |
++ } else if (adev->asic_type == CHIP_OLAND) { |
7918 |
++ if ((adev->pdev->device == 0x6604) && |
7919 |
++ (adev->pdev->subsystem_vendor == 0x1028) && |
7920 |
++ (adev->pdev->subsystem_device == 0x066F)) { |
7921 |
++ max_sclk = 75000; |
7922 |
++ } |
7923 |
+ } |
7924 |
+ /* Apply dpm quirks */ |
7925 |
+ while (p && p->chip_device != 0) { |
7926 |
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c |
7927 |
+index 8703f56..246d1ae 100644 |
7928 |
+--- a/drivers/gpu/drm/vc4/vc4_drv.c |
7929 |
++++ b/drivers/gpu/drm/vc4/vc4_drv.c |
7930 |
+@@ -61,21 +61,24 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, |
7931 |
+ if (ret < 0) |
7932 |
+ return ret; |
7933 |
+ args->value = V3D_READ(V3D_IDENT0); |
7934 |
+- pm_runtime_put(&vc4->v3d->pdev->dev); |
7935 |
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
7936 |
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
7937 |
+ break; |
7938 |
+ case DRM_VC4_PARAM_V3D_IDENT1: |
7939 |
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
7940 |
+ if (ret < 0) |
7941 |
+ return ret; |
7942 |
+ args->value = V3D_READ(V3D_IDENT1); |
7943 |
+- pm_runtime_put(&vc4->v3d->pdev->dev); |
7944 |
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
7945 |
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
7946 |
+ break; |
7947 |
+ case DRM_VC4_PARAM_V3D_IDENT2: |
7948 |
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
7949 |
+ if (ret < 0) |
7950 |
+ return ret; |
7951 |
+ args->value = V3D_READ(V3D_IDENT2); |
7952 |
+- pm_runtime_put(&vc4->v3d->pdev->dev); |
7953 |
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
7954 |
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
7955 |
+ break; |
7956 |
+ case DRM_VC4_PARAM_SUPPORTS_BRANCHES: |
7957 |
+ args->value = true; |
7958 |
+diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c |
7959 |
+index 18e3717..ab30169 100644 |
7960 |
+--- a/drivers/gpu/drm/vc4/vc4_gem.c |
7961 |
++++ b/drivers/gpu/drm/vc4/vc4_gem.c |
7962 |
+@@ -711,8 +711,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) |
7963 |
+ } |
7964 |
+ |
7965 |
+ mutex_lock(&vc4->power_lock); |
7966 |
+- if (--vc4->power_refcount == 0) |
7967 |
+- pm_runtime_put(&vc4->v3d->pdev->dev); |
7968 |
++ if (--vc4->power_refcount == 0) { |
7969 |
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
7970 |
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
7971 |
++ } |
7972 |
+ mutex_unlock(&vc4->power_lock); |
7973 |
+ |
7974 |
+ kfree(exec); |
7975 |
+diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c |
7976 |
+index e6d3c60..7cc346a 100644 |
7977 |
+--- a/drivers/gpu/drm/vc4/vc4_v3d.c |
7978 |
++++ b/drivers/gpu/drm/vc4/vc4_v3d.c |
7979 |
+@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) |
7980 |
+ return ret; |
7981 |
+ } |
7982 |
+ |
7983 |
++ pm_runtime_use_autosuspend(dev); |
7984 |
++ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */ |
7985 |
+ pm_runtime_enable(dev); |
7986 |
+ |
7987 |
+ return 0; |
7988 |
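The vc4 hunks convert immediate pm_runtime_put() calls to the autosuspend variant: each put now re-arms an idle timer (40 ms here) instead of powering the V3D block off at once, so bursts of ioctls stop thrashing the power domain. The generic driver-side pattern looks roughly like this (kernel-context fragment, not a standalone program; function names other than the pm_runtime_* calls are made up):

#include <linux/pm_runtime.h>

/* probe/bind time: opt in to autosuspend before enabling runtime PM */
static void setup_runtime_pm(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 40);	/* ms of idle grace */
	pm_runtime_enable(dev);
}

/* per-operation: bump usage, touch hardware, then let it idle out */
static int do_hw_op(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0)
		return ret;
	/* ... register access ... */
	pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
	return 0;
}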
+diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c |
7989 |
+index 2543cf5..917321c 100644 |
7990 |
+--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c |
7991 |
++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c |
7992 |
+@@ -608,9 +608,7 @@ static bool |
7993 |
+ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) |
7994 |
+ { |
7995 |
+ uint32_t max_branch_target = 0; |
7996 |
+- bool found_shader_end = false; |
7997 |
+ int ip; |
7998 |
+- int shader_end_ip = 0; |
7999 |
+ int last_branch = -2; |
8000 |
+ |
8001 |
+ for (ip = 0; ip < validation_state->max_ip; ip++) { |
8002 |
+@@ -621,8 +619,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) |
8003 |
+ uint32_t branch_target_ip; |
8004 |
+ |
8005 |
+ if (sig == QPU_SIG_PROG_END) { |
8006 |
+- shader_end_ip = ip; |
8007 |
+- found_shader_end = true; |
8008 |
++ /* There are two delay slots after program end is |
8009 |
++ * signaled that are still executed, then we're |
8010 |
++ * finished. validation_state->max_ip is the |
8011 |
++ * instruction after the last valid instruction in the |
8012 |
++ * program. |
8013 |
++ */ |
8014 |
++ validation_state->max_ip = ip + 3; |
8015 |
+ continue; |
8016 |
+ } |
8017 |
+ |
8018 |
+@@ -676,15 +679,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) |
8019 |
+ } |
8020 |
+ set_bit(after_delay_ip, validation_state->branch_targets); |
8021 |
+ max_branch_target = max(max_branch_target, after_delay_ip); |
8022 |
+- |
8023 |
+- /* There are two delay slots after program end is signaled |
8024 |
+- * that are still executed, then we're finished. |
8025 |
+- */ |
8026 |
+- if (found_shader_end && ip == shader_end_ip + 2) |
8027 |
+- break; |
8028 |
+ } |
8029 |
+ |
8030 |
+- if (max_branch_target > shader_end_ip) { |
8031 |
++ if (max_branch_target > validation_state->max_ip - 3) { |
8032 |
+ DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); |
8033 |
+ return false; |
8034 |
+ } |
8035 |
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c |
8036 |
+index aecec6d..7f1c625 100644 |
8037 |
+--- a/drivers/isdn/gigaset/bas-gigaset.c |
8038 |
++++ b/drivers/isdn/gigaset/bas-gigaset.c |
8039 |
+@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, |
8040 |
+ return -ENODEV; |
8041 |
+ } |
8042 |
+ |
8043 |
++ if (hostif->desc.bNumEndpoints < 1) |
8044 |
++ return -ENODEV; |
8045 |
++ |
8046 |
+ dev_info(&udev->dev, |
8047 |
+ "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", |
8048 |
+ __func__, le16_to_cpu(udev->descriptor.idVendor), |
8049 |
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
8050 |
+index 39fddda..55b5e0e 100644 |
8051 |
+--- a/drivers/md/raid10.c |
8052 |
++++ b/drivers/md/raid10.c |
8053 |
+@@ -1470,7 +1470,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) |
8054 |
+ split = bio; |
8055 |
+ } |
8056 |
+ |
8057 |
++ /* |
8058 |
++ * If a bio is split, the first part of the bio will pass |
8059 |
++ * barrier but the bio is queued in current->bio_list (see |
8060 |
++ * generic_make_request). If there is a raise_barrier() called |
8061 |
++ * here, the second part of bio can't pass barrier. But since |
8062 |
++ * the first part of the bio isn't dispatched to underlying disks |
8063 |
++ * yet, the barrier is never released, hence raise_barrier will |
8064 |
++ * always wait. We have a deadlock. |
8065 |
++ * Note, this only happens in read path. For write path, the |
8066 |
++ * first part of bio is dispatched in a schedule() call |
8067 |
++ * (because of blk plug) or offloaded to raid10d. |
8068 |
++ * Quitting from the function immediately can change the bio |
8069 |
++ * order queued in bio_list and avoid the deadlock. |
8070 |
++ */ |
8071 |
+ __make_request(mddev, split); |
8072 |
++ if (split != bio && bio_data_dir(bio) == READ) { |
8073 |
++ generic_make_request(bio); |
8074 |
++ break; |
8075 |
++ } |
8076 |
+ } while (split != bio); |
8077 |
+ |
8078 |
+ /* In case raid10d snuck in to freeze_array */ |
8079 |
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
8080 |
+index f9b6fba..a530f08 100644 |
8081 |
+--- a/drivers/scsi/libiscsi.c |
8082 |
++++ b/drivers/scsi/libiscsi.c |
8083 |
+@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) |
8084 |
+ WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); |
8085 |
+ task->state = state; |
8086 |
+ |
8087 |
+- if (!list_empty(&task->running)) |
8088 |
++ spin_lock_bh(&conn->taskqueuelock); |
8089 |
++ if (!list_empty(&task->running)) { |
8090 |
++ pr_debug_once("%s while task on list", __func__); |
8091 |
+ list_del_init(&task->running); |
8092 |
++ } |
8093 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8094 |
+ |
8095 |
+ if (conn->task == task) |
8096 |
+ conn->task = NULL; |
8097 |
+@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, |
8098 |
+ if (session->tt->xmit_task(task)) |
8099 |
+ goto free_task; |
8100 |
+ } else { |
8101 |
++ spin_lock_bh(&conn->taskqueuelock); |
8102 |
+ list_add_tail(&task->running, &conn->mgmtqueue); |
8103 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8104 |
+ iscsi_conn_queue_work(conn); |
8105 |
+ } |
8106 |
+ |
8107 |
+@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task) |
8108 |
+ * this may be on the requeue list already if the xmit_task callout |
8109 |
+ * is handling the r2ts while we are adding new ones |
8110 |
+ */ |
8111 |
++ spin_lock_bh(&conn->taskqueuelock); |
8112 |
+ if (list_empty(&task->running)) |
8113 |
+ list_add_tail(&task->running, &conn->requeue); |
8114 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8115 |
+ iscsi_conn_queue_work(conn); |
8116 |
+ } |
8117 |
+ EXPORT_SYMBOL_GPL(iscsi_requeue_task); |
8118 |
+@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
8119 |
+ * only have one nop-out as a ping from us and targets should not |
8120 |
+ * overflow us with nop-ins |
8121 |
+ */ |
8122 |
++ spin_lock_bh(&conn->taskqueuelock); |
8123 |
+ check_mgmt: |
8124 |
+ while (!list_empty(&conn->mgmtqueue)) { |
8125 |
+ conn->task = list_entry(conn->mgmtqueue.next, |
8126 |
+ struct iscsi_task, running); |
8127 |
+ list_del_init(&conn->task->running); |
8128 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8129 |
+ if (iscsi_prep_mgmt_task(conn, conn->task)) { |
8130 |
+ /* regular RX path uses back_lock */ |
8131 |
+ spin_lock_bh(&conn->session->back_lock); |
8132 |
+ __iscsi_put_task(conn->task); |
8133 |
+ spin_unlock_bh(&conn->session->back_lock); |
8134 |
+ conn->task = NULL; |
8135 |
++ spin_lock_bh(&conn->taskqueuelock); |
8136 |
+ continue; |
8137 |
+ } |
8138 |
+ rc = iscsi_xmit_task(conn); |
8139 |
+ if (rc) |
8140 |
+ goto done; |
8141 |
++ spin_lock_bh(&conn->taskqueuelock); |
8142 |
+ } |
8143 |
+ |
8144 |
+ /* process pending command queue */ |
8145 |
+@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
8146 |
+ conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, |
8147 |
+ running); |
8148 |
+ list_del_init(&conn->task->running); |
8149 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8150 |
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { |
8151 |
+ fail_scsi_task(conn->task, DID_IMM_RETRY); |
8152 |
++ spin_lock_bh(&conn->taskqueuelock); |
8153 |
+ continue; |
8154 |
+ } |
8155 |
+ rc = iscsi_prep_scsi_cmd_pdu(conn->task); |
8156 |
+ if (rc) { |
8157 |
+ if (rc == -ENOMEM || rc == -EACCES) { |
8158 |
++ spin_lock_bh(&conn->taskqueuelock); |
8159 |
+ list_add_tail(&conn->task->running, |
8160 |
+ &conn->cmdqueue); |
8161 |
+ conn->task = NULL; |
8162 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8163 |
+ goto done; |
8164 |
+ } else |
8165 |
+ fail_scsi_task(conn->task, DID_ABORT); |
8166 |
++ spin_lock_bh(&conn->taskqueuelock); |
8167 |
+ continue; |
8168 |
+ } |
8169 |
+ rc = iscsi_xmit_task(conn); |
8170 |
+@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
8171 |
+ * we need to check the mgmt queue for nops that need to |
8172 |
+ * be sent to avoid starvation |
8173 |
+ */ |
8174 |
++ spin_lock_bh(&conn->taskqueuelock); |
8175 |
+ if (!list_empty(&conn->mgmtqueue)) |
8176 |
+ goto check_mgmt; |
8177 |
+ } |
8178 |
+@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
8179 |
+ conn->task = task; |
8180 |
+ list_del_init(&conn->task->running); |
8181 |
+ conn->task->state = ISCSI_TASK_RUNNING; |
8182 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8183 |
+ rc = iscsi_xmit_task(conn); |
8184 |
+ if (rc) |
8185 |
+ goto done; |
8186 |
++ spin_lock_bh(&conn->taskqueuelock); |
8187 |
+ if (!list_empty(&conn->mgmtqueue)) |
8188 |
+ goto check_mgmt; |
8189 |
+ } |
8190 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8191 |
+ spin_unlock_bh(&conn->session->frwd_lock); |
8192 |
+ return -ENODATA; |
8193 |
+ |
8194 |
+@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) |
8195 |
+ goto prepd_reject; |
8196 |
+ } |
8197 |
+ } else { |
8198 |
++ spin_lock_bh(&conn->taskqueuelock); |
8199 |
+ list_add_tail(&task->running, &conn->cmdqueue); |
8200 |
++ spin_unlock_bh(&conn->taskqueuelock); |
8201 |
+ iscsi_conn_queue_work(conn); |
8202 |
+ } |
8203 |
+ |
8204 |
+@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, |
8205 |
+ INIT_LIST_HEAD(&conn->mgmtqueue); |
8206 |
+ INIT_LIST_HEAD(&conn->cmdqueue); |
8207 |
+ INIT_LIST_HEAD(&conn->requeue); |
8208 |
++ spin_lock_init(&conn->taskqueuelock); |
8209 |
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker); |
8210 |
+ |
8211 |
+ /* allocate login_task used for the login/text sequences */ |
8212 |
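The libiscsi patch above exists because "if (!list_empty(&task->running)) list_del_init(&task->running)" is only safe if every path that moves a task between the cmd/mgmt/requeue lists holds the same lock; the new conn->taskqueuelock provides exactly that. Reduced to its essentials (kernel-context fragment using the real list/spinlock APIs; the surrounding structs are trimmed down):

#include <linux/list.h>
#include <linux/spinlock.h>

struct conn {
	spinlock_t taskqueuelock;	/* guards all three queues below */
	struct list_head cmdqueue, mgmtqueue, requeue;
};

struct task {
	struct list_head running;	/* links into one of the queues */
};

static void task_done(struct conn *conn, struct task *task)
{
	spin_lock_bh(&conn->taskqueuelock);
	if (!list_empty(&task->running))	/* check-and-unlink must be */
		list_del_init(&task->running);	/* one atomic step */
	spin_unlock_bh(&conn->taskqueuelock);
}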
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
8213 |
+index 734a042..f7e3f27 100644 |
8214 |
+--- a/drivers/scsi/lpfc/lpfc_init.c |
8215 |
++++ b/drivers/scsi/lpfc/lpfc_init.c |
8216 |
+@@ -11393,6 +11393,7 @@ static struct pci_driver lpfc_driver = { |
8217 |
+ .id_table = lpfc_id_table, |
8218 |
+ .probe = lpfc_pci_probe_one, |
8219 |
+ .remove = lpfc_pci_remove_one, |
8220 |
++ .shutdown = lpfc_pci_remove_one, |
8221 |
+ .suspend = lpfc_pci_suspend_one, |
8222 |
+ .resume = lpfc_pci_resume_one, |
8223 |
+ .err_handler = &lpfc_err_handler, |
8224 |
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
8225 |
+index bff9689..feab7ea 100644 |
8226 |
+--- a/drivers/scsi/qla2xxx/qla_target.c |
8227 |
++++ b/drivers/scsi/qla2xxx/qla_target.c |
8228 |
+@@ -5375,16 +5375,22 @@ qlt_send_busy(struct scsi_qla_host *vha, |
8229 |
+ |
8230 |
+ static int |
8231 |
+ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, |
8232 |
+- struct atio_from_isp *atio) |
8233 |
++ struct atio_from_isp *atio, bool ha_locked) |
8234 |
+ { |
8235 |
+ struct qla_hw_data *ha = vha->hw; |
8236 |
+ uint16_t status; |
8237 |
++ unsigned long flags; |
8238 |
+ |
8239 |
+ if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) |
8240 |
+ return 0; |
8241 |
+ |
8242 |
++ if (!ha_locked) |
8243 |
++ spin_lock_irqsave(&ha->hardware_lock, flags); |
8244 |
+ status = temp_sam_status; |
8245 |
+ qlt_send_busy(vha, atio, status); |
8246 |
++ if (!ha_locked) |
8247 |
++ spin_unlock_irqrestore(&ha->hardware_lock, flags); |
8248 |
++ |
8249 |
+ return 1; |
8250 |
+ } |
8251 |
+ |
8252 |
+@@ -5429,7 +5435,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, |
8253 |
+ |
8254 |
+ |
8255 |
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { |
8256 |
+- rc = qlt_chk_qfull_thresh_hold(vha, atio); |
8257 |
++ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); |
8258 |
+ if (rc != 0) { |
8259 |
+ tgt->atio_irq_cmd_count--; |
8260 |
+ return; |
8261 |
+@@ -5552,7 +5558,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) |
8262 |
+ break; |
8263 |
+ } |
8264 |
+ |
8265 |
+- rc = qlt_chk_qfull_thresh_hold(vha, atio); |
8266 |
++ rc = qlt_chk_qfull_thresh_hold(vha, atio, true); |
8267 |
+ if (rc != 0) { |
8268 |
+ tgt->irq_cmd_count--; |
8269 |
+ return; |
8270 |
+@@ -6794,6 +6800,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) |
8271 |
+ spin_lock_irqsave(&ha->hardware_lock, flags); |
8272 |
+ qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); |
8273 |
+ spin_unlock_irqrestore(&ha->hardware_lock, flags); |
8274 |
++ |
8275 |
++ kfree(op); |
8276 |
+ } |
8277 |
+ |
8278 |
+ void |
8279 |
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c |
8280 |
+index 9125d93..ef1c8c1 100644 |
8281 |
+--- a/drivers/target/target_core_pscsi.c |
8282 |
++++ b/drivers/target/target_core_pscsi.c |
8283 |
+@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, |
8284 |
+ |
8285 |
+ buf = kzalloc(12, GFP_KERNEL); |
8286 |
+ if (!buf) |
8287 |
+- return; |
8288 |
++ goto out_free; |
8289 |
+ |
8290 |
+ memset(cdb, 0, MAX_COMMAND_SIZE); |
8291 |
+ cdb[0] = MODE_SENSE; |
8292 |
+@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, |
8293 |
+ * If MODE_SENSE still returns zero, set the default value to 1024. |
8294 |
+ */ |
8295 |
+ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); |
8296 |
++out_free: |
8297 |
+ if (!sdev->sector_size) |
8298 |
+ sdev->sector_size = 1024; |
8299 |
+-out_free: |
8300 |
++ |
8301 |
+ kfree(buf); |
8302 |
+ } |
8303 |
+ |
8304 |
+@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, |
8305 |
+ sd->lun, sd->queue_depth); |
8306 |
+ } |
8307 |
+ |
8308 |
+- dev->dev_attrib.hw_block_size = sd->sector_size; |
8309 |
++ dev->dev_attrib.hw_block_size = |
8310 |
++ min_not_zero((int)sd->sector_size, 512); |
8311 |
+ dev->dev_attrib.hw_max_sectors = |
8312 |
+- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); |
8313 |
++ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); |
8314 |
+ dev->dev_attrib.hw_queue_depth = sd->queue_depth; |
8315 |
+ |
8316 |
+ /* |
8317 |
+@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, |
8318 |
+ /* |
8319 |
+ * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. |
8320 |
+ */ |
8321 |
+- if (sd->type == TYPE_TAPE) |
8322 |
++ if (sd->type == TYPE_TAPE) { |
8323 |
+ pscsi_tape_read_blocksize(dev, sd); |
8324 |
++ dev->dev_attrib.hw_block_size = sd->sector_size; |
8325 |
++ } |
8326 |
+ return 0; |
8327 |
+ } |
8328 |
+ |
8329 |
+@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) |
8330 |
+ /* |
8331 |
+ * Called with struct Scsi_Host->host_lock called. |
8332 |
+ */ |
8333 |
+-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) |
8334 |
++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) |
8335 |
+ __releases(sh->host_lock) |
8336 |
+ { |
8337 |
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
8338 |
+@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) |
8339 |
+ return 0; |
8340 |
+ } |
8341 |
+ |
8342 |
+-/* |
8343 |
+- * Called with struct Scsi_Host->host_lock called. |
8344 |
+- */ |
8345 |
+-static int pscsi_create_type_other(struct se_device *dev, |
8346 |
+- struct scsi_device *sd) |
8347 |
+- __releases(sh->host_lock) |
8348 |
+-{ |
8349 |
+- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
8350 |
+- struct Scsi_Host *sh = sd->host; |
8351 |
+- int ret; |
8352 |
+- |
8353 |
+- spin_unlock_irq(sh->host_lock); |
8354 |
+- ret = pscsi_add_device_to_list(dev, sd); |
8355 |
+- if (ret) |
8356 |
+- return ret; |
8357 |
+- |
8358 |
+- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", |
8359 |
+- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
8360 |
+- sd->channel, sd->id, sd->lun); |
8361 |
+- return 0; |
8362 |
+-} |
8363 |
+- |
8364 |
+ static int pscsi_configure_device(struct se_device *dev) |
8365 |
+ { |
8366 |
+ struct se_hba *hba = dev->se_hba; |
8367 |
+@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) |
8368 |
+ case TYPE_DISK: |
8369 |
+ ret = pscsi_create_type_disk(dev, sd); |
8370 |
+ break; |
8371 |
+- case TYPE_ROM: |
8372 |
+- ret = pscsi_create_type_rom(dev, sd); |
8373 |
+- break; |
8374 |
+ default: |
8375 |
+- ret = pscsi_create_type_other(dev, sd); |
8376 |
++ ret = pscsi_create_type_nondisk(dev, sd); |
8377 |
+ break; |
8378 |
+ } |
8379 |
+ |
8380 |
+@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) |
8381 |
+ else if (pdv->pdv_lld_host) |
8382 |
+ scsi_host_put(pdv->pdv_lld_host); |
8383 |
+ |
8384 |
+- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
8385 |
+- scsi_device_put(sd); |
8386 |
++ scsi_device_put(sd); |
8387 |
+ |
8388 |
+ pdv->pdv_sd = NULL; |
8389 |
+ } |
8390 |
+@@ -1069,7 +1047,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) |
8391 |
+ if (pdv->pdv_bd && pdv->pdv_bd->bd_part) |
8392 |
+ return pdv->pdv_bd->bd_part->nr_sects; |
8393 |
+ |
8394 |
+- dump_stack(); |
8395 |
+ return 0; |
8396 |
+ } |
8397 |
+ |
8398 |
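
Two ideas recur in the target_core_pscsi hunks above: the MODE SENSE failure path is rerouted through the default-blocksize fallback instead of returning early, hardware limits are clamped with min_not_zero() so a zero sector_size or max_sectors cannot propagate, and the ROM/"other" setup paths are folded into a single pscsi_create_type_nondisk() whose teardown drops the device reference unconditionally. A compile-checked userspace rendition of the min_not_zero() semantics (simplified from the kernel macro in include/linux/kernel.h):

    #include <stdio.h>

    /* Pick the smaller of two values, but treat zero as "no limit"
     * and ignore it -- the property the quoted hunks rely on. */
    #define min_not_zero(x, y) ({                                   \
            typeof(x) _x = (x);                                     \
            typeof(y) _y = (y);                                     \
            _x == 0 ? _y : (_y == 0 ? _x : (_x < _y ? _x : _y)); })

    int main(void)
    {
        printf("%d\n", min_not_zero(0, 512));    /* 512: zero ignored */
        printf("%d\n", min_not_zero(4096, 512)); /* 512: ordinary min */
        return 0;
    }
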
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
8399 |
+index aabd660..a53fb23 100644 |
8400 |
+--- a/drivers/target/target_core_sbc.c |
8401 |
++++ b/drivers/target/target_core_sbc.c |
8402 |
+@@ -1104,9 +1104,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
8403 |
+ return ret; |
8404 |
+ break; |
8405 |
+ case VERIFY: |
8406 |
++ case VERIFY_16: |
8407 |
+ size = 0; |
8408 |
+- sectors = transport_get_sectors_10(cdb); |
8409 |
+- cmd->t_task_lba = transport_lba_32(cdb); |
8410 |
++ if (cdb[0] == VERIFY) { |
8411 |
++ sectors = transport_get_sectors_10(cdb); |
8412 |
++ cmd->t_task_lba = transport_lba_32(cdb); |
8413 |
++ } else { |
8414 |
++ sectors = transport_get_sectors_16(cdb); |
8415 |
++ cmd->t_task_lba = transport_lba_64(cdb); |
8416 |
++ } |
8417 |
+ cmd->execute_cmd = sbc_emulate_noop; |
8418 |
+ goto check_lba; |
8419 |
+ case REZERO_UNIT: |
8420 |
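
The sbc_parse_cdb() hunk above extends the VERIFY emulation to the 16-byte CDB, which carries a 64-bit LBA in bytes 2-9 and a 32-bit sector count in bytes 10-13, while the 10-byte form packs a 32-bit LBA in bytes 2-5 and a 16-bit count in bytes 7-8. A small standalone decoder sketch using the standard SCSI opcodes (0x2f / 0x8f), not the target core's helpers:

    #include <stdint.h>
    #include <stdio.h>

    #define VERIFY_10 0x2f
    #define VERIFY_16 0x8f

    static int parse_verify(const uint8_t *cdb, uint64_t *lba, uint32_t *sectors)
    {
        if (cdb[0] == VERIFY_10) {
            *lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) |
                   (cdb[4] << 8) | cdb[5];
            *sectors = ((uint32_t)cdb[7] << 8) | cdb[8];
        } else if (cdb[0] == VERIFY_16) {
            uint64_t v = 0;
            for (int i = 2; i <= 9; i++)
                v = (v << 8) | cdb[i];          /* 64-bit LBA, big-endian */
            *lba = v;
            *sectors = ((uint32_t)cdb[10] << 24) | ((uint32_t)cdb[11] << 16) |
                       ((uint32_t)cdb[12] << 8) | cdb[13];
        } else {
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t cdb[16] = { VERIFY_16, 0, 0,0,0,0,0,0,0,1, 0,0,0,8, 0,0 };
        uint64_t lba; uint32_t n;
        if (parse_verify(cdb, &lba, &n) == 0)
            printf("lba=%llu sectors=%u\n", (unsigned long long)lba, n);
        return 0;
    }
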
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
8421 |
+index afe29ba..5fa9ba1 100644 |
8422 |
+--- a/fs/ext4/super.c |
8423 |
++++ b/fs/ext4/super.c |
8424 |
+@@ -3830,7 +3830,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
8425 |
+ db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / |
8426 |
+ EXT4_DESC_PER_BLOCK(sb); |
8427 |
+ if (ext4_has_feature_meta_bg(sb)) { |
8428 |
+- if (le32_to_cpu(es->s_first_meta_bg) >= db_count) { |
8429 |
++ if (le32_to_cpu(es->s_first_meta_bg) > db_count) { |
8430 |
+ ext4_msg(sb, KERN_WARNING, |
8431 |
+ "first meta block group too large: %u " |
8432 |
+ "(group descriptor block count %u)", |
8433 |
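
The ext4 hunk above relaxes an off-by-one: s_first_meta_bg counts the group-descriptor blocks laid out the old (non-meta_bg) way, so a value equal to db_count is a legal filesystem and only a strictly greater value indicates corruption. A sketch of the corrected bound as a hypothetical standalone checker, not ext4's code:

    #include <stdio.h>

    /* first_meta_bg is a count, not an index: first_meta_bg == db_count
     * simply means "no descriptor block uses the meta_bg layout". */
    static int check_meta_bg(unsigned int first_meta_bg, unsigned int db_count)
    {
        if (first_meta_bg > db_count)   /* was: >=, rejecting valid images */
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_meta_bg(4, 4));  /* 0: legal boundary case */
        printf("%d\n", check_meta_bg(5, 4));  /* -1: corrupt */
        return 0;
    }
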
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h |
8434 |
+index a6a3389..51519c2 100644 |
8435 |
+--- a/fs/gfs2/incore.h |
8436 |
++++ b/fs/gfs2/incore.h |
8437 |
+@@ -207,7 +207,7 @@ struct lm_lockname { |
8438 |
+ struct gfs2_sbd *ln_sbd; |
8439 |
+ u64 ln_number; |
8440 |
+ unsigned int ln_type; |
8441 |
+-}; |
8442 |
++} __packed __aligned(sizeof(int)); |
8443 |
+ |
8444 |
+ #define lm_name_equal(name1, name2) \ |
8445 |
+ (((name1)->ln_number == (name2)->ln_number) && \ |
8446 |
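
The gfs2 hunk above packs struct lm_lockname because the lock name is hashed and compared as raw bytes; without __packed the compiler may leave a padding hole whose contents are undefined, so two logically equal names can hash differently. A userspace illustration of the hole being removed on an LP64 target (field names simplified):

    #include <stdio.h>

    struct name_unpacked {
        void *sbd;                  /* 8 bytes on LP64 */
        unsigned long long number;  /* 8 bytes */
        unsigned int type;          /* 4 bytes + 4 bytes trailing padding */
    };

    struct name_packed {
        void *sbd;
        unsigned long long number;
        unsigned int type;
    } __attribute__((packed, aligned(sizeof(int))));

    int main(void)
    {
        /* Hashing the unpacked struct as raw bytes would read the
         * undefined padding; packing removes the hole entirely. */
        printf("unpacked: %zu bytes\n", sizeof(struct name_unpacked));
        printf("packed:   %zu bytes\n", sizeof(struct name_packed));
        return 0;
    }
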
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
8447 |
+index 609840d..1536aeb 100644 |
8448 |
+--- a/fs/nfs/nfs4proc.c |
8449 |
++++ b/fs/nfs/nfs4proc.c |
8450 |
+@@ -7426,11 +7426,11 @@ static void nfs4_exchange_id_release(void *data) |
8451 |
+ struct nfs41_exchange_id_data *cdata = |
8452 |
+ (struct nfs41_exchange_id_data *)data; |
8453 |
+ |
8454 |
+- nfs_put_client(cdata->args.client); |
8455 |
+ if (cdata->xprt) { |
8456 |
+ xprt_put(cdata->xprt); |
8457 |
+ rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); |
8458 |
+ } |
8459 |
++ nfs_put_client(cdata->args.client); |
8460 |
+ kfree(cdata->res.impl_id); |
8461 |
+ kfree(cdata->res.server_scope); |
8462 |
+ kfree(cdata->res.server_owner); |
8463 |
+@@ -7537,10 +7537,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, |
8464 |
+ task_setup_data.callback_data = calldata; |
8465 |
+ |
8466 |
+ task = rpc_run_task(&task_setup_data); |
8467 |
+- if (IS_ERR(task)) { |
8468 |
+- status = PTR_ERR(task); |
8469 |
+- goto out_impl_id; |
8470 |
+- } |
8471 |
++ if (IS_ERR(task)) |
8472 |
++ return PTR_ERR(task); |
8473 |
+ |
8474 |
+ if (!xprt) { |
8475 |
+ status = rpc_wait_for_completion_task(task); |
8476 |
+@@ -7568,6 +7566,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, |
8477 |
+ kfree(calldata->res.server_owner); |
8478 |
+ out_calldata: |
8479 |
+ kfree(calldata); |
8480 |
++ nfs_put_client(clp); |
8481 |
+ goto out; |
8482 |
+ } |
8483 |
+ |
8484 |
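
The first nfs4proc hunk above is a use-after-free fix by reordering: rpc_clnt_xprt_switch_put() still dereferences cdata->args.client, so the client reference must be dropped after the transport cleanup, not before; the later hunks make the early-error paths drop the reference the caller took. A reduced sketch of the ordering rule, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct client { int refs; };

    static void client_put(struct client *c)
    {
        if (--c->refs == 0)
            free(c);                 /* last reference: object dies here */
    }

    static void xprt_cleanup(struct client *c)
    {
        printf("cleaning up transport of client with %d ref(s)\n", c->refs);
    }

    static void release(struct client *c)
    {
        xprt_cleanup(c);             /* still touches 'c'... */
        client_put(c);               /* ...so the put must come last */
    }

    int main(void)
    {
        struct client *c = calloc(1, sizeof(*c));
        if (!c)
            return 1;
        c->refs = 1;
        release(c);
        return 0;
    }
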
+diff --git a/include/linux/log2.h b/include/linux/log2.h |
8485 |
+index fd7ff3d..f38fae2 100644 |
8486 |
+--- a/include/linux/log2.h |
8487 |
++++ b/include/linux/log2.h |
8488 |
+@@ -16,12 +16,6 @@ |
8489 |
+ #include <linux/bitops.h> |
8490 |
+ |
8491 |
+ /* |
8492 |
+- * deal with unrepresentable constant logarithms |
8493 |
+- */ |
8494 |
+-extern __attribute__((const, noreturn)) |
8495 |
+-int ____ilog2_NaN(void); |
8496 |
+- |
8497 |
+-/* |
8498 |
+ * non-constant log of base 2 calculators |
8499 |
+ * - the arch may override these in asm/bitops.h if they can be implemented |
8500 |
+ * more efficiently than using fls() and fls64() |
8501 |
+@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
8502 |
+ #define ilog2(n) \ |
8503 |
+ ( \ |
8504 |
+ __builtin_constant_p(n) ? ( \ |
8505 |
+- (n) < 1 ? ____ilog2_NaN() : \ |
8506 |
++ (n) < 2 ? 0 : \ |
8507 |
+ (n) & (1ULL << 63) ? 63 : \ |
8508 |
+ (n) & (1ULL << 62) ? 62 : \ |
8509 |
+ (n) & (1ULL << 61) ? 61 : \ |
8510 |
+@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
8511 |
+ (n) & (1ULL << 4) ? 4 : \ |
8512 |
+ (n) & (1ULL << 3) ? 3 : \ |
8513 |
+ (n) & (1ULL << 2) ? 2 : \ |
8514 |
+- (n) & (1ULL << 1) ? 1 : \ |
8515 |
+- (n) & (1ULL << 0) ? 0 : \ |
8516 |
+- ____ilog2_NaN() \ |
8517 |
+- ) : \ |
8518 |
++ 1 ) : \ |
8519 |
+ (sizeof(n) <= 4) ? \ |
8520 |
+ __ilog2_u32(n) : \ |
8521 |
+ __ilog2_u64(n) \ |
8522 |
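
The log2.h hunks above drop the link-time trick of referencing an undeclared ____ilog2_NaN() for constant arguments below 1 and instead fold ilog2() of any constant n < 2 to 0 (ilog2(0) is undefined anyway, so any value is acceptable). A simplified userspace stand-in for the constant branch, not the kernel macro itself:

    #include <stdio.h>

    /* n < 2 folds to 0; otherwise take the index of the highest set
     * bit, i.e. floor(log2(n)). */
    #define const_ilog2(n) ((n) < 2 ? 0 : 63 - __builtin_clzll(n))

    int main(void)
    {
        printf("ilog2(1)   = %d\n", const_ilog2(1));    /* 0 */
        printf("ilog2(64)  = %d\n", const_ilog2(64));   /* 6 */
        printf("ilog2(100) = %d\n", const_ilog2(100));  /* 6: floor log2 */
        return 0;
    }
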
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h |
8523 |
+index 4d1c46a..c7b1dc7 100644 |
8524 |
+--- a/include/scsi/libiscsi.h |
8525 |
++++ b/include/scsi/libiscsi.h |
8526 |
+@@ -196,6 +196,7 @@ struct iscsi_conn { |
8527 |
+ struct iscsi_task *task; /* xmit task in progress */ |
8528 |
+ |
8529 |
+ /* xmit */ |
8530 |
++ spinlock_t taskqueuelock; /* protects the next three lists */ |
8531 |
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */ |
8532 |
+ struct list_head cmdqueue; /* data-path cmd queue */ |
8533 |
+ struct list_head requeue; /* tasks needing another run */ |
8534 |
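
The libiscsi hunk above adds one spinlock whose documented job is to cover all three xmit lists, so a task can move between mgmtqueue, cmdqueue and requeue atomically. A userspace sketch of the same discipline, with a pthread mutex standing in for the spinlock and the list type left opaque:

    #include <pthread.h>
    #include <stdio.h>

    struct list;                          /* opaque; shape doesn't matter */

    struct conn {
        pthread_mutex_t taskqueuelock;    /* protects the three lists below */
        struct list *mgmtqueue;
        struct list *cmdqueue;
        struct list *requeue;
    };

    static struct conn c = { .taskqueuelock = PTHREAD_MUTEX_INITIALIZER };

    int main(void)
    {
        pthread_mutex_lock(&c.taskqueuelock);
        /* move a task between queues here; all three share the one lock */
        pthread_mutex_unlock(&c.taskqueuelock);
        printf("ok\n");
        return 0;
    }
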
+diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c |
8535 |
+index 2bd6737..a57242e 100644 |
8536 |
+--- a/kernel/cgroup_pids.c |
8537 |
++++ b/kernel/cgroup_pids.c |
8538 |
+@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task) |
8539 |
+ /* Only log the first time events_limit is incremented. */ |
8540 |
+ if (atomic64_inc_return(&pids->events_limit) == 1) { |
8541 |
+ pr_info("cgroup: fork rejected by pids controller in "); |
8542 |
+- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id)); |
8543 |
++ pr_cont_cgroup_path(css->cgroup); |
8544 |
+ pr_cont("\n"); |
8545 |
+ } |
8546 |
+ cgroup_file_notify(&pids->events_file); |
8547 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
8548 |
+index 4b33231..07c0dc8 100644 |
8549 |
+--- a/kernel/events/core.c |
8550 |
++++ b/kernel/events/core.c |
8551 |
+@@ -10333,6 +10333,17 @@ void perf_event_free_task(struct task_struct *task) |
8552 |
+ continue; |
8553 |
+ |
8554 |
+ mutex_lock(&ctx->mutex); |
8555 |
++ raw_spin_lock_irq(&ctx->lock); |
8556 |
++ /* |
8557 |
++ * Destroy the task <-> ctx relation and mark the context dead. |
8558 |
++ * |
8559 |
++ * This is important because even though the task hasn't been |
8560 |
++ * exposed yet the context has been (through child_list). |
8561 |
++ */ |
8562 |
++ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); |
8563 |
++ WRITE_ONCE(ctx->task, TASK_TOMBSTONE); |
8564 |
++ put_task_struct(task); /* cannot be last */ |
8565 |
++ raw_spin_unlock_irq(&ctx->lock); |
8566 |
+ again: |
8567 |
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, |
8568 |
+ group_entry) |
8569 |
+@@ -10586,7 +10597,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
8570 |
+ ret = inherit_task_group(event, parent, parent_ctx, |
8571 |
+ child, ctxn, &inherited_all); |
8572 |
+ if (ret) |
8573 |
+- break; |
8574 |
++ goto out_unlock; |
8575 |
+ } |
8576 |
+ |
8577 |
+ /* |
8578 |
+@@ -10602,7 +10613,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
8579 |
+ ret = inherit_task_group(event, parent, parent_ctx, |
8580 |
+ child, ctxn, &inherited_all); |
8581 |
+ if (ret) |
8582 |
+- break; |
8583 |
++ goto out_unlock; |
8584 |
+ } |
8585 |
+ |
8586 |
+ raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
8587 |
+@@ -10630,6 +10641,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
8588 |
+ } |
8589 |
+ |
8590 |
+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
8591 |
++out_unlock: |
8592 |
+ mutex_unlock(&parent_ctx->mutex); |
8593 |
+ |
8594 |
+ perf_unpin_context(parent_ctx); |
8595 |
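
In the perf_event_init_context() hunks above, a failed inherit_task_group() used to 'break' out of the loop and then fall through into the success-path cloning code below it; routing errors through the new out_unlock label skips that tail while still releasing the mutex. The perf_event_free_task() hunk likewise severs the task/context link and marks the context dead under ctx->lock before teardown, so concurrent observers see a tombstone rather than a half-freed context. A minimal sketch of the goto idiom, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void success_only_work(void)
    {
        printf("finalizing (must not run on error)\n");
    }

    static int init_context(int fail_at)
    {
        int ret = 0;

        pthread_mutex_lock(&m);
        for (int i = 0; i < 4; i++) {
            if (i == fail_at) {
                ret = -1;
                goto out_unlock;   /* was: break, which fell through below */
            }
        }
        success_only_work();
    out_unlock:
        pthread_mutex_unlock(&m);
        return ret;
    }

    int main(void)
    {
        printf("ret=%d\n", init_context(2));
        return 0;
    }
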
+diff --git a/mm/percpu.c b/mm/percpu.c |
8596 |
+index 2557143..f014ceb 100644 |
8597 |
+--- a/mm/percpu.c |
8598 |
++++ b/mm/percpu.c |
8599 |
+@@ -1010,8 +1010,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, |
8600 |
+ mutex_unlock(&pcpu_alloc_mutex); |
8601 |
+ } |
8602 |
+ |
8603 |
+- if (chunk != pcpu_reserved_chunk) |
8604 |
++ if (chunk != pcpu_reserved_chunk) { |
8605 |
++ spin_lock_irqsave(&pcpu_lock, flags); |
8606 |
+ pcpu_nr_empty_pop_pages -= occ_pages; |
8607 |
++ spin_unlock_irqrestore(&pcpu_lock, flags); |
8608 |
++ } |
8609 |
+ |
8610 |
+ if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) |
8611 |
+ pcpu_schedule_balance_work(); |
8612 |
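
The mm/percpu.c hunk above puts pcpu_lock around the update of pcpu_nr_empty_pop_pages: a bare read-modify-write on a counter shared with the population/balance paths can lose updates. The same rule in a compile-checked userspace form, with a mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long nr_empty_pop_pages;   /* shared with the balance path */

    static void account_alloc(int occ_pages)
    {
        pthread_mutex_lock(&lock);
        nr_empty_pop_pages -= occ_pages;   /* RMW: needs the lock */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        nr_empty_pop_pages = 4;
        account_alloc(1);
        printf("%ld\n", nr_empty_pop_pages);
        return 0;
    }
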
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c |
8613 |
+index e2c37061..69502fa 100644 |
8614 |
+--- a/net/sunrpc/xprtrdma/verbs.c |
8615 |
++++ b/net/sunrpc/xprtrdma/verbs.c |
8616 |
+@@ -486,7 +486,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, |
8617 |
+ struct ib_cq *sendcq, *recvcq; |
8618 |
+ int rc; |
8619 |
+ |
8620 |
+- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); |
8621 |
++ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge, |
8622 |
++ RPCRDMA_MAX_SEND_SGES); |
8623 |
+ if (max_sge < RPCRDMA_MIN_SEND_SGES) { |
8624 |
+ pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); |
8625 |
+ return -ENOMEM; |
8626 |
+diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h |
8627 |
+index 4144666..d5677d3 100644 |
8628 |
+--- a/tools/include/linux/log2.h |
8629 |
++++ b/tools/include/linux/log2.h |
8630 |
+@@ -13,12 +13,6 @@ |
8631 |
+ #define _TOOLS_LINUX_LOG2_H |
8632 |
+ |
8633 |
+ /* |
8634 |
+- * deal with unrepresentable constant logarithms |
8635 |
+- */ |
8636 |
+-extern __attribute__((const, noreturn)) |
8637 |
+-int ____ilog2_NaN(void); |
8638 |
+- |
8639 |
+-/* |
8640 |
+ * non-constant log of base 2 calculators |
8641 |
+ * - the arch may override these in asm/bitops.h if they can be implemented |
8642 |
+ * more efficiently than using fls() and fls64() |
8643 |
+@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
8644 |
+ #define ilog2(n) \ |
8645 |
+ ( \ |
8646 |
+ __builtin_constant_p(n) ? ( \ |
8647 |
+- (n) < 1 ? ____ilog2_NaN() : \ |
8648 |
++ (n) < 2 ? 0 : \ |
8649 |
+ (n) & (1ULL << 63) ? 63 : \ |
8650 |
+ (n) & (1ULL << 62) ? 62 : \ |
8651 |
+ (n) & (1ULL << 61) ? 61 : \ |
8652 |
+@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
8653 |
+ (n) & (1ULL << 4) ? 4 : \ |
8654 |
+ (n) & (1ULL << 3) ? 3 : \ |
8655 |
+ (n) & (1ULL << 2) ? 2 : \ |
8656 |
+- (n) & (1ULL << 1) ? 1 : \ |
8657 |
+- (n) & (1ULL << 0) ? 0 : \ |
8658 |
+- ____ilog2_NaN() \ |
8659 |
+- ) : \ |
8660 |
++ 1 ) : \ |
8661 |
+ (sizeof(n) <= 4) ? \ |
8662 |
+ __ilog2_u32(n) : \ |
8663 |
+ __ilog2_u64(n) \ |
8664 |
|
8665 |
diff --git a/4.9.16/4420_grsecurity-3.1-4.9.16-201703180820.patch b/4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch |
8666 |
similarity index 99% |
8667 |
rename from 4.9.16/4420_grsecurity-3.1-4.9.16-201703180820.patch |
8668 |
rename to 4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch |
8669 |
index 8d585e2..3659b97 100644 |
8670 |
--- a/4.9.16/4420_grsecurity-3.1-4.9.16-201703180820.patch |
8671 |
+++ b/4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch |
8672 |
@@ -419,7 +419,7 @@ index 3d0ae15..84e5412 100644 |
8673 |
cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags) |
8674 |
|
8675 |
diff --git a/Makefile b/Makefile |
8676 |
-index 4e0f962..202756a 100644 |
8677 |
+index c10d0e6..54799eb2 100644 |
8678 |
--- a/Makefile |
8679 |
+++ b/Makefile |
8680 |
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ |
8681 |
@@ -4916,10 +4916,10 @@ index a4ec240..96faf9b 100644 |
8682 |
#ifdef CONFIG_THUMB2_KERNEL |
8683 |
|
8684 |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig |
8685 |
-index 969ef88..305b856 100644 |
8686 |
+index cf57a77..ab33bd2 100644 |
8687 |
--- a/arch/arm64/Kconfig |
8688 |
+++ b/arch/arm64/Kconfig |
8689 |
-@@ -896,6 +896,7 @@ config RELOCATABLE |
8690 |
+@@ -906,6 +906,7 @@ config RELOCATABLE |
8691 |
|
8692 |
config RANDOMIZE_BASE |
8693 |
bool "Randomize the address of the kernel image" |
8694 |
@@ -21174,7 +21174,7 @@ index b28200d..e93e14d 100644 |
8695 |
|
8696 |
while (amd_iommu_v2_event_descs[i].attr.attr.name) |
8697 |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c |
8698 |
-index 7fe88bb..afd1630 100644 |
8699 |
+index 38623e2..7eae820 100644 |
8700 |
--- a/arch/x86/events/core.c |
8701 |
+++ b/arch/x86/events/core.c |
8702 |
@@ -1570,7 +1570,7 @@ static void __init pmu_check_apic(void) |
8703 |
@@ -30027,10 +30027,10 @@ index cdc0dea..ada8a20 100644 |
8704 |
|
8705 |
static void microcode_fini_cpu(int cpu) |
8706 |
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c |
8707 |
-index 8f44c5a..ed71f8c 100644 |
8708 |
+index f228f74..8f3df2a 100644 |
8709 |
--- a/arch/x86/kernel/cpu/mshyperv.c |
8710 |
+++ b/arch/x86/kernel/cpu/mshyperv.c |
8711 |
-@@ -206,7 +206,7 @@ static void __init ms_hyperv_init_platform(void) |
8712 |
+@@ -230,7 +230,7 @@ static void __init ms_hyperv_init_platform(void) |
8713 |
x86_platform.get_nmi_reason = hv_get_nmi_reason; |
8714 |
} |
8715 |
|
8716 |
@@ -30928,10 +30928,10 @@ index 8639bb2..aaa97ae 100644 |
8717 |
/* ALLOC_TRAMP flags lets us know we created it */ |
8718 |
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; |
8719 |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c |
8720 |
-index 54a2372..46504a4 100644 |
8721 |
+index b5785c1..c60cbcf 100644 |
8722 |
--- a/arch/x86/kernel/head64.c |
8723 |
+++ b/arch/x86/kernel/head64.c |
8724 |
-@@ -62,12 +62,12 @@ int __init early_make_pgtable(unsigned long address) |
8725 |
+@@ -63,12 +63,12 @@ int __init early_make_pgtable(unsigned long address) |
8726 |
pgd = *pgd_p; |
8727 |
|
8728 |
/* |
8729 |
@@ -30947,7 +30947,7 @@ index 54a2372..46504a4 100644 |
8730 |
else { |
8731 |
if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { |
8732 |
reset_early_page_tables(); |
8733 |
-@@ -76,13 +76,13 @@ int __init early_make_pgtable(unsigned long address) |
8734 |
+@@ -77,13 +77,13 @@ int __init early_make_pgtable(unsigned long address) |
8735 |
|
8736 |
pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++]; |
8737 |
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); |
8738 |
@@ -30963,7 +30963,7 @@ index 54a2372..46504a4 100644 |
8739 |
else { |
8740 |
if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { |
8741 |
reset_early_page_tables(); |
8742 |
-@@ -91,7 +91,7 @@ int __init early_make_pgtable(unsigned long address) |
8743 |
+@@ -92,7 +92,7 @@ int __init early_make_pgtable(unsigned long address) |
8744 |
|
8745 |
pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++]; |
8746 |
memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); |
8747 |
@@ -30972,7 +30972,7 @@ index 54a2372..46504a4 100644 |
8748 |
} |
8749 |
pmd = (physaddr & PMD_MASK) + early_pmd_flags; |
8750 |
pmd_p[pmd_index(address)] = pmd; |
8751 |
-@@ -155,8 +155,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) |
8752 |
+@@ -156,8 +156,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) |
8753 |
|
8754 |
clear_bss(); |
8755 |
|
8756 |
@@ -35339,7 +35339,7 @@ index bd4e3d4..3e938e3 100644 |
8757 |
#endif |
8758 |
} |
8759 |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c |
8760 |
-index 46b2f41..666b35b 100644 |
8761 |
+index eea88fe..443da46 100644 |
8762 |
--- a/arch/x86/kernel/tsc.c |
8763 |
+++ b/arch/x86/kernel/tsc.c |
8764 |
@@ -24,6 +24,7 @@ |
8765 |
@@ -45469,7 +45469,7 @@ index bcd86e5..fe457ef 100644 |
8766 |
(u8 *) pte, count) < count) { |
8767 |
kfree(pte); |
8768 |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c |
8769 |
-index 0774799..a0012ea 100644 |
8770 |
+index c6fee74..49c7f8f 100644 |
8771 |
--- a/block/scsi_ioctl.c |
8772 |
+++ b/block/scsi_ioctl.c |
8773 |
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p) |
8774 |
@@ -46125,7 +46125,7 @@ index 75f128e..0fbae68 100644 |
8775 |
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj); |
8776 |
if (!bgrt_kobj) |
8777 |
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c |
8778 |
-index bdc67ba..a82756b 100644 |
8779 |
+index 4421f7c..aa32b81 100644 |
8780 |
--- a/drivers/acpi/blacklist.c |
8781 |
+++ b/drivers/acpi/blacklist.c |
8782 |
@@ -47,13 +47,13 @@ struct acpi_blacklist_item { |
8783 |
@@ -49501,7 +49501,7 @@ index 5649234..34b55b7 100644 |
8784 |
|
8785 |
static void resize_console(struct port *port) |
8786 |
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c |
8787 |
-index 3bbd2a5..69b87bb 100644 |
8788 |
+index 2acaa77..1d0128e 100644 |
8789 |
--- a/drivers/clk/bcm/clk-bcm2835.c |
8790 |
+++ b/drivers/clk/bcm/clk-bcm2835.c |
8791 |
@@ -1147,8 +1147,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = { |
8792 |
@@ -49817,7 +49817,7 @@ index 4d3ec92..cf501fc 100644 |
8793 |
ret = cpufreq_register_driver(&dt_cpufreq_driver); |
8794 |
if (ret) |
8795 |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
8796 |
-index 6e6c1fb..ccc5cd2 100644 |
8797 |
+index 272608f..5c4a47a 100644 |
8798 |
--- a/drivers/cpufreq/cpufreq.c |
8799 |
+++ b/drivers/cpufreq/cpufreq.c |
8800 |
@@ -528,12 +528,12 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); |
8801 |
@@ -49835,7 +49835,7 @@ index 6e6c1fb..ccc5cd2 100644 |
8802 |
const char *buf, size_t count) |
8803 |
{ |
8804 |
int ret, enable; |
8805 |
-@@ -2114,7 +2114,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) |
8806 |
+@@ -2116,7 +2116,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) |
8807 |
read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
8808 |
|
8809 |
mutex_lock(&cpufreq_governor_mutex); |
8810 |
@@ -49844,7 +49844,7 @@ index 6e6c1fb..ccc5cd2 100644 |
8811 |
mutex_unlock(&cpufreq_governor_mutex); |
8812 |
return; |
8813 |
} |
8814 |
-@@ -2334,13 +2334,17 @@ int cpufreq_boost_trigger_state(int state) |
8815 |
+@@ -2336,13 +2336,17 @@ int cpufreq_boost_trigger_state(int state) |
8816 |
return 0; |
8817 |
|
8818 |
write_lock_irqsave(&cpufreq_driver_lock, flags); |
8819 |
@@ -49864,7 +49864,7 @@ index 6e6c1fb..ccc5cd2 100644 |
8820 |
write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
8821 |
|
8822 |
pr_err("%s: Cannot %s BOOST\n", |
8823 |
-@@ -2381,7 +2385,9 @@ int cpufreq_enable_boost_support(void) |
8824 |
+@@ -2383,7 +2387,9 @@ int cpufreq_enable_boost_support(void) |
8825 |
if (cpufreq_boost_supported()) |
8826 |
return 0; |
8827 |
|
8828 |
@@ -49875,7 +49875,7 @@ index 6e6c1fb..ccc5cd2 100644 |
8829 |
|
8830 |
/* This will get removed on driver unregister */ |
8831 |
return create_boost_sysfs_file(); |
8832 |
-@@ -2439,8 +2445,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) |
8833 |
+@@ -2441,8 +2447,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) |
8834 |
cpufreq_driver = driver_data; |
8835 |
write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
8836 |
|
8837 |
@@ -54947,10 +54947,10 @@ index 611b6b9..e0faec1 100644 |
8838 |
#endif |
8839 |
|
8840 |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c |
8841 |
-index 8703f56..7e8f99c 100644 |
8842 |
+index 246d1ae..aa305a2 100644 |
8843 |
--- a/drivers/gpu/drm/vc4/vc4_drv.c |
8844 |
+++ b/drivers/gpu/drm/vc4/vc4_drv.c |
8845 |
-@@ -180,6 +180,11 @@ static int compare_dev(struct device *dev, void *data) |
8846 |
+@@ -183,6 +183,11 @@ static int compare_dev(struct device *dev, void *data) |
8847 |
return dev == data; |
8848 |
} |
8849 |
|
8850 |
@@ -54962,7 +54962,7 @@ index 8703f56..7e8f99c 100644 |
8851 |
static void vc4_match_add_drivers(struct device *dev, |
8852 |
struct component_match **match, |
8853 |
struct platform_driver *const *drivers, |
8854 |
-@@ -191,8 +196,7 @@ static void vc4_match_add_drivers(struct device *dev, |
8855 |
+@@ -194,8 +199,7 @@ static void vc4_match_add_drivers(struct device *dev, |
8856 |
struct device_driver *drv = &drivers[i]->driver; |
8857 |
struct device *p = NULL, *d; |
8858 |
|
8859 |
@@ -58729,10 +58729,10 @@ index 6a2df32..dc962f1 100644 |
8860 |
capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */ |
8861 |
capimsg_setu16(skb->data, 16, len); /* Data length */ |
8862 |
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c |
8863 |
-index aecec6d..11e13c5 100644 |
8864 |
+index 7f1c625..2da3ff6 100644 |
8865 |
--- a/drivers/isdn/gigaset/bas-gigaset.c |
8866 |
+++ b/drivers/isdn/gigaset/bas-gigaset.c |
8867 |
-@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf) |
8868 |
+@@ -2568,22 +2568,22 @@ static int gigaset_post_reset(struct usb_interface *intf) |
8869 |
|
8870 |
|
8871 |
static const struct gigaset_ops gigops = { |
8872 |
@@ -62356,7 +62356,7 @@ index 29e2df5..c367325 100644 |
8873 |
"md/raid1:%s: read error corrected " |
8874 |
"(%d sectors at %llu on %s)\n", |
8875 |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
8876 |
-index 39fddda..be1dd54 100644 |
8877 |
+index 55b5e0e..4969510 100644 |
8878 |
--- a/drivers/md/raid10.c |
8879 |
+++ b/drivers/md/raid10.c |
8880 |
@@ -1063,7 +1063,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio) |
8881 |
@@ -62377,7 +62377,7 @@ index 39fddda..be1dd54 100644 |
8882 |
|
8883 |
struct bio *split; |
8884 |
|
8885 |
-@@ -1829,7 +1829,7 @@ static void end_sync_read(struct bio *bio) |
8886 |
+@@ -1847,7 +1847,7 @@ static void end_sync_read(struct bio *bio) |
8887 |
/* The write handler will notice the lack of |
8888 |
* R10BIO_Uptodate and record any errors etc |
8889 |
*/ |
8890 |
@@ -62386,7 +62386,7 @@ index 39fddda..be1dd54 100644 |
8891 |
&conf->mirrors[d].rdev->corrected_errors); |
8892 |
|
8893 |
/* for reconstruct, we always reschedule after a read. |
8894 |
-@@ -1978,7 +1978,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) |
8895 |
+@@ -1996,7 +1996,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) |
8896 |
} |
8897 |
if (j == vcnt) |
8898 |
continue; |
8899 |
@@ -62395,7 +62395,7 @@ index 39fddda..be1dd54 100644 |
8900 |
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) |
8901 |
/* Don't fix anything. */ |
8902 |
continue; |
8903 |
-@@ -2177,7 +2177,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) |
8904 |
+@@ -2195,7 +2195,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) |
8905 |
{ |
8906 |
long cur_time_mon; |
8907 |
unsigned long hours_since_last; |
8908 |
@@ -62404,7 +62404,7 @@ index 39fddda..be1dd54 100644 |
8909 |
|
8910 |
cur_time_mon = ktime_get_seconds(); |
8911 |
|
8912 |
-@@ -2198,9 +2198,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) |
8913 |
+@@ -2216,9 +2216,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) |
8914 |
* overflowing the shift of read_errors by hours_since_last. |
8915 |
*/ |
8916 |
if (hours_since_last >= 8 * sizeof(read_errors)) |
8917 |
@@ -62416,7 +62416,7 @@ index 39fddda..be1dd54 100644 |
8918 |
} |
8919 |
|
8920 |
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, |
8921 |
-@@ -2254,8 +2254,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 |
8922 |
+@@ -2272,8 +2272,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 |
8923 |
return; |
8924 |
|
8925 |
check_decay_read_errors(mddev, rdev); |
8926 |
@@ -62427,7 +62427,7 @@ index 39fddda..be1dd54 100644 |
8927 |
char b[BDEVNAME_SIZE]; |
8928 |
bdevname(rdev->bdev, b); |
8929 |
|
8930 |
-@@ -2263,7 +2263,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 |
8931 |
+@@ -2281,7 +2281,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 |
8932 |
"md/raid10:%s: %s: Raid device exceeded " |
8933 |
"read_error threshold [cur %d:max %d]\n", |
8934 |
mdname(mddev), b, |
8935 |
@@ -62436,7 +62436,7 @@ index 39fddda..be1dd54 100644 |
8936 |
printk(KERN_NOTICE |
8937 |
"md/raid10:%s: %s: Failing raid device\n", |
8938 |
mdname(mddev), b); |
8939 |
-@@ -2420,7 +2420,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 |
8940 |
+@@ -2438,7 +2438,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 |
8941 |
sect + |
8942 |
choose_data_offset(r10_bio, rdev)), |
8943 |
bdevname(rdev->bdev, b)); |
8944 |
@@ -62445,7 +62445,7 @@ index 39fddda..be1dd54 100644 |
8945 |
} |
8946 |
|
8947 |
rdev_dec_pending(rdev, mddev); |
8948 |
-@@ -3191,6 +3191,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, |
8949 |
+@@ -3209,6 +3209,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, |
8950 |
} else { |
8951 |
/* resync. Schedule a read for every block at this virt offset */ |
8952 |
int count = 0; |
8953 |
@@ -62453,7 +62453,7 @@ index 39fddda..be1dd54 100644 |
8954 |
|
8955 |
bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); |
8956 |
|
8957 |
-@@ -3216,7 +3217,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, |
8958 |
+@@ -3234,7 +3235,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, |
8959 |
r10_bio->sector = sector_nr; |
8960 |
set_bit(R10BIO_IsSync, &r10_bio->state); |
8961 |
raid10_find_phys(conf, r10_bio); |
8962 |
@@ -63483,10 +63483,10 @@ index 2cc4d2b..3a559c8 100644 |
8963 |
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; |
8964 |
return pvr2_ioread_set_enabled(fh->rhp,!0); |
8965 |
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c |
8966 |
-index 302e284..93781d6 100644 |
8967 |
+index cde43b6..8412dfc 100644 |
8968 |
--- a/drivers/media/usb/uvc/uvc_driver.c |
8969 |
+++ b/drivers/media/usb/uvc/uvc_driver.c |
8970 |
-@@ -2078,7 +2078,7 @@ static int uvc_reset_resume(struct usb_interface *intf) |
8971 |
+@@ -2184,7 +2184,7 @@ static int uvc_reset_resume(struct usb_interface *intf) |
8972 |
* Module parameters |
8973 |
*/ |
8974 |
|
8975 |
@@ -63495,7 +63495,7 @@ index 302e284..93781d6 100644 |
8976 |
{ |
8977 |
if (uvc_clock_param == CLOCK_MONOTONIC) |
8978 |
return sprintf(buffer, "CLOCK_MONOTONIC"); |
8979 |
-@@ -2086,7 +2086,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp) |
8980 |
+@@ -2192,7 +2192,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp) |
8981 |
return sprintf(buffer, "CLOCK_REALTIME"); |
8982 |
} |
8983 |
|
8984 |
@@ -65417,7 +65417,7 @@ index 22570ea..c462375 100644 |
8985 |
Say Y here if you want to support for Freescale FlexCAN. |
8986 |
|
8987 |
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c |
8988 |
-index 1deb8ff..4e2b0c1 100644 |
8989 |
+index 1deb8ff9..4e2b0c1 100644 |
8990 |
--- a/drivers/net/can/bfin_can.c |
8991 |
+++ b/drivers/net/can/bfin_can.c |
8992 |
@@ -338,7 +338,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev, |
8993 |
@@ -69309,10 +69309,10 @@ index 93dc10b..6598671 100644 |
8994 |
struct net_local *lp = netdev_priv(dev); |
8995 |
struct sk_buff *new_skb; |
8996 |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c |
8997 |
-index 8b4822a..e99c1c4 100644 |
8998 |
+index 3c1f89a..9b9e82d 100644 |
8999 |
--- a/drivers/net/geneve.c |
9000 |
+++ b/drivers/net/geneve.c |
9001 |
-@@ -1467,7 +1467,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) |
9002 |
+@@ -1473,7 +1473,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) |
9003 |
return -EMSGSIZE; |
9004 |
} |
9005 |
|
9006 |
@@ -69321,7 +69321,7 @@ index 8b4822a..e99c1c4 100644 |
9007 |
.kind = "geneve", |
9008 |
.maxtype = IFLA_GENEVE_MAX, |
9009 |
.policy = geneve_policy, |
9010 |
-@@ -1533,7 +1533,7 @@ static int geneve_netdevice_event(struct notifier_block *unused, |
9011 |
+@@ -1539,7 +1539,7 @@ static int geneve_netdevice_event(struct notifier_block *unused, |
9012 |
return NOTIFY_DONE; |
9013 |
} |
9014 |
|
9015 |
@@ -69858,10 +69858,10 @@ index a380649..fd8fe79c 100644 |
9016 |
}; |
9017 |
|
9018 |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
9019 |
-index b31aca8..3853488 100644 |
9020 |
+index a931b73..a07f1cb 100644 |
9021 |
--- a/drivers/net/tun.c |
9022 |
+++ b/drivers/net/tun.c |
9023 |
-@@ -966,7 +966,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) |
9024 |
+@@ -977,7 +977,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) |
9025 |
{ |
9026 |
struct tun_struct *tun = netdev_priv(dev); |
9027 |
|
9028 |
@@ -69870,7 +69870,7 @@ index b31aca8..3853488 100644 |
9029 |
new_hr = NET_SKB_PAD; |
9030 |
|
9031 |
tun->align = new_hr; |
9032 |
-@@ -1550,7 +1550,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[]) |
9033 |
+@@ -1562,7 +1562,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[]) |
9034 |
return -EINVAL; |
9035 |
} |
9036 |
|
9037 |
@@ -69879,7 +69879,7 @@ index b31aca8..3853488 100644 |
9038 |
.kind = DRV_NAME, |
9039 |
.priv_size = sizeof(struct tun_struct), |
9040 |
.setup = tun_setup, |
9041 |
-@@ -1979,7 +1979,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) |
9042 |
+@@ -1991,7 +1991,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) |
9043 |
} |
9044 |
|
9045 |
static long __tun_chr_ioctl(struct file *file, unsigned int cmd, |
9046 |
@@ -69888,7 +69888,7 @@ index b31aca8..3853488 100644 |
9047 |
{ |
9048 |
struct tun_file *tfile = file->private_data; |
9049 |
struct tun_struct *tun; |
9050 |
-@@ -1993,6 +1993,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, |
9051 |
+@@ -2005,6 +2005,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, |
9052 |
int le; |
9053 |
int ret; |
9054 |
|
9055 |
@@ -69898,7 +69898,7 @@ index b31aca8..3853488 100644 |
9056 |
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { |
9057 |
if (copy_from_user(&ifr, argp, ifreq_len)) |
9058 |
return -EFAULT; |
9059 |
-@@ -2508,7 +2511,7 @@ static int tun_device_event(struct notifier_block *unused, |
9060 |
+@@ -2520,7 +2523,7 @@ static int tun_device_event(struct notifier_block *unused, |
9061 |
return NOTIFY_DONE; |
9062 |
} |
9063 |
|
9064 |
@@ -70074,10 +70074,10 @@ index 51fc0c3..6cc1baa 100644 |
9065 |
#define VIRTNET_DRIVER_VERSION "1.0.0" |
9066 |
|
9067 |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c |
9068 |
-index 95cf1d8..b2a00f6 100644 |
9069 |
+index bc744ac..2abf77e 100644 |
9070 |
--- a/drivers/net/vrf.c |
9071 |
+++ b/drivers/net/vrf.c |
9072 |
-@@ -1296,7 +1296,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { |
9073 |
+@@ -1297,7 +1297,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { |
9074 |
[IFLA_VRF_TABLE] = { .type = NLA_U32 }, |
9075 |
}; |
9076 |
|
9077 |
@@ -70086,7 +70086,7 @@ index 95cf1d8..b2a00f6 100644 |
9078 |
.kind = DRV_NAME, |
9079 |
.priv_size = sizeof(struct net_vrf), |
9080 |
|
9081 |
-@@ -1333,7 +1333,7 @@ static int vrf_device_event(struct notifier_block *unused, |
9082 |
+@@ -1334,7 +1334,7 @@ static int vrf_device_event(struct notifier_block *unused, |
9083 |
return NOTIFY_DONE; |
9084 |
} |
9085 |
|
9086 |
@@ -70096,10 +70096,10 @@ index 95cf1d8..b2a00f6 100644 |
9087 |
}; |
9088 |
|
9089 |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
9090 |
-index d4f495b..9b39d92 100644 |
9091 |
+index 3c4c2cf..3cbf47b 100644 |
9092 |
--- a/drivers/net/vxlan.c |
9093 |
+++ b/drivers/net/vxlan.c |
9094 |
-@@ -3195,7 +3195,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev) |
9095 |
+@@ -3196,7 +3196,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev) |
9096 |
return vxlan->net; |
9097 |
} |
9098 |
|
9099 |
@@ -70108,7 +70108,7 @@ index d4f495b..9b39d92 100644 |
9100 |
.kind = "vxlan", |
9101 |
.maxtype = IFLA_VXLAN_MAX, |
9102 |
.policy = vxlan_policy, |
9103 |
-@@ -3279,7 +3279,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused, |
9104 |
+@@ -3280,7 +3280,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused, |
9105 |
return NOTIFY_DONE; |
9106 |
} |
9107 |
|
9108 |
@@ -75437,7 +75437,7 @@ index bcd10c7..c7c18bc 100644 |
9109 |
if (!sysfs_initialized) |
9110 |
return -EACCES; |
9111 |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h |
9112 |
-index 45185621..fd0ac76 100644 |
9113 |
+index a5d37f6..8c7494b 100644 |
9114 |
--- a/drivers/pci/pci.h |
9115 |
+++ b/drivers/pci/pci.h |
9116 |
@@ -116,7 +116,7 @@ struct pci_vpd_ops { |
9117 |
@@ -75449,7 +75449,7 @@ index 45185621..fd0ac76 100644 |
9118 |
struct mutex lock; |
9119 |
unsigned int len; |
9120 |
u16 flag; |
9121 |
-@@ -317,7 +317,7 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) |
9122 |
+@@ -312,7 +312,7 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) |
9123 |
|
9124 |
#endif /* CONFIG_PCI_IOV */ |
9125 |
|
9126 |
@@ -75507,7 +75507,7 @@ index 79327cc..28fde3f 100644 |
9127 |
* Boxes that should not use MSI for PCIe PME signaling. |
9128 |
*/ |
9129 |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
9130 |
-index 300770c..552fc7e 100644 |
9131 |
+index d266d80..ada4895 100644 |
9132 |
--- a/drivers/pci/probe.c |
9133 |
+++ b/drivers/pci/probe.c |
9134 |
@@ -180,7 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, |
9135 |
@@ -79116,7 +79116,7 @@ index a63542b..80692ee 100644 |
9136 |
snprintf(name, sizeof(name), "discovery_trace"); |
9137 |
vport->debug_disc_trc = |
9138 |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
9139 |
-index 734a042..5f4c380 100644 |
9140 |
+index f7e3f27..e77bed0 100644 |
9141 |
--- a/drivers/scsi/lpfc/lpfc_init.c |
9142 |
+++ b/drivers/scsi/lpfc/lpfc_init.c |
9143 |
@@ -11127,7 +11127,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev) |
9144 |
@@ -79128,7 +79128,7 @@ index 734a042..5f4c380 100644 |
9145 |
{ |
9146 |
struct Scsi_Host *shost = pci_get_drvdata(pdev); |
9147 |
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
9148 |
-@@ -11434,8 +11434,10 @@ lpfc_init(void) |
9149 |
+@@ -11435,8 +11435,10 @@ lpfc_init(void) |
9150 |
printk(KERN_ERR "Could not register lpfcmgmt device, " |
9151 |
"misc_register returned with status %d", error); |
9152 |
|
9153 |
@@ -79531,7 +79531,7 @@ index bea819e..fb745e0 100644 |
9154 |
scsi_qla_host_t *vha = pci_get_drvdata(pdev); |
9155 |
struct qla_hw_data *ha = vha->hw; |
9156 |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
9157 |
-index bff9689..8caa187 100644 |
9158 |
+index feab7ea..94d8a9c 100644 |
9159 |
--- a/drivers/scsi/qla2xxx/qla_target.c |
9160 |
+++ b/drivers/scsi/qla2xxx/qla_target.c |
9161 |
@@ -678,7 +678,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) |
9162 |
@@ -79554,7 +79554,7 @@ index bff9689..8caa187 100644 |
9163 |
struct qla_tgt *tgt = container_of(work, struct qla_tgt, |
9164 |
sess_del_work); |
9165 |
struct scsi_qla_host *vha = tgt->vha; |
9166 |
-@@ -5825,7 +5826,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, |
9167 |
+@@ -5831,7 +5832,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, |
9168 |
|
9169 |
retry: |
9170 |
global_resets = |
9171 |
@@ -79563,7 +79563,7 @@ index bff9689..8caa187 100644 |
9172 |
|
9173 |
rc = qla24xx_get_loop_id(vha, s_id, &loop_id); |
9174 |
if (rc != 0) { |
9175 |
-@@ -5864,12 +5865,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, |
9176 |
+@@ -5870,12 +5871,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, |
9177 |
} |
9178 |
|
9179 |
if (global_resets != |
9180 |
@@ -79578,7 +79578,7 @@ index bff9689..8caa187 100644 |
9181 |
qla_tgt->tgt_global_resets_count)); |
9182 |
goto retry; |
9183 |
} |
9184 |
-@@ -6080,8 +6081,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) |
9185 |
+@@ -6086,8 +6087,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) |
9186 |
init_waitqueue_head(&tgt->waitQ); |
9187 |
INIT_LIST_HEAD(&tgt->sess_list); |
9188 |
INIT_LIST_HEAD(&tgt->del_sess_list); |
9189 |
@@ -79588,7 +79588,7 @@ index bff9689..8caa187 100644 |
9190 |
spin_lock_init(&tgt->sess_work_lock); |
9191 |
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); |
9192 |
INIT_LIST_HEAD(&tgt->sess_works_list); |
9193 |
-@@ -6089,7 +6089,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) |
9194 |
+@@ -6095,7 +6095,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) |
9195 |
INIT_LIST_HEAD(&tgt->srr_ctio_list); |
9196 |
INIT_LIST_HEAD(&tgt->srr_imm_list); |
9197 |
INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); |
9198 |
@@ -84940,10 +84940,10 @@ index e8819aa..33d2176 100644 |
9199 |
if (share_irqs) |
9200 |
irqflag = IRQF_SHARED; |
9201 |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c |
9202 |
-index 4d09bd4..5c7839e 100644 |
9203 |
+index 6e3e636..9064253 100644 |
9204 |
--- a/drivers/tty/serial/8250/8250_pci.c |
9205 |
+++ b/drivers/tty/serial/8250/8250_pci.c |
9206 |
-@@ -5582,7 +5582,7 @@ static struct pci_device_id serial_pci_tbl[] = { |
9207 |
+@@ -5588,7 +5588,7 @@ static struct pci_device_id serial_pci_tbl[] = { |
9208 |
}; |
9209 |
|
9210 |
static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, |
9211 |
@@ -107692,7 +107692,7 @@ index cf68100..f96c5c0 100644 |
9212 |
err = ext4_handle_dirty_metadata(handle, NULL, bh); |
9213 |
if (unlikely(err)) |
9214 |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
9215 |
-index afe29ba..6032d48 100644 |
9216 |
+index 5fa9ba1..f4d4551 100644 |
9217 |
--- a/fs/ext4/super.c |
9218 |
+++ b/fs/ext4/super.c |
9219 |
@@ -994,10 +994,12 @@ static void init_once(void *foo) |
9220 |
@@ -148202,7 +148202,7 @@ index 4e2f3de..d50672d 100644 |
9221 |
list_for_each_entry(task, &cset->tasks, cg_list) { |
9222 |
if (count++ > MAX_TASKS_SHOWN_PER_CSS) |
9223 |
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c |
9224 |
-index 2bd6737..9b0ddd4 100644 |
9225 |
+index a57242e..da67bb2 100644 |
9226 |
--- a/kernel/cgroup_pids.c |
9227 |
+++ b/kernel/cgroup_pids.c |
9228 |
@@ -54,7 +54,7 @@ struct pids_cgroup { |
9229 |
@@ -148230,7 +148230,7 @@ index 2bd6737..9b0ddd4 100644 |
9230 |
- if (atomic64_inc_return(&pids->events_limit) == 1) { |
9231 |
+ if (atomic64_inc_return_unchecked(&pids->events_limit) == 1) { |
9232 |
pr_info("cgroup: fork rejected by pids controller in "); |
9233 |
- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id)); |
9234 |
+ pr_cont_cgroup_path(css->cgroup); |
9235 |
pr_cont("\n"); |
9236 |
@@ -310,7 +310,7 @@ static int pids_events_show(struct seq_file *sf, void *v) |
9237 |
{ |
9238 |
@@ -148800,7 +148800,7 @@ index e9fdb52..cfb547d 100644 |
9239 |
new_table.data = &new_value; |
9240 |
ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos); |
9241 |
diff --git a/kernel/events/core.c b/kernel/events/core.c |
9242 |
-index 4b33231..e0edf1b 100644 |
9243 |
+index 07c0dc8..26e0271 100644 |
9244 |
--- a/kernel/events/core.c |
9245 |
+++ b/kernel/events/core.c |
9246 |
@@ -389,8 +389,15 @@ static struct srcu_struct pmus_srcu; |
9247 |
@@ -149740,7 +149740,7 @@ index ba8a015..37d2e1d 100644 |
9248 |
int threads = max_threads; |
9249 |
int min = MIN_THREADS; |
9250 |
diff --git a/kernel/futex.c b/kernel/futex.c |
9251 |
-index 38b68c2..1940ab9 100644 |
9252 |
+index 4c6b6e6..2f72a22 100644 |
9253 |
--- a/kernel/futex.c |
9254 |
+++ b/kernel/futex.c |
9255 |
@@ -210,7 +210,7 @@ struct futex_pi_state { |
9256 |
@@ -149773,7 +149773,7 @@ index 38b68c2..1940ab9 100644 |
9257 |
/* |
9258 |
* The futex address must be "naturally" aligned. |
9259 |
*/ |
9260 |
-@@ -3279,6 +3284,7 @@ static void __init futex_detect_cmpxchg(void) |
9261 |
+@@ -3283,6 +3288,7 @@ static void __init futex_detect_cmpxchg(void) |
9262 |
{ |
9263 |
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
9264 |
u32 curval; |
9265 |
@@ -149781,7 +149781,7 @@ index 38b68c2..1940ab9 100644 |
9266 |
|
9267 |
/* |
9268 |
* This will fail and we want it. Some arch implementations do |
9269 |
-@@ -3290,8 +3296,11 @@ static void __init futex_detect_cmpxchg(void) |
9270 |
+@@ -3294,8 +3300,11 @@ static void __init futex_detect_cmpxchg(void) |
9271 |
* implementation, the non-functional ones will return |
9272 |
* -ENOSYS. |
9273 |
*/ |
9274 |
@@ -160056,7 +160056,7 @@ index 1460e6a..154adc1f 100644 |
9275 |
|
9276 |
#ifdef CONFIG_HIBERNATION |
9277 |
diff --git a/mm/percpu.c b/mm/percpu.c |
9278 |
-index 2557143..19f5eca 100644 |
9279 |
+index f014ceb..9b37d31 100644 |
9280 |
--- a/mm/percpu.c |
9281 |
+++ b/mm/percpu.c |
9282 |
@@ -133,7 +133,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly; |
9283 |
@@ -160321,7 +160321,7 @@ index 9d32e1c..054adce 100644 |
9284 |
return -ENOMEM; |
9285 |
|
9286 |
diff --git a/mm/slab.c b/mm/slab.c |
9287 |
-index bd878f0..d96f2c6 100644 |
9288 |
+index 1f82d16..d9233f3 100644 |
9289 |
--- a/mm/slab.c |
9290 |
+++ b/mm/slab.c |
9291 |
@@ -116,6 +116,7 @@ |
9292 |
@@ -160569,7 +160569,7 @@ index bd878f0..d96f2c6 100644 |
9293 |
#endif /* CONFIG_HARDENED_USERCOPY */ |
9294 |
|
9295 |
diff --git a/mm/slab.h b/mm/slab.h |
9296 |
-index bc05fdc..ffe0dbc 100644 |
9297 |
+index ceb7d70..99ab7d7 100644 |
9298 |
--- a/mm/slab.h |
9299 |
+++ b/mm/slab.h |
9300 |
@@ -21,8 +21,10 @@ struct kmem_cache { |
9301 |
@@ -160653,7 +160653,7 @@ index bc05fdc..ffe0dbc 100644 |
9302 |
if (slab_equal_or_root(cachep, s)) |
9303 |
return cachep; |
9304 |
diff --git a/mm/slab_common.c b/mm/slab_common.c |
9305 |
-index 329b038..52e9e91 100644 |
9306 |
+index 5d2f24f..cb5d8a4 100644 |
9307 |
--- a/mm/slab_common.c |
9308 |
+++ b/mm/slab_common.c |
9309 |
@@ -25,11 +25,35 @@ |
9310 |
@@ -160839,7 +160839,7 @@ index 329b038..52e9e91 100644 |
9311 |
root_cache->ctor, memcg, root_cache); |
9312 |
/* |
9313 |
* If we could not create a memcg cache, do not complain, because |
9314 |
-@@ -718,8 +771,7 @@ void kmem_cache_destroy(struct kmem_cache *s) |
9315 |
+@@ -741,8 +794,7 @@ void kmem_cache_destroy(struct kmem_cache *s) |
9316 |
kasan_cache_destroy(s); |
9317 |
mutex_lock(&slab_mutex); |
9318 |
|
9319 |
@@ -160849,7 +160849,7 @@ index 329b038..52e9e91 100644 |
9320 |
goto out_unlock; |
9321 |
|
9322 |
err = shutdown_memcg_caches(s, &release, &need_rcu_barrier); |
9323 |
-@@ -770,13 +822,15 @@ bool slab_is_available(void) |
9324 |
+@@ -793,13 +845,15 @@ bool slab_is_available(void) |
9325 |
#ifndef CONFIG_SLOB |
9326 |
/* Create a cache during boot when no slab services are available yet */ |
9327 |
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size, |
9328 |
@@ -160866,7 +160866,7 @@ index 329b038..52e9e91 100644 |
9329 |
|
9330 |
slab_init_memcg_params(s); |
9331 |
|
9332 |
-@@ -786,23 +840,29 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz |
9333 |
+@@ -809,23 +863,29 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz |
9334 |
panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n", |
9335 |
name, size, err); |
9336 |
|
9337 |
@@ -160901,7 +160901,7 @@ index 329b038..52e9e91 100644 |
9338 |
struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; |
9339 |
EXPORT_SYMBOL(kmalloc_caches); |
9340 |
|
9341 |
-@@ -811,6 +871,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; |
9342 |
+@@ -834,6 +894,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; |
9343 |
EXPORT_SYMBOL(kmalloc_dma_caches); |
9344 |
#endif |
9345 |
|
9346 |
@@ -160913,7 +160913,7 @@ index 329b038..52e9e91 100644 |
9347 |
/* |
9348 |
* Conversion table for small slabs sizes / 8 to the index in the |
9349 |
* kmalloc array. This is necessary for slabs < 192 since we have non power |
9350 |
-@@ -875,6 +940,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) |
9351 |
+@@ -898,6 +963,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) |
9352 |
return kmalloc_dma_caches[index]; |
9353 |
|
9354 |
#endif |
9355 |
@@ -160927,7 +160927,7 @@ index 329b038..52e9e91 100644 |
9356 |
return kmalloc_caches[index]; |
9357 |
} |
9358 |
|
9359 |
-@@ -952,8 +1024,8 @@ void __init setup_kmalloc_cache_index_table(void) |
9360 |
+@@ -975,8 +1047,8 @@ void __init setup_kmalloc_cache_index_table(void) |
9361 |
|
9362 |
static void __init new_kmalloc_cache(int idx, unsigned long flags) |
9363 |
{ |
9364 |
@@ -160938,7 +160938,7 @@ index 329b038..52e9e91 100644 |
9365 |
} |
9366 |
|
9367 |
/* |
9368 |
-@@ -998,6 +1070,23 @@ void __init create_kmalloc_caches(unsigned long flags) |
9369 |
+@@ -1021,6 +1093,23 @@ void __init create_kmalloc_caches(unsigned long flags) |
9370 |
} |
9371 |
} |
9372 |
#endif |
9373 |
@@ -160962,7 +160962,7 @@ index 329b038..52e9e91 100644 |
9374 |
} |
9375 |
#endif /* !CONFIG_SLOB */ |
9376 |
|
9377 |
-@@ -1013,6 +1102,12 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) |
9378 |
+@@ -1036,6 +1125,12 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) |
9379 |
|
9380 |
flags |= __GFP_COMP; |
9381 |
page = alloc_pages(flags, order); |
9382 |
@@ -160975,7 +160975,7 @@ index 329b038..52e9e91 100644 |
9383 |
ret = page ? page_address(page) : NULL; |
9384 |
kmemleak_alloc(ret, size, 1, flags); |
9385 |
kasan_kmalloc_large(ret, size, flags); |
9386 |
-@@ -1102,6 +1197,9 @@ static void print_slabinfo_header(struct seq_file *m) |
9387 |
+@@ -1125,6 +1220,9 @@ static void print_slabinfo_header(struct seq_file *m) |
9388 |
#ifdef CONFIG_DEBUG_SLAB |
9389 |
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); |
9390 |
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); |
9391 |
@@ -160985,7 +160985,7 @@ index 329b038..52e9e91 100644 |
9392 |
#endif |
9393 |
seq_putc(m, '\n'); |
9394 |
} |
9395 |
-@@ -1231,7 +1329,7 @@ static int __init slab_proc_init(void) |
9396 |
+@@ -1254,7 +1352,7 @@ static int __init slab_proc_init(void) |
9397 |
module_init(slab_proc_init); |
9398 |
#endif /* CONFIG_SLABINFO */ |
9399 |
|
9400 |
@@ -160995,7 +160995,7 @@ index 329b038..52e9e91 100644 |
9401 |
{ |
9402 |
void *ret; |
9403 |
diff --git a/mm/slob.c b/mm/slob.c |
9404 |
-index 5ec1580..eea07f2 100644 |
9405 |
+index eac04d43..73c02ba 100644 |
9406 |
--- a/mm/slob.c |
9407 |
+++ b/mm/slob.c |
9408 |
@@ -67,6 +67,7 @@ |
9409 |
@@ -161456,7 +161456,7 @@ index 5ec1580..eea07f2 100644 |
9410 |
EXPORT_SYMBOL(kmem_cache_free); |
9411 |
|
9412 |
diff --git a/mm/slub.c b/mm/slub.c |
9413 |
-index 7aa0e97..ca3813c 100644 |
9414 |
+index 58c7526..5566ff1 100644 |
9415 |
--- a/mm/slub.c |
9416 |
+++ b/mm/slub.c |
9417 |
@@ -34,6 +34,7 @@ |
9418 |
@@ -161663,7 +161663,7 @@ index 7aa0e97..ca3813c 100644 |
9419 |
page = virt_to_head_page(x); |
9420 |
if (unlikely(!PageSlab(page))) { |
9421 |
BUG_ON(!PageCompound(page)); |
9422 |
-@@ -4135,7 +4201,7 @@ void __init kmem_cache_init(void) |
9423 |
+@@ -4120,7 +4186,7 @@ void __init kmem_cache_init(void) |
9424 |
kmem_cache = &boot_kmem_cache; |
9425 |
|
9426 |
create_boot_cache(kmem_cache_node, "kmem_cache_node", |
9427 |
@@ -161672,7 +161672,7 @@ index 7aa0e97..ca3813c 100644 |
9428 |
|
9429 |
register_hotmemory_notifier(&slab_memory_callback_nb); |
9430 |
|
9431 |
-@@ -4145,7 +4211,7 @@ void __init kmem_cache_init(void) |
9432 |
+@@ -4130,7 +4196,7 @@ void __init kmem_cache_init(void) |
9433 |
create_boot_cache(kmem_cache, "kmem_cache", |
9434 |
offsetof(struct kmem_cache, node) + |
9435 |
nr_node_ids * sizeof(struct kmem_cache_node *), |
9436 |
@@ -161681,7 +161681,7 @@ index 7aa0e97..ca3813c 100644 |
9437 |
|
9438 |
kmem_cache = bootstrap(&boot_kmem_cache); |
9439 |
|
9440 |
-@@ -4184,7 +4250,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align, |
9441 |
+@@ -4169,7 +4235,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align, |
9442 |
|
9443 |
s = find_mergeable(size, align, flags, name, ctor); |
9444 |
if (s) { |
9445 |
@@ -161690,7 +161690,7 @@ index 7aa0e97..ca3813c 100644 |
9446 |
|
9447 |
/* |
9448 |
* Adjust the object sizes so that we clear |
9449 |
-@@ -4200,7 +4266,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align, |
9450 |
+@@ -4185,7 +4251,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align, |
9451 |
} |
9452 |
|
9453 |
if (sysfs_slab_alias(s, name)) { |
9454 |
@@ -161699,7 +161699,7 @@ index 7aa0e97..ca3813c 100644 |
9455 |
s = NULL; |
9456 |
} |
9457 |
} |
9458 |
-@@ -4212,6 +4278,8 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) |
9459 |
+@@ -4197,6 +4263,8 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) |
9460 |
{ |
9461 |
int err; |
9462 |
|
9463 |
@@ -161708,7 +161708,7 @@ index 7aa0e97..ca3813c 100644 |
9464 |
err = kmem_cache_open(s, flags); |
9465 |
if (err) |
9466 |
return err; |
9467 |
-@@ -4280,7 +4348,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
9468 |
+@@ -4265,7 +4333,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
9469 |
} |
9470 |
#endif |
9471 |
|
9472 |
@@ -161717,7 +161717,7 @@ index 7aa0e97..ca3813c 100644 |
9473 |
static int count_inuse(struct page *page) |
9474 |
{ |
9475 |
return page->inuse; |
9476 |
-@@ -4561,7 +4629,11 @@ static int list_locations(struct kmem_cache *s, char *buf, |
9477 |
+@@ -4546,7 +4614,11 @@ static int list_locations(struct kmem_cache *s, char *buf, |
9478 |
len += sprintf(buf + len, "%7ld ", l->count); |
9479 |
|
9480 |
if (l->addr) |
9481 |
@@ -161729,7 +161729,7 @@ index 7aa0e97..ca3813c 100644 |
9482 |
else |
9483 |
len += sprintf(buf + len, "<not-available>"); |
9484 |
|
9485 |
-@@ -4659,12 +4731,12 @@ static void __init resiliency_test(void) |
9486 |
+@@ -4644,12 +4716,12 @@ static void __init resiliency_test(void) |
9487 |
validate_slab_cache(kmalloc_caches[9]); |
9488 |
} |
9489 |
#else |
9490 |
@@ -161744,7 +161744,7 @@ index 7aa0e97..ca3813c 100644 |
9491 |
enum slab_stat_type { |
9492 |
SL_ALL, /* All slabs */ |
9493 |
SL_PARTIAL, /* Only partially allocated slabs */ |
9494 |
-@@ -4901,13 +4973,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf) |
9495 |
+@@ -4886,13 +4958,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf) |
9496 |
{ |
9497 |
if (!s->ctor) |
9498 |
return 0; |
9499 |
@@ -161763,7 +161763,7 @@ index 7aa0e97..ca3813c 100644 |
9500 |
} |
9501 |
SLAB_ATTR_RO(aliases); |
9502 |
|
9503 |
-@@ -4995,6 +5071,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) |
9504 |
+@@ -4980,6 +5056,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) |
9505 |
SLAB_ATTR_RO(cache_dma); |
9506 |
#endif |
9507 |
|
9508 |
@@ -161786,7 +161786,7 @@ index 7aa0e97..ca3813c 100644 |
9509 |
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) |
9510 |
{ |
9511 |
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); |
9512 |
-@@ -5050,7 +5142,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf, |
9513 |
+@@ -5035,7 +5127,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf, |
9514 |
* as well as cause other issues like converting a mergeable |
9515 |
* cache into an umergeable one. |
9516 |
*/ |
9517 |
@@ -161795,7 +161795,7 @@ index 7aa0e97..ca3813c 100644 |
9518 |
return -EINVAL; |
9519 |
|
9520 |
s->flags &= ~SLAB_TRACE; |
9521 |
-@@ -5168,7 +5260,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf) |
9522 |
+@@ -5153,7 +5245,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf) |
9523 |
static ssize_t failslab_store(struct kmem_cache *s, const char *buf, |
9524 |
size_t length) |
9525 |
{ |
9526 |
@@ -161804,7 +161804,7 @@ index 7aa0e97..ca3813c 100644 |
9527 |
return -EINVAL; |
9528 |
|
9529 |
s->flags &= ~SLAB_FAILSLAB; |
9530 |
-@@ -5300,7 +5392,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); |
9531 |
+@@ -5285,7 +5377,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); |
9532 |
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); |
9533 |
#endif |
9534 |
|
9535 |
@@ -161813,7 +161813,7 @@ index 7aa0e97..ca3813c 100644 |
9536 |
&slab_size_attr.attr, |
9537 |
&object_size_attr.attr, |
9538 |
&objs_per_slab_attr.attr, |
9539 |
-@@ -5335,6 +5427,12 @@ static struct attribute *slab_attrs[] = { |
9540 |
+@@ -5320,6 +5412,12 @@ static struct attribute *slab_attrs[] = { |
9541 |
#ifdef CONFIG_ZONE_DMA |
9542 |
&cache_dma_attr.attr, |
9543 |
#endif |
9544 |
@@ -161826,7 +161826,7 @@ index 7aa0e97..ca3813c 100644 |
9545 |
#ifdef CONFIG_NUMA |
9546 |
&remote_node_defrag_ratio_attr.attr, |
9547 |
#endif |
9548 |
-@@ -5578,6 +5676,7 @@ static char *create_unique_id(struct kmem_cache *s) |
9549 |
+@@ -5563,6 +5661,7 @@ static char *create_unique_id(struct kmem_cache *s) |
9550 |
return name; |
9551 |
} |
9552 |
|
9553 |
@@ -161834,7 +161834,7 @@ index 7aa0e97..ca3813c 100644 |
9554 |
static int sysfs_slab_add(struct kmem_cache *s) |
9555 |
{ |
9556 |
int err; |
9557 |
-@@ -5649,6 +5748,7 @@ void sysfs_slab_remove(struct kmem_cache *s) |
9558 |
+@@ -5634,6 +5733,7 @@ void sysfs_slab_remove(struct kmem_cache *s) |
9559 |
kobject_del(&s->kobj); |
9560 |
kobject_put(&s->kobj); |
9561 |
} |
9562 |
@@ -161842,7 +161842,7 @@ index 7aa0e97..ca3813c 100644 |
9563 |
|
9564 |
/* |
9565 |
* Need to buffer aliases during bootup until sysfs becomes |
9566 |
-@@ -5662,6 +5762,7 @@ struct saved_alias { |
9567 |
+@@ -5647,6 +5747,7 @@ struct saved_alias { |
9568 |
|
9569 |
static struct saved_alias *alias_list; |
9570 |
|
9571 |
@@ -161850,7 +161850,7 @@ index 7aa0e97..ca3813c 100644 |
9572 |
static int sysfs_slab_alias(struct kmem_cache *s, const char *name) |
9573 |
{ |
9574 |
struct saved_alias *al; |
9575 |
-@@ -5684,6 +5785,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) |
9576 |
+@@ -5669,6 +5770,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) |
9577 |
alias_list = al; |
9578 |
return 0; |
9579 |
} |
9580 |
@@ -163534,10 +163534,10 @@ index 2f2cb5e..5b9d8c6 100644 |
9581 |
tty_port_close(&dev->port, tty, filp); |
9582 |
} |
9583 |
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
-index 7fbdbae..89ac4a9 100644
+index aa1df1a..0a9f1a9 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
-@@ -980,13 +980,13 @@ static void __net_exit brnf_exit_net(struct net *net)
+@@ -959,13 +959,13 @@ static void __net_exit brnf_exit_net(struct net *net)
brnet->enabled = false;
}

@@ -163912,10 +163912,10 @@ index b7de71f..808387d 100644

return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 60b0a604..920cbea 100644
+index 2e04fd1..723a3c6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2995,7 +2995,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
+@@ -3022,7 +3022,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
out_kfree_skb:
kfree_skb(skb);
out_null:
@@ -163924,7 +163924,7 @@ index 60b0a604..920cbea 100644
return NULL;
}

-@@ -3406,7 +3406,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3433,7 +3433,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
rc = -ENETDOWN;
rcu_read_unlock_bh();

@@ -163933,7 +163933,7 @@ index 60b0a604..920cbea 100644
kfree_skb_list(skb);
return rc;
out:
-@@ -3759,7 +3759,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -3786,7 +3786,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,

local_irq_restore(flags);

@@ -163942,7 +163942,7 @@ index 60b0a604..920cbea 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -3836,7 +3836,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3863,7 +3863,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);

@@ -163951,7 +163951,7 @@ index 60b0a604..920cbea 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);

-@@ -4203,9 +4203,9 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+@@ -4230,9 +4230,9 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
} else {
drop:
if (!deliver_exact)
@@ -163963,7 +163963,7 @@ index 60b0a604..920cbea 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -5192,7 +5192,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -5219,7 +5219,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}

@@ -163972,7 +163972,7 @@ index 60b0a604..920cbea 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -7535,9 +7535,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -7562,9 +7562,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -163985,7 +163985,7 @@ index 60b0a604..920cbea 100644
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
-@@ -8162,7 +8162,7 @@ static void __net_exit netdev_exit(struct net *net)
+@@ -8189,7 +8189,7 @@ static void __net_exit netdev_exit(struct net *net)
kfree(net->dev_index_head);
}

@@ -163994,7 +163994,7 @@ index 60b0a604..920cbea 100644
.init = netdev_init,
.exit = netdev_exit,
};
-@@ -8262,7 +8262,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+@@ -8289,7 +8289,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
rtnl_unlock();
}

@@ -164467,7 +164467,7 @@ index 2696aef..dbd5807 100644
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 1e3e008..f3e4944 100644
+index f0f462c..e5d59e8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1047,7 +1047,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
@@ -165120,7 +165120,7 @@ index cb7176c..afd2c62 100644
return NULL;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index 21514324..fb6543d 100644
+index 971b947..db7beb2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1447,7 +1447,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
@@ -165132,7 +165132,7 @@ index 21514324..fb6543d 100644
#endif
return -EINVAL;
}
-@@ -1657,7 +1657,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
+@@ -1659,7 +1659,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
free_percpu(net->mib.tcp_statistics);
}

@@ -165141,7 +165141,7 @@ index 21514324..fb6543d 100644
.init = ipv4_mib_init_net,
.exit = ipv4_mib_exit_net,
};
-@@ -1698,7 +1698,7 @@ static __net_exit void inet_exit_net(struct net *net)
+@@ -1700,7 +1700,7 @@ static __net_exit void inet_exit_net(struct net *net)
{
}

@@ -165883,7 +165883,7 @@ index ecbe5a7..8ae8a54 100644
.exit = raw_exit_net,
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index d851cae..5769b1a 100644
+index 17e6fbf..fdb89dc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -232,7 +232,7 @@ static const struct seq_operations rt_cache_seq_ops = {
@@ -165931,7 +165931,7 @@ index d851cae..5769b1a 100644
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
-@@ -2777,34 +2777,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
+@@ -2778,34 +2778,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
@@ -165974,7 +165974,7 @@ index d851cae..5769b1a 100644
err_dup:
return -ENOMEM;
}
-@@ -2819,7 +2819,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
+@@ -2820,7 +2820,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
kfree(tbl);
}

@@ -165983,7 +165983,7 @@ index d851cae..5769b1a 100644
.init = sysctl_route_net_init,
.exit = sysctl_route_net_exit,
};
-@@ -2827,14 +2827,14 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+@@ -2828,14 +2828,14 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {

static __net_init int rt_genid_init(struct net *net)
{
@@ -166001,7 +166001,7 @@ index d851cae..5769b1a 100644
.init = rt_genid_init,
};

-@@ -2858,7 +2858,7 @@ static void __net_exit ipv4_inetpeer_exit(struct net *net)
+@@ -2859,7 +2859,7 @@ static void __net_exit ipv4_inetpeer_exit(struct net *net)
kfree(bp);
}

@@ -166010,7 +166010,7 @@ index d851cae..5769b1a 100644
.init = ipv4_inetpeer_init,
.exit = ipv4_inetpeer_exit,
};
-@@ -2872,11 +2872,7 @@ int __init ip_rt_init(void)
+@@ -2873,11 +2873,7 @@ int __init ip_rt_init(void)
int rc = 0;
int cpu;

@@ -166119,7 +166119,7 @@ index 80bc36b..d70d622 100644
.exit = ipv4_sysctl_exit_net,
};
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index c71d49c..306109a 100644
+index ce42ded..9c93e33 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -288,11 +288,13 @@ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -166177,9 +166177,9 @@ index c71d49c..306109a 100644
- if (th->fin)
+ if (th->fin || th->urg || th->psh)
goto discard;
- if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
- return 1;
-@@ -6235,7 +6239,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ /* It is possible that we process SYN packets from backlog,
+ * so we need to make sure to disable BH right there.
+@@ -6241,7 +6245,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
#if IS_ENABLED(CONFIG_IPV6)
ireq->pktopts = NULL;
#endif
@@ -166189,7 +166189,7 @@ index c71d49c..306109a 100644
write_pnet(&ireq->ireq_net, sock_net(sk_listener));
ireq->ireq_family = sk_listener->sk_family;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index 2259114..2d5e8a0 100644
+index 6988566..2b781b3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -87,6 +87,10 @@
@@ -166203,7 +166203,7 @@ index 2259114..2d5e8a0 100644
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
-@@ -1427,6 +1431,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1431,6 +1435,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;

reset:
@@ -166213,7 +166213,7 @@ index 2259114..2d5e8a0 100644
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
-@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+@@ -1641,12 +1648,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
th->dest, &refcounted);
@@ -166236,7 +166236,7 @@ index 2259114..2d5e8a0 100644

if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
-@@ -1732,6 +1746,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
+@@ -1736,6 +1750,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
@@ -166247,7 +166247,7 @@ index 2259114..2d5e8a0 100644
tcp_v4_send_reset(NULL, skb);
}

-@@ -2465,7 +2483,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+@@ -2469,7 +2487,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

@@ -166310,7 +166310,7 @@ index f6c50af..1eb9aa5 100644
cnt += width;
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
-index 3ea1cf8..9faf8d7 100644
+index b1e65b3..fafad47 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,6 +22,10 @@
@@ -166767,7 +166767,7 @@ index 02761c9..530bd3e 100644
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
-index ef54852..56699bb 100644
+index 8c88a37..2885f6f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -98,9 +98,9 @@ static int fib6_new_sernum(struct net *net)
@@ -166847,7 +166847,7 @@ index f6ba452..b04707b 100644
.maxtype = IFLA_IPTUN_MAX,
.policy = ip6_tnl_policy,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
-index c299c1e..b5fd20d 100644
+index 66c2b4b..0610be2 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -166859,7 +166859,7 @@ index c299c1e..b5fd20d 100644

static int vti6_net_id __read_mostly;
struct vti6_net {
-@@ -1030,7 +1030,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+@@ -1034,7 +1034,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
};

@@ -166868,7 +166868,7 @@ index c299c1e..b5fd20d 100644
.kind = "vti6",
.maxtype = IFLA_VTI_MAX,
.policy = vti6_policy,
-@@ -1161,7 +1161,7 @@ static int vti6_device_event(struct notifier_block *unused,
+@@ -1165,7 +1165,7 @@ static int vti6_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}

@@ -166951,7 +166951,7 @@ index 55aacea..482ad2e 100644

case IP6T_SO_GET_ENTRIES:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
-index 9948b5c..95b3e7a 100644
+index 986d4ca..f8a55a5 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -95,12 +95,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
@@ -167249,7 +167249,7 @@ index 69c50e7..ec875fa 100644
struct ctl_table *ipv6_icmp_table;
int err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 6673965..16ec3de 100644
+index b2e61a0..bf47484 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,6 +101,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -167263,7 +167263,7 @@ index 6673965..16ec3de 100644
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
-@@ -1302,6 +1306,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1304,6 +1308,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;

reset:
@@ -167273,7 +167273,7 @@ index 6673965..16ec3de 100644
tcp_v6_send_reset(sk, skb);
discard:
if (opt_skb)
-@@ -1406,12 +1413,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1408,12 +1415,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
th->source, th->dest, inet6_iif(skb),
&refcounted);
@@ -167296,7 +167296,7 @@ index 6673965..16ec3de 100644

if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
-@@ -1501,6 +1516,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1503,6 +1518,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
@@ -168259,7 +168259,7 @@ index 965f7e3..daa74100 100644
}

diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
-index c0f0750..7f2e432 100644
+index ff750bb..6e9865d 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -641,7 +641,7 @@ static struct inet_protosw l2tp_ip_protosw = {
@@ -168695,7 +168695,7 @@ index 06019db..8b752f48 100644
/* defaults per 802.15.4-2011 */
wpan_dev->min_be = 3;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
-index 5b77377..7bd5994 100644
+index 1309e2c..6e543c6 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -873,7 +873,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
@@ -168707,16 +168707,16 @@ index 5b77377..7bd5994 100644
int i;

table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
-@@ -956,7 +956,7 @@ static void mpls_ifdown(struct net_device *dev, int event)
- /* fall through */
+@@ -957,7 +957,7 @@ static void mpls_ifdown(struct net_device *dev, int event)
case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN;
-- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
-+ ACCESS_ONCE_RW(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+ if (event != NETDEV_UNREGISTER)
+- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
++ ACCESS_ONCE_RW(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break;
}
if (event == NETDEV_UNREGISTER)
-@@ -994,7 +994,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
+@@ -995,7 +995,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
nh->nh_flags &= ~nh_flags;
} endfor_nexthops(rt);

@@ -168725,7 +168725,7 @@ index 5b77377..7bd5994 100644
}
}

-@@ -1621,7 +1621,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
+@@ -1622,7 +1622,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
struct net *net = table->data;
int platform_labels = net->mpls.platform_labels;
int ret;
@@ -168734,7 +168734,7 @@ index 5b77377..7bd5994 100644
.procname = table->procname,
.data = &platform_labels,
.maxlen = sizeof(int),
-@@ -1651,7 +1651,7 @@ static const struct ctl_table mpls_table[] = {
+@@ -1652,7 +1652,7 @@ static const struct ctl_table mpls_table[] = {

static int mpls_net_init(struct net *net)
{
@@ -169949,7 +169949,7 @@ index 7eb955e..479c9a6 100644

static int __init ovs_vxlan_tnl_init(void)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 34de326..071ac96 100644
+index f2b04a7..44ba4de 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -278,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
@@ -170004,7 +170004,7 @@ index 34de326..071ac96 100644
spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
-@@ -3867,7 +3867,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3871,7 +3871,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
@@ -170013,7 +170013,7 @@ index 34de326..071ac96 100644
return -EFAULT;
switch (val) {
case TPACKET_V1:
-@@ -3902,9 +3902,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3906,9 +3906,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
return -EINVAL;
@@ -170026,7 +170026,7 @@ index 34de326..071ac96 100644
data = &rstats;
lv = sizeof(rstats);
break;
-@@ -3922,7 +3922,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3926,7 +3926,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
len = lv;
if (put_user(len, optlen))
return -EFAULT;

diff --git a/4.9.16/4425_grsec_remove_EI_PAX.patch b/4.9.18/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 4.9.16/4425_grsec_remove_EI_PAX.patch
rename to 4.9.18/4425_grsec_remove_EI_PAX.patch

diff --git a/4.9.16/4426_default_XATTR_PAX_FLAGS.patch b/4.9.18/4426_default_XATTR_PAX_FLAGS.patch
similarity index 100%
rename from 4.9.16/4426_default_XATTR_PAX_FLAGS.patch
rename to 4.9.18/4426_default_XATTR_PAX_FLAGS.patch

diff --git a/4.9.16/4427_force_XATTR_PAX_tmpfs.patch b/4.9.18/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 4.9.16/4427_force_XATTR_PAX_tmpfs.patch
rename to 4.9.18/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/4.9.16/4430_grsec-remove-localversion-grsec.patch b/4.9.18/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 4.9.16/4430_grsec-remove-localversion-grsec.patch
rename to 4.9.18/4430_grsec-remove-localversion-grsec.patch

diff --git a/4.9.16/4435_grsec-mute-warnings.patch b/4.9.18/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 4.9.16/4435_grsec-mute-warnings.patch
rename to 4.9.18/4435_grsec-mute-warnings.patch

diff --git a/4.9.16/4440_grsec-remove-protected-paths.patch b/4.9.18/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 4.9.16/4440_grsec-remove-protected-paths.patch
rename to 4.9.18/4440_grsec-remove-protected-paths.patch

diff --git a/4.9.16/4450_grsec-kconfig-default-gids.patch b/4.9.18/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 4.9.16/4450_grsec-kconfig-default-gids.patch
rename to 4.9.18/4450_grsec-kconfig-default-gids.patch

diff --git a/4.9.16/4465_selinux-avc_audit-log-curr_ip.patch b/4.9.18/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 4.9.16/4465_selinux-avc_audit-log-curr_ip.patch
rename to 4.9.18/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/4.9.16/4470_disable-compat_vdso.patch b/4.9.18/4470_disable-compat_vdso.patch
similarity index 100%
rename from 4.9.16/4470_disable-compat_vdso.patch
rename to 4.9.18/4470_disable-compat_vdso.patch

diff --git a/4.9.16/4475_emutramp_default_on.patch b/4.9.18/4475_emutramp_default_on.patch
similarity index 100%
rename from 4.9.16/4475_emutramp_default_on.patch
rename to 4.9.18/4475_emutramp_default_on.patch