commit: 4395dd51acf5698749593ea693441291af71e1de
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 10 11:14:46 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 10 11:14:46 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4395dd51

Linux patch 5.2.14

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   12 +
 1013_linux-5.2.14.patch | 3717 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3729 insertions(+)

diff --git a/0000_README b/0000_README
index 374124c..6458e28 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,18 @@ Patch: 1010_linux-5.2.11.patch
From: https://www.kernel.org
Desc: Linux 5.2.11

+Patch: 1011_linux-5.2.12.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.12
+
+Patch: 1012_linux-5.2.13.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.13
+
+Patch: 1013_linux-5.2.14.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.14
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-5.2.14.patch b/1013_linux-5.2.14.patch
new file mode 100644
index 0000000..0c47490
--- /dev/null
+++ b/1013_linux-5.2.14.patch
@@ -0,0 +1,3717 @@
+diff --git a/Makefile b/Makefile
+index 288284de8858..d019994462ba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index f8debf7aeb4c..76e1edf5bf12 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
+
+ /* Find the first usable memory region under bios_start. */
+ for (i = boot_params->e820_entries - 1; i >= 0; i--) {
++ unsigned long new = bios_start;
++
+ entry = &boot_params->e820_table[i];
+
+ /* Skip all entries above bios_start. */
+@@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
+
+ /* Adjust bios_start to the end of the entry if needed. */
+ if (bios_start > entry->addr + entry->size)
+- bios_start = entry->addr + entry->size;
++ new = entry->addr + entry->size;
+
+ /* Keep bios_start page-aligned. */
+- bios_start = round_down(bios_start, PAGE_SIZE);
++ new = round_down(new, PAGE_SIZE);
+
+ /* Skip the entry if it's too small. */
+- if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
++ if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
+ continue;
+
++ /* Protect against underflow. */
++ if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
++ break;
++
++ bios_start = new;
+ break;
+ }
+
+diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
+index b16a6c7da6eb..f497697aa15d 100644
+--- a/arch/x86/include/asm/bootparam_utils.h
++++ b/arch/x86/include/asm/bootparam_utils.h
+@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
+ BOOT_PARAM_PRESERVE(eddbuf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
++ BOOT_PARAM_PRESERVE(secure_boot),
+ BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_table),
+ BOOT_PARAM_PRESERVE(eddbuf),
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 97c3a1c9502e..2f067b443326 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1152,10 +1152,6 @@ void clear_local_APIC(void)
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+ v = apic_read(APIC_LVT1);
+ apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
+- if (!x2apic_enabled()) {
+- v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+- apic_write(APIC_LDR, v);
+- }
+ if (maxlvt >= 4) {
+ v = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index aff1d22223bd..ee25e6ae1a09 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
+ return 0;
+ }
+
++int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
++{
++ struct sk_buff *skb;
++ int err;
++
++ bt_dev_dbg(hdev, "QCA pre shutdown cmd");
++
++ skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
++ NULL, HCI_INIT_TIMEOUT);
++ if (IS_ERR(skb)) {
++ err = PTR_ERR(skb);
++ bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
++ return err;
++ }
++
++ kfree_skb(skb);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
++
+ static void qca_tlv_check_data(struct rome_config *config,
+ const struct firmware *fw)
+ {
+@@ -350,6 +371,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ return err;
+ }
+
++ /* Give the controller some time to get ready to receive the NVM */
++ msleep(10);
++
+ /* Download NVM configuration */
+ config.type = TLV_TYPE_NVM;
+ if (qca_is_wcn399x(soc_type))
+diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
+index e9c999959603..f2a9e576a86c 100644
+--- a/drivers/bluetooth/btqca.h
++++ b/drivers/bluetooth/btqca.h
+@@ -13,6 +13,7 @@
+ #define EDL_PATCH_TLV_REQ_CMD (0x1E)
+ #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
+ #define MAX_SIZE_PER_TLV_SEGMENT (243)
++#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
+
+ #define EDL_CMD_REQ_RES_EVT (0x00)
+ #define EDL_PATCH_VER_RES_EVT (0x19)
+@@ -130,6 +131,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver);
+ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
+ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
++int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
+ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+ {
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+@@ -161,4 +163,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+ {
+ return false;
+ }
++
++static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
++{
++ return -EOPNOTSUPP;
++}
+ #endif
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index f41fb2c02e4f..d88b024eaf56 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1319,6 +1319,9 @@ static int qca_power_off(struct hci_dev *hdev)
+ {
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
++ /* Perform pre shutdown command */
++ qca_send_pre_shutdown_cmd(hdev);
++
+ qca_power_shutdown(hu);
+ return 0;
+ }
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 87b410d6e51d..3a4961dc5831 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
+ return NULL;
+ }
+
++#ifdef CONFIG_OF
++static int of_parse_clkspec(const struct device_node *np, int index,
++ const char *name, struct of_phandle_args *out_args);
++static struct clk_hw *
++of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
++#else
++static inline int of_parse_clkspec(const struct device_node *np, int index,
++ const char *name,
++ struct of_phandle_args *out_args)
++{
++ return -ENOENT;
++}
++static inline struct clk_hw *
++of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
++{
++ return ERR_PTR(-ENOENT);
++}
++#endif
++
+ /**
+ * clk_core_get - Find the clk_core parent of a clk
+ * @core: clk to find parent of
+@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
+ * };
+ *
+ * Returns: -ENOENT when the provider can't be found or the clk doesn't
+- * exist in the provider. -EINVAL when the name can't be found. NULL when the
+- * provider knows about the clk but it isn't provided on this system.
++ * exist in the provider or the name can't be found in the DT node or
++ * in a clkdev lookup. NULL when the provider knows about the clk but it
++ * isn't provided on this system.
+ * A valid clk_core pointer when the clk can be found in the provider.
+ */
+ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+ struct device *dev = core->dev;
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct device_node *np = core->of_node;
++ struct of_phandle_args clkspec;
+
+- if (np && (name || index >= 0))
+- hw = of_clk_get_hw(np, index, name);
+-
+- /*
+- * If the DT search above couldn't find the provider or the provider
+- * didn't know about this clk, fallback to looking up via clkdev based
+- * clk_lookups
+- */
+- if (PTR_ERR(hw) == -ENOENT && name)
++ if (np && (name || index >= 0) &&
++ !of_parse_clkspec(np, index, name, &clkspec)) {
++ hw = of_clk_get_hw_from_clkspec(&clkspec);
++ of_node_put(clkspec.np);
++ } else if (name) {
++ /*
++ * If the DT search above couldn't find the provider fallback to
++ * looking up via clkdev based clk_lookups.
++ */
+ hw = clk_find_hw(dev_id, name);
++ }
+
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
+ parent = ERR_PTR(-EPROBE_DEFER);
+ } else {
+ parent = clk_core_get(core, index);
+- if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
++ if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
+ parent = clk_core_lookup(entry->name);
+ }
+
+@@ -1635,7 +1657,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
+ break;
+
+ /* Fallback to comparing globally unique names */
+- if (!strcmp(parent->name, core->parents[i].name))
++ if (core->parents[i].name &&
++ !strcmp(parent->name, core->parents[i].name))
+ break;
+ }
+
+diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
+index 91db7894125d..65c82d922b05 100644
+--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
++++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
+@@ -14,7 +14,7 @@
+ #include "clk-exynos5-subcmu.h"
+
+ static struct samsung_clk_provider *ctx;
+-static const struct exynos5_subcmu_info *cmu;
++static const struct exynos5_subcmu_info **cmu;
+ static int nr_cmus;
+
+ static void exynos5_subcmu_clk_save(void __iomem *base,
+@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
+ * when OF-core populates all device-tree nodes.
+ */
+ void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
+- const struct exynos5_subcmu_info *_cmu)
++ const struct exynos5_subcmu_info **_cmu)
+ {
+ ctx = _ctx;
+ cmu = _cmu;
+ nr_cmus = _nr_cmus;
+
+ for (; _nr_cmus--; _cmu++) {
+- exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
+- _cmu->nr_gate_clks);
+- exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
+- _cmu->nr_suspend_regs);
++ exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
++ (*_cmu)->nr_gate_clks);
++ exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
++ (*_cmu)->nr_suspend_regs);
+ }
+ }
+
+@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
+ if (of_property_read_string(np, "label", &name) < 0)
+ continue;
+ for (i = 0; i < nr_cmus; i++)
+- if (strcmp(cmu[i].pd_name, name) == 0)
++ if (strcmp(cmu[i]->pd_name, name) == 0)
+ exynos5_clk_register_subcmu(&pdev->dev,
+- &cmu[i], np);
++ cmu[i], np);
+ }
+ return 0;
+ }
+diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
+index 755ee8aaa3de..9ae5356f25aa 100644
+--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
++++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
+@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
+ };
+
+ void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
+- const struct exynos5_subcmu_info *cmu);
++ const struct exynos5_subcmu_info **cmu);
+
+ #endif
+diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
+index f2b896881768..931c70a4da19 100644
+--- a/drivers/clk/samsung/clk-exynos5250.c
++++ b/drivers/clk/samsung/clk-exynos5250.c
+@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
+ .pd_name = "DISP1",
+ };
+
++static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
++ &exynos5250_disp_subcmu,
++};
++
+ static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
+
+ samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
+ ARRAY_SIZE(exynos5250_clk_regs));
+- exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
++ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
++ exynos5250_subcmus);
+
+ samsung_clk_of_add_provider(np, ctx);
+
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index 12d800fd9528..893697e00d2a 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -524,8 +524,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
+ GATE_BUS_TOP, 24, 0, 0),
+ GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
+ GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
+- GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+- SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
+@@ -567,8 +565,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
+
+ static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
+ GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
++ /* Maudio Block */
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
++ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
++ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
+@@ -867,9 +870,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
+ /* GSCL Block */
+ DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
+
+- /* MSCL Block */
+- DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+-
+ /* PSGEN */
+ DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
+ DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
+@@ -994,12 +994,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+ GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
+ GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
+
+- /* Maudio Block */
+- GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+- GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+- GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+- GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+-
+ /* FSYS Block */
+ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
+@@ -1139,17 +1133,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+ GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
+ GATE_IP_GSCL1, 17, 0, 0),
+
+- /* MSCL Block */
+- GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+- GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+- GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+- GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+- GATE_IP_MSCL, 8, 0, 0),
+- GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+- GATE_IP_MSCL, 9, 0, 0),
+- GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+- GATE_IP_MSCL, 10, 0, 0),
+-
+ /* ISP */
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
+ GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
+@@ -1232,32 +1215,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
+ { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
+ };
+
+-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
+- {
+- .div_clks = exynos5x_disp_div_clks,
+- .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
+- .gate_clks = exynos5x_disp_gate_clks,
+- .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
+- .suspend_regs = exynos5x_disp_suspend_regs,
+- .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+- .pd_name = "DISP",
+- }, {
+- .div_clks = exynos5x_gsc_div_clks,
+- .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
+- .gate_clks = exynos5x_gsc_gate_clks,
+- .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+- .suspend_regs = exynos5x_gsc_suspend_regs,
+- .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+- .pd_name = "GSC",
+- }, {
+- .div_clks = exynos5x_mfc_div_clks,
+- .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
+- .gate_clks = exynos5x_mfc_gate_clks,
+- .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+- .suspend_regs = exynos5x_mfc_suspend_regs,
+- .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+- .pd_name = "MFC",
+- },
++static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
++ /* MSCL Block */
++ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
++ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
++ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
++ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
++ GATE_IP_MSCL, 8, 0, 0),
++ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
++ GATE_IP_MSCL, 9, 0, 0),
++ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
++ GATE_IP_MSCL, 10, 0, 0),
++};
++
++static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
++ DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
++};
++
++static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
++ { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
++ { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
++ { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
++};
++
++static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
++ GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
++ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
++ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
++ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
++};
++
++static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
++ { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
++};
++
++static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
++ .div_clks = exynos5x_disp_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
++ .gate_clks = exynos5x_disp_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
++ .suspend_regs = exynos5x_disp_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
++ .pd_name = "DISP",
++};
++
++static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
++ .div_clks = exynos5x_gsc_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
++ .gate_clks = exynos5x_gsc_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
++ .suspend_regs = exynos5x_gsc_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
++ .pd_name = "GSC",
++};
++
++static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
++ .div_clks = exynos5x_mfc_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
++ .gate_clks = exynos5x_mfc_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
++ .suspend_regs = exynos5x_mfc_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
++ .pd_name = "MFC",
++};
++
++static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
++ .div_clks = exynos5x_mscl_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
++ .gate_clks = exynos5x_mscl_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
++ .suspend_regs = exynos5x_mscl_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
++ .pd_name = "MSC",
++};
++
++static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
++ .gate_clks = exynos5800_mau_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
++ .suspend_regs = exynos5800_mau_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
++ .pd_name = "MAU",
++};
++
++static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
++ &exynos5x_disp_subcmu,
++ &exynos5x_gsc_subcmu,
++ &exynos5x_mfc_subcmu,
++ &exynos5x_mscl_subcmu,
++};
++
++static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
++ &exynos5x_disp_subcmu,
++ &exynos5x_gsc_subcmu,
++ &exynos5x_mfc_subcmu,
++ &exynos5x_mscl_subcmu,
++ &exynos5800_mau_subcmu,
+ };
+
+ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
+@@ -1475,11 +1529,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
+ samsung_clk_extended_sleep_init(reg_base,
+ exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
+ exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
+- if (soc == EXYNOS5800)
++
++ if (soc == EXYNOS5800) {
+ samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
+ ARRAY_SIZE(exynos5800_clk_regs));
+-
+- exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+- exynos5x_subcmus);
++
++ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
++ exynos5800_subcmus);
++ } else {
++ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
++ exynos5x_subcmus);
++ }
+
+ samsung_clk_of_add_provider(np, ctx);
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 7f9f75201138..f272b5143997 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1373,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ if (status)
+ goto err_remove_from_list;
+
+- status = gpiochip_irqchip_init_valid_mask(chip);
+- if (status)
+- goto err_remove_from_list;
+-
+ status = gpiochip_alloc_valid_mask(chip);
+ if (status)
+- goto err_remove_irqchip_mask;
+-
+- status = gpiochip_add_irqchip(chip, lock_key, request_key);
+- if (status)
+- goto err_free_gpiochip_mask;
++ goto err_remove_from_list;
+
+ status = of_gpiochip_add(chip);
+ if (status)
+- goto err_remove_chip;
++ goto err_free_gpiochip_mask;
+
+ status = gpiochip_init_valid_mask(chip);
+ if (status)
+@@ -1413,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+
+ machine_gpiochip_add(chip);
+
++ status = gpiochip_irqchip_init_valid_mask(chip);
++ if (status)
++ goto err_remove_acpi_chip;
++
++ status = gpiochip_add_irqchip(chip, lock_key, request_key);
++ if (status)
++ goto err_remove_irqchip_mask;
++
+ /*
+ * By first adding the chardev, and then adding the device,
+ * we get a device node entry in sysfs under
+@@ -1424,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ if (gpiolib_initialized) {
+ status = gpiochip_setup_dev(gdev);
+ if (status)
+- goto err_remove_acpi_chip;
++ goto err_remove_irqchip;
+ }
+ return 0;
+
++err_remove_irqchip:
++ gpiochip_irqchip_remove(chip);
++err_remove_irqchip_mask:
++ gpiochip_irqchip_free_valid_mask(chip);
+ err_remove_acpi_chip:
+ acpi_gpiochip_remove(chip);
+ err_remove_of_chip:
+ gpiochip_free_hogs(chip);
+ of_gpiochip_remove(chip);
+-err_remove_chip:
+- gpiochip_irqchip_remove(chip);
+ err_free_gpiochip_mask:
+ gpiochip_free_valid_mask(chip);
+-err_remove_irqchip_mask:
+- gpiochip_irqchip_free_valid_mask(chip);
+ err_remove_from_list:
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&gdev->list);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fe028561dc0e..bc40d6eabce7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1192,6 +1192,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
++ if (p->post_deps)
++ return -EINVAL;
++
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+@@ -1215,8 +1218,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+
+
+ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+- struct amdgpu_cs_chunk
+- *chunk)
++ struct amdgpu_cs_chunk *chunk)
+ {
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+@@ -1226,6 +1228,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
++ if (p->post_deps)
++ return -EINVAL;
++
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 95fdbd0fbcac..c021d4c8324f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -213,6 +213,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ struct mtk_drm_private *private = drm->dev_private;
+ struct platform_device *pdev;
+ struct device_node *np;
++ struct device *dma_dev;
+ int ret;
+
+ if (!iommu_present(&platform_bus_type))
+@@ -275,7 +276,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ goto err_component_unbind;
+ }
+
+- private->dma_dev = &pdev->dev;
++ dma_dev = &pdev->dev;
++ private->dma_dev = dma_dev;
++
++ /*
++ * Configure the DMA segment size to make sure we get contiguous IOVA
++ * when importing PRIME buffers.
++ */
++ if (!dma_dev->dma_parms) {
++ private->dma_parms_allocated = true;
++ dma_dev->dma_parms =
++ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
++ GFP_KERNEL);
++ }
++ if (!dma_dev->dma_parms) {
++ ret = -ENOMEM;
++ goto err_component_unbind;
++ }
++
++ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
++ if (ret) {
++ dev_err(dma_dev, "Failed to set DMA segment size\n");
++ goto err_unset_dma_parms;
++ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+@@ -285,13 +308,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ drm->irq_enabled = true;
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret < 0)
+- goto err_component_unbind;
++ goto err_unset_dma_parms;
+
+ drm_kms_helper_poll_init(drm);
+ drm_mode_config_reset(drm);
+
+ return 0;
+
++err_unset_dma_parms:
++ if (private->dma_parms_allocated)
++ dma_dev->dma_parms = NULL;
+ err_component_unbind:
+ component_unbind_all(drm->dev, drm);
+ err_config_cleanup:
+@@ -302,9 +328,14 @@ err_config_cleanup:
+
+ static void mtk_drm_kms_deinit(struct drm_device *drm)
+ {
++ struct mtk_drm_private *private = drm->dev_private;
++
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+
++ if (private->dma_parms_allocated)
++ private->dma_dev->dma_parms = NULL;
++
+ component_unbind_all(drm->dev, drm);
+ drm_mode_config_cleanup(drm);
+ }
+@@ -320,6 +351,18 @@ static const struct file_operations mtk_drm_fops = {
+ .compat_ioctl = drm_compat_ioctl,
+ };
+
++/*
++ * We need to override this because the device used to import the memory is
++ * not dev->dev, as drm_gem_prime_import() expects.
++ */
++struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
++ struct dma_buf *dma_buf)
++{
++ struct mtk_drm_private *private = dev->dev_private;
++
++ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
++}
++
+ static struct drm_driver mtk_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+@@ -331,7 +374,7 @@ static struct drm_driver mtk_drm_driver = {
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+- .gem_prime_import = drm_gem_prime_import,
++ .gem_prime_import = mtk_drm_gem_prime_import,
+ .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
+ .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+index 598ff3e70446..e03fea12ff59 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+@@ -51,6 +51,8 @@ struct mtk_drm_private {
+ } commit;
+
+ struct drm_atomic_state *suspend_state;
++
++ bool dma_parms_allocated;
+ };
+
+ extern struct platform_driver mtk_ddp_driver;
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 8bbe3d0cbe5d..8fd44407a0df 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1152,8 +1152,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+- cp2112_gpio_direction_input(gc, d->hwirq);
+-
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1201,6 +1199,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ return PTR_ERR(dev->desc[pin]);
+ }
+
++ ret = cp2112_gpio_direction_input(&dev->gc, pin);
++ if (ret < 0) {
++ dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
++ goto err_desc;
++ }
++
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index 1065692f90e2..5792a104000a 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -24,6 +24,7 @@
+ #define ICL_MOBILE_DEVICE_ID 0x34FC
+ #define SPT_H_DEVICE_ID 0xA135
+ #define CML_LP_DEVICE_ID 0x02FC
++#define EHL_Ax_DEVICE_ID 0x4BB3
+
+ #define REVISION_ID_CHT_A0 0x6
+ #define REVISION_ID_CHT_Ax_SI 0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 17ae49fba920..8cce3cfe28e0 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ {0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 19f1730a4f24..a68d0ccf67a4 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
+ if (ret)
+ goto err;
+
+- cma_configfs_init();
++ ret = cma_configfs_init();
++ if (ret)
++ goto err_ib;
+
+ return 0;
+
++err_ib:
++ ib_unregister_client(&cma_client);
+ err:
+ unregister_netdevice_notifier(&cma_nb);
+ ib_sa_unregister_client(&sa_client);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index 48b04d2f175f..60c8f76aab33 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+ spin_unlock_irqrestore(&cmdq->lock, flags);
+ return -EBUSY;
+ }
++
++ size = req->cmd_size;
++ /* change the cmd_size to the number of 16byte cmdq unit.
++ * req->cmd_size is modified here
++ */
++ bnxt_qplib_set_cmd_slots(req);
++
+ memset(resp, 0, sizeof(*resp));
+ crsqe->resp = (struct creq_qp_event *)resp;
+ crsqe->resp->cookie = req->cookie;
+@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+
+ cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
+ preq = (u8 *)req;
+- size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(cmdq->prod, cmdq);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+index 2138533bb642..dfeadc192e17 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -55,9 +55,7 @@
+ do { \
+ memset(&(req), 0, sizeof((req))); \
+ (req).opcode = CMDQ_BASE_OPCODE_##CMD; \
+- (req).cmd_size = (sizeof((req)) + \
+- BNXT_QPLIB_CMDQE_UNITS - 1) / \
+- BNXT_QPLIB_CMDQE_UNITS; \
++ (req).cmd_size = sizeof((req)); \
+ (req).flags = cpu_to_le16(cmd_flags); \
+ } while (0)
+
+@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
+ BNXT_QPLIB_CMDQE_UNITS);
+ }
+
++/* Set the cmd_size to a factor of CMDQE unit */
++static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
++{
++ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
++ BNXT_QPLIB_CMDQE_UNITS;
++}
++
+ #define MAX_CMDQ_IDX(depth) ((depth) - 1)
+
+ static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
+diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
+index 93613e5def9b..986c12153e62 100644
+--- a/drivers/infiniband/hw/hfi1/fault.c
++++ b/drivers/infiniband/hw/hfi1/fault.c
+@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ if (!data)
+ return -ENOMEM;
+ copy = min(len, datalen - 1);
+- if (copy_from_user(data, buf, copy))
+- return -EFAULT;
++ if (copy_from_user(data, buf, copy)) {
++ ret = -EFAULT;
++ goto free_data;
++ }
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+- return ret;
++ goto free_data;
+ ptr = data;
+ token = ptr;
+ for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
+@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ ret = len;
+
+ debugfs_file_put(file->f_path.dentry);
++free_data:
+ kfree(data);
+ return ret;
+ }
+@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ return -ENOMEM;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+- return ret;
++ goto free_data;
+ bit = find_first_bit(fault->opcodes, bitsize);
+ while (bit < bitsize) {
+ zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
+@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ data[size - 1] = '\n';
+ data[size] = '\0';
+ ret = simple_read_from_buffer(buf, len, pos, data, size);
++free_data:
+ kfree(data);
+ return ret;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 68c951491a08..57079110af9b 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1677,8 +1677,6 @@ tx_err:
+ tx_buf_size, DMA_TO_DEVICE);
+ kfree(tun_qp->tx_ring[i].buf.addr);
+ }
+- kfree(tun_qp->tx_ring);
+- tun_qp->tx_ring = NULL;
+ i = MLX4_NUM_TUNNEL_BUFS;
+ err:
+ while (i > 0) {
+@@ -1687,6 +1685,8 @@ err:
+ rx_buf_size, DMA_FROM_DEVICE);
+ kfree(tun_qp->ring[i].addr);
+ }
++ kfree(tun_qp->tx_ring);
++ tun_qp->tx_ring = NULL;
+ kfree(tun_qp->ring);
+ tun_qp->ring = NULL;
+ return -ENOMEM;
+diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
+index 8e457e50f837..770e36d0c66f 100644
+--- a/drivers/input/serio/hyperv-keyboard.c
++++ b/drivers/input/serio/hyperv-keyboard.c
+@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
+
+ static void hv_kbd_on_channel_callback(void *context)
+ {
++ struct vmpacket_descriptor *desc;
+ struct hv_device *hv_dev = context;
+- void *buffer;
+- int bufferlen = 0x100; /* Start with sensible size */
+ u32 bytes_recvd;
+ u64 req_id;
+- int error;
+
+- buffer = kmalloc(bufferlen, GFP_ATOMIC);
+- if (!buffer)
+- return;
+-
+- while (1) {
+- error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
+- &bytes_recvd, &req_id);
+- switch (error) {
+- case 0:
+- if (bytes_recvd == 0) {
+- kfree(buffer);
+- return;
+- }
+-
+- hv_kbd_handle_received_packet(hv_dev, buffer,
+- bytes_recvd, req_id);
+- break;
++ foreach_vmbus_pkt(desc, hv_dev->channel) {
++ bytes_recvd = desc->len8 * 8;
++ req_id = desc->trans_id;
+
+- case -ENOBUFS:
+- kfree(buffer);
+- /* Handle large packet */
+- bufferlen = bytes_recvd;
+- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+- if (!buffer)
+- return;
+- break;
+- }
++ hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
++ req_id);
+ }
+ }
+
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index 74e4364bc9fb..09113b9ad679 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ if (index == EXT_CSD_SANITIZE_START)
+ cmd.sanitize_busy = true;
+
+- err = mmc_wait_for_cmd(host, &cmd, 0);
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ goto out;
+
1082 |
+diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c |
1083 |
+index 73632b843749..b821c9e1604c 100644 |
1084 |
+--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c |
1085 |
++++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c |
1086 |
+@@ -10,7 +10,7 @@ |
1087 |
+ |
1088 |
+ #include "cavium_ptp.h" |
1089 |
+ |
1090 |
+-#define DRV_NAME "Cavium PTP Driver" |
1091 |
++#define DRV_NAME "cavium_ptp" |
1092 |
+ |
1093 |
+ #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C |
1094 |
+ #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E |
1095 |
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c |
1096 |
+index fcf20a8f92d9..6a823710987d 100644 |
1097 |
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c |
1098 |
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c |
1099 |
+@@ -239,8 +239,10 @@ int octeon_setup_iq(struct octeon_device *oct, |
1100 |
+ } |
1101 |
+ |
1102 |
+ oct->num_iqs++; |
1103 |
+- if (oct->fn_list.enable_io_queues(oct)) |
1104 |
++ if (oct->fn_list.enable_io_queues(oct)) { |
1105 |
++ octeon_delete_instr_queue(oct, iq_no); |
1106 |
+ return 1; |
1107 |
++ } |
1108 |
+ |
1109 |
+ return 0; |
1110 |
+ } |
1111 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c |
1112 |
+index 02959035ed3f..d692251ee252 100644 |
1113 |
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c |
1114 |
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c |
1115 |
+@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, |
1116 |
+ return -ENOMEM; |
1117 |
+ |
1118 |
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); |
1119 |
+- if (err) |
1120 |
++ if (err) { |
1121 |
++ kvfree(t); |
1122 |
+ return err; |
1123 |
++ } |
1124 |
+ |
1125 |
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); |
1126 |
+ kvfree(t); |
1127 |
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c |
1128 |
+index d654c234aaf7..c5be4ebd8437 100644 |
1129 |
+--- a/drivers/net/ethernet/ibm/ibmveth.c |
1130 |
++++ b/drivers/net/ethernet/ibm/ibmveth.c |
1131 |
+@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
1132 |
+ struct net_device *netdev; |
1133 |
+ struct ibmveth_adapter *adapter; |
1134 |
+ unsigned char *mac_addr_p; |
1135 |
+- unsigned int *mcastFilterSize_p; |
1136 |
++ __be32 *mcastFilterSize_p; |
1137 |
+ long ret; |
1138 |
+ unsigned long ret_attr; |
1139 |
+ |
1140 |
+@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
1141 |
+ return -EINVAL; |
1142 |
+ } |
1143 |
+ |
1144 |
+- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, |
1145 |
+- VETH_MCAST_FILTER_SIZE, NULL); |
1146 |
++ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, |
1147 |
++ VETH_MCAST_FILTER_SIZE, |
1148 |
++ NULL); |
1149 |
+ if (!mcastFilterSize_p) { |
1150 |
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " |
1151 |
+ "attribute\n"); |
1152 |
+@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
1153 |
+ |
1154 |
+ adapter->vdev = dev; |
1155 |
+ adapter->netdev = netdev; |
1156 |
+- adapter->mcastFilterSize = *mcastFilterSize_p; |
1157 |
++ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); |
1158 |
+ adapter->pool_config = 0; |
1159 |
+ |
1160 |
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); |
1161 |
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c |
1162 |
+index 3da680073265..cebd20f3128d 100644 |
1163 |
+--- a/drivers/net/ethernet/ibm/ibmvnic.c |
1164 |
++++ b/drivers/net/ethernet/ibm/ibmvnic.c |
1165 |
+@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) |
1166 |
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], |
1167 |
+ (u64)tx_buff->indir_dma, |
1168 |
+ (u64)num_entries); |
1169 |
++ dma_unmap_single(dev, tx_buff->indir_dma, |
1170 |
++ sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); |
1171 |
+ } else { |
1172 |
+ tx_buff->num_entries = num_entries; |
1173 |
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num], |
1174 |
+@@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, |
1175 |
+ union sub_crq *next; |
1176 |
+ int index; |
1177 |
+ int i, j; |
1178 |
+- u8 *first; |
1179 |
+ |
1180 |
+ restart_loop: |
1181 |
+ while (pending_scrq(adapter, scrq)) { |
1182 |
+@@ -2818,14 +2819,6 @@ restart_loop: |
1183 |
+ |
1184 |
+ txbuff->data_dma[j] = 0; |
1185 |
+ } |
1186 |
+- /* if sub_crq was sent indirectly */ |
1187 |
+- first = &txbuff->indir_arr[0].generic.first; |
1188 |
+- if (*first == IBMVNIC_CRQ_CMD) { |
1189 |
+- dma_unmap_single(dev, txbuff->indir_dma, |
1190 |
+- sizeof(txbuff->indir_arr), |
1191 |
+- DMA_TO_DEVICE); |
1192 |
+- *first = 0; |
1193 |
+- } |
1194 |
+ |
1195 |
+ if (txbuff->last_frag) { |
1196 |
+ dev_kfree_skb_any(txbuff->skb); |
1197 |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1198 |
+index 57fd9ee6de66..f7c049559c1a 100644 |
1199 |
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1200 |
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1201 |
+@@ -7893,11 +7893,8 @@ static void ixgbe_service_task(struct work_struct *work) |
1202 |
+ return; |
1203 |
+ } |
1204 |
+ if (ixgbe_check_fw_error(adapter)) { |
1205 |
+- if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1206 |
+- rtnl_lock(); |
1207 |
++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1208 |
+ unregister_netdev(adapter->netdev); |
1209 |
+- rtnl_unlock(); |
1210 |
+- } |
1211 |
+ ixgbe_service_event_complete(adapter); |
1212 |
+ return; |
1213 |
+ } |
1214 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c |
1215 |
+index c1caf14bc334..c7f86453c638 100644 |
1216 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c |
1217 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c |
1218 |
+@@ -80,17 +80,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) |
1219 |
+ if (err) { |
1220 |
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", |
1221 |
+ sq->sqn, err); |
1222 |
+- return err; |
1223 |
++ goto out; |
1224 |
+ } |
1225 |
+ |
1226 |
+ if (state != MLX5_SQC_STATE_ERR) |
1227 |
+- return 0; |
1228 |
++ goto out; |
1229 |
+ |
1230 |
+ mlx5e_tx_disable_queue(sq->txq); |
1231 |
+ |
1232 |
+ err = mlx5e_wait_for_sq_flush(sq); |
1233 |
+ if (err) |
1234 |
+- return err; |
1235 |
++ goto out; |
1236 |
+ |
1237 |
+ /* At this point, no new packets will arrive from the stack as TXQ is |
1238 |
+ * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all |
1239 |
+@@ -99,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) |
1240 |
+ |
1241 |
+ err = mlx5e_sq_to_ready(sq, state); |
1242 |
+ if (err) |
1243 |
+- return err; |
1244 |
++ goto out; |
1245 |
+ |
1246 |
+ mlx5e_reset_txqsq_cc_pc(sq); |
1247 |
+ sq->stats->recover++; |
1248 |
++ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); |
1249 |
+ mlx5e_activate_txqsq(sq); |
1250 |
+ |
1251 |
+ return 0; |
1252 |
++out: |
1253 |
++ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); |
1254 |
++ return err; |
1255 |
+ } |
1256 |
+ |
1257 |
+ static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, |
1258 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c |
1259 |
+index 882d26b8095d..bbdfdaf06391 100644 |
1260 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c |
1261 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1279,7 +1279,6 @@ err_free_txqsq:
+ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+ {
+ sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+- clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index d8b7fba96d58..337b0cbfd153 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ * setup (if available). */
+ status = myri10ge_request_irq(mgp);
+ if (status != 0)
+- goto abort_with_firmware;
++ goto abort_with_slices;
+ myri10ge_free_irq(mgp);
+
+ /* Save configuration space to be restored if the
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index 1fbfeb43c538..f5ebd9403d72 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1280,9 +1280,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+- !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+- nfp_flower_internal_port_can_offload(app, netdev)))
++ if ((f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
++ !nfp_flower_internal_port_can_offload(app, netdev)) ||
++ (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
++ nfp_flower_internal_port_can_offload(app, netdev)))
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+index 8c67505865a4..43faad1893f7 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -329,13 +329,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+
+ flow.daddr = *(__be32 *)n->primary_key;
+
+- /* Only concerned with route changes for representors. */
+- if (!nfp_netdev_is_nfp_repr(n->dev))
+- return NOTIFY_DONE;
+-
+ app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
+ app = app_priv->app;
+
++ if (!nfp_netdev_is_nfp_repr(n->dev) &&
++ !nfp_flower_internal_port_can_offload(app, n->dev))
++ return NOTIFY_DONE;
++
+ /* Only concerned with changes to routes already added to NFP. */
+ if (!nfp_tun_has_route(app, flow.daddr))
+ return NOTIFY_DONE;
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index ef8f08931fe8..6cacd5e893ac 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Renesas Ethernet AVB device driver
+ *
+- * Copyright (C) 2014-2015 Renesas Electronics Corporation
++ * Copyright (C) 2014-2019 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@××××××××××××××.com>
+ *
+@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
++ dev_consume_skb_any(skb);
+ break;
++ } else {
++ dev_kfree_skb_any(skb);
+ }
+ }
+ ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
+@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ }
+ goto unmap;
+ }
+- ts_skb->skb = skb;
++ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
++ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 4644b2aeeba1..e2e469c37a4d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+- if (!ldo) {
+- dev_err(dev, "no regulator found\n");
+- return -1;
+- }
++ if (!ldo)
++ return 0;
+
+ if (enable) {
+ ret = regulator_enable(ldo);
+diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
+index c50a9772f4af..3b5a26b05295 100644
+--- a/drivers/net/ethernet/toshiba/tc35815.c
++++ b/drivers/net/ethernet/toshiba/tc35815.c
+@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_skbs[cur_bd].skb_dma,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
++ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
+ memmove(skb->data, skb->data - NET_IP_ALIGN,
+ pkt_len);
+ data = skb_put(skb, pkt_len);
+diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
+index 78a7de3fb622..c62f474b6d08 100644
+--- a/drivers/net/ethernet/tundra/tsi108_eth.c
++++ b/drivers/net/ethernet/tundra/tsi108_eth.c
+@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+ static void tsi108_stat_carry(struct net_device *dev)
+ {
+ struct tsi108_prv_data *data = netdev_priv(dev);
++ unsigned long flags;
+ u32 carry1, carry2;
+
+- spin_lock_irq(&data->misclock);
++ spin_lock_irqsave(&data->misclock, flags);
+
+ carry1 = TSI_READ(TSI108_STAT_CARRY1);
+ carry2 = TSI_READ(TSI108_STAT_CARRY2);
+@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+- spin_unlock_irq(&data->misclock);
++ spin_unlock_irqrestore(&data->misclock, flags);
+ }
+
+ /* Read a stat counter atomically with respect to carries.
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3544e1991579..e8fce6d715ef 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
++ struct netvsc_device *nvdev;
+ struct netvsc_vf_pcpu_stats vf_tot;
+ int i;
+
++ rcu_read_lock();
++
++ nvdev = rcu_dereference(ndev_ctx->nvdev);
+ if (!nvdev)
+- return;
++ goto out;
+
+ netdev_stats_to_stats64(t, &net->stats);
+
+@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
+ t->rx_packets += packets;
+ t->multicast += multicast;
+ }
++out:
++ rcu_read_unlock();
+ }
+
+ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
+diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
+index 58bb25e4af10..7935593debb1 100644
+--- a/drivers/net/phy/phy-c45.c
++++ b/drivers/net/phy/phy-c45.c
+@@ -523,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL_GPL(genphy_c45_read_status);
+
++/**
++ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
++ * @phydev: target phy_device struct
++ *
++ * Description: If auto-negotiation is enabled, we configure the
++ * advertising, and then restart auto-negotiation. If it is not
++ * enabled, then we force a configuration.
++ */
++int genphy_c45_config_aneg(struct phy_device *phydev)
++{
++ bool changed = false;
++ int ret;
++
++ if (phydev->autoneg == AUTONEG_DISABLE)
++ return genphy_c45_pma_setup_forced(phydev);
++
++ ret = genphy_c45_an_config_aneg(phydev);
++ if (ret < 0)
++ return ret;
++ if (ret > 0)
++ changed = true;
++
++ return genphy_c45_check_and_restart_aneg(phydev, changed);
++}
++EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
++
+ /* The gen10g_* functions are the old Clause 45 stub */
+
+ int gen10g_config_aneg(struct phy_device *phydev)
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index e8885429293a..57b337687782 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -499,7 +499,7 @@ static int phy_config_aneg(struct phy_device *phydev)
+ * allowed to call genphy_config_aneg()
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+- return -EOPNOTSUPP;
++ return genphy_c45_config_aneg(phydev);
+
+ return genphy_config_aneg(phydev);
+ }
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index 5519248a791e..32b08b18e120 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+ }
+ if (!timeout) {
+ dev_err(&udev->dev, "firmware not ready in time\n");
+- return -ETIMEDOUT;
++ ret = -ETIMEDOUT;
++ goto err;
+ }
+
+ /* enable ethernet mode (?) */
+diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
+index d62b6706a537..fc5895f85cee 100644
+--- a/drivers/net/usb/kalmia.c
++++ b/drivers/net/usb/kalmia.c
+@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
+ status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
+ usb_buf, 24);
+ if (status != 0)
+- return status;
++ goto out;
+
+ memcpy(usb_buf, init_msg_2, 12);
+ status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
+ usb_buf, 28);
+ if (status != 0)
+- return status;
++ goto out;
+
+ memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
+-
++out:
+ kfree(usb_buf);
+ return status;
+ }
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 3d92ea6fcc02..f033fee225a1 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
+ ret = register_netdev(netdev);
+ if (ret != 0) {
+ netif_err(dev, probe, netdev, "couldn't register the device\n");
+- goto out3;
++ goto out4;
+ }
+
+ usb_set_intfdata(intf, dev);
+@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
+
+ ret = lan78xx_phy_init(dev);
+ if (ret < 0)
+- goto out4;
++ goto out5;
+
+ return 0;
+
+-out4:
++out5:
+ unregister_netdev(netdev);
++out4:
++ usb_free_urb(dev->urb_intr);
+ out3:
+ lan78xx_unbind(dev, intf);
+ out2:
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index e0dcb681cfe5..1a7b7bd412f9 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3987,8 +3987,7 @@ static int rtl8152_close(struct net_device *netdev)
+ #ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&tp->pm_notifier);
+ #endif
+- if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+- napi_disable(&tp->napi);
++ napi_disable(&tp->napi);
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+@@ -5310,7 +5309,6 @@ static int rtl8152_probe(struct usb_interface *intf,
+ return 0;
+
+ out1:
+- netif_napi_del(&tp->napi);
+ usb_set_intfdata(intf, NULL);
+ out:
+ free_netdev(netdev);
+@@ -5328,7 +5326,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
+ if (udev->state == USB_STATE_NOTATTACHED)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
+- netif_napi_del(&tp->napi);
+ unregister_netdev(tp->netdev);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ tp->rtl_ops.unload(tp);
+diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
+index e9fc168bb734..489cba9b284d 100644
+--- a/drivers/net/wimax/i2400m/fw.c
++++ b/drivers/net/wimax/i2400m/fw.c
+@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
+ }
+ result = i2400m_barker_db_add(barker);
+ if (result < 0)
+- goto error_add;
++ goto error_parse_add;
+ }
+ kfree(options_orig);
+ }
+ return 0;
+
++error_parse_add:
+ error_parse:
++ kfree(options_orig);
+ error_add:
+ kfree(i2400m_barker_db);
+ return result;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 601509b3251a..963b4c6309b9 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2549,6 +2549,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+ goto out_free;
+ }
+
++ if (!(ctrl->ops->flags & NVME_F_FABRICS))
++ ctrl->cntlid = le16_to_cpu(id->cntlid);
++
+ if (!ctrl->identified) {
+ int i;
+
+@@ -2649,7 +2652,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+ goto out_free;
+ }
+ } else {
+- ctrl->cntlid = le16_to_cpu(id->cntlid);
+ ctrl->hmpre = le32_to_cpu(id->hmpre);
+ ctrl->hmmin = le32_to_cpu(id->hmmin);
+ ctrl->hmminds = le32_to_cpu(id->hmminds);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 747c0d4f9ff5..304aa8a65f2f 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -420,6 +420,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ }
+
++ synchronize_srcu(&ns->head->srcu);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 784a2e76a1b0..c5f60f95e8db 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -640,6 +640,7 @@ struct qeth_seqno {
+ struct qeth_reply {
+ struct list_head list;
+ struct completion received;
++ spinlock_t lock;
+ int (*callback)(struct qeth_card *, struct qeth_reply *,
+ unsigned long);
+ u32 seqno;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index b1823d75dd35..6b8f99e7d8a8 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -548,6 +548,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+ if (reply) {
+ refcount_set(&reply->refcnt, 1);
+ init_completion(&reply->received);
++ spin_lock_init(&reply->lock);
+ }
+ return reply;
+ }
+@@ -832,6 +833,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
+
+ if (!reply->callback) {
+ rc = 0;
++ goto no_callback;
++ }
++
++ spin_lock_irqsave(&reply->lock, flags);
++ if (reply->rc) {
++ /* Bail out when the requestor has already left: */
++ rc = reply->rc;
+ } else {
+ if (cmd) {
+ reply->offset = (u16)((char *)cmd - (char *)iob->data);
+@@ -840,7 +848,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
+ rc = reply->callback(card, reply, (unsigned long)iob);
+ }
+ }
++ spin_unlock_irqrestore(&reply->lock, flags);
+
++no_callback:
+ if (rc <= 0)
+ qeth_notify_reply(reply, rc);
+ qeth_put_reply(reply);
+@@ -1880,6 +1890,16 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+
+ qeth_dequeue_reply(card, reply);
++
++ if (reply_cb) {
++ /* Wait until the callback for a late reply has completed: */
++ spin_lock_irq(&reply->lock);
++ if (rc)
++ /* Zap any callback that's still pending: */
++ reply->rc = rc;
++ spin_unlock_irq(&reply->lock);
++ }
++
+ if (!rc)
+ rc = reply->rc;
+ qeth_put_reply(reply);
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index aafcffaa25f7..4604e1bc334c 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -822,6 +822,7 @@ struct lpfc_hba {
+ uint32_t cfg_cq_poll_threshold;
+ uint32_t cfg_cq_max_proc_limit;
+ uint32_t cfg_fcp_cpu_map;
++ uint32_t cfg_fcp_mq_threshold;
+ uint32_t cfg_hdw_queue;
+ uint32_t cfg_irq_chann;
+ uint32_t cfg_suppress_rsp;
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index d4c65e2109e2..353da12d797b 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -5640,6 +5640,19 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
+ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
+ "Embed NVME Command in WQE");
+
++/*
++ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
++ * the driver will advertise it supports to the SCSI layer.
++ *
++ * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
++ * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
++ *
++ * Value range is [0,128]. Default value is 8.
++ */
++LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
++ LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
++ "Set the number of SCSI Queues advertised");
++
+ /*
+ * lpfc_hdw_queue: Set the number of Hardware Queues the driver
+ * will advertise it supports to the NVME and SCSI layers. This also
+@@ -5961,6 +5974,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
+ &dev_attr_lpfc_cq_poll_threshold,
+ &dev_attr_lpfc_cq_max_proc_limit,
+ &dev_attr_lpfc_fcp_cpu_map,
++ &dev_attr_lpfc_fcp_mq_threshold,
+ &dev_attr_lpfc_hdw_queue,
+ &dev_attr_lpfc_irq_chann,
+ &dev_attr_lpfc_suppress_rsp,
+@@ -7042,6 +7056,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
+ /* Initialize first burst. Target vs Initiator are different. */
+ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
+ lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
++ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
+ lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
+ lpfc_irq_chann_init(phba, lpfc_irq_chann);
+ lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index eaaef682de25..2fd8f15f9997 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -4308,10 +4308,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+ shost->max_cmd_len = 16;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+- shost->nr_hw_queues = phba->cfg_hdw_queue;
+- else
+- shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
++ if (!phba->cfg_fcp_mq_threshold ||
++ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
++ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
++
++ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
++ phba->cfg_fcp_mq_threshold);
+
+ shost->dma_boundary =
+ phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 8e4fd1a98023..986594ec40e2 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -44,6 +44,11 @@
+ #define LPFC_HBA_HDWQ_MAX 128
+ #define LPFC_HBA_HDWQ_DEF 0
+
++/* FCP MQ queue count limiting */
++#define LPFC_FCP_MQ_THRESHOLD_MIN 0
++#define LPFC_FCP_MQ_THRESHOLD_MAX 128
++#define LPFC_FCP_MQ_THRESHOLD_DEF 8
++
+ /* Common buffer size to accomidate SCSI and NVME IO buffers */
+ #define LPFC_COMMON_IO_BUF_SZ 768
+
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 8d560c562e9c..6b7b390b2e52 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
+ vha->gnl.ldma);
+
++ vha->gnl.l = NULL;
++
+ vfree(vha->scan.l);
+
+ if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index d056f5e7cf93..794478e5f7ec 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3440,6 +3440,12 @@ skip_dpc:
+ return 0;
+
+ probe_failed:
++ if (base_vha->gnl.l) {
++ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
++ base_vha->gnl.l, base_vha->gnl.ldma);
++ base_vha->gnl.l = NULL;
++ }
++
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+ base_vha->flags.online = 0;
+@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
+ if (!atomic_read(&pdev->enable_cnt)) {
+ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ base_vha->gnl.l, base_vha->gnl.ldma);
+-
++ base_vha->gnl.l = NULL;
+ scsi_host_put(base_vha->host);
+ kfree(ha);
+ pci_set_drvdata(pdev, NULL);
+@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
+ dma_free_coherent(&ha->pdev->dev,
+ base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+
++ base_vha->gnl.l = NULL;
++
+ vfree(base_vha->scan.l);
+
+ if (IS_QLAFX00(ha))
+@@ -4817,6 +4825,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
++ vha->gnl.l = NULL;
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index b43d6385a1a0..95b2371fb67b 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
+ struct se_cmd *se_cmd = cmd->se_cmd;
+ struct tcmu_dev *udev = cmd->tcmu_dev;
+ bool read_len_valid = false;
+- uint32_t read_len = se_cmd->data_length;
++ uint32_t read_len;
+
+ /*
+ * cmd has been completed already from timeout, just reclaim
+ * data area space and free cmd
+ */
+- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
++ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
++ WARN_ON_ONCE(se_cmd);
+ goto out;
++ }
+
+ list_del_init(&cmd->queue_entry);
+
+@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
+ goto done;
+ }
+
++ read_len = se_cmd->data_length;
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+ read_len_valid = true;
+@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
+ */
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ list_del_init(&cmd->queue_entry);
++ cmd->se_cmd = NULL;
+ } else {
+ list_del_init(&cmd->queue_entry);
+ idr_remove(&udev->commands, id);
+@@ -2024,6 +2028,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
+
+ idr_remove(&udev->commands, i);
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
++ WARN_ON(!cmd->se_cmd);
+ list_del_init(&cmd->queue_entry);
+ if (err_level == 1) {
+ /*
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index a2a87117d262..fd5133e26a38 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
+ cell = rcu_dereference_raw(net->ws_cell);
+ if (cell) {
+ afs_get_cell(cell);
++ ret = 0;
+ break;
+ }
+ ret = -EDESTADDRREQ;
+@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
+
+ done_seqretry(&net->cells_lock, seq);
+
++ if (ret != 0 && cell)
++ afs_put_cell(net, cell);
++
+ return ret == 0 ? cell : ERR_PTR(ret);
+ }
+
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 9620f19308f5..9bd5c067d55d 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -960,7 +960,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+ inode ? AFS_FS_I(inode) : NULL);
+ } else {
+ trace_afs_lookup(dvnode, &dentry->d_name,
+- inode ? AFS_FS_I(inode) : NULL);
++ IS_ERR_OR_NULL(inode) ? NULL
++ : AFS_FS_I(inode));
+ }
+ return d;
+ }
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 18722aaeda33..a1baf3f1f14d 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -2155,7 +2155,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+ size = round_up(acl->size, 4);
+- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
++ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(__be32) + size,
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 7754d7679122..622467e47cde 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1305,6 +1305,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ {
+ struct ceph_inode_info *ci = cap->ci;
+ struct inode *inode = &ci->vfs_inode;
++ struct ceph_buffer *old_blob = NULL;
+ struct cap_msg_args arg;
+ int held, revoking;
+ int wake = 0;
+@@ -1369,7 +1370,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ ci->i_requested_max_size = arg.max_size;
+
+ if (flushing & CEPH_CAP_XATTR_EXCL) {
+- __ceph_build_xattrs_blob(ci);
++ old_blob = __ceph_build_xattrs_blob(ci);
+ arg.xattr_version = ci->i_xattrs.version;
+ arg.xattr_buf = ci->i_xattrs.blob;
+ } else {
+@@ -1404,6 +1405,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+
+ spin_unlock(&ci->i_ceph_lock);
+
++ ceph_buffer_put(old_blob);
++
+ ret = send_cap_msg(&arg);
+ if (ret < 0) {
+ dout("error sending cap msg, must requeue %p\n", inode);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 3c7a32779574..ca3821b0309f 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -743,6 +743,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ int issued, new_issued, info_caps;
+ struct timespec64 mtime, atime, ctime;
+ struct ceph_buffer *xattr_blob = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ struct ceph_string *pool_ns = NULL;
+ struct ceph_cap *new_cap = NULL;
+ int err = 0;
+@@ -883,7 +884,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
+ le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
+ if (ci->i_xattrs.blob)
+- ceph_buffer_put(ci->i_xattrs.blob);
++ old_blob = ci->i_xattrs.blob;
+ ci->i_xattrs.blob = xattr_blob;
+ if (xattr_blob)
+ memcpy(ci->i_xattrs.blob->vec.iov_base,
+@@ -1023,8 +1024,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ out:
+ if (new_cap)
+ ceph_put_cap(mdsc, new_cap);
+- if (xattr_blob)
+- ceph_buffer_put(xattr_blob);
++ ceph_buffer_put(old_blob);
++ ceph_buffer_put(xattr_blob);
+ ceph_put_string(pool_ns);
+ return err;
+ }
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 72c6c022f02b..213bc1475e91 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -464,6 +464,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap_snap *capsnap;
+ struct ceph_snap_context *old_snapc, *new_snapc;
++ struct ceph_buffer *old_blob = NULL;
+ int used, dirty;
+
+ capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
+@@ -540,7 +541,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ capsnap->gid = inode->i_gid;
+
+ if (dirty & CEPH_CAP_XATTR_EXCL) {
+- __ceph_build_xattrs_blob(ci);
++ old_blob = __ceph_build_xattrs_blob(ci);
+ capsnap->xattr_blob =
+ ceph_buffer_get(ci->i_xattrs.blob);
+ capsnap->xattr_version = ci->i_xattrs.version;
+@@ -583,6 +584,7 @@ update_snapc:
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
++ ceph_buffer_put(old_blob);
+ kfree(capsnap);
+ ceph_put_snap_context(old_snapc);
+ }
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 1d313d0536f9..38b42d7594b6 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -924,7 +924,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
+ int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
+ ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
+ extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
+-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
++extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+ extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
+ extern void __init ceph_xattr_init(void);
+ extern void ceph_xattr_exit(void);
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 0619adbcbe14..9772db01720b 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -752,12 +752,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
+
+ /*
+ * If there are dirty xattrs, reencode xattrs into the prealloc_blob
+- * and swap into place.
++ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
++ * that it can be freed by the caller as the i_ceph_lock is likely to be
++ * held.
+ */
+-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
++struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+ {
+ struct rb_node *p;
+ struct ceph_inode_xattr *xattr = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ void *dest;
+
+ dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
+@@ -788,12 +791,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+ dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
+
+ if (ci->i_xattrs.blob)
+- ceph_buffer_put(ci->i_xattrs.blob);
++ old_blob = ci->i_xattrs.blob;
+ ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = NULL;
+ ci->i_xattrs.dirty = false;
+ ci->i_xattrs.version++;
+ }
++
++ return old_blob;
+ }
+
+ static inline int __get_request_mask(struct inode *in) {
+@@ -1028,6 +1033,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ int issued;
+ int err;
+ int dirty = 0;
+@@ -1101,13 +1107,15 @@ retry:
+ struct ceph_buffer *blob;
+
+ spin_unlock(&ci->i_ceph_lock);
+- dout(" preaallocating new blob size=%d\n", required_blob_size);
++ ceph_buffer_put(old_blob); /* Shouldn't be required */
++ dout(" pre-allocating new blob size=%d\n", required_blob_size);
+ blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
+ if (!blob)
+ goto do_sync_unlocked;
+ spin_lock(&ci->i_ceph_lock);
++ /* prealloc_blob can't be released while holding i_ceph_lock */
+ if (ci->i_xattrs.prealloc_blob)
+- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
++ old_blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = blob;
+ goto retry;
+ }
+@@ -1123,6 +1131,7 @@ retry:
+ }
+
+ spin_unlock(&ci->i_ceph_lock);
++ ceph_buffer_put(old_blob);
+ if (lock_snap_rwsem)
+ up_read(&mdsc->snap_rwsem);
+ if (dirty)
+diff --git a/fs/read_write.c b/fs/read_write.c
+index c543d965e288..e8b0f1192a3a 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1776,10 +1776,7 @@ static int generic_remap_check_len(struct inode *inode_in,
+ return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
+ }
+
+-/*
+- * Read a page's worth of file data into the page cache. Return the page
+- * locked.
+- */
++/* Read a page's worth of file data into the page cache. */
+ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+ {
+ struct page *page;
+@@ -1791,10 +1788,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+ put_page(page);
+ return ERR_PTR(-EIO);
+ }
+- lock_page(page);
+ return page;
+ }
+
++/*
++ * Lock two pages, ensuring that we lock in offset order if the pages are from
++ * the same file.
++ */
++static void vfs_lock_two_pages(struct page *page1, struct page *page2)
++{
++ /* Always lock in order of increasing index. */
++ if (page1->index > page2->index)
++ swap(page1, page2);
++
++ lock_page(page1);
++ if (page1 != page2)
++ lock_page(page2);
++}
++
++/* Unlock two pages, being careful not to unlock the same page twice. */
++static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
++{
++ unlock_page(page1);
++ if (page1 != page2)
++ unlock_page(page2);
++}
++
+ /*
+ * Compare extents of two files to see if they are the same.
+ * Caller must have locked both inodes to prevent write races.
+@@ -1832,10 +1851,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ dest_page = vfs_dedupe_get_page(dest, destoff);
+ if (IS_ERR(dest_page)) {
+ error = PTR_ERR(dest_page);
+- unlock_page(src_page);
+ put_page(src_page);
+ goto out_error;
+ }
++
++ vfs_lock_two_pages(src_page, dest_page);
++
++ /*
++ * Now that we've locked both pages, make sure they're still
++ * mapped to the file data we're interested in. If not,
++ * someone is invalidating pages on us and we lose.
++ */
++ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
++ src_page->mapping != src->i_mapping ||
++ dest_page->mapping != dest->i_mapping) {
++ same = false;
++ goto unlock;
++ }
++
+ src_addr = kmap_atomic(src_page);
+ dest_addr = kmap_atomic(dest_page);
+
+@@ -1847,8 +1880,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+
+ kunmap_atomic(dest_addr);
+ kunmap_atomic(src_addr);
+- unlock_page(dest_page);
+- unlock_page(src_page);
++unlock:
++ vfs_unlock_two_pages(src_page, dest_page);
+ put_page(dest_page);
+ put_page(src_page);
+
+diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
+index 5e58bb29b1a3..11cdc7c60480 100644
+--- a/include/linux/ceph/buffer.h
++++ b/include/linux/ceph/buffer.h
+@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
+
+ static inline void ceph_buffer_put(struct ceph_buffer *b)
+ {
+- kref_put(&b->kref, ceph_buffer_release);
++ if (b)
++ kref_put(&b->kref, ceph_buffer_release);
+ }
+
+ extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
+diff --git a/include/linux/gpio.h b/include/linux/gpio.h
+index 39745b8bdd65..b3115d1a7d49 100644
+--- a/include/linux/gpio.h
++++ b/include/linux/gpio.h
+@@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq)
+ return -EINVAL;
+ }
+
+-static inline int
+-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+- unsigned int gpio_offset, unsigned int pin_offset,
+- unsigned int npins)
+-{
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-
+-static inline int
+-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+- struct pinctrl_dev *pctldev,
+- unsigned int gpio_offset, const char *pin_group)
+-{
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-
+-static inline void
+-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+-{
+- WARN_ON(1);
+-}
+-
+ static inline int devm_gpio_request(struct device *dev, unsigned gpio,
+ const char *label)
+ {
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 6424586fe2d6..7c5a9fb9c9f4 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1108,6 +1108,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+ int genphy_c45_read_mdix(struct phy_device *phydev);
+ int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+ int genphy_c45_read_status(struct phy_device *phydev);
++int genphy_c45_config_aneg(struct phy_device *phydev);
+
+ /* The gen10g_* functions are the old Clause 45 stub */
+ int gen10g_config_aneg(struct phy_device *phydev);
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index c61a1bf4e3de..3a1a72990fce 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -15,6 +15,7 @@
+ struct tcf_idrinfo {
+ struct mutex lock;
+ struct idr action_idr;
++ struct net *net;
+ };
+
+ struct tc_action_ops;
+@@ -108,7 +109,7 @@ struct tc_action_net {
+ };
+
+ static inline
+-int tc_action_net_init(struct tc_action_net *tn,
++int tc_action_net_init(struct net *net, struct tc_action_net *tn,
+ const struct tc_action_ops *ops)
+ {
+ int err = 0;
+@@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn,
+ if (!tn->idrinfo)
+ return -ENOMEM;
+ tn->ops = ops;
++ tn->idrinfo->net = net;
+ mutex_init(&tn->idrinfo->lock);
+ idr_init(&tn->idrinfo->action_idr);
+ return err;
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 5b8624ae4a27..930d062940b7 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -419,8 +419,7 @@ struct nft_set {
+ unsigned char *udata;
+ /* runtime data below here */
+ const struct nft_set_ops *ops ____cacheline_aligned;
+- u16 flags:13,
+- bound:1,
++ u16 flags:14,
+ genmask:2;
+ u8 klen;
+ u8 dlen;
+@@ -1333,12 +1332,15 @@ struct nft_trans_rule {
+ struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
++ bool bound;
+ };
+
+ #define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+ #define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
++#define nft_trans_set_bound(trans) \
++ (((struct nft_trans_set *)trans->data)->bound)
+
+ struct nft_trans_chain {
+ bool update;
+@@ -1369,12 +1371,15 @@ struct nft_trans_table {
+ struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
++ bool bound;
+ };
+
+ #define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+ #define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
++#define nft_trans_elem_set_bound(trans) \
++ (((struct nft_trans_elem *)trans->data)->bound)
+
+ struct nft_trans_obj {
+ struct nft_object *obj;
+diff --git a/include/net/psample.h b/include/net/psample.h
+index 37a4df2325b2..6b578ce69cd8 100644
+--- a/include/net/psample.h
++++ b/include/net/psample.h
+@@ -11,6 +11,7 @@ struct psample_group {
+ u32 group_num;
+ u32 refcount;
+ u32 seq;
++ struct rcu_head rcu;
+ };
+
+ struct psample_group *psample_group_get(struct net *net, u32 group_num);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 445337c107e0..2504c269e658 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+ */
+ static void do_optimize_kprobes(void)
+ {
++ lockdep_assert_held(&text_mutex);
+ /*
+ * The optimization/unoptimization refers online_cpus via
+ * stop_machine() and cpu-hotplug modifies online_cpus.
+@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
+ list_empty(&optimizing_list))
+ return;
+
+- mutex_lock(&text_mutex);
+ arch_optimize_kprobes(&optimizing_list);
+- mutex_unlock(&text_mutex);
+ }
+
+ /*
+@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
+ {
+ struct optimized_kprobe *op, *tmp;
+
++ lockdep_assert_held(&text_mutex);
+ /* See comment in do_optimize_kprobes() */
+ lockdep_assert_cpus_held();
+
+@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
+ if (list_empty(&unoptimizing_list))
+ return;
+
+- mutex_lock(&text_mutex);
+ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ /* Loop free_list for disarming */
+ list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
+ } else
+ list_del_init(&op->list);
+ }
+- mutex_unlock(&text_mutex);
+ }
+
+ /* Reclaim all kprobes on the free_list */
+@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ {
+ mutex_lock(&kprobe_mutex);
+ cpus_read_lock();
++ mutex_lock(&text_mutex);
+ /* Lock modules while optimizing kprobes */
+ mutex_lock(&module_mutex);
+
+@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ do_free_cleaned_kprobes();
+
+ mutex_unlock(&module_mutex);
++ mutex_unlock(&text_mutex);
+ cpus_read_unlock();
+ mutex_unlock(&kprobe_mutex);
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4d5962232a55..42bc2986520d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3469,7 +3469,7 @@ void __noreturn do_task_dead(void)
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+
+ /*
+@@ -3485,6 +3485,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ preempt_enable_no_resched();
+ }
+
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index ec54e236e345..50fe9dfb088b 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -1653,7 +1653,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
+
+ while (bucket_tmp < hash->size) {
+ if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
+- *bucket, &idx_tmp))
++ bucket_tmp, &idx_tmp))
+ break;
+
+ bucket_tmp++;
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index dd8b1a460d64..cb36d01ea0dd 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
+ txq = netdev_get_tx_queue(dev, q_index);
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (netif_xmit_frozen_or_stopped(txq) ||
+- netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
++ !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
+ skb_queue_head(&npinfo->txq, skb);
+ HARD_TX_UNLOCK(dev, txq);
+ local_irq_restore(flags);
+@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+
+ HARD_TX_UNLOCK(dev, txq);
+
+- if (status == NETDEV_TX_OK)
++ if (dev_xmit_complete(status))
+ break;
+
+ }
+@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+
+ }
+
+- if (status != NETDEV_TX_OK) {
++ if (!dev_xmit_complete(status)) {
+ skb_queue_tail(&npinfo->txq, skb);
+ schedule_delayed_work(&npinfo->tx_work,0);
+ }
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 65a35e976d7b..4d0312b97cce 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -28,6 +28,7 @@
+ *
+ * RSV - VID[9]:
+ * To be used for further expansion of SWITCH_ID or for other purposes.
++ * Must be transmitted as zero and ignored on receive.
+ *
+ * SWITCH_ID - VID[8:6]:
+ * Index of switch within DSA tree. Must be between 0 and
+@@ -35,6 +36,7 @@
+ *
+ * RSV - VID[5:4]:
+ * To be used for further expansion of PORT or for other purposes.
++ * Must be transmitted as zero and ignored on receive.
+ *
+ * PORT - VID[3:0]:
+ * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b30f7f877181..b2f0d2988a8e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+ return mss_now;
+ }
+
++/* In some cases, both sendpage() and sendmsg() could have added
++ * an skb to the write queue, but failed adding payload on it.
++ * We need to remove it to consume less memory, but more
++ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
++ * users.
++ */
++static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
++{
++ if (skb && !skb->len) {
++ tcp_unlink_write_queue(skb, sk);
++ if (tcp_write_queue_empty(sk))
++ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
++ sk_wmem_free_skb(sk, skb);
++ }
++}
++
+ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
+ {
+@@ -1064,6 +1080,7 @@ out:
+ return copied;
+
+ do_error:
++ tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
+ if (copied)
+ goto out;
+ out_err:
+@@ -1388,18 +1405,11 @@ out_nopush:
+ sock_zerocopy_put(uarg);
+ return copied + copied_syn;
+
++do_error:
++ skb = tcp_write_queue_tail(sk);
+ do_fault:
+- if (!skb->len) {
+- tcp_unlink_write_queue(skb, sk);
+- /* It is the one place in all of TCP, except connection
+- * reset, where we can be unlinking the send_head.
+- */
+- if (tcp_write_queue_empty(sk))
+- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+- sk_wmem_free_skb(sk, skb);
+- }
++ tcp_remove_empty_skb(sk, skb);
+
+-do_error:
+ if (copied + copied_syn)
+ goto out;
+ out_err:
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 359d298348c7..37c2f1204c1a 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2051,7 +2051,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+ if (len <= skb->len)
+ break;
+
+- if (unlikely(TCP_SKB_CB(skb)->eor))
++ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
+ return false;
+
+ len -= skb->len;
+@@ -2168,6 +2168,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ * we need to propagate it to the new skb.
+ */
+ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
++ tcp_skb_collapse_tstamp(nskb, skb);
+ tcp_unlink_write_queue(skb, sk);
+ sk_wmem_free_skb(sk, skb);
+ } else {
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 7f3f13c37916..eaa4c2cc2fbb 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ if (pmc) {
+ im->idev = pmc->idev;
+ if (im->mca_sfmode == MCAST_INCLUDE) {
+- im->mca_tomb = pmc->mca_tomb;
+- im->mca_sources = pmc->mca_sources;
++ swap(im->mca_tomb, pmc->mca_tomb);
++ swap(im->mca_sources, pmc->mca_sources);
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = idev->mc_qrv;
+ } else {
+ im->mca_crcount = idev->mc_qrv;
+ }
+ in6_dev_put(pmc->idev);
++ ip6_mc_clear_src(pmc);
+ kfree(pmc);
+ }
+ spin_unlock_bh(&im->mca_lock);
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 948b4ebbe3fb..49248fe5847a 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -112,15 +112,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
+ #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
+ #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
+
+-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
++static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
++{
++ return (__s32)(timeout - (u32)jiffies);
++}
++
++static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ {
+ const struct nf_conntrack_l4proto *l4proto;
++ int l4num = nf_ct_protonum(ct);
+ unsigned int timeout;
+- int l4num;
+-
+- l4num = nf_ct_protonum(ct);
+- if (l4num == IPPROTO_TCP)
+- flow_offload_fixup_tcp(&ct->proto.tcp);
+
+ l4proto = nf_ct_l4proto_find(l4num);
+ if (!l4proto)
+@@ -133,7 +134,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+ else
+ return;
+
+- ct->timeout = nfct_time_stamp + timeout;
++ if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
++ ct->timeout = nfct_time_stamp + timeout;
++}
++
++static void flow_offload_fixup_ct_state(struct nf_conn *ct)
++{
++ if (nf_ct_protonum(ct) == IPPROTO_TCP)
++ flow_offload_fixup_tcp(&ct->proto.tcp);
++}
++
++static void flow_offload_fixup_ct(struct nf_conn *ct)
++{
++ flow_offload_fixup_ct_state(ct);
++ flow_offload_fixup_ct_timeout(ct);
+ }
+
+ void flow_offload_free(struct flow_offload *flow)
+@@ -209,6 +223,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_add);
+
++static inline bool nf_flow_has_expired(const struct flow_offload *flow)
++{
++ return nf_flow_timeout_delta(flow->timeout) <= 0;
++}
++
+ static void flow_offload_del(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+ {
+@@ -224,6 +243,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
+ e = container_of(flow, struct flow_offload_entry, flow);
+ clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+
++ if (nf_flow_has_expired(flow))
++ flow_offload_fixup_ct(e->ct);
++ else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
++ flow_offload_fixup_ct_timeout(e->ct);
++
+ flow_offload_free(flow);
+ }
+
+@@ -299,11 +323,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ return err;
+ }
+
+-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+-{
+- return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+-}
+-
+ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+ {
+ struct nf_flowtable *flow_table = data;
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index cdfc33517e85..d68c801dd614 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -214,6 +214,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ return true;
+ }
+
++static int nf_flow_offload_dst_check(struct dst_entry *dst)
++{
++ if (unlikely(dst_xfrm(dst)))
++ return dst_check(dst, 0) ? 0 : -1;
++
++ return 0;
++}
++
++static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
++ const struct nf_hook_state *state,
++ struct dst_entry *dst)
++{
++ skb_orphan(skb);
++ skb_dst_set_noref(skb, dst);
++ skb->tstamp = 0;
++ dst_output(state->net, state->sk, skb);
++ return NF_STOLEN;
++}
++
+ unsigned int
+ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+@@ -254,6 +273,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
+ return NF_ACCEPT;
+
++ if (nf_flow_offload_dst_check(&rt->dst)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+ return NF_DROP;
+
+@@ -261,6 +285,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ iph = ip_hdr(skb);
+ ip_decrease_ttl(iph);
+
++ if (unlikely(dst_xfrm(&rt->dst))) {
++ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
++ IPCB(skb)->iif = skb->dev->ifindex;
++ IPCB(skb)->flags = IPSKB_FORWARDED;
++ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
++ }
++
+ skb->dev = outdev;
+ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+ skb_dst_set_noref(skb, &rt->dst);
+@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ sizeof(*ip6h)))
+ return NF_ACCEPT;
+
++ if (nf_flow_offload_dst_check(&rt->dst)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (skb_try_make_writable(skb, sizeof(*ip6h)))
+ return NF_DROP;
+
+@@ -477,6 +513,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ ip6h = ipv6_hdr(skb);
+ ip6h->hop_limit--;
+
++ if (unlikely(dst_xfrm(&rt->dst))) {
++ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
++ IP6CB(skb)->iif = skb->dev->ifindex;
++ IP6CB(skb)->flags = IP6SKB_FORWARDED;
++ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
++ }
++
+ skb->dev = outdev;
+ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+ skb_dst_set_noref(skb, &rt->dst);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index bcf17fb46d96..8e4cdae2c4f1 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -136,9 +136,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+ return;
+
+ list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+- if (trans->msg_type == NFT_MSG_NEWSET &&
+- nft_trans_set(trans) == set) {
2796 |
+- set->bound = true; |
2797 |
++ switch (trans->msg_type) { |
2798 |
++ case NFT_MSG_NEWSET: |
2799 |
++ if (nft_trans_set(trans) == set) |
2800 |
++ nft_trans_set_bound(trans) = true; |
2801 |
++ break; |
2802 |
++ case NFT_MSG_NEWSETELEM: |
2803 |
++ if (nft_trans_elem_set(trans) == set) |
2804 |
++ nft_trans_elem_set_bound(trans) = true; |
2805 |
+ break; |
2806 |
+ } |
2807 |
+ } |
2808 |
+@@ -6849,7 +6854,7 @@ static int __nf_tables_abort(struct net *net) |
2809 |
+ break; |
2810 |
+ case NFT_MSG_NEWSET: |
2811 |
+ trans->ctx.table->use--; |
2812 |
+- if (nft_trans_set(trans)->bound) { |
2813 |
++ if (nft_trans_set_bound(trans)) { |
2814 |
+ nft_trans_destroy(trans); |
2815 |
+ break; |
2816 |
+ } |
2817 |
+@@ -6861,7 +6866,7 @@ static int __nf_tables_abort(struct net *net) |
2818 |
+ nft_trans_destroy(trans); |
2819 |
+ break; |
2820 |
+ case NFT_MSG_NEWSETELEM: |
2821 |
+- if (nft_trans_elem_set(trans)->bound) { |
2822 |
++ if (nft_trans_elem_set_bound(trans)) { |
2823 |
+ nft_trans_destroy(trans); |
2824 |
+ break; |
2825 |
+ } |
2826 |
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c |
2827 |
+index aa5f571d4361..060a4ed46d5e 100644 |
2828 |
+--- a/net/netfilter/nft_flow_offload.c |
2829 |
++++ b/net/netfilter/nft_flow_offload.c |
2830 |
+@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, |
2831 |
+ { |
2832 |
+ struct nft_flow_offload *priv = nft_expr_priv(expr); |
2833 |
+ struct nf_flowtable *flowtable = &priv->flowtable->data; |
2834 |
++ struct tcphdr _tcph, *tcph = NULL; |
2835 |
+ enum ip_conntrack_info ctinfo; |
2836 |
+ struct nf_flow_route route; |
2837 |
+ struct flow_offload *flow; |
2838 |
+ enum ip_conntrack_dir dir; |
2839 |
+- bool is_tcp = false; |
2840 |
+ struct nf_conn *ct; |
2841 |
+ int ret; |
2842 |
+ |
2843 |
+@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, |
2844 |
+ |
2845 |
+ switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { |
2846 |
+ case IPPROTO_TCP: |
2847 |
+- is_tcp = true; |
2848 |
++ tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, |
2849 |
++ sizeof(_tcph), &_tcph); |
2850 |
++ if (unlikely(!tcph || tcph->fin || tcph->rst)) |
2851 |
++ goto out; |
2852 |
+ break; |
2853 |
+ case IPPROTO_UDP: |
2854 |
+ break; |
2855 |
+@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, |
2856 |
+ if (!flow) |
2857 |
+ goto err_flow_alloc; |
2858 |
+ |
2859 |
+- if (is_tcp) { |
2860 |
++ if (tcph) { |
2861 |
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; |
2862 |
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; |
2863 |
+ } |
2864 |
+diff --git a/net/psample/psample.c b/net/psample/psample.c |
2865 |
+index 841f198ea1a8..66e4b61a350d 100644 |
2866 |
+--- a/net/psample/psample.c |
2867 |
++++ b/net/psample/psample.c |
2868 |
+@@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group) |
2869 |
+ { |
2870 |
+ psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); |
2871 |
+ list_del(&group->list); |
2872 |
+- kfree(group); |
2873 |
++ kfree_rcu(group, rcu); |
2874 |
+ } |
2875 |
+ |
2876 |
+ static struct psample_group * |
2877 |
+diff --git a/net/rds/recv.c b/net/rds/recv.c |
2878 |
+index 853de4876088..a42ba7fa06d5 100644 |
2879 |
+--- a/net/rds/recv.c |
2880 |
++++ b/net/rds/recv.c |
2881 |
+@@ -1,5 +1,5 @@ |
2882 |
+ /* |
2883 |
+- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. |
2884 |
++ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. |
2885 |
+ * |
2886 |
+ * This software is available to you under a choice of one of two |
2887 |
+ * licenses. You may choose to be licensed under the terms of the GNU |
2888 |
+@@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc, |
2889 |
+ |
2890 |
+ minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); |
2891 |
+ minfo6.len = be32_to_cpu(inc->i_hdr.h_len); |
2892 |
++ minfo6.tos = inc->i_conn->c_tos; |
2893 |
+ |
2894 |
+ if (flip) { |
2895 |
+ minfo6.laddr = *daddr; |
2896 |
+@@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc, |
2897 |
+ minfo6.fport = inc->i_hdr.h_dport; |
2898 |
+ } |
2899 |
+ |
2900 |
++ minfo6.flags = 0; |
2901 |
++ |
2902 |
+ rds_info_copy(iter, &minfo6, sizeof(minfo6)); |
2903 |
+ } |
2904 |
+ #endif |
2905 |
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c |
2906 |
+index fd1f7e799e23..04b7bd4ec751 100644 |
2907 |
+--- a/net/sched/act_bpf.c |
2908 |
++++ b/net/sched/act_bpf.c |
2909 |
+@@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net) |
2910 |
+ { |
2911 |
+ struct tc_action_net *tn = net_generic(net, bpf_net_id); |
2912 |
+ |
2913 |
+- return tc_action_net_init(tn, &act_bpf_ops); |
2914 |
++ return tc_action_net_init(net, tn, &act_bpf_ops); |
2915 |
+ } |
2916 |
+ |
2917 |
+ static void __net_exit bpf_exit_net(struct list_head *net_list) |
2918 |
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c |
2919 |
+index 32ac04d77a45..2b43cacf82af 100644 |
2920 |
+--- a/net/sched/act_connmark.c |
2921 |
++++ b/net/sched/act_connmark.c |
2922 |
+@@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net) |
2923 |
+ { |
2924 |
+ struct tc_action_net *tn = net_generic(net, connmark_net_id); |
2925 |
+ |
2926 |
+- return tc_action_net_init(tn, &act_connmark_ops); |
2927 |
++ return tc_action_net_init(net, tn, &act_connmark_ops); |
2928 |
+ } |
2929 |
+ |
2930 |
+ static void __net_exit connmark_exit_net(struct list_head *net_list) |
2931 |
+diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c |
2932 |
+index 9b9288267a54..d3cfad88dc3a 100644 |
2933 |
+--- a/net/sched/act_csum.c |
2934 |
++++ b/net/sched/act_csum.c |
2935 |
+@@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net) |
2936 |
+ { |
2937 |
+ struct tc_action_net *tn = net_generic(net, csum_net_id); |
2938 |
+ |
2939 |
+- return tc_action_net_init(tn, &act_csum_ops); |
2940 |
++ return tc_action_net_init(net, tn, &act_csum_ops); |
2941 |
+ } |
2942 |
+ |
2943 |
+ static void __net_exit csum_exit_net(struct list_head *net_list) |
2944 |
+diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c |
2945 |
+index 8f0140c6ca58..324f1d1f6d47 100644 |
2946 |
+--- a/net/sched/act_gact.c |
2947 |
++++ b/net/sched/act_gact.c |
2948 |
+@@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net) |
2949 |
+ { |
2950 |
+ struct tc_action_net *tn = net_generic(net, gact_net_id); |
2951 |
+ |
2952 |
+- return tc_action_net_init(tn, &act_gact_ops); |
2953 |
++ return tc_action_net_init(net, tn, &act_gact_ops); |
2954 |
+ } |
2955 |
+ |
2956 |
+ static void __net_exit gact_exit_net(struct list_head *net_list) |
2957 |
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c |
2958 |
+index 92ee853d43e6..3a31e241c647 100644 |
2959 |
+--- a/net/sched/act_ife.c |
2960 |
++++ b/net/sched/act_ife.c |
2961 |
+@@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net) |
2962 |
+ { |
2963 |
+ struct tc_action_net *tn = net_generic(net, ife_net_id); |
2964 |
+ |
2965 |
+- return tc_action_net_init(tn, &act_ife_ops); |
2966 |
++ return tc_action_net_init(net, tn, &act_ife_ops); |
2967 |
+ } |
2968 |
+ |
2969 |
+ static void __net_exit ife_exit_net(struct list_head *net_list) |
2970 |
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c |
2971 |
+index ce2c30a591d2..214a03d405cf 100644 |
2972 |
+--- a/net/sched/act_ipt.c |
2973 |
++++ b/net/sched/act_ipt.c |
2974 |
+@@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t, |
2975 |
+ return 0; |
2976 |
+ } |
2977 |
+ |
2978 |
+-static void ipt_destroy_target(struct xt_entry_target *t) |
2979 |
++static void ipt_destroy_target(struct xt_entry_target *t, struct net *net) |
2980 |
+ { |
2981 |
+ struct xt_tgdtor_param par = { |
2982 |
+ .target = t->u.kernel.target, |
2983 |
+ .targinfo = t->data, |
2984 |
+ .family = NFPROTO_IPV4, |
2985 |
++ .net = net, |
2986 |
+ }; |
2987 |
+ if (par.target->destroy != NULL) |
2988 |
+ par.target->destroy(&par); |
2989 |
+@@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a) |
2990 |
+ struct tcf_ipt *ipt = to_ipt(a); |
2991 |
+ |
2992 |
+ if (ipt->tcfi_t) { |
2993 |
+- ipt_destroy_target(ipt->tcfi_t); |
2994 |
++ ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net); |
2995 |
+ kfree(ipt->tcfi_t); |
2996 |
+ } |
2997 |
+ kfree(ipt->tcfi_tname); |
2998 |
+@@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, |
2999 |
+ |
3000 |
+ spin_lock_bh(&ipt->tcf_lock); |
3001 |
+ if (ret != ACT_P_CREATED) { |
3002 |
+- ipt_destroy_target(ipt->tcfi_t); |
3003 |
++ ipt_destroy_target(ipt->tcfi_t, net); |
3004 |
+ kfree(ipt->tcfi_tname); |
3005 |
+ kfree(ipt->tcfi_t); |
3006 |
+ } |
3007 |
+@@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net) |
3008 |
+ { |
3009 |
+ struct tc_action_net *tn = net_generic(net, ipt_net_id); |
3010 |
+ |
3011 |
+- return tc_action_net_init(tn, &act_ipt_ops); |
3012 |
++ return tc_action_net_init(net, tn, &act_ipt_ops); |
3013 |
+ } |
3014 |
+ |
3015 |
+ static void __net_exit ipt_exit_net(struct list_head *net_list) |
3016 |
+@@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net) |
3017 |
+ { |
3018 |
+ struct tc_action_net *tn = net_generic(net, xt_net_id); |
3019 |
+ |
3020 |
+- return tc_action_net_init(tn, &act_xt_ops); |
3021 |
++ return tc_action_net_init(net, tn, &act_xt_ops); |
3022 |
+ } |
3023 |
+ |
3024 |
+ static void __net_exit xt_exit_net(struct list_head *net_list) |
3025 |
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c |
3026 |
+index d10dca7a13e1..bd3178a95cb9 100644 |
3027 |
+--- a/net/sched/act_mirred.c |
3028 |
++++ b/net/sched/act_mirred.c |
3029 |
+@@ -432,7 +432,7 @@ static __net_init int mirred_init_net(struct net *net) |
3030 |
+ { |
3031 |
+ struct tc_action_net *tn = net_generic(net, mirred_net_id); |
3032 |
+ |
3033 |
+- return tc_action_net_init(tn, &act_mirred_ops); |
3034 |
++ return tc_action_net_init(net, tn, &act_mirred_ops); |
3035 |
+ } |
3036 |
+ |
3037 |
+ static void __net_exit mirred_exit_net(struct list_head *net_list) |
3038 |
+diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c |
3039 |
+index 7b858c11b1b5..ea4c5359e7df 100644 |
3040 |
+--- a/net/sched/act_nat.c |
3041 |
++++ b/net/sched/act_nat.c |
3042 |
+@@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net) |
3043 |
+ { |
3044 |
+ struct tc_action_net *tn = net_generic(net, nat_net_id); |
3045 |
+ |
3046 |
+- return tc_action_net_init(tn, &act_nat_ops); |
3047 |
++ return tc_action_net_init(net, tn, &act_nat_ops); |
3048 |
+ } |
3049 |
+ |
3050 |
+ static void __net_exit nat_exit_net(struct list_head *net_list) |
3051 |
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c |
3052 |
+index 17360c6faeaa..cdfaa79382a2 100644 |
3053 |
+--- a/net/sched/act_pedit.c |
3054 |
++++ b/net/sched/act_pedit.c |
3055 |
+@@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net) |
3056 |
+ { |
3057 |
+ struct tc_action_net *tn = net_generic(net, pedit_net_id); |
3058 |
+ |
3059 |
+- return tc_action_net_init(tn, &act_pedit_ops); |
3060 |
++ return tc_action_net_init(net, tn, &act_pedit_ops); |
3061 |
+ } |
3062 |
+ |
3063 |
+ static void __net_exit pedit_exit_net(struct list_head *net_list) |
3064 |
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c |
3065 |
+index 49cec3e64a4d..6315e0f8d26e 100644 |
3066 |
+--- a/net/sched/act_police.c |
3067 |
++++ b/net/sched/act_police.c |
3068 |
+@@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net) |
3069 |
+ { |
3070 |
+ struct tc_action_net *tn = net_generic(net, police_net_id); |
3071 |
+ |
3072 |
+- return tc_action_net_init(tn, &act_police_ops); |
3073 |
++ return tc_action_net_init(net, tn, &act_police_ops); |
3074 |
+ } |
3075 |
+ |
3076 |
+ static void __net_exit police_exit_net(struct list_head *net_list) |
3077 |
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c |
3078 |
+index 595308d60133..10229124a992 100644 |
3079 |
+--- a/net/sched/act_sample.c |
3080 |
++++ b/net/sched/act_sample.c |
3081 |
+@@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, |
3082 |
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
3083 |
+ s->rate = rate; |
3084 |
+ s->psample_group_num = psample_group_num; |
3085 |
+- RCU_INIT_POINTER(s->psample_group, psample_group); |
3086 |
++ rcu_swap_protected(s->psample_group, psample_group, |
3087 |
++ lockdep_is_held(&s->tcf_lock)); |
3088 |
+ |
3089 |
+ if (tb[TCA_SAMPLE_TRUNC_SIZE]) { |
3090 |
+ s->truncate = true; |
3091 |
+ s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); |
3092 |
+ } |
3093 |
+ spin_unlock_bh(&s->tcf_lock); |
3094 |
++ |
3095 |
++ if (psample_group) |
3096 |
++ psample_group_put(psample_group); |
3097 |
+ if (goto_ch) |
3098 |
+ tcf_chain_put_by_act(goto_ch); |
3099 |
+ |
3100 |
+@@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net) |
3101 |
+ { |
3102 |
+ struct tc_action_net *tn = net_generic(net, sample_net_id); |
3103 |
+ |
3104 |
+- return tc_action_net_init(tn, &act_sample_ops); |
3105 |
++ return tc_action_net_init(net, tn, &act_sample_ops); |
3106 |
+ } |
3107 |
+ |
3108 |
+ static void __net_exit sample_exit_net(struct list_head *net_list) |
3109 |
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c |
3110 |
+index 33aefa25b545..6120e56117ca 100644 |
3111 |
+--- a/net/sched/act_simple.c |
3112 |
++++ b/net/sched/act_simple.c |
3113 |
+@@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net) |
3114 |
+ { |
3115 |
+ struct tc_action_net *tn = net_generic(net, simp_net_id); |
3116 |
+ |
3117 |
+- return tc_action_net_init(tn, &act_simp_ops); |
3118 |
++ return tc_action_net_init(net, tn, &act_simp_ops); |
3119 |
+ } |
3120 |
+ |
3121 |
+ static void __net_exit simp_exit_net(struct list_head *net_list) |
3122 |
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c |
3123 |
+index 37dced00b63d..6a8d3337c577 100644 |
3124 |
+--- a/net/sched/act_skbedit.c |
3125 |
++++ b/net/sched/act_skbedit.c |
3126 |
+@@ -336,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net) |
3127 |
+ { |
3128 |
+ struct tc_action_net *tn = net_generic(net, skbedit_net_id); |
3129 |
+ |
3130 |
+- return tc_action_net_init(tn, &act_skbedit_ops); |
3131 |
++ return tc_action_net_init(net, tn, &act_skbedit_ops); |
3132 |
+ } |
3133 |
+ |
3134 |
+ static void __net_exit skbedit_exit_net(struct list_head *net_list) |
3135 |
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c |
3136 |
+index 7da3518e18ef..888437f97ba6 100644 |
3137 |
+--- a/net/sched/act_skbmod.c |
3138 |
++++ b/net/sched/act_skbmod.c |
3139 |
+@@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net) |
3140 |
+ { |
3141 |
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id); |
3142 |
+ |
3143 |
+- return tc_action_net_init(tn, &act_skbmod_ops); |
3144 |
++ return tc_action_net_init(net, tn, &act_skbmod_ops); |
3145 |
+ } |
3146 |
+ |
3147 |
+ static void __net_exit skbmod_exit_net(struct list_head *net_list) |
3148 |
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c |
3149 |
+index 6d0debdc9b97..2f83a79f76aa 100644 |
3150 |
+--- a/net/sched/act_tunnel_key.c |
3151 |
++++ b/net/sched/act_tunnel_key.c |
3152 |
+@@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net) |
3153 |
+ { |
3154 |
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); |
3155 |
+ |
3156 |
+- return tc_action_net_init(tn, &act_tunnel_key_ops); |
3157 |
++ return tc_action_net_init(net, tn, &act_tunnel_key_ops); |
3158 |
+ } |
3159 |
+ |
3160 |
+ static void __net_exit tunnel_key_exit_net(struct list_head *net_list) |
3161 |
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c |
3162 |
+index a3c9eea1ee8a..287a30bf8930 100644 |
3163 |
+--- a/net/sched/act_vlan.c |
3164 |
++++ b/net/sched/act_vlan.c |
3165 |
+@@ -334,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net) |
3166 |
+ { |
3167 |
+ struct tc_action_net *tn = net_generic(net, vlan_net_id); |
3168 |
+ |
3169 |
+- return tc_action_net_init(tn, &act_vlan_ops); |
3170 |
++ return tc_action_net_init(net, tn, &act_vlan_ops); |
3171 |
+ } |
3172 |
+ |
3173 |
+ static void __net_exit vlan_exit_net(struct list_head *net_list) |
3174 |
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c |
3175 |
+index 732e109c3055..810645b5c086 100644 |
3176 |
+--- a/net/sched/sch_cbs.c |
3177 |
++++ b/net/sched/sch_cbs.c |
3178 |
+@@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) |
3179 |
+ s64 credits; |
3180 |
+ int len; |
3181 |
+ |
3182 |
+- if (atomic64_read(&q->port_rate) == -1) { |
3183 |
+- WARN_ONCE(1, "cbs: dequeue() called with unknown port rate."); |
3184 |
+- return NULL; |
3185 |
+- } |
3186 |
+- |
3187 |
+ if (q->credits < 0) { |
3188 |
+ credits = timediff_to_credits(now - q->last, q->idleslope); |
3189 |
+ |
3190 |
+@@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q, |
3191 |
+ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) |
3192 |
+ { |
3193 |
+ struct ethtool_link_ksettings ecmd; |
3194 |
++ int speed = SPEED_10; |
3195 |
+ int port_rate = -1; |
3196 |
++ int err; |
3197 |
++ |
3198 |
++ err = __ethtool_get_link_ksettings(dev, &ecmd); |
3199 |
++ if (err < 0) |
3200 |
++ goto skip; |
3201 |
++ |
3202 |
++ if (ecmd.base.speed != SPEED_UNKNOWN) |
3203 |
++ speed = ecmd.base.speed; |
3204 |
+ |
3205 |
+- if (!__ethtool_get_link_ksettings(dev, &ecmd) && |
3206 |
+- ecmd.base.speed != SPEED_UNKNOWN) |
3207 |
+- port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT; |
3208 |
++skip: |
3209 |
++ port_rate = speed * 1000 * BYTES_PER_KBIT; |
3210 |
+ |
3211 |
+ atomic64_set(&q->port_rate, port_rate); |
3212 |
+ netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", |
3213 |
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c |
3214 |
+index 11c03cf4aa74..137db1cbde85 100644 |
3215 |
+--- a/net/sched/sch_generic.c |
3216 |
++++ b/net/sched/sch_generic.c |
3217 |
+@@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, |
3218 |
+ |
3219 |
+ err = skb_array_produce(q, skb); |
3220 |
+ |
3221 |
+- if (unlikely(err)) |
3222 |
+- return qdisc_drop_cpu(skb, qdisc, to_free); |
3223 |
++ if (unlikely(err)) { |
3224 |
++ if (qdisc_is_percpu_stats(qdisc)) |
3225 |
++ return qdisc_drop_cpu(skb, qdisc, to_free); |
3226 |
++ else |
3227 |
++ return qdisc_drop(skb, qdisc, to_free); |
3228 |
++ } |
3229 |
+ |
3230 |
+ qdisc_update_stats_at_enqueue(qdisc, pkt_len); |
3231 |
+ return NET_XMIT_SUCCESS; |
3232 |
+@@ -688,11 +692,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc) |
3233 |
+ kfree_skb(skb); |
3234 |
+ } |
3235 |
+ |
3236 |
+- for_each_possible_cpu(i) { |
3237 |
+- struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); |
3238 |
++ if (qdisc_is_percpu_stats(qdisc)) { |
3239 |
++ for_each_possible_cpu(i) { |
3240 |
++ struct gnet_stats_queue *q; |
3241 |
+ |
3242 |
+- q->backlog = 0; |
3243 |
+- q->qlen = 0; |
3244 |
++ q = per_cpu_ptr(qdisc->cpu_qstats, i); |
3245 |
++ q->backlog = 0; |
3246 |
++ q->qlen = 0; |
3247 |
++ } |
3248 |
+ } |
3249 |
+ } |
3250 |
+ |
3251 |
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c |
3252 |
+index 8be89aa52b6e..11c2873ec68b 100644 |
3253 |
+--- a/net/sched/sch_taprio.c |
3254 |
++++ b/net/sched/sch_taprio.c |
3255 |
+@@ -205,11 +205,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch) |
3256 |
+ u32 gate_mask; |
3257 |
+ int i; |
3258 |
+ |
3259 |
+- if (atomic64_read(&q->picos_per_byte) == -1) { |
3260 |
+- WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte."); |
3261 |
+- return NULL; |
3262 |
+- } |
3263 |
+- |
3264 |
+ rcu_read_lock(); |
3265 |
+ entry = rcu_dereference(q->current_entry); |
3266 |
+ /* if there's no entry, it means that the schedule didn't |
3267 |
+@@ -665,12 +660,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev, |
3268 |
+ struct taprio_sched *q) |
3269 |
+ { |
3270 |
+ struct ethtool_link_ksettings ecmd; |
3271 |
+- int picos_per_byte = -1; |
3272 |
++ int speed = SPEED_10; |
3273 |
++ int picos_per_byte; |
3274 |
++ int err; |
3275 |
+ |
3276 |
+- if (!__ethtool_get_link_ksettings(dev, &ecmd) && |
3277 |
+- ecmd.base.speed != SPEED_UNKNOWN) |
3278 |
+- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, |
3279 |
+- ecmd.base.speed * 1000 * 1000); |
3280 |
++ err = __ethtool_get_link_ksettings(dev, &ecmd); |
3281 |
++ if (err < 0) |
3282 |
++ goto skip; |
3283 |
++ |
3284 |
++ if (ecmd.base.speed != SPEED_UNKNOWN) |
3285 |
++ speed = ecmd.base.speed; |
3286 |
++ |
3287 |
++skip: |
3288 |
++ picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, |
3289 |
++ speed * 1000 * 1000); |
3290 |
+ |
3291 |
+ atomic64_set(&q->picos_per_byte, picos_per_byte); |
3292 |
+ netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", |
3293 |
+@@ -903,6 +906,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, |
3294 |
+ */ |
3295 |
+ q->clockid = -1; |
3296 |
+ |
3297 |
++ spin_lock(&taprio_list_lock); |
3298 |
++ list_add(&q->taprio_list, &taprio_list); |
3299 |
++ spin_unlock(&taprio_list_lock); |
3300 |
++ |
3301 |
+ if (sch->parent != TC_H_ROOT) |
3302 |
+ return -EOPNOTSUPP; |
3303 |
+ |
3304 |
+@@ -920,10 +927,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, |
3305 |
+ if (!opt) |
3306 |
+ return -EINVAL; |
3307 |
+ |
3308 |
+- spin_lock(&taprio_list_lock); |
3309 |
+- list_add(&q->taprio_list, &taprio_list); |
3310 |
+- spin_unlock(&taprio_list_lock); |
3311 |
+- |
3312 |
+ for (i = 0; i < dev->num_tx_queues; i++) { |
3313 |
+ struct netdev_queue *dev_queue; |
3314 |
+ struct Qdisc *qdisc; |
3315 |
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c |
3316 |
+index f7261fad45c1..647d8a4044fb 100644 |
3317 |
+--- a/tools/bpf/bpftool/common.c |
3318 |
++++ b/tools/bpf/bpftool/common.c |
3319 |
+@@ -236,7 +236,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) |
3320 |
+ |
3321 |
+ fd = get_fd_by_id(id); |
3322 |
+ if (fd < 0) { |
3323 |
+- p_err("can't get prog by id (%u): %s", id, strerror(errno)); |
3324 |
++ p_err("can't open object by id (%u): %s", id, strerror(errno)); |
3325 |
+ return -1; |
3326 |
+ } |
3327 |
+ |
3328 |
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c |
3329 |
+index 0ce50c319cfd..ef8a82f29f02 100644 |
3330 |
+--- a/tools/hv/hv_kvp_daemon.c |
3331 |
++++ b/tools/hv/hv_kvp_daemon.c |
3332 |
+@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op, |
3333 |
+ int sn_offset = 0; |
3334 |
+ int error = 0; |
3335 |
+ char *buffer; |
3336 |
+- struct hv_kvp_ipaddr_value *ip_buffer; |
3337 |
++ struct hv_kvp_ipaddr_value *ip_buffer = NULL; |
3338 |
+ char cidr_mask[5]; /* /xyz */ |
3339 |
+ int weight; |
3340 |
+ int i; |
3341 |
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c |
3342 |
+index 77e14d995479..0ccf6aa533ae 100644 |
3343 |
+--- a/tools/lib/bpf/libbpf.c |
3344 |
++++ b/tools/lib/bpf/libbpf.c |
3345 |
+@@ -178,7 +178,6 @@ struct bpf_program { |
3346 |
+ bpf_program_clear_priv_t clear_priv; |
3347 |
+ |
3348 |
+ enum bpf_attach_type expected_attach_type; |
3349 |
+- int btf_fd; |
3350 |
+ void *func_info; |
3351 |
+ __u32 func_info_rec_size; |
3352 |
+ __u32 func_info_cnt; |
3353 |
+@@ -305,7 +304,6 @@ void bpf_program__unload(struct bpf_program *prog) |
3354 |
+ prog->instances.nr = -1; |
3355 |
+ zfree(&prog->instances.fds); |
3356 |
+ |
3357 |
+- zclose(prog->btf_fd); |
3358 |
+ zfree(&prog->func_info); |
3359 |
+ zfree(&prog->line_info); |
3360 |
+ } |
3361 |
+@@ -382,7 +380,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx, |
3362 |
+ prog->instances.fds = NULL; |
3363 |
+ prog->instances.nr = -1; |
3364 |
+ prog->type = BPF_PROG_TYPE_UNSPEC; |
3365 |
+- prog->btf_fd = -1; |
3366 |
+ |
3367 |
+ return 0; |
3368 |
+ errout: |
3369 |
+@@ -1888,9 +1885,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, |
3370 |
+ prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); |
3371 |
+ } |
3372 |
+ |
3373 |
+- if (!insn_offset) |
3374 |
+- prog->btf_fd = btf__fd(obj->btf); |
3375 |
+- |
3376 |
+ return 0; |
3377 |
+ } |
3378 |
+ |
3379 |
+@@ -2065,7 +2059,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, |
3380 |
+ char *cp, errmsg[STRERR_BUFSIZE]; |
3381 |
+ int log_buf_size = BPF_LOG_BUF_SIZE; |
3382 |
+ char *log_buf; |
3383 |
+- int ret; |
3384 |
++ int btf_fd, ret; |
3385 |
+ |
3386 |
+ memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); |
3387 |
+ load_attr.prog_type = prog->type; |
3388 |
+@@ -2077,7 +2071,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, |
3389 |
+ load_attr.license = license; |
3390 |
+ load_attr.kern_version = kern_version; |
3391 |
+ load_attr.prog_ifindex = prog->prog_ifindex; |
3392 |
+- load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; |
3393 |
++ /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ |
3394 |
++ if (prog->obj->btf_ext) |
3395 |
++ btf_fd = bpf_object__btf_fd(prog->obj); |
3396 |
++ else |
3397 |
++ btf_fd = -1; |
3398 |
++ load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0; |
3399 |
+ load_attr.func_info = prog->func_info; |
3400 |
+ load_attr.func_info_rec_size = prog->func_info_rec_size; |
3401 |
+ load_attr.func_info_cnt = prog->func_info_cnt; |
3402 |
+diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h |
3403 |
+index 4059014d93ea..4912d23844bc 100644 |
3404 |
+--- a/tools/testing/selftests/kvm/include/evmcs.h |
3405 |
++++ b/tools/testing/selftests/kvm/include/evmcs.h |
3406 |
+@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs { |
3407 |
+ struct hv_enlightened_vmcs *current_evmcs; |
3408 |
+ struct hv_vp_assist_page *current_vp_assist; |
3409 |
+ |
3410 |
++int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id); |
3411 |
++ |
3412 |
+ static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) |
3413 |
+ { |
3414 |
+ u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | |
3415 |
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c |
3416 |
+index d2ad85fb01ac..5f1ba3da2dbd 100644 |
3417 |
+--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c |
3418 |
++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c |
3419 |
+@@ -1059,9 +1059,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) |
3420 |
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", |
3421 |
+ r); |
3422 |
+ |
3423 |
+- r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); |
3424 |
+- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", |
3425 |
+- r); |
3426 |
++ if (kvm_check_cap(KVM_CAP_XCRS)) { |
3427 |
++ r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); |
3428 |
++ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", |
3429 |
++ r); |
3430 |
++ } |
3431 |
+ |
3432 |
+ r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); |
3433 |
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", |
3434 |
+@@ -1102,9 +1104,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s |
3435 |
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", |
3436 |
+ r); |
3437 |
+ |
3438 |
+- r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); |
3439 |
+- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", |
3440 |
+- r); |
3441 |
++ if (kvm_check_cap(KVM_CAP_XCRS)) { |
3442 |
++ r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); |
3443 |
++ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", |
3444 |
++ r); |
3445 |
++ } |
3446 |
+ |
3447 |
+ r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); |
3448 |
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", |
3449 |
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c |
3450 |
+index fe56d159d65f..52b6491ed706 100644 |
3451 |
+--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c |
3452 |
++++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c |
3453 |
+@@ -14,6 +14,26 @@ |
3454 |
+ |
3455 |
+ bool enable_evmcs; |
3456 |
+ |
3457 |
++int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) |
3458 |
++{ |
3459 |
++ uint16_t evmcs_ver; |
3460 |
++ |
3461 |
++ struct kvm_enable_cap enable_evmcs_cap = { |
3462 |
++ .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, |
3463 |
++ .args[0] = (unsigned long)&evmcs_ver |
3464 |
++ }; |
3465 |
++ |
3466 |
++ vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap); |
3467 |
++ |
3468 |
++ /* KVM should return supported EVMCS version range */ |
3469 |
++ TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && |
3470 |
++ (evmcs_ver & 0xff) > 0, |
3471 |
++ "Incorrect EVMCS version range: %x:%x\n", |
3472 |
++ evmcs_ver & 0xff, evmcs_ver >> 8); |
3473 |
++ |
3474 |
++ return evmcs_ver; |
3475 |
++} |
3476 |
++ |
3477 |
+ /* Allocate memory regions for nested VMX tests. |
3478 |
+ * |
3479 |
+ * Input Args: |
3480 |
+diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c |
3481 |
+index 241919ef1eac..9f250c39c9bb 100644 |
3482 |
+--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c |
3483 |
++++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c |
3484 |
+@@ -79,11 +79,6 @@ int main(int argc, char *argv[]) |
3485 |
+ struct kvm_x86_state *state; |
3486 |
+ struct ucall uc; |
3487 |
+ int stage; |
3488 |
+- uint16_t evmcs_ver; |
3489 |
+- struct kvm_enable_cap enable_evmcs_cap = { |
3490 |
+- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, |
3491 |
+- .args[0] = (unsigned long)&evmcs_ver |
3492 |
+- }; |
3493 |
+ |
3494 |
+ /* Create VM */ |
3495 |
+ vm = vm_create_default(VCPU_ID, 0, guest_code); |
3496 |
+@@ -96,13 +91,7 @@ int main(int argc, char *argv[]) |
3497 |
+ exit(KSFT_SKIP); |
3498 |
+ } |
3499 |
+ |
3500 |
+- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); |
3501 |
+- |
3502 |
+- /* KVM should return supported EVMCS version range */ |
3503 |
+- TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && |
3504 |
+- (evmcs_ver & 0xff) > 0, |
3505 |
+- "Incorrect EVMCS version range: %x:%x\n", |
3506 |
+- evmcs_ver & 0xff, evmcs_ver >> 8); |
3507 |
++ vcpu_enable_evmcs(vm, VCPU_ID); |
3508 |
+ |
3509 |
+ run = vcpu_state(vm, VCPU_ID); |
3510 |
+ |
3511 |
+@@ -146,7 +135,7 @@ int main(int argc, char *argv[]) |
3512 |
+ kvm_vm_restart(vm, O_RDWR); |
3513 |
+ vm_vcpu_add(vm, VCPU_ID, 0, 0); |
3514 |
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); |
3515 |
+- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); |
3516 |
++ vcpu_enable_evmcs(vm, VCPU_ID); |
3517 |
+ vcpu_load_state(vm, VCPU_ID, state); |
3518 |
+ run = vcpu_state(vm, VCPU_ID); |
3519 |
+ free(state); |
3520 |
+diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c |
3521 |
+index f72b3043db0e..ee59831fbc98 100644 |
3522 |
+--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c |
3523 |
++++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c |
3524 |
+@@ -18,6 +18,7 @@ |
3525 |
+ #include "test_util.h" |
3526 |
+ #include "kvm_util.h" |
3527 |
+ #include "processor.h" |
3528 |
++#include "vmx.h" |
3529 |
+ |
3530 |
+ #define VCPU_ID 0 |
3531 |
+ |
3532 |
+@@ -106,12 +107,7 @@ int main(int argc, char *argv[]) |
3533 |
+ { |
3534 |
+ struct kvm_vm *vm; |
3535 |
+ int rv; |
3536 |
+- uint16_t evmcs_ver; |
3537 |
+ struct kvm_cpuid2 *hv_cpuid_entries; |
3538 |
+- struct kvm_enable_cap enable_evmcs_cap = { |
3539 |
+- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, |
3540 |
+- .args[0] = (unsigned long)&evmcs_ver |
3541 |
+- }; |
3542 |
+ |
3543 |
+ /* Tell stdout not to buffer its content */ |
3544 |
+ setbuf(stdout, NULL); |
3545 |
+@@ -136,14 +132,14 @@ int main(int argc, char *argv[]) |
3546 |
+ |
3547 |
+ free(hv_cpuid_entries); |
3548 |
+ |
3549 |
+- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); |
3550 |
+- |
3551 |
+- if (rv) { |
3552 |
++ if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) { |
3553 |
+ fprintf(stderr, |
3554 |
+ "Enlightened VMCS is unsupported, skip related test\n"); |
3555 |
+ goto vm_free; |
3556 |
+ } |
3557 |
+ |
3558 |
++ vcpu_enable_evmcs(vm, VCPU_ID); |
3559 |
++ |
3560 |
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); |
3561 |
+ if (!hv_cpuid_entries) |
3562 |
+ return 1; |
3563 |
+diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c |
3564 |
+index 40050e44ec0a..f9334bd3cce9 100644 |
3565 |
+--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c |
3566 |
++++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c |
3567 |
+@@ -99,8 +99,8 @@ int main(int argc, char *argv[]) |
3568 |
+ msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); |
3569 |
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, |
3570 |
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); |
3571 |
+- test_msr_platform_info_disabled(vm); |
3572 |
+ test_msr_platform_info_enabled(vm); |
3573 |
++ test_msr_platform_info_disabled(vm); |
3574 |
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); |
3575 |
+ |
3576 |
+ kvm_vm_free(vm); |
3577 |
+diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c |
3578 |
+index ed7218d166da..853e370e8a39 100644 |
3579 |
+--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c |
3580 |
++++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c |
3581 |
+@@ -25,24 +25,17 @@ |
3582 |
+ #define VMCS12_REVISION 0x11e57ed0 |
3583 |
+ #define VCPU_ID 5 |
3584 |
+ |
3585 |
++bool have_evmcs; |
3586 |
++ |
3587 |
+ void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) |
3588 |
+ { |
3589 |
+- volatile struct kvm_run *run; |
3590 |
+- |
3591 |
+ vcpu_nested_state_set(vm, VCPU_ID, state, false); |
3592 |
+- run = vcpu_state(vm, VCPU_ID); |
3593 |
+- vcpu_run(vm, VCPU_ID); |
3594 |
+- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, |
3595 |
+- "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n", |
3596 |
+- run->exit_reason, |
3597 |
+- exit_reason_str(run->exit_reason)); |
3598 |
+ } |
3599 |
+ |
3600 |
+ void test_nested_state_expect_errno(struct kvm_vm *vm, |
3601 |
+ struct kvm_nested_state *state, |
3602 |
+ int expected_errno) |
3603 |
+ { |
3604 |
+- volatile struct kvm_run *run; |
3605 |
+ int rv; |
3606 |
+ |
3607 |
+ rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); |
3608 |
+@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm, |
3609 |
+ "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", |
3610 |
+ strerror(expected_errno), expected_errno, rv, strerror(errno), |
3611 |
+ errno); |
3612 |
+- run = vcpu_state(vm, VCPU_ID); |
3613 |
+- vcpu_run(vm, VCPU_ID); |
3614 |
+- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, |
3615 |
+- "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n", |
3616 |
+- run->exit_reason, |
3617 |
+- exit_reason_str(run->exit_reason)); |
3618 |
+ } |
3619 |
+ |
3620 |
+ void test_nested_state_expect_einval(struct kvm_vm *vm, |
3621 |
+@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size) |
3622 |
+ { |
3623 |
+ memset(state, 0, size); |
3624 |
+ state->flags = KVM_STATE_NESTED_GUEST_MODE | |
3625 |
+- KVM_STATE_NESTED_RUN_PENDING | |
3626 |
+- KVM_STATE_NESTED_EVMCS; |
3627 |
++ KVM_STATE_NESTED_RUN_PENDING; |
3628 |
++ if (have_evmcs) |
3629 |
++ state->flags |= KVM_STATE_NESTED_EVMCS; |
3630 |
+ state->format = 0; |
3631 |
+ state->size = size; |
3632 |
+ state->hdr.vmx.vmxon_pa = 0x1000; |
3633 |
+@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm) |
3634 |
+ /* |
3635 |
+ * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without |
3636 |
+ * setting the nested state but flags other than eVMCS must be clear. |
3637 |
++ * The eVMCS flag can be set if the enlightened VMCS capability has |
3638 |
++ * been enabled. |
3639 |
+ */ |
3640 |
+ set_default_vmx_state(state, state_sz); |
3641 |
+ state->hdr.vmx.vmxon_pa = -1ull; |
3642 |
+ state->hdr.vmx.vmcs12_pa = -1ull; |
3643 |
+ test_nested_state_expect_einval(vm, state); |
3644 |
+ |
3645 |
+- state->flags = KVM_STATE_NESTED_EVMCS; |
3646 |
++ state->flags &= KVM_STATE_NESTED_EVMCS; |
3647 |
++ if (have_evmcs) { |
3648 |
++ test_nested_state_expect_einval(vm, state); |
3649 |
++ vcpu_enable_evmcs(vm, VCPU_ID); |
3650 |
++ } |
3651 |
+ test_nested_state(vm, state); |
3652 |
+ |
3653 |
+ /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */ |
3654 |
+@@ -232,6 +226,8 @@ int main(int argc, char *argv[]) |
3655 |
+ struct kvm_nested_state state; |
3656 |
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); |
3657 |
+ |
3658 |
++ have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS); |
3659 |
++ |
3660 |
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) { |
3661 |
+ printf("KVM_CAP_NESTED_STATE not available, skipping test\n"); |
3662 |
+ exit(KSFT_SKIP); |
3663 |
+diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c |
3664 |
+index a8a6a0c883f1..6af5c91337f2 100644 |
3665 |
+--- a/virt/kvm/arm/mmio.c |
3666 |
++++ b/virt/kvm/arm/mmio.c |
3667 |
+@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) |
3668 |
+ unsigned int len; |
3669 |
+ int mask; |
3670 |
+ |
3671 |
++ /* Detect an already handled MMIO return */ |
3672 |
++ if (unlikely(!vcpu->mmio_needed)) |
3673 |
++ return 0; |
3674 |
++ |
3675 |
++ vcpu->mmio_needed = 0; |
3676 |
++ |
3677 |
+ if (!run->mmio.is_write) { |
3678 |
+ len = run->mmio.len; |
3679 |
+ if (len > sizeof(unsigned long)) |
3680 |
+@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, |
3681 |
+ run->mmio.is_write = is_write; |
3682 |
+ run->mmio.phys_addr = fault_ipa; |
3683 |
+ run->mmio.len = len; |
3684 |
++ vcpu->mmio_needed = 1; |
3685 |
+ |
3686 |
+ if (!ret) { |
3687 |
+ /* We handled the access successfully in the kernel. */ |
3688 |
+diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c |
3689 |
+index bdbc297d06fb..e621b5d45b27 100644 |
3690 |
+--- a/virt/kvm/arm/vgic/vgic-init.c |
3691 |
++++ b/virt/kvm/arm/vgic/vgic-init.c |
3692 |
+@@ -8,6 +8,7 @@ |
3693 |
+ #include <linux/cpu.h> |
3694 |
+ #include <linux/kvm_host.h> |
3695 |
+ #include <kvm/arm_vgic.h> |
3696 |
++#include <asm/kvm_emulate.h> |
3697 |
+ #include <asm/kvm_mmu.h> |
3698 |
+ #include "vgic.h" |
3699 |
+ |
3700 |
+@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) |
3701 |
+ irq->vcpu = NULL; |
3702 |
+ irq->target_vcpu = vcpu0; |
3703 |
+ kref_init(&irq->refcount); |
3704 |
+- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) { |
3705 |
++ switch (dist->vgic_model) { |
3706 |
++ case KVM_DEV_TYPE_ARM_VGIC_V2: |
3707 |
+ irq->targets = 0; |
3708 |
+ irq->group = 0; |
3709 |
+- } else { |
3710 |
++ break; |
3711 |
++ case KVM_DEV_TYPE_ARM_VGIC_V3: |
3712 |
+ irq->mpidr = 0; |
3713 |
+ irq->group = 1; |
3714 |
++ break; |
3715 |
++ default: |
3716 |
++ kfree(dist->spis); |
3717 |
++ return -EINVAL; |
3718 |
+ } |
3719 |
+ } |
3720 |
+ return 0; |
3721 |
+@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) |
3722 |
+ irq->intid = i; |
3723 |
+ irq->vcpu = NULL; |
3724 |
+ irq->target_vcpu = vcpu; |
3725 |
+- irq->targets = 1U << vcpu->vcpu_id; |
3726 |
+ kref_init(&irq->refcount); |
3727 |
+ if (vgic_irq_is_sgi(i)) { |
3728 |
+ /* SGIs */ |
3729 |
+@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) |
3730 |
+ /* PPIs */ |
3731 |
+ irq->config = VGIC_CONFIG_LEVEL; |
3732 |
+ } |
3733 |
+- |
3734 |
+- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) |
3735 |
+- irq->group = 1; |
3736 |
+- else |
3737 |
+- irq->group = 0; |
3738 |
+ } |
3739 |
+ |
3740 |
+ if (!irqchip_in_kernel(vcpu->kvm)) |
3741 |
+@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm) |
3742 |
+ |
3743 |
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { |
3744 |
+ struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; |
3745 |
+- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) |
3746 |
++ switch (dist->vgic_model) { |
3747 |
++ case KVM_DEV_TYPE_ARM_VGIC_V3: |
3748 |
+ irq->group = 1; |
3749 |
+- else |
3750 |
++ irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu); |
3751 |
++ break; |
3752 |
++ case KVM_DEV_TYPE_ARM_VGIC_V2: |
3753 |
+ irq->group = 0; |
3754 |
++ irq->targets = 1U << idx; |
3755 |
++ break; |
3756 |
++ default: |
3757 |
++ ret = -EINVAL; |
3758 |
++ goto out; |
3759 |
++ } |
3760 |
+ } |
3761 |
+ } |
3762 |
+ |