1 |
commit: e3d4af682ac7e412f6f7e870493e0cbb2b17a0d2 |
2 |
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
3 |
AuthorDate: Fri Jul 29 16:39:00 2022 +0000 |
4 |
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
5 |
CommitDate: Fri Jul 29 16:39:00 2022 +0000 |
6 |
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e3d4af68 |
7 |
|
8 |
Linux patch 5.18.15 |
9 |
|
10 |
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> |
11 |
|
12 |
0000_README | 4 + |
13 |
1014_linux-5.18.15.patch | 9072 ++++++++++++++++++++++++++++++++++++++++++++++ |
14 |
2 files changed, 9076 insertions(+) |
15 |
|
16 |
diff --git a/0000_README b/0000_README |
17 |
index 32e6ba50..7c448f23 100644 |
18 |
--- a/0000_README |
19 |
+++ b/0000_README |
20 |
@@ -99,6 +99,10 @@ Patch: 1013_linux-5.18.14.patch |
21 |
From: http://www.kernel.org |
22 |
Desc: Linux 5.18.14 |
23 |
|
24 |
+Patch: 1014_linux-5.18.15.patch |
25 |
+From: http://www.kernel.org |
26 |
+Desc: Linux 5.18.15 |
27 |
+ |
28 |
Patch: 1500_XATTR_USER_PREFIX.patch |
29 |
From: https://bugs.gentoo.org/show_bug.cgi?id=470644 |
30 |
Desc: Support for namespace user.pax.* on tmpfs. |
31 |
|
32 |
diff --git a/1014_linux-5.18.15.patch b/1014_linux-5.18.15.patch |
33 |
new file mode 100644 |
34 |
index 00000000..01c926b9 |
35 |
--- /dev/null |
36 |
+++ b/1014_linux-5.18.15.patch |
37 |
@@ -0,0 +1,9072 @@ |
38 |
+diff --git a/Makefile b/Makefile |
39 |
+index d3723b2f6d6ca..5957afa296922 100644 |
40 |
+--- a/Makefile |
41 |
++++ b/Makefile |
42 |
+@@ -1,7 +1,7 @@ |
43 |
+ # SPDX-License-Identifier: GPL-2.0 |
44 |
+ VERSION = 5 |
45 |
+ PATCHLEVEL = 18 |
46 |
+-SUBLEVEL = 14 |
47 |
++SUBLEVEL = 15 |
48 |
+ EXTRAVERSION = |
49 |
+ NAME = Superb Owl |
50 |
+ |
51 |
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile |
52 |
+index c6ca1b9cbf712..8e236a0221564 100644 |
53 |
+--- a/arch/riscv/Makefile |
54 |
++++ b/arch/riscv/Makefile |
55 |
+@@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y) |
56 |
+ endif |
57 |
+ |
58 |
+ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) |
59 |
++KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) |
60 |
+ |
61 |
+ # GCC versions that support the "-mstrict-align" option default to allowing |
62 |
+ # unaligned accesses. While unaligned accesses are explicitly allowed in the |
63 |
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c |
64 |
+index fe1742c4ca498..1f156098a5bf5 100644 |
65 |
+--- a/arch/x86/events/intel/lbr.c |
66 |
++++ b/arch/x86/events/intel/lbr.c |
67 |
+@@ -278,9 +278,9 @@ enum { |
68 |
+ }; |
69 |
+ |
70 |
+ /* |
71 |
+- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in |
72 |
+- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when |
73 |
+- * TSX is not supported they have no consistent behavior: |
74 |
++ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x |
75 |
++ * are the TSX flags when TSX is supported, but when TSX is not supported |
76 |
++ * they have no consistent behavior: |
77 |
+ * |
78 |
+ * - For wrmsr(), bits 61:62 are considered part of the sign extension. |
79 |
+ * - For HW updates (branch captures) bits 61:62 are always OFF and are not |
80 |
+@@ -288,7 +288,7 @@ enum { |
81 |
+ * |
82 |
+ * Therefore, if: |
83 |
+ * |
84 |
+- * 1) LBR has TSX format |
85 |
++ * 1) LBR format LBR_FORMAT_EIP_FLAGS2 |
86 |
+ * 2) CPU has no TSX support enabled |
87 |
+ * |
88 |
+ * ... then any value passed to wrmsr() must be sign extended to 63 bits and any |
89 |
+@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void) |
90 |
+ bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) || |
91 |
+ boot_cpu_has(X86_FEATURE_RTM); |
92 |
+ |
93 |
+- return !tsx_support && x86_pmu.lbr_has_tsx; |
94 |
++ return !tsx_support; |
95 |
+ } |
96 |
+ |
97 |
+ static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key); |
98 |
+@@ -1611,9 +1611,6 @@ void intel_pmu_lbr_init_hsw(void) |
99 |
+ x86_pmu.lbr_sel_map = hsw_lbr_sel_map; |
100 |
+ |
101 |
+ x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0); |
102 |
+- |
103 |
+- if (lbr_from_signext_quirk_needed()) |
104 |
+- static_branch_enable(&lbr_from_quirk_key); |
105 |
+ } |
106 |
+ |
107 |
+ /* skylake */ |
108 |
+@@ -1704,7 +1701,11 @@ void intel_pmu_lbr_init(void) |
109 |
+ switch (x86_pmu.intel_cap.lbr_format) { |
110 |
+ case LBR_FORMAT_EIP_FLAGS2: |
111 |
+ x86_pmu.lbr_has_tsx = 1; |
112 |
+- fallthrough; |
113 |
++ x86_pmu.lbr_from_flags = 1; |
114 |
++ if (lbr_from_signext_quirk_needed()) |
115 |
++ static_branch_enable(&lbr_from_quirk_key); |
116 |
++ break; |
117 |
++ |
118 |
+ case LBR_FORMAT_EIP_FLAGS: |
119 |
+ x86_pmu.lbr_from_flags = 1; |
120 |
+ break; |
121 |
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
122 |
+index 5d09ded0c491f..49889f171e860 100644 |
123 |
+--- a/arch/x86/include/asm/cpufeatures.h |
124 |
++++ b/arch/x86/include/asm/cpufeatures.h |
125 |
+@@ -301,6 +301,7 @@ |
126 |
+ #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ |
127 |
+ #define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ |
128 |
+ #define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ |
129 |
++#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ |
130 |
+ |
131 |
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ |
132 |
+ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ |
133 |
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
134 |
+index 10a3bfc1eb230..38a3e86e665ef 100644 |
135 |
+--- a/arch/x86/include/asm/nospec-branch.h |
136 |
++++ b/arch/x86/include/asm/nospec-branch.h |
137 |
+@@ -297,6 +297,8 @@ do { \ |
138 |
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, \ |
139 |
+ spec_ctrl_current() | SPEC_CTRL_IBRS, \ |
140 |
+ X86_FEATURE_USE_IBRS_FW); \ |
141 |
++ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \ |
142 |
++ X86_FEATURE_USE_IBPB_FW); \ |
143 |
+ } while (0) |
144 |
+ |
145 |
+ #define firmware_restrict_branch_speculation_end() \ |
146 |
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c |
147 |
+index 46427b785bc89..d440f6726df07 100644 |
148 |
+--- a/arch/x86/kernel/alternative.c |
149 |
++++ b/arch/x86/kernel/alternative.c |
150 |
+@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) |
151 |
+ dest = addr + insn.length + insn.immediate.value; |
152 |
+ |
153 |
+ if (__static_call_fixup(addr, op, dest) || |
154 |
+- WARN_ON_ONCE(dest != &__x86_return_thunk)) |
155 |
++ WARN_ONCE(dest != &__x86_return_thunk, |
156 |
++ "missing return thunk: %pS-%pS: %*ph", |
157 |
++ addr, dest, 5, addr)) |
158 |
+ continue; |
159 |
+ |
160 |
+ DPRINTK("return thunk at: %pS (%px) len: %d to: %pS", |
161 |
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
162 |
+index 0b64e894b3838..8179fa4d5004f 100644 |
163 |
+--- a/arch/x86/kernel/cpu/bugs.c |
164 |
++++ b/arch/x86/kernel/cpu/bugs.c |
165 |
+@@ -968,6 +968,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; } |
166 |
+ #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" |
167 |
+ #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" |
168 |
+ #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" |
169 |
++#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" |
170 |
+ |
171 |
+ #ifdef CONFIG_BPF_SYSCALL |
172 |
+ void unpriv_ebpf_notify(int new_state) |
173 |
+@@ -1408,6 +1409,8 @@ static void __init spectre_v2_select_mitigation(void) |
174 |
+ |
175 |
+ case SPECTRE_V2_IBRS: |
176 |
+ setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); |
177 |
++ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) |
178 |
++ pr_warn(SPECTRE_V2_IBRS_PERF_MSG); |
179 |
+ break; |
180 |
+ |
181 |
+ case SPECTRE_V2_LFENCE: |
182 |
+@@ -1509,7 +1512,16 @@ static void __init spectre_v2_select_mitigation(void) |
183 |
+ * the CPU supports Enhanced IBRS, kernel might un-intentionally not |
184 |
+ * enable IBRS around firmware calls. |
185 |
+ */ |
186 |
+- if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { |
187 |
++ if (boot_cpu_has_bug(X86_BUG_RETBLEED) && |
188 |
++ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || |
189 |
++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { |
190 |
++ |
191 |
++ if (retbleed_cmd != RETBLEED_CMD_IBPB) { |
192 |
++ setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); |
193 |
++ pr_info("Enabling Speculation Barrier for firmware calls\n"); |
194 |
++ } |
195 |
++ |
196 |
++ } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { |
197 |
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); |
198 |
+ pr_info("Enabling Restricted Speculation for firmware calls\n"); |
199 |
+ } |
200 |
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c |
201 |
+index 57ca7aa0e169a..b8e26b6b55236 100644 |
202 |
+--- a/drivers/acpi/cppc_acpi.c |
203 |
++++ b/drivers/acpi/cppc_acpi.c |
204 |
+@@ -764,7 +764,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) |
205 |
+ |
206 |
+ if (!osc_cpc_flexible_adr_space_confirmed) { |
207 |
+ pr_debug("Flexible address space capability not supported\n"); |
208 |
+- goto out_free; |
209 |
++ if (!cpc_supported_by_cpu()) |
210 |
++ goto out_free; |
211 |
+ } |
212 |
+ |
213 |
+ addr = ioremap(gas_t->address, gas_t->bit_width/8); |
214 |
+@@ -791,7 +792,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) |
215 |
+ } |
216 |
+ if (!osc_cpc_flexible_adr_space_confirmed) { |
217 |
+ pr_debug("Flexible address space capability not supported\n"); |
218 |
+- goto out_free; |
219 |
++ if (!cpc_supported_by_cpu()) |
220 |
++ goto out_free; |
221 |
+ } |
222 |
+ } else { |
223 |
+ if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { |
224 |
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c |
225 |
+index 541ced27d9412..de1e934a4f7ec 100644 |
226 |
+--- a/drivers/bus/mhi/host/pci_generic.c |
227 |
++++ b/drivers/bus/mhi/host/pci_generic.c |
228 |
+@@ -446,14 +446,93 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { |
229 |
+ .sideband_wake = false, |
230 |
+ }; |
231 |
+ |
232 |
++static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { |
233 |
++ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), |
234 |
++ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), |
235 |
++ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), |
236 |
++ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), |
237 |
++ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), |
238 |
++ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), |
239 |
++}; |
240 |
++ |
241 |
++static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { |
242 |
++ MHI_EVENT_CONFIG_CTRL(0, 128), |
243 |
++ MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), |
244 |
++ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) |
245 |
++}; |
246 |
++ |
247 |
++static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { |
248 |
++ .max_channels = 128, |
249 |
++ .timeout_ms = 20000, |
250 |
++ .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), |
251 |
++ .ch_cfg = mhi_telit_fn980_hw_v1_channels, |
252 |
++ .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), |
253 |
++ .event_cfg = mhi_telit_fn980_hw_v1_events, |
254 |
++}; |
255 |
++ |
256 |
++static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { |
257 |
++ .name = "telit-fn980-hwv1", |
258 |
++ .fw = "qcom/sdx55m/sbl1.mbn", |
259 |
++ .edl = "qcom/sdx55m/edl.mbn", |
260 |
++ .config = &modem_telit_fn980_hw_v1_config, |
261 |
++ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, |
262 |
++ .dma_data_width = 32, |
263 |
++ .mru_default = 32768, |
264 |
++ .sideband_wake = false, |
265 |
++}; |
266 |
++ |
267 |
++static const struct mhi_channel_config mhi_telit_fn990_channels[] = { |
268 |
++ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), |
269 |
++ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), |
270 |
++ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), |
271 |
++ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), |
272 |
++ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), |
273 |
++ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), |
274 |
++ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), |
275 |
++ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), |
276 |
++ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), |
277 |
++ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), |
278 |
++}; |
279 |
++ |
280 |
++static struct mhi_event_config mhi_telit_fn990_events[] = { |
281 |
++ MHI_EVENT_CONFIG_CTRL(0, 128), |
282 |
++ MHI_EVENT_CONFIG_DATA(1, 128), |
283 |
++ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), |
284 |
++ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) |
285 |
++}; |
286 |
++ |
287 |
++static const struct mhi_controller_config modem_telit_fn990_config = { |
288 |
++ .max_channels = 128, |
289 |
++ .timeout_ms = 20000, |
290 |
++ .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), |
291 |
++ .ch_cfg = mhi_telit_fn990_channels, |
292 |
++ .num_events = ARRAY_SIZE(mhi_telit_fn990_events), |
293 |
++ .event_cfg = mhi_telit_fn990_events, |
294 |
++}; |
295 |
++ |
296 |
++static const struct mhi_pci_dev_info mhi_telit_fn990_info = { |
297 |
++ .name = "telit-fn990", |
298 |
++ .config = &modem_telit_fn990_config, |
299 |
++ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, |
300 |
++ .dma_data_width = 32, |
301 |
++ .sideband_wake = false, |
302 |
++ .mru_default = 32768, |
303 |
++}; |
304 |
++ |
305 |
+ static const struct pci_device_id mhi_pci_id_table[] = { |
306 |
+ /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ |
307 |
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), |
308 |
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, |
309 |
++ /* Telit FN980 hardware revision v1 */ |
310 |
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), |
311 |
++ .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, |
312 |
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), |
313 |
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, |
314 |
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), |
315 |
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, |
316 |
++ /* Telit FN990 */ |
317 |
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), |
318 |
++ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, |
319 |
+ { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ |
320 |
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, |
321 |
+ { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ |
322 |
+diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c |
323 |
+index d1535ac13e894..81cb90955d68b 100644 |
324 |
+--- a/drivers/clk/clk-lan966x.c |
325 |
++++ b/drivers/clk/clk-lan966x.c |
326 |
+@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev, |
327 |
+ |
328 |
+ hw_data->hws[i] = |
329 |
+ devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name, |
330 |
+- "lan966x", 0, base, |
331 |
++ "lan966x", 0, gate_base, |
332 |
+ clk_gate_desc[idx].bit_idx, |
333 |
+ 0, &clk_gate_lock); |
334 |
+ |
335 |
+diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c |
336 |
+index fa4c350c1bf92..a6c78b9c730bc 100644 |
337 |
+--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c |
338 |
++++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c |
339 |
+@@ -75,13 +75,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) |
340 |
+ if (ret) |
341 |
+ goto err; |
342 |
+ |
343 |
+- /* Temporarily set the number of crypto instances to zero to avoid |
344 |
+- * registering the crypto algorithms. |
345 |
+- * This will be removed when the algorithms will support the |
346 |
+- * CRYPTO_TFM_REQ_MAY_BACKLOG flag |
347 |
+- */ |
348 |
+- instances = 0; |
349 |
+- |
350 |
+ for (i = 0; i < instances; i++) { |
351 |
+ val = i; |
352 |
+ bank = i * 2; |
353 |
+diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile |
354 |
+index f25a6c8edfc73..04f058acc4d37 100644 |
355 |
+--- a/drivers/crypto/qat/qat_common/Makefile |
356 |
++++ b/drivers/crypto/qat/qat_common/Makefile |
357 |
+@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \ |
358 |
+ qat_crypto.o \ |
359 |
+ qat_algs.o \ |
360 |
+ qat_asym_algs.o \ |
361 |
++ qat_algs_send.o \ |
362 |
+ qat_uclo.o \ |
363 |
+ qat_hal.o |
364 |
+ |
365 |
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c |
366 |
+index 8ba28409fb74b..630d0483c4e0a 100644 |
367 |
+--- a/drivers/crypto/qat/qat_common/adf_transport.c |
368 |
++++ b/drivers/crypto/qat/qat_common/adf_transport.c |
369 |
+@@ -8,6 +8,9 @@ |
370 |
+ #include "adf_cfg.h" |
371 |
+ #include "adf_common_drv.h" |
372 |
+ |
373 |
++#define ADF_MAX_RING_THRESHOLD 80 |
374 |
++#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100) |
375 |
++ |
376 |
+ static inline u32 adf_modulo(u32 data, u32 shift) |
377 |
+ { |
378 |
+ u32 div = data >> shift; |
379 |
+@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) |
380 |
+ bank->irq_mask); |
381 |
+ } |
382 |
+ |
383 |
++bool adf_ring_nearly_full(struct adf_etr_ring_data *ring) |
384 |
++{ |
385 |
++ return atomic_read(ring->inflights) > ring->threshold; |
386 |
++} |
387 |
++ |
388 |
+ int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) |
389 |
+ { |
390 |
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); |
391 |
+@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, |
392 |
+ struct adf_etr_bank_data *bank; |
393 |
+ struct adf_etr_ring_data *ring; |
394 |
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; |
395 |
++ int max_inflights; |
396 |
+ u32 ring_num; |
397 |
+ int ret; |
398 |
+ |
399 |
+@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, |
400 |
+ ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); |
401 |
+ ring->head = 0; |
402 |
+ ring->tail = 0; |
403 |
++ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size); |
404 |
++ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD); |
405 |
+ atomic_set(ring->inflights, 0); |
406 |
+ ret = adf_init_ring(ring); |
407 |
+ if (ret) |
408 |
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h |
409 |
+index 2c95f1697c76f..e6ef6f9b76913 100644 |
410 |
+--- a/drivers/crypto/qat/qat_common/adf_transport.h |
411 |
++++ b/drivers/crypto/qat/qat_common/adf_transport.h |
412 |
+@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, |
413 |
+ const char *ring_name, adf_callback_fn callback, |
414 |
+ int poll_mode, struct adf_etr_ring_data **ring_ptr); |
415 |
+ |
416 |
++bool adf_ring_nearly_full(struct adf_etr_ring_data *ring); |
417 |
+ int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); |
418 |
+ void adf_remove_ring(struct adf_etr_ring_data *ring); |
419 |
+ #endif |
420 |
+diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h |
421 |
+index 501bcf0f1809a..8b2c92ba7ca1f 100644 |
422 |
+--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h |
423 |
++++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h |
424 |
+@@ -22,6 +22,7 @@ struct adf_etr_ring_data { |
425 |
+ spinlock_t lock; /* protects ring data struct */ |
426 |
+ u16 head; |
427 |
+ u16 tail; |
428 |
++ u32 threshold; |
429 |
+ u8 ring_number; |
430 |
+ u8 ring_size; |
431 |
+ u8 msg_size; |
432 |
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c |
433 |
+index f998ed58457c2..873533dc43a74 100644 |
434 |
+--- a/drivers/crypto/qat/qat_common/qat_algs.c |
435 |
++++ b/drivers/crypto/qat/qat_common/qat_algs.c |
436 |
+@@ -17,7 +17,7 @@ |
437 |
+ #include <crypto/xts.h> |
438 |
+ #include <linux/dma-mapping.h> |
439 |
+ #include "adf_accel_devices.h" |
440 |
+-#include "adf_transport.h" |
441 |
++#include "qat_algs_send.h" |
442 |
+ #include "adf_common_drv.h" |
443 |
+ #include "qat_crypto.h" |
444 |
+ #include "icp_qat_hw.h" |
445 |
+@@ -46,19 +46,6 @@ |
446 |
+ static DEFINE_MUTEX(algs_lock); |
447 |
+ static unsigned int active_devs; |
448 |
+ |
449 |
+-struct qat_alg_buf { |
450 |
+- u32 len; |
451 |
+- u32 resrvd; |
452 |
+- u64 addr; |
453 |
+-} __packed; |
454 |
+- |
455 |
+-struct qat_alg_buf_list { |
456 |
+- u64 resrvd; |
457 |
+- u32 num_bufs; |
458 |
+- u32 num_mapped_bufs; |
459 |
+- struct qat_alg_buf bufers[]; |
460 |
+-} __packed __aligned(64); |
461 |
+- |
462 |
+ /* Common content descriptor */ |
463 |
+ struct qat_alg_cd { |
464 |
+ union { |
465 |
+@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, |
466 |
+ bl->bufers[i].len, DMA_BIDIRECTIONAL); |
467 |
+ |
468 |
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); |
469 |
+- kfree(bl); |
470 |
++ |
471 |
++ if (!qat_req->buf.sgl_src_valid) |
472 |
++ kfree(bl); |
473 |
++ |
474 |
+ if (blp != blpout) { |
475 |
+ /* If out of place operation dma unmap only data */ |
476 |
+ int bufless = blout->num_bufs - blout->num_mapped_bufs; |
477 |
+@@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, |
478 |
+ DMA_BIDIRECTIONAL); |
479 |
+ } |
480 |
+ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); |
481 |
+- kfree(blout); |
482 |
++ |
483 |
++ if (!qat_req->buf.sgl_dst_valid) |
484 |
++ kfree(blout); |
485 |
+ } |
486 |
+ } |
487 |
+ |
488 |
+@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
489 |
+ dma_addr_t blp = DMA_MAPPING_ERROR; |
490 |
+ dma_addr_t bloutp = DMA_MAPPING_ERROR; |
491 |
+ struct scatterlist *sg; |
492 |
+- size_t sz_out, sz = struct_size(bufl, bufers, n + 1); |
493 |
++ size_t sz_out, sz = struct_size(bufl, bufers, n); |
494 |
++ int node = dev_to_node(&GET_DEV(inst->accel_dev)); |
495 |
+ |
496 |
+ if (unlikely(!n)) |
497 |
+ return -EINVAL; |
498 |
+ |
499 |
+- bufl = kzalloc_node(sz, GFP_ATOMIC, |
500 |
+- dev_to_node(&GET_DEV(inst->accel_dev))); |
501 |
+- if (unlikely(!bufl)) |
502 |
+- return -ENOMEM; |
503 |
++ qat_req->buf.sgl_src_valid = false; |
504 |
++ qat_req->buf.sgl_dst_valid = false; |
505 |
++ |
506 |
++ if (n > QAT_MAX_BUFF_DESC) { |
507 |
++ bufl = kzalloc_node(sz, GFP_ATOMIC, node); |
508 |
++ if (unlikely(!bufl)) |
509 |
++ return -ENOMEM; |
510 |
++ } else { |
511 |
++ bufl = &qat_req->buf.sgl_src.sgl_hdr; |
512 |
++ memset(bufl, 0, sizeof(struct qat_alg_buf_list)); |
513 |
++ qat_req->buf.sgl_src_valid = true; |
514 |
++ } |
515 |
+ |
516 |
+ for_each_sg(sgl, sg, n, i) |
517 |
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR; |
518 |
+@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
519 |
+ struct qat_alg_buf *bufers; |
520 |
+ |
521 |
+ n = sg_nents(sglout); |
522 |
+- sz_out = struct_size(buflout, bufers, n + 1); |
523 |
++ sz_out = struct_size(buflout, bufers, n); |
524 |
+ sg_nctr = 0; |
525 |
+- buflout = kzalloc_node(sz_out, GFP_ATOMIC, |
526 |
+- dev_to_node(&GET_DEV(inst->accel_dev))); |
527 |
+- if (unlikely(!buflout)) |
528 |
+- goto err_in; |
529 |
++ |
530 |
++ if (n > QAT_MAX_BUFF_DESC) { |
531 |
++ buflout = kzalloc_node(sz_out, GFP_ATOMIC, node); |
532 |
++ if (unlikely(!buflout)) |
533 |
++ goto err_in; |
534 |
++ } else { |
535 |
++ buflout = &qat_req->buf.sgl_dst.sgl_hdr; |
536 |
++ memset(buflout, 0, sizeof(struct qat_alg_buf_list)); |
537 |
++ qat_req->buf.sgl_dst_valid = true; |
538 |
++ } |
539 |
+ |
540 |
+ bufers = buflout->bufers; |
541 |
+ for_each_sg(sglout, sg, n, i) |
542 |
+@@ -810,7 +817,9 @@ err_out: |
543 |
+ dma_unmap_single(dev, buflout->bufers[i].addr, |
544 |
+ buflout->bufers[i].len, |
545 |
+ DMA_BIDIRECTIONAL); |
546 |
+- kfree(buflout); |
547 |
++ |
548 |
++ if (!qat_req->buf.sgl_dst_valid) |
549 |
++ kfree(buflout); |
550 |
+ |
551 |
+ err_in: |
552 |
+ if (!dma_mapping_error(dev, blp)) |
553 |
+@@ -823,7 +832,8 @@ err_in: |
554 |
+ bufl->bufers[i].len, |
555 |
+ DMA_BIDIRECTIONAL); |
556 |
+ |
557 |
+- kfree(bufl); |
558 |
++ if (!qat_req->buf.sgl_src_valid) |
559 |
++ kfree(bufl); |
560 |
+ |
561 |
+ dev_err(dev, "Failed to map buf for dma\n"); |
562 |
+ return -ENOMEM; |
563 |
+@@ -925,8 +935,25 @@ void qat_alg_callback(void *resp) |
564 |
+ struct icp_qat_fw_la_resp *qat_resp = resp; |
565 |
+ struct qat_crypto_request *qat_req = |
566 |
+ (void *)(__force long)qat_resp->opaque_data; |
567 |
++ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog; |
568 |
+ |
569 |
+ qat_req->cb(qat_resp, qat_req); |
570 |
++ |
571 |
++ qat_alg_send_backlog(backlog); |
572 |
++} |
573 |
++ |
574 |
++static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req, |
575 |
++ struct qat_crypto_instance *inst, |
576 |
++ struct crypto_async_request *base) |
577 |
++{ |
578 |
++ struct qat_alg_req *alg_req = &qat_req->alg_req; |
579 |
++ |
580 |
++ alg_req->fw_req = (u32 *)&qat_req->req; |
581 |
++ alg_req->tx_ring = inst->sym_tx; |
582 |
++ alg_req->base = base; |
583 |
++ alg_req->backlog = &inst->backlog; |
584 |
++ |
585 |
++ return qat_alg_send_message(alg_req); |
586 |
+ } |
587 |
+ |
588 |
+ static int qat_alg_aead_dec(struct aead_request *areq) |
589 |
+@@ -939,7 +966,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) |
590 |
+ struct icp_qat_fw_la_auth_req_params *auth_param; |
591 |
+ struct icp_qat_fw_la_bulk_req *msg; |
592 |
+ int digst_size = crypto_aead_authsize(aead_tfm); |
593 |
+- int ret, ctr = 0; |
594 |
++ int ret; |
595 |
+ u32 cipher_len; |
596 |
+ |
597 |
+ cipher_len = areq->cryptlen - digst_size; |
598 |
+@@ -965,15 +992,12 @@ static int qat_alg_aead_dec(struct aead_request *areq) |
599 |
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); |
600 |
+ auth_param->auth_off = 0; |
601 |
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; |
602 |
+- do { |
603 |
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
604 |
+- } while (ret == -EAGAIN && ctr++ < 10); |
605 |
+ |
606 |
+- if (ret == -EAGAIN) { |
607 |
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); |
608 |
++ if (ret == -ENOSPC) |
609 |
+ qat_alg_free_bufl(ctx->inst, qat_req); |
610 |
+- return -EBUSY; |
611 |
+- } |
612 |
+- return -EINPROGRESS; |
613 |
++ |
614 |
++ return ret; |
615 |
+ } |
616 |
+ |
617 |
+ static int qat_alg_aead_enc(struct aead_request *areq) |
618 |
+@@ -986,7 +1010,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) |
619 |
+ struct icp_qat_fw_la_auth_req_params *auth_param; |
620 |
+ struct icp_qat_fw_la_bulk_req *msg; |
621 |
+ u8 *iv = areq->iv; |
622 |
+- int ret, ctr = 0; |
623 |
++ int ret; |
624 |
+ |
625 |
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0) |
626 |
+ return -EINVAL; |
627 |
+@@ -1013,15 +1037,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) |
628 |
+ auth_param->auth_off = 0; |
629 |
+ auth_param->auth_len = areq->assoclen + areq->cryptlen; |
630 |
+ |
631 |
+- do { |
632 |
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
633 |
+- } while (ret == -EAGAIN && ctr++ < 10); |
634 |
+- |
635 |
+- if (ret == -EAGAIN) { |
636 |
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); |
637 |
++ if (ret == -ENOSPC) |
638 |
+ qat_alg_free_bufl(ctx->inst, qat_req); |
639 |
+- return -EBUSY; |
640 |
+- } |
641 |
+- return -EINPROGRESS; |
642 |
++ |
643 |
++ return ret; |
644 |
+ } |
645 |
+ |
646 |
+ static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx, |
647 |
+@@ -1174,7 +1194,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) |
648 |
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req); |
649 |
+ struct icp_qat_fw_la_cipher_req_params *cipher_param; |
650 |
+ struct icp_qat_fw_la_bulk_req *msg; |
651 |
+- int ret, ctr = 0; |
652 |
++ int ret; |
653 |
+ |
654 |
+ if (req->cryptlen == 0) |
655 |
+ return 0; |
656 |
+@@ -1198,15 +1218,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) |
657 |
+ |
658 |
+ qat_alg_set_req_iv(qat_req); |
659 |
+ |
660 |
+- do { |
661 |
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
662 |
+- } while (ret == -EAGAIN && ctr++ < 10); |
663 |
+- |
664 |
+- if (ret == -EAGAIN) { |
665 |
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); |
666 |
++ if (ret == -ENOSPC) |
667 |
+ qat_alg_free_bufl(ctx->inst, qat_req); |
668 |
+- return -EBUSY; |
669 |
+- } |
670 |
+- return -EINPROGRESS; |
671 |
++ |
672 |
++ return ret; |
673 |
+ } |
674 |
+ |
675 |
+ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) |
676 |
+@@ -1243,7 +1259,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) |
677 |
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req); |
678 |
+ struct icp_qat_fw_la_cipher_req_params *cipher_param; |
679 |
+ struct icp_qat_fw_la_bulk_req *msg; |
680 |
+- int ret, ctr = 0; |
681 |
++ int ret; |
682 |
+ |
683 |
+ if (req->cryptlen == 0) |
684 |
+ return 0; |
685 |
+@@ -1268,15 +1284,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) |
686 |
+ qat_alg_set_req_iv(qat_req); |
687 |
+ qat_alg_update_iv(qat_req); |
688 |
+ |
689 |
+- do { |
690 |
+- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
691 |
+- } while (ret == -EAGAIN && ctr++ < 10); |
692 |
+- |
693 |
+- if (ret == -EAGAIN) { |
694 |
++ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); |
695 |
++ if (ret == -ENOSPC) |
696 |
+ qat_alg_free_bufl(ctx->inst, qat_req); |
697 |
+- return -EBUSY; |
698 |
+- } |
699 |
+- return -EINPROGRESS; |
700 |
++ |
701 |
++ return ret; |
702 |
+ } |
703 |
+ |
704 |
+ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) |
705 |
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c |
706 |
+new file mode 100644 |
707 |
+index 0000000000000..ff5b4347f7831 |
708 |
+--- /dev/null |
709 |
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.c |
710 |
+@@ -0,0 +1,86 @@ |
711 |
++// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) |
712 |
++/* Copyright(c) 2022 Intel Corporation */ |
713 |
++#include "adf_transport.h" |
714 |
++#include "qat_algs_send.h" |
715 |
++#include "qat_crypto.h" |
716 |
++ |
717 |
++#define ADF_MAX_RETRIES 20 |
718 |
++ |
719 |
++static int qat_alg_send_message_retry(struct qat_alg_req *req) |
720 |
++{ |
721 |
++ int ret = 0, ctr = 0; |
722 |
++ |
723 |
++ do { |
724 |
++ ret = adf_send_message(req->tx_ring, req->fw_req); |
725 |
++ } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES); |
726 |
++ |
727 |
++ if (ret == -EAGAIN) |
728 |
++ return -ENOSPC; |
729 |
++ |
730 |
++ return -EINPROGRESS; |
731 |
++} |
732 |
++ |
733 |
++void qat_alg_send_backlog(struct qat_instance_backlog *backlog) |
734 |
++{ |
735 |
++ struct qat_alg_req *req, *tmp; |
736 |
++ |
737 |
++ spin_lock_bh(&backlog->lock); |
738 |
++ list_for_each_entry_safe(req, tmp, &backlog->list, list) { |
739 |
++ if (adf_send_message(req->tx_ring, req->fw_req)) { |
740 |
++ /* The HW ring is full. Do nothing. |
741 |
++ * qat_alg_send_backlog() will be invoked again by |
742 |
++ * another callback. |
743 |
++ */ |
744 |
++ break; |
745 |
++ } |
746 |
++ list_del(&req->list); |
747 |
++ req->base->complete(req->base, -EINPROGRESS); |
748 |
++ } |
749 |
++ spin_unlock_bh(&backlog->lock); |
750 |
++} |
751 |
++ |
752 |
++static void qat_alg_backlog_req(struct qat_alg_req *req, |
753 |
++ struct qat_instance_backlog *backlog) |
754 |
++{ |
755 |
++ INIT_LIST_HEAD(&req->list); |
756 |
++ |
757 |
++ spin_lock_bh(&backlog->lock); |
758 |
++ list_add_tail(&req->list, &backlog->list); |
759 |
++ spin_unlock_bh(&backlog->lock); |
760 |
++} |
761 |
++ |
762 |
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req) |
763 |
++{ |
764 |
++ struct qat_instance_backlog *backlog = req->backlog; |
765 |
++ struct adf_etr_ring_data *tx_ring = req->tx_ring; |
766 |
++ u32 *fw_req = req->fw_req; |
767 |
++ |
768 |
++ /* If any request is already backlogged, then add to backlog list */ |
769 |
++ if (!list_empty(&backlog->list)) |
770 |
++ goto enqueue; |
771 |
++ |
772 |
++ /* If ring is nearly full, then add to backlog list */ |
773 |
++ if (adf_ring_nearly_full(tx_ring)) |
774 |
++ goto enqueue; |
775 |
++ |
776 |
++ /* If adding request to HW ring fails, then add to backlog list */ |
777 |
++ if (adf_send_message(tx_ring, fw_req)) |
778 |
++ goto enqueue; |
779 |
++ |
780 |
++ return -EINPROGRESS; |
781 |
++ |
782 |
++enqueue: |
783 |
++ qat_alg_backlog_req(req, backlog); |
784 |
++ |
785 |
++ return -EBUSY; |
786 |
++} |
787 |
++ |
788 |
++int qat_alg_send_message(struct qat_alg_req *req) |
789 |
++{ |
790 |
++ u32 flags = req->base->flags; |
791 |
++ |
792 |
++ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG) |
793 |
++ return qat_alg_send_message_maybacklog(req); |
794 |
++ else |
795 |
++ return qat_alg_send_message_retry(req); |
796 |
++} |
797 |
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h |
798 |
+new file mode 100644 |
799 |
+index 0000000000000..5ce9f4f69d8ff |
800 |
+--- /dev/null |
801 |
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.h |
802 |
+@@ -0,0 +1,11 @@ |
803 |
++/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ |
804 |
++/* Copyright(c) 2022 Intel Corporation */ |
805 |
++#ifndef QAT_ALGS_SEND_H |
806 |
++#define QAT_ALGS_SEND_H |
807 |
++ |
808 |
++#include "qat_crypto.h" |
809 |
++ |
810 |
++int qat_alg_send_message(struct qat_alg_req *req); |
811 |
++void qat_alg_send_backlog(struct qat_instance_backlog *backlog); |
812 |
++ |
813 |
++#endif |
814 |
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c |
815 |
+index b0b78445418bb..7173a2a0a484f 100644 |
816 |
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c |
817 |
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c |
818 |
+@@ -12,6 +12,7 @@ |
819 |
+ #include <crypto/scatterwalk.h> |
820 |
+ #include "icp_qat_fw_pke.h" |
821 |
+ #include "adf_accel_devices.h" |
822 |
++#include "qat_algs_send.h" |
823 |
+ #include "adf_transport.h" |
824 |
+ #include "adf_common_drv.h" |
825 |
+ #include "qat_crypto.h" |
826 |
+@@ -135,8 +136,23 @@ struct qat_asym_request { |
827 |
+ } areq; |
828 |
+ int err; |
829 |
+ void (*cb)(struct icp_qat_fw_pke_resp *resp); |
830 |
++ struct qat_alg_req alg_req; |
831 |
+ } __aligned(64); |
832 |
+ |
833 |
++static int qat_alg_send_asym_message(struct qat_asym_request *qat_req, |
834 |
++ struct qat_crypto_instance *inst, |
835 |
++ struct crypto_async_request *base) |
836 |
++{ |
837 |
++ struct qat_alg_req *alg_req = &qat_req->alg_req; |
838 |
++ |
839 |
++ alg_req->fw_req = (u32 *)&qat_req->req; |
840 |
++ alg_req->tx_ring = inst->pke_tx; |
841 |
++ alg_req->base = base; |
842 |
++ alg_req->backlog = &inst->backlog; |
843 |
++ |
844 |
++ return qat_alg_send_message(alg_req); |
845 |
++} |
846 |
++ |
847 |
+ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) |
848 |
+ { |
849 |
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque; |
850 |
+@@ -148,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) |
851 |
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; |
852 |
+ |
853 |
+ if (areq->src) { |
854 |
+- if (req->src_align) |
855 |
+- dma_free_coherent(dev, req->ctx.dh->p_size, |
856 |
+- req->src_align, req->in.dh.in.b); |
857 |
+- else |
858 |
+- dma_unmap_single(dev, req->in.dh.in.b, |
859 |
+- req->ctx.dh->p_size, DMA_TO_DEVICE); |
860 |
++ dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size, |
861 |
++ DMA_TO_DEVICE); |
862 |
++ kfree_sensitive(req->src_align); |
863 |
+ } |
864 |
+ |
865 |
+ areq->dst_len = req->ctx.dh->p_size; |
866 |
+ if (req->dst_align) { |
867 |
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, |
868 |
+ areq->dst_len, 1); |
869 |
+- |
870 |
+- dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align, |
871 |
+- req->out.dh.r); |
872 |
+- } else { |
873 |
+- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, |
874 |
+- DMA_FROM_DEVICE); |
875 |
++ kfree_sensitive(req->dst_align); |
876 |
+ } |
877 |
+ |
878 |
++ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, |
879 |
++ DMA_FROM_DEVICE); |
880 |
++ |
881 |
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params), |
882 |
+ DMA_TO_DEVICE); |
883 |
+ dma_unmap_single(dev, req->phy_out, |
884 |
+@@ -213,8 +224,9 @@ static int qat_dh_compute_value(struct kpp_request *req) |
885 |
+ struct qat_asym_request *qat_req = |
886 |
+ PTR_ALIGN(kpp_request_ctx(req), 64); |
887 |
+ struct icp_qat_fw_pke_request *msg = &qat_req->req; |
888 |
+- int ret, ctr = 0; |
889 |
++ int ret; |
890 |
+ int n_input_params = 0; |
891 |
++ u8 *vaddr; |
892 |
+ |
893 |
+ if (unlikely(!ctx->xa)) |
894 |
+ return -EINVAL; |
895 |
+@@ -223,6 +235,10 @@ static int qat_dh_compute_value(struct kpp_request *req) |
896 |
+ req->dst_len = ctx->p_size; |
897 |
+ return -EOVERFLOW; |
898 |
+ } |
899 |
++ |
900 |
++ if (req->src_len > ctx->p_size) |
901 |
++ return -EINVAL; |
902 |
++ |
903 |
+ memset(msg, '\0', sizeof(*msg)); |
904 |
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, |
905 |
+ ICP_QAT_FW_COMN_REQ_FLAG_SET); |
906 |
+@@ -271,27 +287,24 @@ static int qat_dh_compute_value(struct kpp_request *req) |
907 |
+ */ |
908 |
+ if (sg_is_last(req->src) && req->src_len == ctx->p_size) { |
909 |
+ qat_req->src_align = NULL; |
910 |
+- qat_req->in.dh.in.b = dma_map_single(dev, |
911 |
+- sg_virt(req->src), |
912 |
+- req->src_len, |
913 |
+- DMA_TO_DEVICE); |
914 |
+- if (unlikely(dma_mapping_error(dev, |
915 |
+- qat_req->in.dh.in.b))) |
916 |
+- return ret; |
917 |
+- |
918 |
++ vaddr = sg_virt(req->src); |
919 |
+ } else { |
920 |
+ int shift = ctx->p_size - req->src_len; |
921 |
+ |
922 |
+- qat_req->src_align = dma_alloc_coherent(dev, |
923 |
+- ctx->p_size, |
924 |
+- &qat_req->in.dh.in.b, |
925 |
+- GFP_KERNEL); |
926 |
++ qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL); |
927 |
+ if (unlikely(!qat_req->src_align)) |
928 |
+ return ret; |
929 |
+ |
930 |
+ scatterwalk_map_and_copy(qat_req->src_align + shift, |
931 |
+ req->src, 0, req->src_len, 0); |
932 |
++ |
933 |
++ vaddr = qat_req->src_align; |
934 |
+ } |
935 |
++ |
936 |
++ qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size, |
937 |
++ DMA_TO_DEVICE); |
938 |
++ if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b))) |
939 |
++ goto unmap_src; |
940 |
+ } |
941 |
+ /* |
942 |
+ * dst can be of any size in valid range, but HW expects it to be the |
943 |
+@@ -302,20 +315,18 @@ static int qat_dh_compute_value(struct kpp_request *req) |
944 |
+ */ |
945 |
+ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) { |
946 |
+ qat_req->dst_align = NULL; |
947 |
+- qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst), |
948 |
+- req->dst_len, |
949 |
+- DMA_FROM_DEVICE); |
950 |
+- |
951 |
+- if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) |
952 |
+- goto unmap_src; |
953 |
+- |
954 |
++ vaddr = sg_virt(req->dst); |
955 |
+ } else { |
956 |
+- qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, |
957 |
+- &qat_req->out.dh.r, |
958 |
+- GFP_KERNEL); |
959 |
++ qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL); |
960 |
+ if (unlikely(!qat_req->dst_align)) |
961 |
+ goto unmap_src; |
962 |
++ |
963 |
++ vaddr = qat_req->dst_align; |
964 |
+ } |
965 |
++ qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size, |
966 |
++ DMA_FROM_DEVICE); |
967 |
++ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) |
968 |
++ goto unmap_dst; |
969 |
+ |
970 |
+ qat_req->in.dh.in_tab[n_input_params] = 0; |
971 |
+ qat_req->out.dh.out_tab[1] = 0; |
972 |
+@@ -338,13 +349,13 @@ static int qat_dh_compute_value(struct kpp_request *req) |
973 |
+ msg->input_param_count = n_input_params; |
974 |
+ msg->output_param_count = 1; |
975 |
+ |
976 |
+- do { |
977 |
+- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); |
978 |
+- } while (ret == -EBUSY && ctr++ < 100); |
979 |
++ ret = qat_alg_send_asym_message(qat_req, inst, &req->base); |
980 |
++ if (ret == -ENOSPC) |
981 |
++ goto unmap_all; |
982 |
+ |
983 |
+- if (!ret) |
984 |
+- return -EINPROGRESS; |
985 |
++ return ret; |
986 |
+ |
987 |
++unmap_all: |
988 |
+ if (!dma_mapping_error(dev, qat_req->phy_out)) |
989 |
+ dma_unmap_single(dev, qat_req->phy_out, |
990 |
+ sizeof(struct qat_dh_output_params), |
991 |
+@@ -355,23 +366,17 @@ unmap_in_params: |
992 |
+ sizeof(struct qat_dh_input_params), |
993 |
+ DMA_TO_DEVICE); |
994 |
+ unmap_dst: |
995 |
+- if (qat_req->dst_align) |
996 |
+- dma_free_coherent(dev, ctx->p_size, qat_req->dst_align, |
997 |
+- qat_req->out.dh.r); |
998 |
+- else |
999 |
+- if (!dma_mapping_error(dev, qat_req->out.dh.r)) |
1000 |
+- dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, |
1001 |
+- DMA_FROM_DEVICE); |
1002 |
++ if (!dma_mapping_error(dev, qat_req->out.dh.r)) |
1003 |
++ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, |
1004 |
++ DMA_FROM_DEVICE); |
1005 |
++ kfree_sensitive(qat_req->dst_align); |
1006 |
+ unmap_src: |
1007 |
+ if (req->src) { |
1008 |
+- if (qat_req->src_align) |
1009 |
+- dma_free_coherent(dev, ctx->p_size, qat_req->src_align, |
1010 |
+- qat_req->in.dh.in.b); |
1011 |
+- else |
1012 |
+- if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) |
1013 |
+- dma_unmap_single(dev, qat_req->in.dh.in.b, |
1014 |
+- ctx->p_size, |
1015 |
+- DMA_TO_DEVICE); |
1016 |
++ if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) |
1017 |
++ dma_unmap_single(dev, qat_req->in.dh.in.b, |
1018 |
++ ctx->p_size, |
1019 |
++ DMA_TO_DEVICE); |
1020 |
++ kfree_sensitive(qat_req->src_align); |
1021 |
+ } |
1022 |
+ return ret; |
1023 |
+ } |
1024 |
+@@ -420,14 +425,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) |
1025 |
+ static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx) |
1026 |
+ { |
1027 |
+ if (ctx->g) { |
1028 |
++ memset(ctx->g, 0, ctx->p_size); |
1029 |
+ dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g); |
1030 |
+ ctx->g = NULL; |
1031 |
+ } |
1032 |
+ if (ctx->xa) { |
1033 |
++ memset(ctx->xa, 0, ctx->p_size); |
1034 |
+ dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa); |
1035 |
+ ctx->xa = NULL; |
1036 |
+ } |
1037 |
+ if (ctx->p) { |
1038 |
++ memset(ctx->p, 0, ctx->p_size); |
1039 |
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); |
1040 |
+ ctx->p = NULL; |
1041 |
+ } |
1042 |
+@@ -510,25 +518,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) |
1043 |
+ |
1044 |
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; |
1045 |
+ |
1046 |
+- if (req->src_align) |
1047 |
+- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align, |
1048 |
+- req->in.rsa.enc.m); |
1049 |
+- else |
1050 |
+- dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, |
1051 |
+- DMA_TO_DEVICE); |
1052 |
++ kfree_sensitive(req->src_align); |
1053 |
++ |
1054 |
++ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, |
1055 |
++ DMA_TO_DEVICE); |
1056 |
+ |
1057 |
+ areq->dst_len = req->ctx.rsa->key_sz; |
1058 |
+ if (req->dst_align) { |
1059 |
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, |
1060 |
+ areq->dst_len, 1); |
1061 |
+ |
1062 |
+- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align, |
1063 |
+- req->out.rsa.enc.c); |
1064 |
+- } else { |
1065 |
+- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, |
1066 |
+- DMA_FROM_DEVICE); |
1067 |
++ kfree_sensitive(req->dst_align); |
1068 |
+ } |
1069 |
+ |
1070 |
++ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, |
1071 |
++ DMA_FROM_DEVICE); |
1072 |
++ |
1073 |
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), |
1074 |
+ DMA_TO_DEVICE); |
1075 |
+ dma_unmap_single(dev, req->phy_out, |
1076 |
+@@ -542,8 +547,11 @@ void qat_alg_asym_callback(void *_resp) |
1077 |
+ { |
1078 |
+ struct icp_qat_fw_pke_resp *resp = _resp; |
1079 |
+ struct qat_asym_request *areq = (void *)(__force long)resp->opaque; |
1080 |
++ struct qat_instance_backlog *backlog = areq->alg_req.backlog; |
1081 |
+ |
1082 |
+ areq->cb(resp); |
1083 |
++ |
1084 |
++ qat_alg_send_backlog(backlog); |
1085 |
+ } |
1086 |
+ |
1087 |
+ #define PKE_RSA_EP_512 0x1c161b21 |
1088 |
+@@ -642,7 +650,8 @@ static int qat_rsa_enc(struct akcipher_request *req) |
1089 |
+ struct qat_asym_request *qat_req = |
1090 |
+ PTR_ALIGN(akcipher_request_ctx(req), 64); |
1091 |
+ struct icp_qat_fw_pke_request *msg = &qat_req->req; |
1092 |
+- int ret, ctr = 0; |
1093 |
++ u8 *vaddr; |
1094 |
++ int ret; |
1095 |
+ |
1096 |
+ if (unlikely(!ctx->n || !ctx->e)) |
1097 |
+ return -EINVAL; |
1098 |
+@@ -651,6 +660,10 @@ static int qat_rsa_enc(struct akcipher_request *req) |
1099 |
+ req->dst_len = ctx->key_sz; |
1100 |
+ return -EOVERFLOW; |
1101 |
+ } |
1102 |
++ |
1103 |
++ if (req->src_len > ctx->key_sz) |
1104 |
++ return -EINVAL; |
1105 |
++ |
1106 |
+ memset(msg, '\0', sizeof(*msg)); |
1107 |
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, |
1108 |
+ ICP_QAT_FW_COMN_REQ_FLAG_SET); |
1109 |
+@@ -679,40 +692,39 @@ static int qat_rsa_enc(struct akcipher_request *req) |
1110 |
+ */ |
1111 |
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { |
1112 |
+ qat_req->src_align = NULL; |
1113 |
+- qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src), |
1114 |
+- req->src_len, DMA_TO_DEVICE); |
1115 |
+- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) |
1116 |
+- return ret; |
1117 |
+- |
1118 |
++ vaddr = sg_virt(req->src); |
1119 |
+ } else { |
1120 |
+ int shift = ctx->key_sz - req->src_len; |
1121 |
+ |
1122 |
+- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, |
1123 |
+- &qat_req->in.rsa.enc.m, |
1124 |
+- GFP_KERNEL); |
1125 |
++ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL); |
1126 |
+ if (unlikely(!qat_req->src_align)) |
1127 |
+ return ret; |
1128 |
+ |
1129 |
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, |
1130 |
+ 0, req->src_len, 0); |
1131 |
++ vaddr = qat_req->src_align; |
1132 |
+ } |
1133 |
+- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { |
1134 |
+- qat_req->dst_align = NULL; |
1135 |
+- qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst), |
1136 |
+- req->dst_len, |
1137 |
+- DMA_FROM_DEVICE); |
1138 |
+ |
1139 |
+- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) |
1140 |
+- goto unmap_src; |
1141 |
++ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz, |
1142 |
++ DMA_TO_DEVICE); |
1143 |
++ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) |
1144 |
++ goto unmap_src; |
1145 |
+ |
1146 |
++ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { |
1147 |
++ qat_req->dst_align = NULL; |
1148 |
++ vaddr = sg_virt(req->dst); |
1149 |
+ } else { |
1150 |
+- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, |
1151 |
+- &qat_req->out.rsa.enc.c, |
1152 |
+- GFP_KERNEL); |
1153 |
++ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL); |
1154 |
+ if (unlikely(!qat_req->dst_align)) |
1155 |
+ goto unmap_src; |
1156 |
+- |
1157 |
++ vaddr = qat_req->dst_align; |
1158 |
+ } |
1159 |
++ |
1160 |
++ qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz, |
1161 |
++ DMA_FROM_DEVICE); |
1162 |
++ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) |
1163 |
++ goto unmap_dst; |
1164 |
++ |
1165 |
+ qat_req->in.rsa.in_tab[3] = 0; |
1166 |
+ qat_req->out.rsa.out_tab[1] = 0; |
1167 |
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, |
1168 |
+@@ -732,13 +744,14 @@ static int qat_rsa_enc(struct akcipher_request *req) |
1169 |
+ msg->pke_mid.opaque = (u64)(__force long)qat_req; |
1170 |
+ msg->input_param_count = 3; |
1171 |
+ msg->output_param_count = 1; |
1172 |
+- do { |
1173 |
+- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); |
1174 |
+- } while (ret == -EBUSY && ctr++ < 100); |
1175 |
+ |
1176 |
+- if (!ret) |
1177 |
+- return -EINPROGRESS; |
1178 |
++ ret = qat_alg_send_asym_message(qat_req, inst, &req->base); |
1179 |
++ if (ret == -ENOSPC) |
1180 |
++ goto unmap_all; |
1181 |
+ |
1182 |
++ return ret; |
1183 |
++ |
1184 |
++unmap_all: |
1185 |
+ if (!dma_mapping_error(dev, qat_req->phy_out)) |
1186 |
+ dma_unmap_single(dev, qat_req->phy_out, |
1187 |
+ sizeof(struct qat_rsa_output_params), |
1188 |
+@@ -749,21 +762,15 @@ unmap_in_params: |
1189 |
+ sizeof(struct qat_rsa_input_params), |
1190 |
+ DMA_TO_DEVICE); |
1191 |
+ unmap_dst: |
1192 |
+- if (qat_req->dst_align) |
1193 |
+- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, |
1194 |
+- qat_req->out.rsa.enc.c); |
1195 |
+- else |
1196 |
+- if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) |
1197 |
+- dma_unmap_single(dev, qat_req->out.rsa.enc.c, |
1198 |
+- ctx->key_sz, DMA_FROM_DEVICE); |
1199 |
++ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) |
1200 |
++ dma_unmap_single(dev, qat_req->out.rsa.enc.c, |
1201 |
++ ctx->key_sz, DMA_FROM_DEVICE); |
1202 |
++ kfree_sensitive(qat_req->dst_align); |
1203 |
+ unmap_src: |
1204 |
+- if (qat_req->src_align) |
1205 |
+- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, |
1206 |
+- qat_req->in.rsa.enc.m); |
1207 |
+- else |
1208 |
+- if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) |
1209 |
+- dma_unmap_single(dev, qat_req->in.rsa.enc.m, |
1210 |
+- ctx->key_sz, DMA_TO_DEVICE); |
1211 |
++ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) |
1212 |
++ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz, |
1213 |
++ DMA_TO_DEVICE); |
1214 |
++ kfree_sensitive(qat_req->src_align); |
1215 |
+ return ret; |
1216 |
+ } |
1217 |
+ |
1218 |
+@@ -776,7 +783,8 @@ static int qat_rsa_dec(struct akcipher_request *req) |
1219 |
+ struct qat_asym_request *qat_req = |
1220 |
+ PTR_ALIGN(akcipher_request_ctx(req), 64); |
1221 |
+ struct icp_qat_fw_pke_request *msg = &qat_req->req; |
1222 |
+- int ret, ctr = 0; |
1223 |
++ u8 *vaddr; |
1224 |
++ int ret; |
1225 |
+ |
1226 |
+ if (unlikely(!ctx->n || !ctx->d)) |
1227 |
+ return -EINVAL; |
1228 |
+@@ -785,6 +793,10 @@ static int qat_rsa_dec(struct akcipher_request *req) |
1229 |
+ req->dst_len = ctx->key_sz; |
1230 |
+ return -EOVERFLOW; |
1231 |
+ } |
1232 |
++ |
1233 |
++ if (req->src_len > ctx->key_sz) |
1234 |
++ return -EINVAL; |
1235 |
++ |
1236 |
+ memset(msg, '\0', sizeof(*msg)); |
1237 |
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, |
1238 |
+ ICP_QAT_FW_COMN_REQ_FLAG_SET); |
1239 |
+@@ -823,40 +835,37 @@ static int qat_rsa_dec(struct akcipher_request *req) |
1240 |
+ */ |
1241 |
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { |
1242 |
+ qat_req->src_align = NULL; |
1243 |
+- qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), |
1244 |
+- req->dst_len, DMA_TO_DEVICE); |
1245 |
+- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) |
1246 |
+- return ret; |
1247 |
+- |
1248 |
++ vaddr = sg_virt(req->src); |
1249 |
+ } else { |
1250 |
+ int shift = ctx->key_sz - req->src_len; |
1251 |
+ |
1252 |
+- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, |
1253 |
+- &qat_req->in.rsa.dec.c, |
1254 |
+- GFP_KERNEL); |
1255 |
++ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL); |
1256 |
+ if (unlikely(!qat_req->src_align)) |
1257 |
+ return ret; |
1258 |
+ |
1259 |
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, |
1260 |
+ 0, req->src_len, 0); |
1261 |
++ vaddr = qat_req->src_align; |
1262 |
+ } |
1263 |
+- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { |
1264 |
+- qat_req->dst_align = NULL; |
1265 |
+- qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst), |
1266 |
+- req->dst_len, |
1267 |
+- DMA_FROM_DEVICE); |
1268 |
+ |
1269 |
+- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) |
1270 |
+- goto unmap_src; |
1271 |
++ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz, |
1272 |
++ DMA_TO_DEVICE); |
1273 |
++ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) |
1274 |
++ goto unmap_src; |
1275 |
+ |
1276 |
++ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { |
1277 |
++ qat_req->dst_align = NULL; |
1278 |
++ vaddr = sg_virt(req->dst); |
1279 |
+ } else { |
1280 |
+- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, |
1281 |
+- &qat_req->out.rsa.dec.m, |
1282 |
+- GFP_KERNEL); |
1283 |
++ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL); |
1284 |
+ if (unlikely(!qat_req->dst_align)) |
1285 |
+ goto unmap_src; |
1286 |
+- |
1287 |
++ vaddr = qat_req->dst_align; |
1288 |
+ } |
1289 |
++ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz, |
1290 |
++ DMA_FROM_DEVICE); |
1291 |
++ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) |
1292 |
++ goto unmap_dst; |
1293 |
+ |
1294 |
+ if (ctx->crt_mode) |
1295 |
+ qat_req->in.rsa.in_tab[6] = 0; |
1296 |
+@@ -884,13 +893,14 @@ static int qat_rsa_dec(struct akcipher_request *req) |
1297 |
+ msg->input_param_count = 3; |
1298 |
+ |
1299 |
+ msg->output_param_count = 1; |
1300 |
+- do { |
1301 |
+- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); |
1302 |
+- } while (ret == -EBUSY && ctr++ < 100); |
1303 |
+ |
1304 |
+- if (!ret) |
1305 |
+- return -EINPROGRESS; |
1306 |
++ ret = qat_alg_send_asym_message(qat_req, inst, &req->base); |
1307 |
++ if (ret == -ENOSPC) |
1308 |
++ goto unmap_all; |
1309 |
++ |
1310 |
++ return ret; |
1311 |
+ |
1312 |
++unmap_all: |
1313 |
+ if (!dma_mapping_error(dev, qat_req->phy_out)) |
1314 |
+ dma_unmap_single(dev, qat_req->phy_out, |
1315 |
+ sizeof(struct qat_rsa_output_params), |
1316 |
+@@ -901,21 +911,15 @@ unmap_in_params: |
1317 |
+ sizeof(struct qat_rsa_input_params), |
1318 |
+ DMA_TO_DEVICE); |
1319 |
+ unmap_dst: |
1320 |
+- if (qat_req->dst_align) |
1321 |
+- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, |
1322 |
+- qat_req->out.rsa.dec.m); |
1323 |
+- else |
1324 |
+- if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) |
1325 |
+- dma_unmap_single(dev, qat_req->out.rsa.dec.m, |
1326 |
+- ctx->key_sz, DMA_FROM_DEVICE); |
1327 |
++ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) |
1328 |
++ dma_unmap_single(dev, qat_req->out.rsa.dec.m, |
1329 |
++ ctx->key_sz, DMA_FROM_DEVICE); |
1330 |
++ kfree_sensitive(qat_req->dst_align); |
1331 |
+ unmap_src: |
1332 |
+- if (qat_req->src_align) |
1333 |
+- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, |
1334 |
+- qat_req->in.rsa.dec.c); |
1335 |
+- else |
1336 |
+- if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) |
1337 |
+- dma_unmap_single(dev, qat_req->in.rsa.dec.c, |
1338 |
+- ctx->key_sz, DMA_TO_DEVICE); |
1339 |
++ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) |
1340 |
++ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz, |
1341 |
++ DMA_TO_DEVICE); |
1342 |
++ kfree_sensitive(qat_req->src_align); |
1343 |
+ return ret; |
1344 |
+ } |
1345 |
+ |
1346 |
+@@ -1233,18 +1237,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) |
1347 |
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
1348 |
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev); |
1349 |
+ |
1350 |
+- if (ctx->n) |
1351 |
+- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); |
1352 |
+- if (ctx->e) |
1353 |
+- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); |
1354 |
+- if (ctx->d) { |
1355 |
+- memset(ctx->d, '\0', ctx->key_sz); |
1356 |
+- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); |
1357 |
+- } |
1358 |
++ qat_rsa_clear_ctx(dev, ctx); |
1359 |
+ qat_crypto_put_instance(ctx->inst); |
1360 |
+- ctx->n = NULL; |
1361 |
+- ctx->e = NULL; |
1362 |
+- ctx->d = NULL; |
1363 |
+ } |
1364 |
+ |
1365 |
+ static struct akcipher_alg rsa = { |
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
+index 67c9588e89df9..9341d892533a7 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.c
++++ b/drivers/crypto/qat/qat_common/qat_crypto.c
+@@ -161,13 +161,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
+	if (ret)
+		goto err;
+
+-	/* Temporarily set the number of crypto instances to zero to avoid
+-	 * registering the crypto algorithms.
+-	 * This will be removed when the algorithms will support the
+-	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag
+-	 */
+-	instances = 0;
+-
+	for (i = 0; i < instances; i++) {
+		val = i;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+@@ -353,6 +346,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+				    &inst->pke_rx);
+		if (ret)
+			goto err;
++
++		INIT_LIST_HEAD(&inst->backlog.list);
++		spin_lock_init(&inst->backlog.lock);
+	}
+	return 0;
+ err:
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
+index b6a4c95ae003f..245b6d9a36507 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -9,6 +9,19 @@
+ #include "adf_accel_devices.h"
+ #include "icp_qat_fw_la.h"
+
++struct qat_instance_backlog {
++	struct list_head list;
++	spinlock_t lock; /* protects backlog list */
++};
++
++struct qat_alg_req {
++	u32 *fw_req;
++	struct adf_etr_ring_data *tx_ring;
++	struct crypto_async_request *base;
++	struct list_head list;
++	struct qat_instance_backlog *backlog;
++};
++
+ struct qat_crypto_instance {
+	struct adf_etr_ring_data *sym_tx;
+	struct adf_etr_ring_data *sym_rx;
+@@ -19,8 +32,29 @@ struct qat_crypto_instance {
+	unsigned long state;
+	int id;
+	atomic_t refctr;
++	struct qat_instance_backlog backlog;
+ };
+
++#define QAT_MAX_BUFF_DESC	4
++
++struct qat_alg_buf {
++	u32 len;
++	u32 resrvd;
++	u64 addr;
++} __packed;
++
++struct qat_alg_buf_list {
++	u64 resrvd;
++	u32 num_bufs;
++	u32 num_mapped_bufs;
++	struct qat_alg_buf bufers[];
++} __packed;
++
++struct qat_alg_fixed_buf_list {
++	struct qat_alg_buf_list sgl_hdr;
++	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
++} __packed __aligned(64);
++
+ struct qat_crypto_request_buffs {
+	struct qat_alg_buf_list *bl;
+	dma_addr_t blp;
+@@ -28,6 +62,10 @@ struct qat_crypto_request_buffs {
+	dma_addr_t bloutp;
+	size_t sz;
+	size_t sz_out;
++	bool sgl_src_valid;
++	bool sgl_dst_valid;
++	struct qat_alg_fixed_buf_list sgl_src;
++	struct qat_alg_fixed_buf_list sgl_dst;
+ };
+
+ struct qat_crypto_request;
+@@ -53,6 +91,7 @@ struct qat_crypto_request {
+		u8 iv[AES_BLOCK_SIZE];
+	};
+	bool encryption;
++	struct qat_alg_req alg_req;
+ };
+
+ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 33683295a0bfe..64befd6f702b2 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
++	.use_single_read = true,
++	.use_single_write = true,
++
+	.readable_reg = pca953x_readable_register,
+	.writeable_reg = pca953x_writeable_register,
+	.volatile_reg = pca953x_volatile_register,
+@@ -894,15 +897,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
+ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
+ {
+	DECLARE_BITMAP(val, MAX_LINE);
++	u8 regaddr;
+	int ret;
+
+-	ret = regcache_sync_region(chip->regmap, chip->regs->output,
+-				   chip->regs->output + NBANK(chip));
++	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
++	ret = regcache_sync_region(chip->regmap, regaddr,
++				   regaddr + NBANK(chip) - 1);
+	if (ret)
+		goto out;
+
+-	ret = regcache_sync_region(chip->regmap, chip->regs->direction,
+-				   chip->regs->direction + NBANK(chip));
++	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
++	ret = regcache_sync_region(chip->regmap, regaddr,
++				   regaddr + NBANK(chip) - 1);
+	if (ret)
+		goto out;
+
+@@ -1115,14 +1121,14 @@ static int pca953x_regcache_sync(struct device *dev)
+	 * sync these registers first and only then sync the rest.
+	 */
+	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
++	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
+	if (ret) {
+		dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
+		return ret;
+	}
+
+	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
++	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
+	if (ret) {
+		dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
+		return ret;
+@@ -1132,7 +1138,7 @@ static int pca953x_regcache_sync(struct device *dev)
+	if (chip->driver_data & PCA_PCAL) {
+		regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+		ret = regcache_sync_region(chip->regmap, regaddr,
+-					   regaddr + NBANK(chip));
++					   regaddr + NBANK(chip) - 1);
+		if (ret) {
+			dev_err(dev, "Failed to sync INT latch registers: %d\n",
+				ret);
+@@ -1141,7 +1147,7 @@ static int pca953x_regcache_sync(struct device *dev)
+
+		regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+		ret = regcache_sync_region(chip->regmap, regaddr,
+-					   regaddr + NBANK(chip));
++					   regaddr + NBANK(chip) - 1);
+		if (ret) {
+			dev_err(dev, "Failed to sync INT mask registers: %d\n",
+				ret);
+diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
+index b6d3a57e27edc..7f8e2fed29884 100644
+--- a/drivers/gpio/gpio-xilinx.c
++++ b/drivers/gpio/gpio-xilinx.c
+@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
+	const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
+
+	map[index] &= ~(0xFFFFFFFFul << offset);
+-	map[index] |= v << offset;
++	map[index] |= (unsigned long)v << offset;
+ }
+
+ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 810965bd06921..a2575195c4e07 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1670,7 +1670,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+ #endif
+-	if (dc_enable_dmub_notifications(adev->dm.dc)) {
++	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		init_completion(&adev->dm.dmub_aux_transfer_done);
+		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+		if (!adev->dm.dmub_notify) {
+@@ -1708,6 +1708,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+		goto error;
+	}
+
++	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
++	 * It is expected that DMUB will resend any pending notifications at this point, for
++	 * example HPD from DPIA.
++	 */
++	if (dc_is_dmub_outbox_supported(adev->dm.dc))
++		dc_enable_dmub_outbox(adev->dm.dc);
++
+	/* create fake encoders for MST */
+	dm_dp_create_fake_mst_encoders(adev);
+
+@@ -2701,9 +2708,6 @@ static int dm_resume(void *handle)
+		 */
+		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
+
+-		if (dc_enable_dmub_notifications(adev->dm.dc))
+-			amdgpu_dm_outbox_init(adev);
+-
+		r = dm_dmub_hw_init(adev);
+		if (r)
+			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+@@ -2721,6 +2725,11 @@ static int dm_resume(void *handle)
+			}
+		}
+
++		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
++			amdgpu_dm_outbox_init(adev);
++			dc_enable_dmub_outbox(adev->dm.dc);
++		}
++
+		WARN_ON(!dc_commit_state(dm->dc, dc_state));
+
+		dm_gpureset_commit_state(dm->cached_dc_state, dm);
+@@ -2742,13 +2751,15 @@ static int dm_resume(void *handle)
+	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+	dc_resource_state_construct(dm->dc, dm_state->context);
+
+-	/* Re-enable outbox interrupts for DPIA. */
+-	if (dc_enable_dmub_notifications(adev->dm.dc))
+-		amdgpu_dm_outbox_init(adev);
+-
+	/* Before powering on DC we need to re-initialize DMUB. */
+	dm_dmub_hw_resume(adev);
+
++	/* Re-enable outbox interrupts for DPIA. */
++	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
++		amdgpu_dm_outbox_init(adev);
++		dc_enable_dmub_outbox(adev->dm.dc);
++	}
++
+	/* power on hardware */
+	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
+index d5962a34c01d5..e5fc875990c4f 100644
+--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
++++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
+@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
+		     struct iosys_map *map)
+ {
+	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
++	int ret;
++
++	dma_resv_lock(gem->resv, NULL);
++	ret = ttm_bo_vmap(bo, map);
++	dma_resv_unlock(gem->resv);
+
+-	return ttm_bo_vmap(bo, map);
++	return ret;
+ }
+ EXPORT_SYMBOL(drm_gem_ttm_vmap);
+
+@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
+ {
+	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
++	dma_resv_lock(gem->resv, NULL);
+	ttm_bo_vunmap(bo, map);
++	dma_resv_unlock(gem->resv);
+ }
+ EXPORT_SYMBOL(drm_gem_ttm_vunmap);
+
+diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
+index c849533ca83e3..3f5750cc2673e 100644
+--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
++++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
+@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
+
+	ret = dcss_submodules_init(dcss);
+	if (ret) {
++		of_node_put(dcss->of_port);
+		dev_err(dev, "submodules initialization failed\n");
+		goto clks_err;
+	}
+@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
+		dcss_clocks_disable(dcss);
+	}
+
++	of_node_put(dcss->of_port);
++
+	pm_runtime_disable(dcss->dev);
+
+	dcss_submodules_stop(dcss);
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index f7bfcf63d48ee..701a258d2e111 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
+	of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
+	desc->delay.hpd_reliable = reliable_ms;
+	of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
+-	desc->delay.hpd_reliable = absent_ms;
++	desc->delay.hpd_absent = absent_ms;
+
+	/* Power the panel on so we can read the EDID */
+	ret = pm_runtime_get_sync(dev);
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index 191c56064f196..6b25b2f4f5a30 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
+ }
+ EXPORT_SYMBOL(drm_sched_entity_flush);
+
+-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
++static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+ {
+	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+
+@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+						 finish_cb);
+
+-	init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
+-	irq_work_queue(&job->work);
++	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
++	schedule_work(&job->work);
+ }
+
+ static struct dma_fence *
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index 3d6f8ee355bfc..630cfa4ddd468 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
+  */
+ static irqreturn_t cdns_i2c_master_isr(void *ptr)
+ {
+-	unsigned int isr_status, avail_bytes, updatetx;
++	unsigned int isr_status, avail_bytes;
+	unsigned int bytes_to_send;
+-	bool hold_quirk;
++	bool updatetx;
+	struct cdns_i2c *id = ptr;
+	/* Signal completion only after everything is updated */
+	int done_flag = 0;
+@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
+	 * Check if transfer size register needs to be updated again for a
+	 * large data receive operation.
+	 */
+-	updatetx = 0;
+-	if (id->recv_count > id->curr_recv_count)
+-		updatetx = 1;
+-
+-	hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
++	updatetx = id->recv_count > id->curr_recv_count;
+
+	/* When receiving, handle data interrupt and completion interrupt */
+	if (id->p_recv_buf &&
+@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
+				break;
+			}
+
+-			if (cdns_is_holdquirk(id, hold_quirk))
++			if (cdns_is_holdquirk(id, updatetx))
+				break;
+		}
+
+@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
+		 * maintain transfer size non-zero while performing a large
+		 * receive operation.
+		 */
+-		if (cdns_is_holdquirk(id, hold_quirk)) {
++		if (cdns_is_holdquirk(id, updatetx)) {
+			/* wait while fifo is full */
+			while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
+			       (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
+@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
+						  CDNS_I2C_XFER_SIZE_OFFSET);
+				id->curr_recv_count = id->recv_count;
+			}
+-		} else if (id->recv_count && !hold_quirk &&
+-			   !id->curr_recv_count) {
+-
+-			/* Set the slave address in address register*/
+-			cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
+-					  CDNS_I2C_ADDR_OFFSET);
+-
+-			if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
+-				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+-						  CDNS_I2C_XFER_SIZE_OFFSET);
+-				id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+-			} else {
+-				cdns_i2c_writereg(id->recv_count,
+-						  CDNS_I2C_XFER_SIZE_OFFSET);
+-				id->curr_recv_count = id->recv_count;
+-			}
+		}
+
+	/* Clear hold (if not repeated start) and signal completion */
+diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
+index 56aa424fd71d5..815cc561386b0 100644
+--- a/drivers/i2c/busses/i2c-mlxcpld.c
++++ b/drivers/i2c/busses/i2c-mlxcpld.c
+@@ -49,7 +49,7 @@
+ #define MLXCPLD_LPCI2C_NACK_IND		2
+
+ #define MLXCPLD_I2C_FREQ_1000KHZ_SET	0x04
+-#define MLXCPLD_I2C_FREQ_400KHZ_SET	0x0c
++#define MLXCPLD_I2C_FREQ_400KHZ_SET	0x0e
+ #define MLXCPLD_I2C_FREQ_100KHZ_SET	0x42
+
+ enum mlxcpld_i2c_frequency {
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 638bf4a1ed946..646fa86774909 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+	struct irdma_cm_node *cm_node;
+	struct list_head teardown_list;
+	struct ib_qp_attr attr;
+-	struct irdma_sc_vsi *vsi = &iwdev->vsi;
+-	struct irdma_sc_qp *sc_qp;
+-	struct irdma_qp *qp;
+-	int i;
+
+	INIT_LIST_HEAD(&teardown_list);
+
+@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+		irdma_cm_disconn(cm_node->iwqp);
+		irdma_rem_ref_cm_node(cm_node);
+	}
+-	if (!iwdev->roce_mode)
+-		return;
+-
+-	INIT_LIST_HEAD(&teardown_list);
+-	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+-		mutex_lock(&vsi->qos[i].qos_mutex);
+-		list_for_each_safe (list_node, list_core_temp,
+-				    &vsi->qos[i].qplist) {
+-			u32 qp_ip[4];
+-
+-			sc_qp = container_of(list_node, struct irdma_sc_qp,
+-					     list);
+-			if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
+-				continue;
+-
+-			qp = sc_qp->qp_uk.back_qp;
+-			if (!disconnect_all) {
+-				if (nfo->ipv4)
+-					qp_ip[0] = qp->udp_info.local_ipaddr[3];
+-				else
+-					memcpy(qp_ip,
+-					       &qp->udp_info.local_ipaddr[0],
+-					       sizeof(qp_ip));
+-			}
+-
+-			if (disconnect_all ||
+-			    (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
+-			     !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
+-				spin_lock(&iwdev->rf->qptable_lock);
+-				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
+-					irdma_qp_add_ref(&qp->ibqp);
+-					list_add(&qp->teardown_entry,
+-						 &teardown_list);
+-				}
+-				spin_unlock(&iwdev->rf->qptable_lock);
+-			}
+-		}
+-		mutex_unlock(&vsi->qos[i].qos_mutex);
+-	}
+-
+-	list_for_each_safe (list_node, list_core_temp, &teardown_list) {
+-		qp = container_of(list_node, struct irdma_qp, teardown_entry);
+-		attr.qp_state = IB_QPS_ERR;
+-		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+-		irdma_qp_rem_ref(&qp->ibqp);
+-	}
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
+index e46fc110004d0..50299f58b6b31 100644
+--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
++++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
+@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
+	dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
+	dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
+	dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
++	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
+	dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
+	dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
+	dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
+diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
+index cf53b17510cdb..5986fd906308c 100644
+--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
++++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
+@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
+	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+	dev->irq_ops = &icrdma_irq_ops;
++	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+	dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
+	dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
+	dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
+diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
+index 46c12334c7354..4789e85d717b3 100644
+--- a/drivers/infiniband/hw/irdma/irdma.h
++++ b/drivers/infiniband/hw/irdma/irdma.h
+@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
+	u64 max_hw_outbound_msg_size;
+	u64 max_hw_inbound_msg_size;
+	u64 max_mr_size;
++	u64 page_size_cap;
+	u32 min_hw_qp_id;
+	u32 min_hw_aeq_size;
+	u32 max_hw_aeq_size;
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 52f3e88f85695..6daa149dcbda2 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -30,7 +30,7 @@ static int irdma_query_device(struct ib_device *ibdev,
+	props->vendor_part_id = pcidev->device;
+
+	props->hw_ver = rf->pcidev->revision;
+-	props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
++	props->page_size_cap = hw_attrs->page_size_cap;
+	props->max_mr_size = hw_attrs->max_mr_size;
+	props->max_qp = rf->max_qp - rf->used_qps;
+	props->max_qp_wr = hw_attrs->max_qp_wr;
+@@ -2764,7 +2764,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+
+	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
+		iwmr->page_size = ib_umem_find_best_pgsz(region,
+-							 SZ_4K | SZ_2M | SZ_1G,
++							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
+							 virt);
+		if (unlikely(!iwmr->page_size)) {
+			kfree(iwmr);
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index 64e27c2821f99..ada23040cb654 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -1303,8 +1303,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
+	/*
+	 * omap_device_pm_domain has callbacks to enable the main
+	 * functional clock, interface clock and also configure the
+-	 * SYSCONFIG register of omap devices. The callback will be invoked
+-	 * as part of pm_runtime_get_sync.
++	 * SYSCONFIG register to clear any boot loader set voltage
++	 * capabilities before calling sdhci_setup_host(). The
++	 * callback will be invoked as part of pm_runtime_get_sync.
+	 */
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 50);
+@@ -1446,7 +1447,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+-	sdhci_runtime_suspend_host(host);
++	if (omap_host->con != -EINVAL)
++		sdhci_runtime_suspend_host(host);
+
+	sdhci_omap_context_save(omap_host);
+
+@@ -1463,10 +1465,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
+
+	pinctrl_pm_select_default_state(dev);
+
+-	if (omap_host->con != -EINVAL)
++	if (omap_host->con != -EINVAL) {
+		sdhci_omap_context_restore(omap_host);
+-
+-	sdhci_runtime_resume_host(host, 0);
++		sdhci_runtime_resume_host(host, 0);
++	}
+
+	return 0;
+ }
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+index 44b14c9dc9a73..a626028336d3f 100644
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+@@ -655,9 +655,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+	unsigned int tRP_ps;
+	bool use_half_period;
+	int sample_delay_ps, sample_delay_factor;
+-	u16 busy_timeout_cycles;
++	unsigned int busy_timeout_cycles;
+	u8 wrn_dly_sel;
+	unsigned long clk_rate, min_rate;
++	u64 busy_timeout_ps;
+
+	if (sdr->tRC_min >= 30000) {
+		/* ONFI non-EDO modes [0-3] */
+@@ -690,7 +691,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+	addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
+	data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
+	data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
+-	busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
++	busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
++	busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
+
+	hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
+		      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
+diff --git a/drivers/net/amt.c b/drivers/net/amt.c
+index 14fe03dbd9b1d..acf5ea96652f8 100644
+--- a/drivers/net/amt.c
++++ b/drivers/net/amt.c
+@@ -563,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
+	ihv3->nsrcs	= 0;
+	ihv3->resv	= 0;
+	ihv3->suppress	= false;
+-	ihv3->qrv	= amt->net->ipv4.sysctl_igmp_qrv;
++	ihv3->qrv	= READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
+	ihv3->csum	= 0;
+	csum		= &ihv3->csum;
+	csum_start	= (void *)ihv3;
+@@ -577,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
+	return skb;
+ }
+
+-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+-				   bool validate)
++static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
++				 bool validate)
+ {
+	if (validate && amt->status >= status)
+		return;
+	netdev_dbg(amt->dev, "Update GW status %s -> %s",
+		   status_str[amt->status], status_str[status]);
+-	amt->status = status;
++	WRITE_ONCE(amt->status, status);
+ }
+
+ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
+@@ -600,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
+	tunnel->status = status;
+ }
+
+-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+-				 bool validate)
+-{
+-	spin_lock_bh(&amt->lock);
+-	__amt_update_gw_status(amt, status, validate);
+-	spin_unlock_bh(&amt->lock);
+-}
+-
+ static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
+				    enum amt_status status, bool validate)
+ {
+@@ -700,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt)
+	if (unlikely(net_xmit_eval(err)))
+		amt->dev->stats.tx_errors++;
+
+-	spin_lock_bh(&amt->lock);
+-	__amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
+-	spin_unlock_bh(&amt->lock);
++	amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
+ out:
+	rcu_read_unlock();
+ }
+@@ -900,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+ }
+ #endif
+
++static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
++			    struct sk_buff *skb)
++{
++	int index;
++
++	spin_lock_bh(&amt->lock);
++	if (amt->nr_events >= AMT_MAX_EVENTS) {
++		spin_unlock_bh(&amt->lock);
++		return 1;
++	}
++
++	index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
++	amt->events[index].event = event;
++	amt->events[index].skb = skb;
++	amt->nr_events++;
++	amt->event_idx %= AMT_MAX_EVENTS;
++	queue_work(amt_wq, &amt->event_wq);
++	spin_unlock_bh(&amt->lock);
++
++	return 0;
++}
++
+ static void amt_secret_work(struct work_struct *work)
+ {
+	struct amt_dev *amt = container_of(to_delayed_work(work),
+@@ -913,58 +925,72 @@ static void amt_secret_work(struct work_struct *work)
+			 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+ }
+
+-static void amt_discovery_work(struct work_struct *work)
++static void amt_event_send_discovery(struct amt_dev *amt)
+ {
+-	struct amt_dev *amt = container_of(to_delayed_work(work),
+-					   struct amt_dev,
+-					   discovery_wq);
+-
+-	spin_lock_bh(&amt->lock);
+	if (amt->status > AMT_STATUS_SENT_DISCOVERY)
+		goto out;
+	get_random_bytes(&amt->nonce, sizeof(__be32));
+-	spin_unlock_bh(&amt->lock);
+
+	amt_send_discovery(amt);
+-	spin_lock_bh(&amt->lock);
+ out:
+	mod_delayed_work(amt_wq, &amt->discovery_wq,
+			 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+-	spin_unlock_bh(&amt->lock);
+ }
+
+-static void amt_req_work(struct work_struct *work)
++static void amt_discovery_work(struct work_struct *work)
+ {
+	struct amt_dev *amt = container_of(to_delayed_work(work),
+					   struct amt_dev,
+-					   req_wq);
++					   discovery_wq);
++
++	if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
++		mod_delayed_work(amt_wq, &amt->discovery_wq,
++				 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
++}
++
++static void amt_event_send_request(struct amt_dev *amt)
++{
+	u32 exp;
+
+-	spin_lock_bh(&amt->lock);
+	if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
+		goto out;
+
+	if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
+		netdev_dbg(amt->dev, "Gateway is not ready");
+		amt->qi = AMT_INIT_REQ_TIMEOUT;
+-		amt->ready4 = false;
+-		amt->ready6 = false;
++		WRITE_ONCE(amt->ready4, false);
++		WRITE_ONCE(amt->ready6, false);
+		amt->remote_ip = 0;
+-		__amt_update_gw_status(amt, AMT_STATUS_INIT, false);
++		amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+		amt->req_cnt = 0;
++		amt->nonce = 0;
+		goto out;
+	}
+-	spin_unlock_bh(&amt->lock);
++
++	if (!amt->req_cnt) {
++		WRITE_ONCE(amt->ready4, false);
++		WRITE_ONCE(amt->ready6, false);
++		get_random_bytes(&amt->nonce, sizeof(__be32));
++	}
+
+	amt_send_request(amt, false);
+	amt_send_request(amt, true);
+-	spin_lock_bh(&amt->lock);
+-	__amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
++	amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+	amt->req_cnt++;
+ out:
+	exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
+	mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+-	spin_unlock_bh(&amt->lock);
++}
++
++static void amt_req_work(struct work_struct *work)
++{
++	struct amt_dev *amt = container_of(to_delayed_work(work),
++					   struct amt_dev,
++					   req_wq);
++
++	if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
++		mod_delayed_work(amt_wq, &amt->req_wq,
++				 msecs_to_jiffies(100));
+ }
+
+ static bool amt_send_membership_update(struct amt_dev *amt,
+@@ -1220,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+		/* Gateway only passes IGMP/MLD packets */
+		if (!report)
+			goto free;
+-		if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
++		if ((!v6 && !READ_ONCE(amt->ready4)) ||
++		    (v6 && !READ_ONCE(amt->ready6)))
+			goto free;
+		if (amt_send_membership_update(amt, skb, v6))
+			goto free;
+@@ -2236,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
+	    ipv4_is_zeronet(amta->ip4))
+		return true;
+
++	if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
++	    amt->nonce != amta->nonce)
++		return true;
++
+	amt->remote_ip = amta->ip4;
+	netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
+	mod_delayed_work(amt_wq, &amt->req_wq, 0);
+@@ -2251,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
+	struct ethhdr *eth;
+	struct iphdr *iph;
+
++	if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
++		return true;
++
+	hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+	if (!pskb_may_pull(skb, hdr_size))
+		return true;
+@@ -2325,6 +2359,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+	if (amtmq->reserved || amtmq->version)
+		return true;
+
++	if (amtmq->nonce != amt->nonce)
++		return true;
++
+	hdr_size -= sizeof(*eth);
+	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
+		return true;
+@@ -2339,6 +2376,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+
+	iph = ip_hdr(skb);
+	if (iph->version == 4) {
++		if (READ_ONCE(amt->ready4))
++			return true;
++
+		if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
+				   sizeof(*ihv3)))
+			return true;
+@@ -2349,12 +2389,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+		ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+		skb_reset_transport_header(skb);
+		skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+-		spin_lock_bh(&amt->lock);
+-		amt->ready4 = true;
++		WRITE_ONCE(amt->ready4, true);
+		amt->mac = amtmq->response_mac;
+		amt->req_cnt = 0;
+		amt->qi = ihv3->qqic;
+-		spin_unlock_bh(&amt->lock);
+		skb->protocol = htons(ETH_P_IP);
+		eth->h_proto = htons(ETH_P_IP);
+		ip_eth_mc_map(iph->daddr, eth->h_dest);
+@@ -2363,6 +2401,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+		struct mld2_query *mld2q;
+		struct ipv6hdr *ip6h;
+
++		if (READ_ONCE(amt->ready6))
++			return true;
++
+		if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
+				   sizeof(*mld2q)))
+			return true;
+@@ -2374,12 +2415,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+		mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+		skb_reset_transport_header(skb);
+		skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+-		spin_lock_bh(&amt->lock);
+-		amt->ready6 = true;
++		WRITE_ONCE(amt->ready6, true);
+		amt->mac = amtmq->response_mac;
+		amt->req_cnt = 0;
+		amt->qi = mld2q->mld2q_qqic;
+-		spin_unlock_bh(&amt->lock);
+		skb->protocol = htons(ETH_P_IPV6);
+		eth->h_proto = htons(ETH_P_IPV6);
+		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+@@ -2392,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
+	skb->pkt_type = PACKET_MULTICAST;
+	skb->ip_summed = CHECKSUM_NONE;
+	len = skb->len;
++	local_bh_disable();
+	if (__netif_rx(skb) == NET_RX_SUCCESS) {
+		amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
+		dev_sw_netstats_rx_add(amt->dev, len);
+	} else {
+		amt->dev->stats.rx_dropped++;
+	}
++	local_bh_enable();
+
+	return false;
+ }
+@@ -2638,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
+		if (tunnel->ip4 == iph->saddr)
+			goto send;
+
++	spin_lock_bh(&amt->lock);
+	if (amt->nr_tunnels >= amt->max_tunnels) {
++		spin_unlock_bh(&amt->lock);
+		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+		return true;
+	}
+@@ -2646,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
+	tunnel = kzalloc(sizeof(*tunnel) +
+			 (sizeof(struct hlist_head) * amt->hash_buckets),
+			 GFP_ATOMIC);
+-	if (!tunnel)
++	if (!tunnel) {
++		spin_unlock_bh(&amt->lock);
+		return true;
++	}
+
+	tunnel->source_port = udph->source;
+	tunnel->ip4 = iph->saddr;
+@@ -2660,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
+
+	INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
+
+-	spin_lock_bh(&amt->lock);
+	list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
+	tunnel->key = amt->key;
+-	amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
++	__amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+	amt->nr_tunnels++;
+	mod_delayed_work(amt_wq, &tunnel->gc_wq,
+			 msecs_to_jiffies(amt_gmi(amt)));
+@@ -2688,6 +2732,38 @@ send:
+	return false;
+ }
+
++static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
++{
++	int type = amt_parse_type(skb);
++	int err = 1;
++
++	if (type == -1)
++		goto drop;
++
++	if (amt->mode == AMT_MODE_GATEWAY) {
++		switch (type) {
++		case AMT_MSG_ADVERTISEMENT:
++			err = amt_advertisement_handler(amt, skb);
++			break;
++		case AMT_MSG_MEMBERSHIP_QUERY:
++			err = amt_membership_query_handler(amt, skb);
++			if (!err)
++				return;
++			break;
++		default:
++			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
++			break;
++		}
++	}
++drop:
++	if (err) {
++		amt->dev->stats.rx_dropped++;
++		kfree_skb(skb);
++	} else {
++		consume_skb(skb);
++	}
++}
++
+ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+	struct amt_dev *amt;
+@@ -2719,8 +2795,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+				err = true;
+				goto drop;
+			}
+-			err = amt_advertisement_handler(amt, skb);
+-			break;
++			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
++				netdev_dbg(amt->dev, "AMT Event queue full\n");
++				err = true;
++				goto drop;
++			}
++			goto out;
+		case AMT_MSG_MULTICAST_DATA:
+			if (iph->saddr != amt->remote_ip) {
+				netdev_dbg(amt->dev, "Invalid Relay IP\n");
+@@ -2738,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+				err = true;
+				goto drop;
+			}
+-			err = amt_membership_query_handler(amt, skb);
+-			if (err)
++			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
++				netdev_dbg(amt->dev, "AMT Event queue full\n");
++				err = true;
+				goto drop;
+-			else
+-				goto out;
++			}
++			goto out;
+		default:
+			err = true;
+			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+@@ -2780,6 +2861,46 @@ out:
+	return 0;
+ }
+
++static void amt_event_work(struct work_struct *work)
++{
++	struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
++	struct sk_buff *skb;
++	u8 event;
++	int i;
++
++	for (i = 0; i < AMT_MAX_EVENTS; i++) {
++		spin_lock_bh(&amt->lock);
++		if (amt->nr_events == 0) {
++			spin_unlock_bh(&amt->lock);
++			return;
++		}
++		event = amt->events[amt->event_idx].event;
++		skb = amt->events[amt->event_idx].skb;
++		amt->events[amt->event_idx].event = AMT_EVENT_NONE;
++		amt->events[amt->event_idx].skb = NULL;
++		amt->nr_events--;
++		amt->event_idx++;
++		amt->event_idx %= AMT_MAX_EVENTS;
++		spin_unlock_bh(&amt->lock);
++
++		switch (event) {
++		case AMT_EVENT_RECEIVE:
++			amt_gw_rcv(amt, skb);
++			break;
++		case AMT_EVENT_SEND_DISCOVERY:
++			amt_event_send_discovery(amt);
++			break;
++		case AMT_EVENT_SEND_REQUEST:
++			amt_event_send_request(amt);
++			break;
++		default:
++			if (skb)
++				kfree_skb(skb);
++			break;
++		}
++	}
++}
++
+ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
+ {
+	struct amt_dev *amt;
+@@ -2804,7 +2925,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
+		break;
+	case AMT_MSG_REQUEST:
+	case AMT_MSG_MEMBERSHIP_UPDATE:
+-		if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
++		if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+			mod_delayed_work(amt_wq, &amt->req_wq, 0);
+		break;
+	default:
+@@ -2867,6 +2988,8 @@ static int amt_dev_open(struct net_device *dev)
+
+	amt->ready4 = false;
+	amt->ready6 = false;
++	amt->event_idx = 0;
++	amt->nr_events = 0;
+
+	err = amt_socket_create(amt);
+	if (err)
+@@ -2874,6 +2997,7 @@ static int amt_dev_open(struct net_device *dev)
+
+	amt->req_cnt = 0;
+	amt->remote_ip = 0;
++	amt->nonce = 0;
+	get_random_bytes(&amt->key, sizeof(siphash_key_t));
+
+	amt->status = AMT_STATUS_INIT;
+@@ -2892,6 +3016,8 @@ static int amt_dev_stop(struct net_device *dev)
+	struct amt_dev *amt = netdev_priv(dev);
+	struct amt_tunnel_list *tunnel, *tmp;
+	struct socket *sock;
++	struct sk_buff *skb;
++	int i;
+
+	cancel_delayed_work_sync(&amt->req_wq);
+	cancel_delayed_work_sync(&amt->discovery_wq);
+@@ -2904,6 +3030,15 @@ static int amt_dev_stop(struct net_device *dev)
+	if (sock)
+		udp_tunnel_sock_release(sock);
+
++	cancel_work_sync(&amt->event_wq);
++	for (i = 0; i < AMT_MAX_EVENTS; i++) {
++		skb = amt->events[i].skb;
++		if (skb)
++			kfree_skb(skb);
++		amt->events[i].event = AMT_EVENT_NONE;
++		amt->events[i].skb = NULL;
++	}
++
+	amt->ready4 = false;
+	amt->ready6 = false;
+	amt->req_cnt = 0;
+@@ -3095,7 +3230,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
+		goto err;
+	}
+	if (amt->mode == AMT_MODE_RELAY) {
+-		amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
++		amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
+		amt->qri = 10;
+		dev->needed_headroom = amt->stream_dev->needed_headroom +
+				       AMT_RELAY_HLEN;
+@@ -3146,8 +3281,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
+	INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
+	INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
+	INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
++	INIT_WORK(&amt->event_wq, amt_event_work);
+	INIT_LIST_HEAD(&amt->tunnel_list);
+-
+	return 0;
+ err:
+	dev_put(amt->stream_dev);
+@@ -3280,7 +3415,7 @@ static int __init amt_init(void)
+	if (err < 0)
+		goto unregister_notifier;
+
+-	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
++	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
+	if (!amt_wq) {
+		err = -ENOMEM;
+		goto rtnl_unregister;
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index 589996cef5db3..8d457d2c3bccb 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1850,6 +1850,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
+		of_child = of_get_child_by_name(pdev->dev.of_node, name);
+		if (of_child && of_device_is_available(of_child))
+			channels_mask |= BIT(i);
++		of_node_put(of_child);
+	}
+
+	if (chip_id != RENESAS_RZG2L) {
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 8014b18d93914..aa0bcf01e20ac 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -447,18 +447,21 @@ int ksz_switch_register(struct ksz_device *dev,
+	ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
+	if (!ports)
+		ports = of_get_child_by_name(dev->dev->of_node, "ports");
+-	if (ports)
++	if (ports) {
+		for_each_available_child_of_node(ports, port) {
+			if (of_property_read_u32(port, "reg",
+						 &port_num))
+				continue;
+			if (!(dev->port_mask & BIT(port_num))) {
+				of_node_put(port);
++				of_node_put(ports);
+				return -EINVAL;
+			}
+			of_get_phy_mode(port,
+					&dev->ports[port_num].interface);
+		}
++		of_node_put(ports);
++	}
+	dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
+						 "microchip,synclko-125");
+	dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index b33841c6507ae..7734c6b1bacae 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -3383,12 +3383,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+
++static const struct spi_device_id sja1105_spi_ids[] = {
++	{ "sja1105e" },
++	{ "sja1105t" },
++	{ "sja1105p" },
++	{ "sja1105q" },
++	{ "sja1105r" },
++	{ "sja1105s" },
++	{ "sja1110a" },
++	{ "sja1110b" },
++	{ "sja1110c" },
++	{ "sja1110d" },
++	{ },
++};
++MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
++
+ static struct spi_driver sja1105_driver = {
+	.driver = {
+		.name  = "sja1105",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(sja1105_dt_ids),
+	},
++	.id_table = sja1105_spi_ids,
+	.probe  = sja1105_probe,
+	.remove = sja1105_remove,
+	.shutdown = sja1105_shutdown,
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
+index 3110895358d8d..97a92e6da60d8 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
+@@ -205,10 +205,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
++static const struct spi_device_id vsc73xx_spi_ids[] = {
++	{ "vsc7385" },
++	{ "vsc7388" },
++	{ "vsc7395" },
++	{ "vsc7398" },
++	{ },
++};
++MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
++
+ static struct spi_driver vsc73xx_spi_driver = {
+	.probe = vsc73xx_spi_probe,
+	.remove = vsc73xx_spi_remove,
+	.shutdown = vsc73xx_spi_shutdown,
++	.id_table = vsc73xx_spi_ids,
+	.driver = {
+		.name = "vsc73xx-spi",
+		.of_match_table = vsc73xx_of_match,
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 7c760aa655404..ddfe9208529a5 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+	csk->sndbuf = newsk->sk_sndbuf;
+	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
+	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
+-					   sock_net(newsk)->
+-						ipv4.sysctl_tcp_window_scaling,
++					   READ_ONCE(sock_net(newsk)->
++						     ipv4.sysctl_tcp_window_scaling),
+					   tp->window_clamp);
+	neigh_release(n);
+	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+@@ -1384,7 +1384,7 @@ static void chtls_pass_accept_request(struct sock *sk,
+ #endif
+	}
+	if (req->tcpopt.wsf <= 14 &&
+-	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
++	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
+		inet_rsk(oreq)->wscale_ok = 1;
+		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
+	}
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index 528eb0f223b17..b4f5e57d0285c 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -2287,7 +2287,7 @@ err:
+
+ /* Uses sync mcc */
+ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+-				      u8 page_num, u8 *data)
++				      u8 page_num, u32 off, u32 len, u8 *data)
+ {
+	struct be_dma_mem cmd;
+	struct be_mcc_wrb *wrb;
+@@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+	req->port = cpu_to_le32(adapter->hba_port_num);
+	req->page_num = cpu_to_le32(page_num);
+	status = be_mcc_notify_wait(adapter);
+-	if (!status) {
++	if (!status && len > 0) {
+		struct be_cmd_resp_port_type *resp = cmd.va;
+
+-		memcpy(data, resp->page_data, PAGE_DATA_LEN);
++		memcpy(data, resp->page_data + off, len);
+	}
+ err:
+	mutex_unlock(&adapter->mcc_lock);
+@@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
+	int status;
+
+	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+-						   page_data);
++						   0, PAGE_DATA_LEN, page_data);
+	if (!status) {
+		switch (adapter->phy.interface_type) {
+		case PHY_TYPE_QSFP:
+@@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
+	int status;
+
+	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+-						   page_data);
++						   0, PAGE_DATA_LEN, page_data);
+	if (!status) {
+		strlcpy(adapter->phy.vendor_name, page_data +
+			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
+index db1f3b908582e..e2085c68c0ee7 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
+@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+			    u32 *state);
+ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+-				      u8 page_num, u8 *data);
++				      u8 page_num, u32 off, u32 len, u8 *data);
+ int be_cmd_query_cable_type(struct be_adapter *adapter);
+ int be_cmd_query_sfp_info(struct be_adapter *adapter);
+ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index dfa784339781d..bd0df189d8719 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev,
+		return -EOPNOTSUPP;
+
+	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+-						   page_data);
++						   0, PAGE_DATA_LEN, page_data);
+	if (!status) {
+		if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
+			modinfo->type = ETH_MODULE_SFF_8079;
+@@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
+ {
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status;
++	u32 begin, end;
+
+	if (!check_privilege(adapter, MAX_PRIVILEGES))
+		return -EOPNOTSUPP;
+
+-	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+-						   data);
+-	if (status)
+-		goto err;
++	begin = eeprom->offset;
++	end = eeprom->offset + eeprom->len;
++
++	if (begin < PAGE_DATA_LEN) {
++		status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
++							   min_t(u32, end, PAGE_DATA_LEN) - begin,
++							   data);
++		if (status)
++			goto err;
++
++		data += PAGE_DATA_LEN - begin;
++		begin = PAGE_DATA_LEN;
++	}
+
+-	if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
+-		status = be_cmd_read_port_transceiver_data(adapter,
+-							   TR_PAGE_A2,
+-							   data +
+-							   PAGE_DATA_LEN);
++	if (end > PAGE_DATA_LEN) {
++		status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
++							   begin - PAGE_DATA_LEN,
++							   end - begin, data);
+		if (status)
+			goto err;
+	}
+-	if (eeprom->offset)
+-		memcpy(data, data + eeprom->offset, eeprom->len);
+ err:
+	return be_cmd_status(status);
+ }
2731 |
+diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h |
2732 |
+index 13382df2f2eff..bcf680e838113 100644 |
2733 |
+--- a/drivers/net/ethernet/intel/e1000e/hw.h |
2734 |
++++ b/drivers/net/ethernet/intel/e1000e/hw.h |
2735 |
+@@ -630,7 +630,6 @@ struct e1000_phy_info { |
2736 |
+ bool disable_polarity_correction; |
2737 |
+ bool is_mdix; |
2738 |
+ bool polarity_correction; |
2739 |
+- bool reset_disable; |
2740 |
+ bool speed_downgraded; |
2741 |
+ bool autoneg_wait_to_complete; |
2742 |
+ }; |
2743 |
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c |
2744 |
+index e6c8e6d5234f8..9466f65a6da77 100644 |
2745 |
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c |
2746 |
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c |
2747 |
+@@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) |
2748 |
+ bool blocked = false; |
2749 |
+ int i = 0; |
2750 |
+ |
2751 |
+- /* Check the PHY (LCD) reset flag */ |
2752 |
+- if (hw->phy.reset_disable) |
2753 |
+- return true; |
2754 |
+- |
2755 |
+ while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && |
2756 |
+ (i++ < 30)) |
2757 |
+ usleep_range(10000, 11000); |
2758 |
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h |
2759 |
+index 638a3ddd7ada8..2504b11c3169f 100644 |
2760 |
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h |
2761 |
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h |
2762 |
+@@ -271,7 +271,6 @@ |
2763 |
+ #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 |
2764 |
+ #define I217_MEMPWR PHY_REG(772, 26) |
2765 |
+ #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 |
2766 |
+-#define I217_MEMPWR_MOEM 0x1000 |
2767 |
+ |
2768 |
+ /* Receive Address Initial CRC Calculation */ |
2769 |
+ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) |
2770 |
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c |
2771 |
+index fa06f68c8c803..f1729940e46ce 100644 |
2772 |
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c |
2773 |
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c |
2774 |
+@@ -6494,6 +6494,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) |
2775 |
+ |
2776 |
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && |
2777 |
+ hw->mac.type >= e1000_pch_adp) { |
2778 |
++ /* Keep the GPT clock enabled for CSME */ |
2779 |
++ mac_data = er32(FEXTNVM); |
2780 |
++ mac_data |= BIT(3); |
2781 |
++ ew32(FEXTNVM, mac_data); |
2782 |
+ /* Request ME unconfigure the device from S0ix */ |
2783 |
+ mac_data = er32(H2ME); |
2784 |
+ mac_data &= ~E1000_H2ME_START_DPG; |
2785 |
+@@ -6987,21 +6991,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) |
2786 |
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); |
2787 |
+ struct e1000_adapter *adapter = netdev_priv(netdev); |
2788 |
+ struct pci_dev *pdev = to_pci_dev(dev); |
2789 |
+- struct e1000_hw *hw = &adapter->hw; |
2790 |
+- u16 phy_data; |
2791 |
+ int rc; |
2792 |
+ |
2793 |
+- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && |
2794 |
+- hw->mac.type >= e1000_pch_adp) { |
2795 |
+- /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */ |
2796 |
+- e1e_rphy(hw, I217_MEMPWR, &phy_data); |
2797 |
+- phy_data |= I217_MEMPWR_MOEM; |
2798 |
+- e1e_wphy(hw, I217_MEMPWR, phy_data); |
2799 |
+- |
2800 |
+- /* Disable LCD reset */ |
2801 |
+- hw->phy.reset_disable = true; |
2802 |
+- } |
2803 |
+- |
2804 |
+ e1000e_flush_lpic(pdev); |
2805 |
+ |
2806 |
+ e1000e_pm_freeze(dev); |
2807 |
+@@ -7023,8 +7014,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) |
2808 |
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); |
2809 |
+ struct e1000_adapter *adapter = netdev_priv(netdev); |
2810 |
+ struct pci_dev *pdev = to_pci_dev(dev); |
2811 |
+- struct e1000_hw *hw = &adapter->hw; |
2812 |
+- u16 phy_data; |
2813 |
+ int rc; |
2814 |
+ |
2815 |
+ /* Introduce S0ix implementation */ |
2816 |
+@@ -7035,17 +7024,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) |
2817 |
+ if (rc) |
2818 |
+ return rc; |
2819 |
+ |
2820 |
+- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && |
2821 |
+- hw->mac.type >= e1000_pch_adp) { |
2822 |
+- /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */ |
2823 |
+- e1e_rphy(hw, I217_MEMPWR, &phy_data); |
2824 |
+- phy_data &= ~I217_MEMPWR_MOEM; |
2825 |
+- e1e_wphy(hw, I217_MEMPWR, phy_data); |
2826 |
+- |
2827 |
+- /* Enable LCD reset */ |
2828 |
+- hw->phy.reset_disable = false; |
2829 |
+- } |
2830 |
+- |
2831 |
+ return e1000e_pm_thaw(dev); |
2832 |
+ } |
2833 |
+ |
2834 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
2835 |
+index 77eb9c7262053..6f01bffd7e5c2 100644 |
2836 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
2837 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
2838 |
+@@ -10645,7 +10645,7 @@ static int i40e_reset(struct i40e_pf *pf) |
2839 |
+ **/ |
2840 |
+ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) |
2841 |
+ { |
2842 |
+- int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); |
2843 |
++ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); |
2844 |
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; |
2845 |
+ struct i40e_hw *hw = &pf->hw; |
2846 |
+ i40e_status ret; |
2847 |
+@@ -10653,13 +10653,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) |
2848 |
+ int v; |
2849 |
+ |
2850 |
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && |
2851 |
+- i40e_check_recovery_mode(pf)) { |
2852 |
++ is_recovery_mode_reported) |
2853 |
+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); |
2854 |
+- } |
2855 |
+ |
2856 |
+ if (test_bit(__I40E_DOWN, pf->state) && |
2857 |
+- !test_bit(__I40E_RECOVERY_MODE, pf->state) && |
2858 |
+- !old_recovery_mode_bit) |
2859 |
++ !test_bit(__I40E_RECOVERY_MODE, pf->state)) |
2860 |
+ goto clear_recovery; |
2861 |
+ dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); |
2862 |
+ |
2863 |
+@@ -10686,13 +10684,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) |
2864 |
+ * accordingly with regard to resources initialization |
2865 |
+ * and deinitialization |
2866 |
+ */ |
2867 |
+- if (test_bit(__I40E_RECOVERY_MODE, pf->state) || |
2868 |
+- old_recovery_mode_bit) { |
2869 |
++ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { |
2870 |
+ if (i40e_get_capabilities(pf, |
2871 |
+ i40e_aqc_opc_list_func_capabilities)) |
2872 |
+ goto end_unlock; |
2873 |
+ |
2874 |
+- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { |
2875 |
++ if (is_recovery_mode_reported) { |
2876 |
+ /* we're staying in recovery mode so we'll reinitialize |
2877 |
+ * misc vector here |
2878 |
+ */ |
2879 |
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h |
2880 |
+index 49aed3e506a66..0ea0361cd86b1 100644 |
2881 |
+--- a/drivers/net/ethernet/intel/iavf/iavf.h |
2882 |
++++ b/drivers/net/ethernet/intel/iavf/iavf.h |
2883 |
+@@ -64,7 +64,6 @@ struct iavf_vsi { |
2884 |
+ u16 id; |
2885 |
+ DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); |
2886 |
+ int base_vector; |
2887 |
+- u16 work_limit; |
2888 |
+ u16 qs_handle; |
2889 |
+ void *priv; /* client driver data reference. */ |
2890 |
+ }; |
2891 |
+@@ -159,8 +158,12 @@ struct iavf_vlan { |
2892 |
+ struct iavf_vlan_filter { |
2893 |
+ struct list_head list; |
2894 |
+ struct iavf_vlan vlan; |
2895 |
+- bool remove; /* filter needs to be removed */ |
2896 |
+- bool add; /* filter needs to be added */ |
2897 |
++ struct { |
2898 |
++ u8 is_new_vlan:1; /* filter is new, wait for PF answer */ |
2899 |
++ u8 remove:1; /* filter needs to be removed */ |
2900 |
++ u8 add:1; /* filter needs to be added */ |
2901 |
++ u8 padding:5; |
2902 |
++ }; |
2903 |
+ }; |
2904 |
+ |
2905 |
+ #define IAVF_MAX_TRAFFIC_CLASS 4 |
2906 |
+@@ -461,6 +464,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state) |
2907 |
+ return "__IAVF_INIT_VERSION_CHECK"; |
2908 |
+ case __IAVF_INIT_GET_RESOURCES: |
2909 |
+ return "__IAVF_INIT_GET_RESOURCES"; |
2910 |
++ case __IAVF_INIT_EXTENDED_CAPS: |
2911 |
++ return "__IAVF_INIT_EXTENDED_CAPS"; |
2912 |
++ case __IAVF_INIT_CONFIG_ADAPTER: |
2913 |
++ return "__IAVF_INIT_CONFIG_ADAPTER"; |
2914 |
+ case __IAVF_INIT_SW: |
2915 |
+ return "__IAVF_INIT_SW"; |
2916 |
+ case __IAVF_INIT_FAILED: |
2917 |
+@@ -520,6 +527,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter); |
2918 |
+ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter); |
2919 |
+ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter); |
2920 |
+ void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); |
2921 |
++u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter); |
2922 |
+ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); |
2923 |
+ void iavf_configure_queues(struct iavf_adapter *adapter); |
2924 |
+ void iavf_deconfigure_queues(struct iavf_adapter *adapter); |
2925 |
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c |
2926 |
+index 3bb56714beb03..e535d4c3da49d 100644 |
2927 |
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c |
2928 |
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c |
2929 |
+@@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, |
2930 |
+ struct ethtool_coalesce *ec, int queue) |
2931 |
+ { |
2932 |
+ struct iavf_adapter *adapter = netdev_priv(netdev); |
2933 |
+- struct iavf_vsi *vsi = &adapter->vsi; |
2934 |
+ struct iavf_ring *rx_ring, *tx_ring; |
2935 |
+ |
2936 |
+- ec->tx_max_coalesced_frames = vsi->work_limit; |
2937 |
+- ec->rx_max_coalesced_frames = vsi->work_limit; |
2938 |
+- |
2939 |
+ /* Rx and Tx usecs per queue value. If user doesn't specify the |
2940 |
+ * queue, return queue 0's value to represent. |
2941 |
+ */ |
2942 |
+@@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev, |
2943 |
+ struct ethtool_coalesce *ec, int queue) |
2944 |
+ { |
2945 |
+ struct iavf_adapter *adapter = netdev_priv(netdev); |
2946 |
+- struct iavf_vsi *vsi = &adapter->vsi; |
2947 |
+ int i; |
2948 |
+ |
2949 |
+- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) |
2950 |
+- vsi->work_limit = ec->tx_max_coalesced_frames_irq; |
2951 |
+- |
2952 |
+ if (ec->rx_coalesce_usecs == 0) { |
2953 |
+ if (ec->use_adaptive_rx_coalesce) |
2954 |
+ netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); |
2955 |
+@@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, |
2956 |
+ |
2957 |
+ static const struct ethtool_ops iavf_ethtool_ops = { |
2958 |
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
2959 |
+- ETHTOOL_COALESCE_MAX_FRAMES | |
2960 |
+- ETHTOOL_COALESCE_MAX_FRAMES_IRQ | |
2961 |
+ ETHTOOL_COALESCE_USE_ADAPTIVE, |
2962 |
+ .get_drvinfo = iavf_get_drvinfo, |
2963 |
+ .get_link = ethtool_op_get_link, |
2964 |
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c |
2965 |
+index f3ecb3bca33dd..2e2c153ce46a3 100644 |
2966 |
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c |
2967 |
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c |
2968 |
+@@ -843,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter) |
2969 |
+ * iavf_get_num_vlans_added - get number of VLANs added |
2970 |
+ * @adapter: board private structure |
2971 |
+ */ |
2972 |
+-static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) |
2973 |
++u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) |
2974 |
+ { |
2975 |
+ return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) + |
2976 |
+ bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID); |
2977 |
+@@ -906,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, |
2978 |
+ if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) |
2979 |
+ return -ENOMEM; |
2980 |
+ |
2981 |
+- if (proto == cpu_to_be16(ETH_P_8021Q)) |
2982 |
+- set_bit(vid, adapter->vsi.active_cvlans); |
2983 |
+- else |
2984 |
+- set_bit(vid, adapter->vsi.active_svlans); |
2985 |
+- |
2986 |
+ return 0; |
2987 |
+ } |
2988 |
+ |
2989 |
+@@ -2245,7 +2240,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) |
2990 |
+ |
2991 |
+ adapter->vsi.back = adapter; |
2992 |
+ adapter->vsi.base_vector = 1; |
2993 |
+- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; |
2994 |
+ vsi->netdev = adapter->netdev; |
2995 |
+ vsi->qs_handle = adapter->vsi_res->qset_handle; |
2996 |
+ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { |
2997 |
+@@ -2956,6 +2950,9 @@ continue_reset: |
2998 |
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; |
2999 |
+ iavf_misc_irq_enable(adapter); |
3000 |
+ |
3001 |
++ bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); |
3002 |
++ bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); |
3003 |
++ |
3004 |
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); |
3005 |
+ |
3006 |
+ /* We were running when the reset started, so we need to restore some |
3007 |
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c |
3008 |
+index 978f651c6b093..06d18797d25a2 100644 |
3009 |
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c |
3010 |
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c |
3011 |
+@@ -194,7 +194,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, |
3012 |
+ struct iavf_tx_buffer *tx_buf; |
3013 |
+ struct iavf_tx_desc *tx_desc; |
3014 |
+ unsigned int total_bytes = 0, total_packets = 0; |
3015 |
+- unsigned int budget = vsi->work_limit; |
3016 |
++ unsigned int budget = IAVF_DEFAULT_IRQ_WORK; |
3017 |
+ |
3018 |
+ tx_buf = &tx_ring->tx_bi[i]; |
3019 |
+ tx_desc = IAVF_TX_DESC(tx_ring, i); |
3020 |
+@@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, |
3021 |
+ { |
3022 |
+ struct iavf_rx_buffer *rx_buffer; |
3023 |
+ |
3024 |
+- if (!size) |
3025 |
+- return NULL; |
3026 |
+- |
3027 |
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; |
3028 |
+ prefetchw(rx_buffer->page); |
3029 |
++ if (!size) |
3030 |
++ return rx_buffer; |
3031 |
+ |
3032 |
+ /* we are reusing so sync this buffer for CPU use */ |
3033 |
+ dma_sync_single_range_for_cpu(rx_ring->dev, |
3034 |
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c |
3035 |
+index 782450d5c12fc..1603e99bae4af 100644 |
3036 |
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c |
3037 |
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c |
3038 |
+@@ -626,6 +626,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter) |
3039 |
+ spin_unlock_bh(&adapter->mac_vlan_list_lock); |
3040 |
+ } |
3041 |
+ |
3042 |
++/** |
3043 |
++ * iavf_vlan_add_reject |
3044 |
++ * @adapter: adapter structure |
3045 |
++ * |
3046 |
++ * Remove VLAN filters from list based on PF response. |
3047 |
++ **/ |
3048 |
++static void iavf_vlan_add_reject(struct iavf_adapter *adapter) |
3049 |
++{ |
3050 |
++ struct iavf_vlan_filter *f, *ftmp; |
3051 |
++ |
3052 |
++ spin_lock_bh(&adapter->mac_vlan_list_lock); |
3053 |
++ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { |
3054 |
++ if (f->is_new_vlan) { |
3055 |
++ if (f->vlan.tpid == ETH_P_8021Q) |
3056 |
++ clear_bit(f->vlan.vid, |
3057 |
++ adapter->vsi.active_cvlans); |
3058 |
++ else |
3059 |
++ clear_bit(f->vlan.vid, |
3060 |
++ adapter->vsi.active_svlans); |
3061 |
++ |
3062 |
++ list_del(&f->list); |
3063 |
++ kfree(f); |
3064 |
++ } |
3065 |
++ } |
3066 |
++ spin_unlock_bh(&adapter->mac_vlan_list_lock); |
3067 |
++} |
3068 |
++ |
3069 |
+ /** |
3070 |
+ * iavf_add_vlans |
3071 |
+ * @adapter: adapter structure |
3072 |
+@@ -683,6 +710,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) |
3073 |
+ vvfl->vlan_id[i] = f->vlan.vid; |
3074 |
+ i++; |
3075 |
+ f->add = false; |
3076 |
++ f->is_new_vlan = true; |
3077 |
+ if (i == count) |
3078 |
+ break; |
3079 |
+ } |
3080 |
+@@ -695,10 +723,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter) |
3081 |
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); |
3082 |
+ kfree(vvfl); |
3083 |
+ } else { |
3084 |
++ u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters; |
3085 |
++ u16 current_vlans = iavf_get_num_vlans_added(adapter); |
3086 |
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2; |
3087 |
+ |
3088 |
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; |
3089 |
+ |
3090 |
++ if ((count + current_vlans) > max_vlans && |
3091 |
++ current_vlans < max_vlans) { |
3092 |
++ count = max_vlans - iavf_get_num_vlans_added(adapter); |
3093 |
++ more = true; |
3094 |
++ } |
3095 |
++ |
3096 |
+ len = sizeof(*vvfl_v2) + ((count - 1) * |
3097 |
+ sizeof(struct virtchnl_vlan_filter)); |
3098 |
+ if (len > IAVF_MAX_AQ_BUF_SIZE) { |
3099 |
+@@ -725,6 +761,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter) |
3100 |
+ &adapter->vlan_v2_caps.filtering.filtering_support; |
3101 |
+ struct virtchnl_vlan *vlan; |
3102 |
+ |
3103 |
++ if (i == count) |
3104 |
++ break; |
3105 |
++ |
3106 |
+ /* give priority over outer if it's enabled */ |
3107 |
+ if (filtering_support->outer) |
3108 |
+ vlan = &vvfl_v2->filters[i].outer; |
3109 |
+@@ -736,8 +775,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) |
3110 |
+ |
3111 |
+ i++; |
3112 |
+ f->add = false; |
3113 |
+- if (i == count) |
3114 |
+- break; |
3115 |
++ f->is_new_vlan = true; |
3116 |
+ } |
3117 |
+ } |
3118 |
+ |
3119 |
+@@ -2080,6 +2118,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, |
3120 |
+ */ |
3121 |
+ iavf_netdev_features_vlan_strip_set(netdev, true); |
3122 |
+ break; |
3123 |
++ case VIRTCHNL_OP_ADD_VLAN_V2: |
3124 |
++ iavf_vlan_add_reject(adapter); |
3125 |
++ dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", |
3126 |
++ iavf_stat_str(&adapter->hw, v_retval)); |
3127 |
++ break; |
3128 |
+ default: |
3129 |
+ dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", |
3130 |
+ v_retval, iavf_stat_str(&adapter->hw, v_retval), |
3131 |
+@@ -2332,6 +2375,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, |
3132 |
+ spin_unlock_bh(&adapter->adv_rss_lock); |
3133 |
+ } |
3134 |
+ break; |
3135 |
++ case VIRTCHNL_OP_ADD_VLAN_V2: { |
3136 |
++ struct iavf_vlan_filter *f; |
3137 |
++ |
3138 |
++ spin_lock_bh(&adapter->mac_vlan_list_lock); |
3139 |
++ list_for_each_entry(f, &adapter->vlan_filter_list, list) { |
3140 |
++ if (f->is_new_vlan) { |
3141 |
++ f->is_new_vlan = false; |
3142 |
++ if (f->vlan.tpid == ETH_P_8021Q) |
3143 |
++ set_bit(f->vlan.vid, |
3144 |
++ adapter->vsi.active_cvlans); |
3145 |
++ else |
3146 |
++ set_bit(f->vlan.vid, |
3147 |
++ adapter->vsi.active_svlans); |
3148 |
++ } |
3149 |
++ } |
3150 |
++ spin_unlock_bh(&adapter->mac_vlan_list_lock); |
3151 |
++ } |
3152 |
++ break; |
3153 |
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: |
3154 |
+ /* PF enabled vlan strip on this VF. |
3155 |
+ * Update netdev->features if needed to be in sync with ethtool. |
3156 |
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c |
3157 |
+index 74b2c590ed5d0..38e46e9ba8bb8 100644 |
3158 |
+--- a/drivers/net/ethernet/intel/igc/igc_main.c |
3159 |
++++ b/drivers/net/ethernet/intel/igc/igc_main.c |
3160 |
+@@ -6171,6 +6171,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) |
3161 |
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
3162 |
+ u32 value = 0; |
3163 |
+ |
3164 |
++ if (IGC_REMOVED(hw_addr)) |
3165 |
++ return ~value; |
3166 |
++ |
3167 |
+ value = readl(&hw_addr[reg]); |
3168 |
+ |
3169 |
+ /* reads should not return all F's */ |
3170 |
+diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h |
3171 |
+index e197a33d93a03..026c3b65fc37a 100644 |
3172 |
+--- a/drivers/net/ethernet/intel/igc/igc_regs.h |
3173 |
++++ b/drivers/net/ethernet/intel/igc/igc_regs.h |
3174 |
+@@ -306,7 +306,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg); |
3175 |
+ #define wr32(reg, val) \ |
3176 |
+ do { \ |
3177 |
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ |
3178 |
+- writel((val), &hw_addr[(reg)]); \ |
3179 |
++ if (!IGC_REMOVED(hw_addr)) \ |
3180 |
++ writel((val), &hw_addr[(reg)]); \ |
3181 |
+ } while (0) |
3182 |
+ |
3183 |
+ #define rd32(reg) (igc_rd32(hw, reg)) |
3184 |
+@@ -318,4 +319,6 @@ do { \ |
3185 |
+ |
3186 |
+ #define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) |
3187 |
+ |
3188 |
++#define IGC_REMOVED(h) unlikely(!(h)) |
3189 |
++ |
3190 |
+ #endif |
3191 |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h |
3192 |
+index 921a4d977d651..8813b4dd6872f 100644 |
3193 |
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h |
3194 |
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h |
3195 |
+@@ -779,6 +779,7 @@ struct ixgbe_adapter { |
3196 |
+ #ifdef CONFIG_IXGBE_IPSEC |
3197 |
+ struct ixgbe_ipsec *ipsec; |
3198 |
+ #endif /* CONFIG_IXGBE_IPSEC */ |
3199 |
++ spinlock_t vfs_lock; |
3200 |
+ }; |
3201 |
+ |
3202 |
+ static inline int ixgbe_determine_xdp_q_idx(int cpu) |
3203 |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
3204 |
+index c4a4954aa3177..6c403f112d294 100644 |
3205 |
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
3206 |
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
3207 |
+@@ -6402,6 +6402,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, |
3208 |
+ /* n-tuple support exists, always init our spinlock */ |
3209 |
+ spin_lock_init(&adapter->fdir_perfect_lock); |
3210 |
+ |
3211 |
++ /* init spinlock to avoid concurrency of VF resources */ |
3212 |
++ spin_lock_init(&adapter->vfs_lock); |
3213 |
++ |
3214 |
+ #ifdef CONFIG_IXGBE_DCB |
3215 |
+ ixgbe_init_dcb(adapter); |
3216 |
+ #endif |
3217 |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c |
3218 |
+index d4e63f0644c36..a1e69c7348632 100644 |
3219 |
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c |
3220 |
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c |
3221 |
+@@ -205,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) |
3222 |
+ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) |
3223 |
+ { |
3224 |
+ unsigned int num_vfs = adapter->num_vfs, vf; |
3225 |
++ unsigned long flags; |
3226 |
+ int rss; |
3227 |
+ |
3228 |
++ spin_lock_irqsave(&adapter->vfs_lock, flags); |
3229 |
+ /* set num VFs to 0 to prevent access to vfinfo */ |
3230 |
+ adapter->num_vfs = 0; |
3231 |
++ spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
3232 |
+ |
3233 |
+ /* put the reference to all of the vf devices */ |
3234 |
+ for (vf = 0; vf < num_vfs; ++vf) { |
3235 |
+@@ -1355,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
3236 |
+ void ixgbe_msg_task(struct ixgbe_adapter *adapter) |
3237 |
+ { |
3238 |
+ struct ixgbe_hw *hw = &adapter->hw; |
3239 |
++ unsigned long flags; |
3240 |
+ u32 vf; |
3241 |
+ |
3242 |
++ spin_lock_irqsave(&adapter->vfs_lock, flags); |
3243 |
+ for (vf = 0; vf < adapter->num_vfs; vf++) { |
3244 |
+ /* process any reset requests */ |
3245 |
+ if (!ixgbe_check_for_rst(hw, vf)) |
3246 |
+@@ -1370,6 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) |
3247 |
+ if (!ixgbe_check_for_ack(hw, vf)) |
3248 |
+ ixgbe_rcv_ack_from_vf(adapter, vf); |
3249 |
+ } |
3250 |
++ spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
3251 |
+ } |
3252 |
+ |
3253 |
+ static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) |
3254 |
+diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c |
3255 |
+index 921959a980ee4..d8cfa4a7de0f2 100644 |
3256 |
+--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c |
3257 |
++++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c |
3258 |
+@@ -139,12 +139,12 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, |
3259 |
+ } |
3260 |
+ port = netdev_priv(ingress_dev); |
3261 |
+ |
3262 |
+- mask = htons(0x1FFF); |
3263 |
+- key = htons(port->hw_id); |
3264 |
++ mask = htons(0x1FFF << 3); |
3265 |
++ key = htons(port->hw_id << 3); |
3266 |
+ rule_match_set(r_match->key, SYS_PORT, key); |
3267 |
+ rule_match_set(r_match->mask, SYS_PORT, mask); |
3268 |
+ |
3269 |
+- mask = htons(0x1FF); |
3270 |
++ mask = htons(0x3FF); |
3271 |
+ key = htons(port->dev_id); |
3272 |
+ rule_match_set(r_match->key, SYS_DEV, key); |
3273 |
+ rule_match_set(r_match->mask, SYS_DEV, mask); |
3274 |
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c |
3275 |
+index 7ad663c5b1ab7..c00d6c4ed37c3 100644 |
3276 |
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c |
3277 |
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c |
3278 |
+@@ -5387,7 +5387,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, |
3279 |
+ { |
3280 |
+ const struct fib_nh *nh = fib_info_nh(fi, 0); |
3281 |
+ |
3282 |
+- return nh->fib_nh_scope == RT_SCOPE_LINK || |
3283 |
++ return nh->fib_nh_gw_family || |
3284 |
+ mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); |
3285 |
+ } |
3286 |
+ |
3287 |
+@@ -10263,7 +10263,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, |
3288 |
+ unsigned long *fields = config->fields; |
3289 |
+ u32 hash_fields; |
3290 |
+ |
3291 |
+- switch (net->ipv4.sysctl_fib_multipath_hash_policy) { |
3292 |
++ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) { |
3293 |
+ case 0: |
3294 |
+ mlxsw_sp_mp4_hash_outer_addr(config); |
3295 |
+ break; |
3296 |
+@@ -10281,7 +10281,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, |
3297 |
+ mlxsw_sp_mp_hash_inner_l3(config); |
3298 |
+ break; |
3299 |
+ case 3: |
3300 |
+- hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; |
3301 |
++ hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields); |
3302 |
+ /* Outer */ |
3303 |
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP); |
3304 |
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP); |
3305 |
+@@ -10462,13 +10462,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) |
3306 |
+ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) |
3307 |
+ { |
3308 |
+ struct net *net = mlxsw_sp_net(mlxsw_sp); |
3309 |
+- bool usp = net->ipv4.sysctl_ip_fwd_update_priority; |
3310 |
+ char rgcr_pl[MLXSW_REG_RGCR_LEN]; |
3311 |
+ u64 max_rifs; |
3312 |
++ bool usp; |
3313 |
+ |
3314 |
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) |
3315 |
+ return -EIO; |
3316 |
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); |
3317 |
++ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority); |
3318 |
+ |
3319 |
+ mlxsw_reg_rgcr_pack(rgcr_pl, true, true); |
3320 |
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); |
3321 |
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c |
3322 |
+index 005e56ea5da12..5893770bfd946 100644 |
3323 |
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c |
3324 |
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c |
3325 |
+@@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, |
3326 |
+ unsigned int vid, |
3327 |
+ enum macaccess_entry_type type) |
3328 |
+ { |
3329 |
++ int ret; |
3330 |
++ |
3331 |
++ spin_lock(&lan966x->mac_lock); |
3332 |
+ lan966x_mac_select(lan966x, mac, vid); |
3333 |
+ |
3334 |
+ /* Issue a write command */ |
3335 |
+@@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, |
3336 |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN), |
3337 |
+ lan966x, ANA_MACACCESS); |
3338 |
+ |
3339 |
+- return lan966x_mac_wait_for_completion(lan966x); |
3340 |
++ ret = lan966x_mac_wait_for_completion(lan966x); |
3341 |
++ spin_unlock(&lan966x->mac_lock); |
3342 |
++ |
3343 |
++ return ret; |
3344 |
+ } |
3345 |
+ |
3346 |
+ /* The mask of the front ports is encoded inside the mac parameter via a call |
3347 |
+@@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port, |
3348 |
+ return __lan966x_mac_learn(lan966x, port, false, mac, vid, type); |
3349 |
+ } |
3350 |
+ |
3351 |
+-int lan966x_mac_forget(struct lan966x *lan966x, |
3352 |
+- const unsigned char mac[ETH_ALEN], |
3353 |
+- unsigned int vid, |
3354 |
+- enum macaccess_entry_type type) |
3355 |
++static int lan966x_mac_forget_locked(struct lan966x *lan966x, |
3356 |
++ const unsigned char mac[ETH_ALEN], |
3357 |
++ unsigned int vid, |
3358 |
++ enum macaccess_entry_type type) |
3359 |
+ { |
3360 |
++ lockdep_assert_held(&lan966x->mac_lock); |
3361 |
++ |
3362 |
+ lan966x_mac_select(lan966x, mac, vid); |
3363 |
+ |
3364 |
+ /* Issue a forget command */ |
3365 |
+@@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x, |
3366 |
+ return lan966x_mac_wait_for_completion(lan966x); |
3367 |
+ } |
3368 |
+ |
3369 |
++int lan966x_mac_forget(struct lan966x *lan966x, |
3370 |
++ const unsigned char mac[ETH_ALEN], |
3371 |
++ unsigned int vid, |
3372 |
++ enum macaccess_entry_type type) |
3373 |
++{ |
3374 |
++ int ret; |
3375 |
++ |
3376 |
++ spin_lock(&lan966x->mac_lock); |
3377 |
++ ret = lan966x_mac_forget_locked(lan966x, mac, vid, type); |
3378 |
++ spin_unlock(&lan966x->mac_lock); |
3379 |
++ |
3380 |
++ return ret; |
3381 |
++} |
3382 |
++ |
3383 |
+ int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid) |
3384 |
+ { |
3385 |
+ return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED); |
3386 |
+@@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma |
3387 |
+ { |
3388 |
+ struct lan966x_mac_entry *mac_entry; |
3389 |
+ |
3390 |
+- mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL); |
3391 |
++ mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); |
3392 |
+ if (!mac_entry) |
3393 |
+ return NULL; |
3394 |
+ |
3395 |
+@@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, |
3396 |
+ struct lan966x_mac_entry *res = NULL; |
3397 |
+ struct lan966x_mac_entry *mac_entry; |
3398 |
+ |
3399 |
+- spin_lock(&lan966x->mac_lock); |
3400 |
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { |
3401 |
+ if (mac_entry->vid == vid && |
3402 |
+ ether_addr_equal(mac, mac_entry->mac) && |
3403 |
+@@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, |
3404 |
+ break; |
3405 |
+ } |
3406 |
+ } |
3407 |
+- spin_unlock(&lan966x->mac_lock); |
3408 |
+ |
3409 |
+ return res; |
3410 |
+ } |
3411 |
+@@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, |
3412 |
+ { |
3413 |
+ struct lan966x_mac_entry *mac_entry; |
3414 |
+ |
3415 |
+- if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) |
3416 |
++ spin_lock(&lan966x->mac_lock); |
3417 |
++ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) { |
3418 |
++ spin_unlock(&lan966x->mac_lock); |
3419 |
+ return 0; |
3420 |
++ } |
3421 |
+ |
3422 |
+ /* In case the entry already exists, don't add it again to SW, |
3423 |
+ * just update HW, but we need to look in the actual HW because |
3424 |
+@@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, |
3425 |
+ * add the entry but without the extern_learn flag. |
3426 |
+ */ |
3427 |
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port); |
3428 |
+- if (mac_entry) |
3429 |
+- return lan966x_mac_learn(lan966x, port->chip_port, |
3430 |
+- addr, vid, ENTRYTYPE_LOCKED); |
3431 |
++ if (mac_entry) { |
3432 |
++ spin_unlock(&lan966x->mac_lock); |
3433 |
++ goto mac_learn; |
3434 |
++ } |
3435 |
+ |
3436 |
+ mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port); |
3437 |
+- if (!mac_entry) |
3438 |
++ if (!mac_entry) { |
3439 |
++ spin_unlock(&lan966x->mac_lock); |
3440 |
+ return -ENOMEM; |
3441 |
++ } |
3442 |
+ |
3443 |
+- spin_lock(&lan966x->mac_lock); |
3444 |
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries); |
3445 |
+ spin_unlock(&lan966x->mac_lock); |
3446 |
+ |
3447 |
+- lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); |
3448 |
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev); |
3449 |
+ |
3450 |
++mac_learn: |
3451 |
++ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); |
3452 |
++ |
3453 |
+ return 0; |
3454 |
+ } |
3455 |
+ |
3456 |
+@@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr, |
3457 |
+ list) { |
3458 |
+ if (mac_entry->vid == vid && |
3459 |
+ ether_addr_equal(addr, mac_entry->mac)) { |
3460 |
+- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid, |
3461 |
+- ENTRYTYPE_LOCKED); |
3462 |
++ lan966x_mac_forget_locked(lan966x, mac_entry->mac, |
3463 |
++ mac_entry->vid, |
3464 |
++ ENTRYTYPE_LOCKED); |
3465 |
+ |
3466 |
+ list_del(&mac_entry->list); |
3467 |
+ kfree(mac_entry); |
3468 |
+@@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x) |
3469 |
+ spin_lock(&lan966x->mac_lock); |
3470 |
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, |
3471 |
+ list) { |
3472 |
+- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid, |
3473 |
+- ENTRYTYPE_LOCKED); |
3474 |
++ lan966x_mac_forget_locked(lan966x, mac_entry->mac, |
3475 |
++ mac_entry->vid, ENTRYTYPE_LOCKED); |
3476 |
+ |
3477 |
+ list_del(&mac_entry->list); |
3478 |
+ kfree(mac_entry); |
3479 |
+@@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, |
3480 |
+ { |
3481 |
+ struct lan966x_mac_entry *mac_entry, *tmp; |
3482 |
+ unsigned char mac[ETH_ALEN] __aligned(2); |
3483 |
++ struct list_head mac_deleted_entries; |
3484 |
+ u32 dest_idx; |
3485 |
+ u32 column; |
3486 |
+ u16 vid; |
3487 |
+ |
3488 |
++ INIT_LIST_HEAD(&mac_deleted_entries); |
3489 |
++ |
3490 |
+ spin_lock(&lan966x->mac_lock); |
3491 |
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { |
3492 |
+ bool found = false; |
3493 |
+@@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, |
3494 |
+ } |
3495 |
+ |
3496 |
+ if (!found) { |
3497 |
+- /* Notify the bridge that the entry doesn't exist |
3498 |
+- * anymore in the HW and remove the entry from the SW |
3499 |
+- * list |
3500 |
+- */ |
3501 |
+- lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, |
3502 |
+- mac_entry->mac, mac_entry->vid, |
3503 |
+- lan966x->ports[mac_entry->port_index]->dev); |
3504 |
+- |
3505 |
+ list_del(&mac_entry->list); |
3506 |
+- kfree(mac_entry); |
3507 |
++ /* Move the entry from SW list to a tmp list such that |
3508 |
++ * it would be deleted later |
3509 |
++ */ |
3510 |
++ list_add_tail(&mac_entry->list, &mac_deleted_entries); |
3511 |
+ } |
3512 |
+ } |
3513 |
+ spin_unlock(&lan966x->mac_lock); |
3514 |
+ |
3515 |
++ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) { |
3516 |
++ /* Notify the bridge that the entry doesn't exist |
3517 |
++ * anymore in the HW |
3518 |
++ */ |
3519 |
++ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, |
3520 |
++ mac_entry->mac, mac_entry->vid, |
3521 |
++ lan966x->ports[mac_entry->port_index]->dev); |
3522 |
++ list_del(&mac_entry->list); |
3523 |
++ kfree(mac_entry); |
3524 |
++ } |
3525 |
++ |
3526 |
+ /* Now go to the list of columns and see if any entry was not in the SW |
3527 |
+ * list, then that means that the entry is new so it needs to notify the |
3528 |
+ * bridge. |
3529 |
+@@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, |
3530 |
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) |
3531 |
+ continue; |
3532 |
+ |
3533 |
++ spin_lock(&lan966x->mac_lock); |
3534 |
++ mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx); |
3535 |
++ if (mac_entry) { |
3536 |
++ spin_unlock(&lan966x->mac_lock); |
3537 |
++ continue; |
3538 |
++ } |
3539 |
++ |
3540 |
+ mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx); |
3541 |
+- if (!mac_entry) |
3542 |
++ if (!mac_entry) { |
3543 |
++ spin_unlock(&lan966x->mac_lock); |
3544 |
+ return; |
3545 |
++ } |
3546 |
+ |
3547 |
+ mac_entry->row = row; |
3548 |
+- |
3549 |
+- spin_lock(&lan966x->mac_lock); |
3550 |
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries); |
3551 |
+ spin_unlock(&lan966x->mac_lock); |
3552 |
+ |
3553 |
+@@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) |
3554 |
+ lan966x, ANA_MACTINDX); |
3555 |
+ |
3556 |
+ while (1) { |
3557 |
++ spin_lock(&lan966x->mac_lock); |
3558 |
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT), |
3559 |
+ ANA_MACACCESS_MAC_TABLE_CMD, |
3560 |
+ lan966x, ANA_MACACCESS); |
3561 |
+@@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) |
3562 |
+ stop = false; |
3563 |
+ |
3564 |
+ if (column == LAN966X_MAC_COLUMNS - 1 && |
3565 |
+- index == 0 && stop) |
3566 |
++ index == 0 && stop) { |
3567 |
++ spin_unlock(&lan966x->mac_lock); |
3568 |
+ break; |
3569 |
++ } |
3570 |
+ |
3571 |
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA); |
3572 |
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA); |
3573 |
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS); |
3574 |
++ spin_unlock(&lan966x->mac_lock); |
3575 |
+ |
3576 |
+ /* Once all the columns are read process them */ |
3577 |
+ if (column == LAN966X_MAC_COLUMNS - 1) { |
3578 |
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c |
3579 |
+index 1b9421e844a95..79036767c99d1 100644 |
3580 |
+--- a/drivers/net/ethernet/netronome/nfp/flower/action.c |
3581 |
++++ b/drivers/net/ethernet/netronome/nfp/flower/action.c |
3582 |
+@@ -473,7 +473,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, |
3583 |
+ set_tun->ttl = ip4_dst_hoplimit(&rt->dst); |
3584 |
+ ip_rt_put(rt); |
3585 |
+ } else { |
3586 |
+- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; |
3587 |
++ set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); |
3588 |
+ } |
3589 |
+ } |
3590 |
+ |
3591 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c |
3592 |
+index 6ff88df587673..ca8ab290013ce 100644 |
3593 |
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c |
3594 |
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c |
3595 |
+@@ -576,32 +576,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv) |
3596 |
+ } |
3597 |
+ } |
3598 |
+ |
3599 |
+- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks); |
3600 |
+- if (ret) { |
3601 |
+- dev_err(plat->dev, "failed to enable clks, err = %d\n", ret); |
3602 |
+- return ret; |
3603 |
+- } |
3604 |
+- |
3605 |
+- ret = clk_prepare_enable(plat->rmii_internal_clk); |
3606 |
+- if (ret) { |
3607 |
+- dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret); |
3608 |
+- goto err_clk; |
3609 |
+- } |
3610 |
+- |
3611 |
+ return 0; |
3612 |
+- |
3613 |
+-err_clk: |
3614 |
+- clk_bulk_disable_unprepare(variant->num_clks, plat->clks); |
3615 |
+- return ret; |
3616 |
+-} |
3617 |
+- |
3618 |
+-static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv) |
3619 |
+-{ |
3620 |
+- struct mediatek_dwmac_plat_data *plat = priv; |
3621 |
+- const struct mediatek_dwmac_variant *variant = plat->variant; |
3622 |
+- |
3623 |
+- clk_disable_unprepare(plat->rmii_internal_clk); |
3624 |
+- clk_bulk_disable_unprepare(variant->num_clks, plat->clks); |
3625 |
+ } |
3626 |
+ |
3627 |
+ static int mediatek_dwmac_clks_config(void *priv, bool enabled) |
3628 |
+@@ -643,7 +618,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, |
3629 |
+ plat->addr64 = priv_plat->variant->dma_bit_mask; |
3630 |
+ plat->bsp_priv = priv_plat; |
3631 |
+ plat->init = mediatek_dwmac_init; |
3632 |
+- plat->exit = mediatek_dwmac_exit; |
3633 |
+ plat->clks_config = mediatek_dwmac_clks_config; |
3634 |
+ if (priv_plat->variant->dwmac_fix_mac_speed) |
3635 |
+ plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed; |
3636 |
+@@ -712,13 +686,32 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) |
3637 |
+ mediatek_dwmac_common_data(pdev, plat_dat, priv_plat); |
3638 |
+ mediatek_dwmac_init(pdev, priv_plat); |
3639 |
+ |
3640 |
++ ret = mediatek_dwmac_clks_config(priv_plat, true); |
3641 |
++ if (ret) |
3642 |
++ return ret; |
3643 |
++ |
3644 |
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); |
3645 |
+ if (ret) { |
3646 |
+ stmmac_remove_config_dt(pdev, plat_dat); |
3647 |
+- return ret; |
3648 |
++ goto err_drv_probe; |
3649 |
+ } |
3650 |
+ |
3651 |
+ return 0; |
3652 |
++ |
3653 |
++err_drv_probe: |
3654 |
++ mediatek_dwmac_clks_config(priv_plat, false); |
3655 |
++ return ret; |
3656 |
++} |
3657 |
++ |
3658 |
++static int mediatek_dwmac_remove(struct platform_device *pdev) |
3659 |
++{ |
3660 |
++ struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev); |
3661 |
++ int ret; |
3662 |
++ |
3663 |
++ ret = stmmac_pltfr_remove(pdev); |
3664 |
++ mediatek_dwmac_clks_config(priv_plat, false); |
3665 |
++ |
3666 |
++ return ret; |
3667 |
+ } |
3668 |
+ |
3669 |
+ static const struct of_device_id mediatek_dwmac_match[] = { |
3670 |
+@@ -733,7 +726,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match); |
3671 |
+ |
3672 |
+ static struct platform_driver mediatek_dwmac_driver = { |
3673 |
+ .probe = mediatek_dwmac_probe, |
3674 |
+- .remove = stmmac_pltfr_remove, |
3675 |
++ .remove = mediatek_dwmac_remove, |
3676 |
+ .driver = { |
3677 |
+ .name = "dwmac-mediatek", |
3678 |
+ .pm = &stmmac_pltfr_pm_ops, |
3679 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
3680 |
+index fd41db65fe1df..af33390411346 100644 |
3681 |
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
3682 |
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
3683 |
+@@ -219,6 +219,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) |
3684 |
+ if (queue == 0 || queue == 4) { |
3685 |
+ value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; |
3686 |
+ value |= MTL_RXQ_DMA_Q04MDMACH(chan); |
3687 |
++ } else if (queue > 4) { |
3688 |
++ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); |
3689 |
++ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); |
3690 |
+ } else { |
3691 |
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); |
3692 |
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); |
3693 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
3694 |
+index abfb3cd5958df..9c3055ee26085 100644 |
3695 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
3696 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
3697 |
+@@ -803,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, |
3698 |
+ netdev_warn(priv->dev, |
3699 |
+ "Setting EEE tx-lpi is not supported\n"); |
3700 |
+ |
3701 |
+- if (priv->hw->xpcs) { |
3702 |
+- ret = xpcs_config_eee(priv->hw->xpcs, |
3703 |
+- priv->plat->mult_fact_100ns, |
3704 |
+- edata->eee_enabled); |
3705 |
+- if (ret) |
3706 |
+- return ret; |
3707 |
+- } |
3708 |
+- |
3709 |
+ if (!edata->eee_enabled) |
3710 |
+ stmmac_disable_eee_mode(priv); |
3711 |
+ |
3712 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
3713 |
+index 2525a80353b70..6a7f63a58aef8 100644 |
3714 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
3715 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
3716 |
+@@ -834,19 +834,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) |
3717 |
+ struct timespec64 now; |
3718 |
+ u32 sec_inc = 0; |
3719 |
+ u64 temp = 0; |
3720 |
+- int ret; |
3721 |
+ |
3722 |
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
3723 |
+ return -EOPNOTSUPP; |
3724 |
+ |
3725 |
+- ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
3726 |
+- if (ret < 0) { |
3727 |
+- netdev_warn(priv->dev, |
3728 |
+- "failed to enable PTP reference clock: %pe\n", |
3729 |
+- ERR_PTR(ret)); |
3730 |
+- return ret; |
3731 |
+- } |
3732 |
+- |
3733 |
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); |
3734 |
+ priv->systime_flags = systime_flags; |
3735 |
+ |
3736 |
+@@ -3270,6 +3261,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) |
3737 |
+ |
3738 |
+ stmmac_mmc_setup(priv); |
3739 |
+ |
3740 |
++ if (ptp_register) { |
3741 |
++ ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
3742 |
++ if (ret < 0) |
3743 |
++ netdev_warn(priv->dev, |
3744 |
++ "failed to enable PTP reference clock: %pe\n", |
3745 |
++ ERR_PTR(ret)); |
3746 |
++ } |
3747 |
++ |
3748 |
+ ret = stmmac_init_ptp(priv); |
3749 |
+ if (ret == -EOPNOTSUPP) |
3750 |
+ netdev_info(priv->dev, "PTP not supported by HW\n"); |
3751 |
+@@ -7220,8 +7219,6 @@ int stmmac_dvr_remove(struct device *dev) |
3752 |
+ netdev_info(priv->dev, "%s: removing driver", __func__); |
3753 |
+ |
3754 |
+ pm_runtime_get_sync(dev); |
3755 |
+- pm_runtime_disable(dev); |
3756 |
+- pm_runtime_put_noidle(dev); |
3757 |
+ |
3758 |
+ stmmac_stop_all_dma(priv); |
3759 |
+ stmmac_mac_set(priv, priv->ioaddr, false); |
3760 |
+@@ -7248,6 +7245,9 @@ int stmmac_dvr_remove(struct device *dev) |
3761 |
+ mutex_destroy(&priv->lock); |
3762 |
+ bitmap_free(priv->af_xdp_zc_qps); |
3763 |
+ |
3764 |
++ pm_runtime_disable(dev); |
3765 |
++ pm_runtime_put_noidle(dev); |
3766 |
++ |
3767 |
+ return 0; |
3768 |
+ } |
3769 |
+ EXPORT_SYMBOL_GPL(stmmac_dvr_remove); |
3770 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c |
3771 |
+index 11e1055e8260f..9f5cac4000da6 100644 |
3772 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c |
3773 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c |
3774 |
+@@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) |
3775 |
+ if (ret) |
3776 |
+ return ret; |
3777 |
+ |
3778 |
+- stmmac_init_tstamp_counter(priv, priv->systime_flags); |
3779 |
++ ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
3780 |
++ if (ret < 0) { |
3781 |
++ netdev_warn(priv->dev, |
3782 |
++ "failed to enable PTP reference clock: %pe\n", |
3783 |
++ ERR_PTR(ret)); |
3784 |
++ return ret; |
3785 |
++ } |
3786 |
+ } |
3787 |
+ |
3788 |
+ return 0; |
3789 |
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c |
3790 |
+index 873f6deabbd1a..dc1f6d8444ad0 100644 |
3791 |
+--- a/drivers/net/usb/ax88179_178a.c |
3792 |
++++ b/drivers/net/usb/ax88179_178a.c |
3793 |
+@@ -1801,7 +1801,7 @@ static const struct driver_info ax88179_info = { |
3794 |
+ .link_reset = ax88179_link_reset, |
3795 |
+ .reset = ax88179_reset, |
3796 |
+ .stop = ax88179_stop, |
3797 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3798 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3799 |
+ .rx_fixup = ax88179_rx_fixup, |
3800 |
+ .tx_fixup = ax88179_tx_fixup, |
3801 |
+ }; |
3802 |
+@@ -1814,7 +1814,7 @@ static const struct driver_info ax88178a_info = { |
3803 |
+ .link_reset = ax88179_link_reset, |
3804 |
+ .reset = ax88179_reset, |
3805 |
+ .stop = ax88179_stop, |
3806 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3807 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3808 |
+ .rx_fixup = ax88179_rx_fixup, |
3809 |
+ .tx_fixup = ax88179_tx_fixup, |
3810 |
+ }; |
3811 |
+@@ -1827,7 +1827,7 @@ static const struct driver_info cypress_GX3_info = { |
3812 |
+ .link_reset = ax88179_link_reset, |
3813 |
+ .reset = ax88179_reset, |
3814 |
+ .stop = ax88179_stop, |
3815 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3816 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3817 |
+ .rx_fixup = ax88179_rx_fixup, |
3818 |
+ .tx_fixup = ax88179_tx_fixup, |
3819 |
+ }; |
3820 |
+@@ -1840,7 +1840,7 @@ static const struct driver_info dlink_dub1312_info = { |
3821 |
+ .link_reset = ax88179_link_reset, |
3822 |
+ .reset = ax88179_reset, |
3823 |
+ .stop = ax88179_stop, |
3824 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3825 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3826 |
+ .rx_fixup = ax88179_rx_fixup, |
3827 |
+ .tx_fixup = ax88179_tx_fixup, |
3828 |
+ }; |
3829 |
+@@ -1853,7 +1853,7 @@ static const struct driver_info sitecom_info = { |
3830 |
+ .link_reset = ax88179_link_reset, |
3831 |
+ .reset = ax88179_reset, |
3832 |
+ .stop = ax88179_stop, |
3833 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3834 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3835 |
+ .rx_fixup = ax88179_rx_fixup, |
3836 |
+ .tx_fixup = ax88179_tx_fixup, |
3837 |
+ }; |
3838 |
+@@ -1866,7 +1866,7 @@ static const struct driver_info samsung_info = { |
3839 |
+ .link_reset = ax88179_link_reset, |
3840 |
+ .reset = ax88179_reset, |
3841 |
+ .stop = ax88179_stop, |
3842 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3843 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3844 |
+ .rx_fixup = ax88179_rx_fixup, |
3845 |
+ .tx_fixup = ax88179_tx_fixup, |
3846 |
+ }; |
3847 |
+@@ -1879,7 +1879,7 @@ static const struct driver_info lenovo_info = { |
3848 |
+ .link_reset = ax88179_link_reset, |
3849 |
+ .reset = ax88179_reset, |
3850 |
+ .stop = ax88179_stop, |
3851 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3852 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3853 |
+ .rx_fixup = ax88179_rx_fixup, |
3854 |
+ .tx_fixup = ax88179_tx_fixup, |
3855 |
+ }; |
3856 |
+@@ -1892,7 +1892,7 @@ static const struct driver_info belkin_info = { |
3857 |
+ .link_reset = ax88179_link_reset, |
3858 |
+ .reset = ax88179_reset, |
3859 |
+ .stop = ax88179_stop, |
3860 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3861 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3862 |
+ .rx_fixup = ax88179_rx_fixup, |
3863 |
+ .tx_fixup = ax88179_tx_fixup, |
3864 |
+ }; |
3865 |
+@@ -1905,7 +1905,7 @@ static const struct driver_info toshiba_info = { |
3866 |
+ .link_reset = ax88179_link_reset, |
3867 |
+ .reset = ax88179_reset, |
3868 |
+ .stop = ax88179_stop, |
3869 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3870 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3871 |
+ .rx_fixup = ax88179_rx_fixup, |
3872 |
+ .tx_fixup = ax88179_tx_fixup, |
3873 |
+ }; |
3874 |
+@@ -1918,7 +1918,7 @@ static const struct driver_info mct_info = { |
3875 |
+ .link_reset = ax88179_link_reset, |
3876 |
+ .reset = ax88179_reset, |
3877 |
+ .stop = ax88179_stop, |
3878 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3879 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3880 |
+ .rx_fixup = ax88179_rx_fixup, |
3881 |
+ .tx_fixup = ax88179_tx_fixup, |
3882 |
+ }; |
3883 |
+@@ -1931,7 +1931,7 @@ static const struct driver_info at_umc2000_info = { |
3884 |
+ .link_reset = ax88179_link_reset, |
3885 |
+ .reset = ax88179_reset, |
3886 |
+ .stop = ax88179_stop, |
3887 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3888 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3889 |
+ .rx_fixup = ax88179_rx_fixup, |
3890 |
+ .tx_fixup = ax88179_tx_fixup, |
3891 |
+ }; |
3892 |
+@@ -1944,7 +1944,7 @@ static const struct driver_info at_umc200_info = { |
3893 |
+ .link_reset = ax88179_link_reset, |
3894 |
+ .reset = ax88179_reset, |
3895 |
+ .stop = ax88179_stop, |
3896 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3897 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3898 |
+ .rx_fixup = ax88179_rx_fixup, |
3899 |
+ .tx_fixup = ax88179_tx_fixup, |
3900 |
+ }; |
3901 |
+@@ -1957,7 +1957,7 @@ static const struct driver_info at_umc2000sp_info = { |
3902 |
+ .link_reset = ax88179_link_reset, |
3903 |
+ .reset = ax88179_reset, |
3904 |
+ .stop = ax88179_stop, |
3905 |
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX, |
3906 |
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, |
3907 |
+ .rx_fixup = ax88179_rx_fixup, |
3908 |
+ .tx_fixup = ax88179_tx_fixup, |
3909 |
+ }; |
3910 |
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c |
3911 |
+index ee41088c52518..6b4efae11e57c 100644 |
3912 |
+--- a/drivers/net/usb/r8152.c |
3913 |
++++ b/drivers/net/usb/r8152.c |
3914 |
+@@ -32,7 +32,7 @@ |
3915 |
+ #define NETNEXT_VERSION "12" |
3916 |
+ |
3917 |
+ /* Information for net */ |
3918 |
+-#define NET_VERSION "12" |
3919 |
++#define NET_VERSION "13" |
3920 |
+ |
3921 |
+ #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION |
3922 |
+ #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
3923 |
+@@ -5915,7 +5915,8 @@ static void r8153_enter_oob(struct r8152 *tp) |
3924 |
+ |
3925 |
+ wait_oob_link_list_ready(tp); |
3926 |
+ |
3927 |
+- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); |
3928 |
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); |
3929 |
++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); |
3930 |
+ |
3931 |
+ switch (tp->version) { |
3932 |
+ case RTL_VER_03: |
3933 |
+@@ -5951,6 +5952,10 @@ static void r8153_enter_oob(struct r8152 *tp) |
3934 |
+ ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; |
3935 |
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); |
3936 |
+ |
3937 |
++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); |
3938 |
++ ocp_data |= MCU_BORW_EN; |
3939 |
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); |
3940 |
++ |
3941 |
+ rxdy_gated_en(tp, false); |
3942 |
+ |
3943 |
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); |
3944 |
+@@ -6553,6 +6558,9 @@ static void rtl8156_down(struct r8152 *tp) |
3945 |
+ rtl_disable(tp); |
3946 |
+ rtl_reset_bmu(tp); |
3947 |
+ |
3948 |
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); |
3949 |
++ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); |
3950 |
++ |
3951 |
+ /* Clear teredo wake event. bit[15:8] is the teredo wakeup |
3952 |
+ * type. Set it to zero. bits[7:0] are the W1C bits about |
3953 |
+ * the events. Set them to all 1 to clear them. |
3954 |
+@@ -6563,6 +6571,10 @@ static void rtl8156_down(struct r8152 *tp) |
3955 |
+ ocp_data |= NOW_IS_OOB; |
3956 |
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); |
3957 |
+ |
3958 |
++ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); |
3959 |
++ ocp_data |= MCU_BORW_EN; |
3960 |
++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); |
3961 |
++ |
3962 |
+ rtl_rx_vlan_en(tp, true); |
3963 |
+ rxdy_gated_en(tp, false); |
3964 |
+ |
3965 |
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c |
3966 |
+index d270a204324e9..fb185cb052583 100644 |
3967 |
+--- a/drivers/pci/controller/pci-hyperv.c |
3968 |
++++ b/drivers/pci/controller/pci-hyperv.c |
3969 |
+@@ -604,17 +604,19 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) |
3970 |
+ return cfg->vector; |
3971 |
+ } |
3972 |
+ |
3973 |
+-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, |
3974 |
+- struct msi_desc *msi_desc) |
3975 |
+-{ |
3976 |
+- msi_entry->address.as_uint32 = msi_desc->msg.address_lo; |
3977 |
+- msi_entry->data.as_uint32 = msi_desc->msg.data; |
3978 |
+-} |
3979 |
+- |
3980 |
+ static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, |
3981 |
+ int nvec, msi_alloc_info_t *info) |
3982 |
+ { |
3983 |
+- return pci_msi_prepare(domain, dev, nvec, info); |
3984 |
++ int ret = pci_msi_prepare(domain, dev, nvec, info); |
3985 |
++ |
3986 |
++ /* |
3987 |
++ * By using the interrupt remapper in the hypervisor IOMMU, contiguous |
3988 |
++ * CPU vectors is not needed for multi-MSI |
3989 |
++ */ |
3990 |
++ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) |
3991 |
++ info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; |
3992 |
++ |
3993 |
++ return ret; |
3994 |
+ } |
3995 |
+ |
3996 |
+ /** |
3997 |
+@@ -631,6 +633,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) |
3998 |
+ { |
3999 |
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(data); |
4000 |
+ struct hv_retarget_device_interrupt *params; |
4001 |
++ struct tran_int_desc *int_desc; |
4002 |
+ struct hv_pcibus_device *hbus; |
4003 |
+ struct cpumask *dest; |
4004 |
+ cpumask_var_t tmp; |
4005 |
+@@ -645,6 +648,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) |
4006 |
+ pdev = msi_desc_to_pci_dev(msi_desc); |
4007 |
+ pbus = pdev->bus; |
4008 |
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); |
4009 |
++ int_desc = data->chip_data; |
4010 |
+ |
4011 |
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); |
4012 |
+ |
4013 |
+@@ -652,7 +656,8 @@ static void hv_arch_irq_unmask(struct irq_data *data) |
4014 |
+ memset(params, 0, sizeof(*params)); |
4015 |
+ params->partition_id = HV_PARTITION_ID_SELF; |
4016 |
+ params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; |
4017 |
+-	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
4018 |
++ params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff; |
4019 |
++ params->int_entry.msi_entry.data.as_uint32 = int_desc->data; |
4020 |
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | |
4021 |
+ (hbus->hdev->dev_instance.b[4] << 16) | |
4022 |
+ (hbus->hdev->dev_instance.b[7] << 8) | |
4023 |
+@@ -1513,6 +1518,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, |
4024 |
+ u8 buffer[sizeof(struct pci_delete_interrupt)]; |
4025 |
+ } ctxt; |
4026 |
+ |
4027 |
++ if (!int_desc->vector_count) { |
4028 |
++ kfree(int_desc); |
4029 |
++ return; |
4030 |
++ } |
4031 |
+ memset(&ctxt, 0, sizeof(ctxt)); |
4032 |
+ int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; |
4033 |
+ int_pkt->message_type.type = |
4034 |
+@@ -1597,12 +1606,12 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, |
4035 |
+ |
4036 |
+ static u32 hv_compose_msi_req_v1( |
4037 |
+ struct pci_create_interrupt *int_pkt, struct cpumask *affinity, |
4038 |
+- u32 slot, u8 vector) |
4039 |
++ u32 slot, u8 vector, u8 vector_count) |
4040 |
+ { |
4041 |
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; |
4042 |
+ int_pkt->wslot.slot = slot; |
4043 |
+ int_pkt->int_desc.vector = vector; |
4044 |
+- int_pkt->int_desc.vector_count = 1; |
4045 |
++ int_pkt->int_desc.vector_count = vector_count; |
4046 |
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE; |
4047 |
+ |
4048 |
+ /* |
4049 |
+@@ -1625,14 +1634,14 @@ static int hv_compose_msi_req_get_cpu(struct cpumask *affinity) |
4050 |
+ |
4051 |
+ static u32 hv_compose_msi_req_v2( |
4052 |
+ struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, |
4053 |
+- u32 slot, u8 vector) |
4054 |
++ u32 slot, u8 vector, u8 vector_count) |
4055 |
+ { |
4056 |
+ int cpu; |
4057 |
+ |
4058 |
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; |
4059 |
+ int_pkt->wslot.slot = slot; |
4060 |
+ int_pkt->int_desc.vector = vector; |
4061 |
+- int_pkt->int_desc.vector_count = 1; |
4062 |
++ int_pkt->int_desc.vector_count = vector_count; |
4063 |
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE; |
4064 |
+ cpu = hv_compose_msi_req_get_cpu(affinity); |
4065 |
+ int_pkt->int_desc.processor_array[0] = |
4066 |
+@@ -1644,7 +1653,7 @@ static u32 hv_compose_msi_req_v2( |
4067 |
+ |
4068 |
+ static u32 hv_compose_msi_req_v3( |
4069 |
+ struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, |
4070 |
+- u32 slot, u32 vector) |
4071 |
++ u32 slot, u32 vector, u8 vector_count) |
4072 |
+ { |
4073 |
+ int cpu; |
4074 |
+ |
4075 |
+@@ -1652,7 +1661,7 @@ static u32 hv_compose_msi_req_v3( |
4076 |
+ int_pkt->wslot.slot = slot; |
4077 |
+ int_pkt->int_desc.vector = vector; |
4078 |
+ int_pkt->int_desc.reserved = 0; |
4079 |
+- int_pkt->int_desc.vector_count = 1; |
4080 |
++ int_pkt->int_desc.vector_count = vector_count; |
4081 |
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE; |
4082 |
+ cpu = hv_compose_msi_req_get_cpu(affinity); |
4083 |
+ int_pkt->int_desc.processor_array[0] = |
4084 |
+@@ -1683,6 +1692,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4085 |
+ struct cpumask *dest; |
4086 |
+ struct compose_comp_ctxt comp; |
4087 |
+ struct tran_int_desc *int_desc; |
4088 |
++ struct msi_desc *msi_desc; |
4089 |
++ u8 vector, vector_count; |
4090 |
+ struct { |
4091 |
+ struct pci_packet pci_pkt; |
4092 |
+ union { |
4093 |
+@@ -1695,7 +1706,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4094 |
+ u32 size; |
4095 |
+ int ret; |
4096 |
+ |
4097 |
+- pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); |
4098 |
++ /* Reuse the previous allocation */ |
4099 |
++ if (data->chip_data) { |
4100 |
++ int_desc = data->chip_data; |
4101 |
++ msg->address_hi = int_desc->address >> 32; |
4102 |
++ msg->address_lo = int_desc->address & 0xffffffff; |
4103 |
++ msg->data = int_desc->data; |
4104 |
++ return; |
4105 |
++ } |
4106 |
++ |
4107 |
++ msi_desc = irq_data_get_msi_desc(data); |
4108 |
++ pdev = msi_desc_to_pci_dev(msi_desc); |
4109 |
+ dest = irq_data_get_effective_affinity_mask(data); |
4110 |
+ pbus = pdev->bus; |
4111 |
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); |
4112 |
+@@ -1704,17 +1725,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4113 |
+ if (!hpdev) |
4114 |
+ goto return_null_message; |
4115 |
+ |
4116 |
+- /* Free any previous message that might have already been composed. */ |
4117 |
+- if (data->chip_data) { |
4118 |
+- int_desc = data->chip_data; |
4119 |
+- data->chip_data = NULL; |
4120 |
+- hv_int_desc_free(hpdev, int_desc); |
4121 |
+- } |
4122 |
+- |
4123 |
+ int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); |
4124 |
+ if (!int_desc) |
4125 |
+ goto drop_reference; |
4126 |
+ |
4127 |
++ if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) { |
4128 |
++ /* |
4129 |
++		 * If this is not the first MSI of a multi-MSI block, we already
4130 |
++		 * have a mapping and can exit early.
4131 |
++ */ |
4132 |
++ if (msi_desc->irq != data->irq) { |
4133 |
++ data->chip_data = int_desc; |
4134 |
++ int_desc->address = msi_desc->msg.address_lo | |
4135 |
++ (u64)msi_desc->msg.address_hi << 32; |
4136 |
++ int_desc->data = msi_desc->msg.data + |
4137 |
++ (data->irq - msi_desc->irq); |
4138 |
++ msg->address_hi = msi_desc->msg.address_hi; |
4139 |
++ msg->address_lo = msi_desc->msg.address_lo; |
4140 |
++ msg->data = int_desc->data; |
4141 |
++ put_pcichild(hpdev); |
4142 |
++ return; |
4143 |
++ } |
4144 |
++ /* |
4145 |
++ * The vector we select here is a dummy value. The correct |
4146 |
++ * value gets sent to the hypervisor in unmask(). This needs |
4147 |
++		 * to be aligned with the count, and also not zero. Multi-MSI
4148 |
++		 * block sizes are powers of 2 up to 32, so 32 will always work here.
4149 |
++ */ |
4150 |
++ vector = 32; |
4151 |
++ vector_count = msi_desc->nvec_used; |
4152 |
++ } else { |
4153 |
++ vector = hv_msi_get_int_vector(data); |
4154 |
++ vector_count = 1; |
4155 |
++ } |
4156 |
++ |
4157 |
+ memset(&ctxt, 0, sizeof(ctxt)); |
4158 |
+ init_completion(&comp.comp_pkt.host_event); |
4159 |
+ ctxt.pci_pkt.completion_func = hv_pci_compose_compl; |
4160 |
+@@ -1725,7 +1769,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4161 |
+ size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, |
4162 |
+ dest, |
4163 |
+ hpdev->desc.win_slot.slot, |
4164 |
+- hv_msi_get_int_vector(data)); |
4165 |
++ vector, |
4166 |
++ vector_count); |
4167 |
+ break; |
4168 |
+ |
4169 |
+ case PCI_PROTOCOL_VERSION_1_2: |
4170 |
+@@ -1733,14 +1778,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4171 |
+ size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, |
4172 |
+ dest, |
4173 |
+ hpdev->desc.win_slot.slot, |
4174 |
+- hv_msi_get_int_vector(data)); |
4175 |
++ vector, |
4176 |
++ vector_count); |
4177 |
+ break; |
4178 |
+ |
4179 |
+ case PCI_PROTOCOL_VERSION_1_4: |
4180 |
+ size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, |
4181 |
+ dest, |
4182 |
+ hpdev->desc.win_slot.slot, |
4183 |
+- hv_msi_get_int_vector(data)); |
4184 |
++ vector, |
4185 |
++ vector_count); |
4186 |
+ break; |
4187 |
+ |
4188 |
+ default: |
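The hv_compose_msi_msg() rework above programs one hypervisor mapping for an entire multi-MSI block and derives each secondary interrupt's message from the first descriptor. A minimal sketch of that derivation, relying (as the hunk does) on Linux handing out a block's IRQ numbers consecutively; multi_msi_data() is an illustrative name, not from the patch:

#include <linux/msi.h>

/* For interrupt 'irq' inside a multi-MSI block whose first descriptor
 * is 'base', the per-vector MSI data is the base message data plus the
 * vector's position within the block.
 */
static u32 multi_msi_data(const struct msi_desc *base, unsigned int irq)
{
        return base->msg.data + (irq - base->irq);
}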
4189 |
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c |
4190 |
+index adccf03b3e5af..b920dd5237c75 100644 |
4191 |
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c |
4192 |
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c |
4193 |
+@@ -101,7 +101,7 @@ struct armada_37xx_pinctrl { |
4194 |
+ struct device *dev; |
4195 |
+ struct gpio_chip gpio_chip; |
4196 |
+ struct irq_chip irq_chip; |
4197 |
+- spinlock_t irq_lock; |
4198 |
++ raw_spinlock_t irq_lock; |
4199 |
+ struct pinctrl_desc pctl; |
4200 |
+ struct pinctrl_dev *pctl_dev; |
4201 |
+ struct armada_37xx_pin_group *groups; |
4202 |
+@@ -522,9 +522,9 @@ static void armada_37xx_irq_ack(struct irq_data *d) |
4203 |
+ unsigned long flags; |
4204 |
+ |
4205 |
+	armada_37xx_irq_update_reg(&reg, d);
4206 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4207 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4208 |
+ writel(d->mask, info->base + reg); |
4209 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4210 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4211 |
+ } |
4212 |
+ |
4213 |
+ static void armada_37xx_irq_mask(struct irq_data *d) |
4214 |
+@@ -535,10 +535,10 @@ static void armada_37xx_irq_mask(struct irq_data *d) |
4215 |
+ unsigned long flags; |
4216 |
+ |
4217 |
+	armada_37xx_irq_update_reg(&reg, d);
4218 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4219 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4220 |
+ val = readl(info->base + reg); |
4221 |
+ writel(val & ~d->mask, info->base + reg); |
4222 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4223 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4224 |
+ } |
4225 |
+ |
4226 |
+ static void armada_37xx_irq_unmask(struct irq_data *d) |
4227 |
+@@ -549,10 +549,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d) |
4228 |
+ unsigned long flags; |
4229 |
+ |
4230 |
+	armada_37xx_irq_update_reg(&reg, d);
4231 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4232 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4233 |
+ val = readl(info->base + reg); |
4234 |
+ writel(val | d->mask, info->base + reg); |
4235 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4236 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4237 |
+ } |
4238 |
+ |
4239 |
+ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) |
4240 |
+@@ -563,14 +563,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) |
4241 |
+ unsigned long flags; |
4242 |
+ |
4243 |
+	armada_37xx_irq_update_reg(&reg, d);
4244 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4245 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4246 |
+ val = readl(info->base + reg); |
4247 |
+ if (on) |
4248 |
+ val |= (BIT(d->hwirq % GPIO_PER_REG)); |
4249 |
+ else |
4250 |
+ val &= ~(BIT(d->hwirq % GPIO_PER_REG)); |
4251 |
+ writel(val, info->base + reg); |
4252 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4253 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4254 |
+ |
4255 |
+ return 0; |
4256 |
+ } |
4257 |
+@@ -582,7 +582,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) |
4258 |
+ u32 val, reg = IRQ_POL; |
4259 |
+ unsigned long flags; |
4260 |
+ |
4261 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4262 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4263 |
+	armada_37xx_irq_update_reg(&reg, d);
4264 |
+ val = readl(info->base + reg); |
4265 |
+ switch (type) { |
4266 |
+@@ -606,11 +606,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) |
4267 |
+ break; |
4268 |
+ } |
4269 |
+ default: |
4270 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4271 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4272 |
+ return -EINVAL; |
4273 |
+ } |
4274 |
+ writel(val, info->base + reg); |
4275 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4276 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4277 |
+ |
4278 |
+ return 0; |
4279 |
+ } |
4280 |
+@@ -625,7 +625,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info, |
4281 |
+ |
4282 |
+ regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l); |
4283 |
+ |
4284 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4285 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4286 |
+ p = readl(info->base + IRQ_POL + 4 * reg_idx); |
4287 |
+ if ((p ^ l) & (1 << bit_num)) { |
4288 |
+ /* |
4289 |
+@@ -646,7 +646,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info, |
4290 |
+ ret = -1; |
4291 |
+ } |
4292 |
+ |
4293 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4294 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4295 |
+ return ret; |
4296 |
+ } |
4297 |
+ |
4298 |
+@@ -663,11 +663,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) |
4299 |
+ u32 status; |
4300 |
+ unsigned long flags; |
4301 |
+ |
4302 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4303 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4304 |
+ status = readl_relaxed(info->base + IRQ_STATUS + 4 * i); |
4305 |
+ /* Manage only the interrupt that was enabled */ |
4306 |
+ status &= readl_relaxed(info->base + IRQ_EN + 4 * i); |
4307 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4308 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4309 |
+ while (status) { |
4310 |
+ u32 hwirq = ffs(status) - 1; |
4311 |
+ u32 virq = irq_find_mapping(d, hwirq + |
4312 |
+@@ -694,12 +694,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) |
4313 |
+ |
4314 |
+ update_status: |
4315 |
+ /* Update status in case a new IRQ appears */ |
4316 |
+- spin_lock_irqsave(&info->irq_lock, flags); |
4317 |
++ raw_spin_lock_irqsave(&info->irq_lock, flags); |
4318 |
+ status = readl_relaxed(info->base + |
4319 |
+ IRQ_STATUS + 4 * i); |
4320 |
+ /* Manage only the interrupt that was enabled */ |
4321 |
+ status &= readl_relaxed(info->base + IRQ_EN + 4 * i); |
4322 |
+- spin_unlock_irqrestore(&info->irq_lock, flags); |
4323 |
++ raw_spin_unlock_irqrestore(&info->irq_lock, flags); |
4324 |
+ } |
4325 |
+ } |
4326 |
+ chained_irq_exit(chip, desc); |
4327 |
+@@ -726,23 +726,13 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, |
4328 |
+ struct gpio_chip *gc = &info->gpio_chip; |
4329 |
+ struct irq_chip *irqchip = &info->irq_chip; |
4330 |
+ struct gpio_irq_chip *girq = &gc->irq; |
4331 |
++ struct device_node *np = to_of_node(gc->fwnode); |
4332 |
+ struct device *dev = &pdev->dev; |
4333 |
+- struct device_node *np; |
4334 |
+- int ret = -ENODEV, i, nr_irq_parent; |
4335 |
++ unsigned int i, nr_irq_parent; |
4336 |
+ |
4337 |
+- /* Check if we have at least one gpio-controller child node */ |
4338 |
+- for_each_child_of_node(dev->of_node, np) { |
4339 |
+- if (of_property_read_bool(np, "gpio-controller")) { |
4340 |
+- ret = 0; |
4341 |
+- break; |
4342 |
+- } |
4343 |
+- } |
4344 |
+- if (ret) |
4345 |
+- return dev_err_probe(dev, ret, "no gpio-controller child node\n"); |
4346 |
++ raw_spin_lock_init(&info->irq_lock); |
4347 |
+ |
4348 |
+ nr_irq_parent = of_irq_count(np); |
4349 |
+- spin_lock_init(&info->irq_lock); |
4350 |
+- |
4351 |
+ if (!nr_irq_parent) { |
4352 |
+ dev_err(dev, "invalid or no IRQ\n"); |
4353 |
+ return 0; |
4354 |
+@@ -1121,25 +1111,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = { |
4355 |
+ { }, |
4356 |
+ }; |
4357 |
+ |
4358 |
++static const struct regmap_config armada_37xx_pinctrl_regmap_config = { |
4359 |
++ .reg_bits = 32, |
4360 |
++ .val_bits = 32, |
4361 |
++ .reg_stride = 4, |
4362 |
++ .use_raw_spinlock = true, |
4363 |
++}; |
4364 |
++ |
4365 |
+ static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev) |
4366 |
+ { |
4367 |
+ struct armada_37xx_pinctrl *info; |
4368 |
+ struct device *dev = &pdev->dev; |
4369 |
+- struct device_node *np = dev->of_node; |
4370 |
+ struct regmap *regmap; |
4371 |
++ void __iomem *base; |
4372 |
+ int ret; |
4373 |
+ |
4374 |
++ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); |
4375 |
++ if (IS_ERR(base)) { |
4376 |
++ dev_err(dev, "failed to ioremap base address: %pe\n", base); |
4377 |
++ return PTR_ERR(base); |
4378 |
++ } |
4379 |
++ |
4380 |
++ regmap = devm_regmap_init_mmio(dev, base, |
4381 |
++ &armada_37xx_pinctrl_regmap_config); |
4382 |
++ if (IS_ERR(regmap)) { |
4383 |
++ dev_err(dev, "failed to create regmap: %pe\n", regmap); |
4384 |
++ return PTR_ERR(regmap); |
4385 |
++ } |
4386 |
++ |
4387 |
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); |
4388 |
+ if (!info) |
4389 |
+ return -ENOMEM; |
4390 |
+ |
4391 |
+ info->dev = dev; |
4392 |
+- |
4393 |
+- regmap = syscon_node_to_regmap(np); |
4394 |
+- if (IS_ERR(regmap)) |
4395 |
+- return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n"); |
4396 |
+ info->regmap = regmap; |
4397 |
+- |
4398 |
+ info->data = of_device_get_match_data(dev); |
4399 |
+ |
4400 |
+ ret = armada_37xx_pinctrl_register(pdev, info); |
4401 |
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c |
4402 |
+index 6a956ee94494f..6ee9f0de8ede3 100644 |
4403 |
+--- a/drivers/pinctrl/pinctrl-ocelot.c |
4404 |
++++ b/drivers/pinctrl/pinctrl-ocelot.c |
4405 |
+@@ -28,19 +28,12 @@ |
4406 |
+ #define ocelot_clrsetbits(addr, clear, set) \ |
4407 |
+ writel((readl(addr) & ~(clear)) | (set), (addr)) |
4408 |
+ |
4409 |
+-/* PINCONFIG bits (sparx5 only) */ |
4410 |
+ enum { |
4411 |
+ PINCONF_BIAS, |
4412 |
+ PINCONF_SCHMITT, |
4413 |
+ PINCONF_DRIVE_STRENGTH, |
4414 |
+ }; |
4415 |
+ |
4416 |
+-#define BIAS_PD_BIT BIT(4) |
4417 |
+-#define BIAS_PU_BIT BIT(3) |
4418 |
+-#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT) |
4419 |
+-#define SCHMITT_BIT BIT(2) |
4420 |
+-#define DRIVE_BITS GENMASK(1, 0) |
4421 |
+- |
4422 |
+ /* GPIO standard registers */ |
4423 |
+ #define OCELOT_GPIO_OUT_SET 0x0 |
4424 |
+ #define OCELOT_GPIO_OUT_CLR 0x4 |
4425 |
+@@ -314,6 +307,13 @@ struct ocelot_pin_caps { |
4426 |
+ unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */ |
4427 |
+ }; |
4428 |
+ |
4429 |
++struct ocelot_pincfg_data { |
4430 |
++ u8 pd_bit; |
4431 |
++ u8 pu_bit; |
4432 |
++ u8 drive_bits; |
4433 |
++ u8 schmitt_bit; |
4434 |
++}; |
4435 |
++ |
4436 |
+ struct ocelot_pinctrl { |
4437 |
+ struct device *dev; |
4438 |
+ struct pinctrl_dev *pctl; |
4439 |
+@@ -321,10 +321,16 @@ struct ocelot_pinctrl { |
4440 |
+ struct regmap *map; |
4441 |
+ struct regmap *pincfg; |
4442 |
+ struct pinctrl_desc *desc; |
4443 |
++ const struct ocelot_pincfg_data *pincfg_data; |
4444 |
+ struct ocelot_pmx_func func[FUNC_MAX]; |
4445 |
+ u8 stride; |
4446 |
+ }; |
4447 |
+ |
4448 |
++struct ocelot_match_data { |
4449 |
++ struct pinctrl_desc desc; |
4450 |
++ struct ocelot_pincfg_data pincfg_data; |
4451 |
++}; |
4452 |
++ |
4453 |
+ #define LUTON_P(p, f0, f1) \ |
4454 |
+ static struct ocelot_pin_caps luton_pin_##p = { \ |
4455 |
+ .pin = p, \ |
4456 |
+@@ -1318,24 +1324,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info, |
4457 |
+ int ret = -EOPNOTSUPP; |
4458 |
+ |
4459 |
+ if (info->pincfg) { |
4460 |
++ const struct ocelot_pincfg_data *opd = info->pincfg_data; |
4461 |
+ u32 regcfg; |
4462 |
+ |
4463 |
+-		ret = regmap_read(info->pincfg, pin, &regcfg);
4464 |
++ ret = regmap_read(info->pincfg, |
4465 |
++ pin * regmap_get_reg_stride(info->pincfg), |
4466 |
++				  &regcfg);
4467 |
+ if (ret) |
4468 |
+ return ret; |
4469 |
+ |
4470 |
+ ret = 0; |
4471 |
+ switch (reg) { |
4472 |
+ case PINCONF_BIAS: |
4473 |
+- *val = regcfg & BIAS_BITS; |
4474 |
++ *val = regcfg & (opd->pd_bit | opd->pu_bit); |
4475 |
+ break; |
4476 |
+ |
4477 |
+ case PINCONF_SCHMITT: |
4478 |
+- *val = regcfg & SCHMITT_BIT; |
4479 |
++ *val = regcfg & opd->schmitt_bit; |
4480 |
+ break; |
4481 |
+ |
4482 |
+ case PINCONF_DRIVE_STRENGTH: |
4483 |
+- *val = regcfg & DRIVE_BITS; |
4484 |
++ *val = regcfg & opd->drive_bits; |
4485 |
+ break; |
4486 |
+ |
4487 |
+ default: |
4488 |
+@@ -1352,14 +1361,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr, |
4489 |
+ u32 val; |
4490 |
+ int ret; |
4491 |
+ |
4492 |
+- ret = regmap_read(info->pincfg, regaddr, &val); |
4493 |
++ ret = regmap_read(info->pincfg, |
4494 |
++ regaddr * regmap_get_reg_stride(info->pincfg), |
4495 |
++ &val); |
4496 |
+ if (ret) |
4497 |
+ return ret; |
4498 |
+ |
4499 |
+ val &= ~clrbits; |
4500 |
+ val |= setbits; |
4501 |
+ |
4502 |
+- ret = regmap_write(info->pincfg, regaddr, val); |
4503 |
++ ret = regmap_write(info->pincfg, |
4504 |
++ regaddr * regmap_get_reg_stride(info->pincfg), |
4505 |
++ val); |
4506 |
+ |
4507 |
+ return ret; |
4508 |
+ } |
4509 |
+@@ -1372,23 +1385,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info, |
4510 |
+ int ret = -EOPNOTSUPP; |
4511 |
+ |
4512 |
+ if (info->pincfg) { |
4513 |
++ const struct ocelot_pincfg_data *opd = info->pincfg_data; |
4514 |
+ |
4515 |
+ ret = 0; |
4516 |
+ switch (reg) { |
4517 |
+ case PINCONF_BIAS: |
4518 |
+- ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS, |
4519 |
++ ret = ocelot_pincfg_clrsetbits(info, pin, |
4520 |
++ opd->pd_bit | opd->pu_bit, |
4521 |
+ val); |
4522 |
+ break; |
4523 |
+ |
4524 |
+ case PINCONF_SCHMITT: |
4525 |
+- ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT, |
4526 |
++ ret = ocelot_pincfg_clrsetbits(info, pin, |
4527 |
++ opd->schmitt_bit, |
4528 |
+ val); |
4529 |
+ break; |
4530 |
+ |
4531 |
+ case PINCONF_DRIVE_STRENGTH: |
4532 |
+ if (val <= 3) |
4533 |
+ ret = ocelot_pincfg_clrsetbits(info, pin, |
4534 |
+- DRIVE_BITS, val); |
4535 |
++ opd->drive_bits, |
4536 |
++ val); |
4537 |
+ else |
4538 |
+ ret = -EINVAL; |
4539 |
+ break; |
4540 |
+@@ -1418,17 +1435,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev, |
4541 |
+ if (param == PIN_CONFIG_BIAS_DISABLE) |
4542 |
+ val = (val == 0); |
4543 |
+ else if (param == PIN_CONFIG_BIAS_PULL_DOWN) |
4544 |
+- val = (val & BIAS_PD_BIT ? true : false); |
4545 |
++ val = !!(val & info->pincfg_data->pd_bit); |
4546 |
+ else /* PIN_CONFIG_BIAS_PULL_UP */ |
4547 |
+- val = (val & BIAS_PU_BIT ? true : false); |
4548 |
++ val = !!(val & info->pincfg_data->pu_bit); |
4549 |
+ break; |
4550 |
+ |
4551 |
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: |
4552 |
++ if (!info->pincfg_data->schmitt_bit) |
4553 |
++ return -EOPNOTSUPP; |
4554 |
++ |
4555 |
+ err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val); |
4556 |
+ if (err) |
4557 |
+ return err; |
4558 |
+ |
4559 |
+- val = (val & SCHMITT_BIT ? true : false); |
4560 |
++ val = !!(val & info->pincfg_data->schmitt_bit); |
4561 |
+ break; |
4562 |
+ |
4563 |
+ case PIN_CONFIG_DRIVE_STRENGTH: |
4564 |
+@@ -1472,6 +1492,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, |
4565 |
+ unsigned long *configs, unsigned int num_configs) |
4566 |
+ { |
4567 |
+ struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); |
4568 |
++ const struct ocelot_pincfg_data *opd = info->pincfg_data; |
4569 |
+ u32 param, arg, p; |
4570 |
+ int cfg, err = 0; |
4571 |
+ |
4572 |
+@@ -1484,8 +1505,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, |
4573 |
+ case PIN_CONFIG_BIAS_PULL_UP: |
4574 |
+ case PIN_CONFIG_BIAS_PULL_DOWN: |
4575 |
+ arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 : |
4576 |
+- (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT : |
4577 |
+- BIAS_PD_BIT; |
4578 |
++ (param == PIN_CONFIG_BIAS_PULL_UP) ? |
4579 |
++ opd->pu_bit : opd->pd_bit; |
4580 |
+ |
4581 |
+ err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg); |
4582 |
+ if (err) |
4583 |
+@@ -1494,7 +1515,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, |
4584 |
+ break; |
4585 |
+ |
4586 |
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: |
4587 |
+- arg = arg ? SCHMITT_BIT : 0; |
4588 |
++ if (!opd->schmitt_bit) |
4589 |
++ return -EOPNOTSUPP; |
4590 |
++ |
4591 |
++ arg = arg ? opd->schmitt_bit : 0; |
4592 |
+ err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT, |
4593 |
+ arg); |
4594 |
+ if (err) |
4595 |
+@@ -1555,69 +1579,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = { |
4596 |
+ .dt_free_map = pinconf_generic_dt_free_map, |
4597 |
+ }; |
4598 |
+ |
4599 |
+-static struct pinctrl_desc luton_desc = { |
4600 |
+- .name = "luton-pinctrl", |
4601 |
+- .pins = luton_pins, |
4602 |
+- .npins = ARRAY_SIZE(luton_pins), |
4603 |
+- .pctlops = &ocelot_pctl_ops, |
4604 |
+- .pmxops = &ocelot_pmx_ops, |
4605 |
+- .owner = THIS_MODULE, |
4606 |
++static struct ocelot_match_data luton_desc = { |
4607 |
++ .desc = { |
4608 |
++ .name = "luton-pinctrl", |
4609 |
++ .pins = luton_pins, |
4610 |
++ .npins = ARRAY_SIZE(luton_pins), |
4611 |
++ .pctlops = &ocelot_pctl_ops, |
4612 |
++ .pmxops = &ocelot_pmx_ops, |
4613 |
++ .owner = THIS_MODULE, |
4614 |
++ }, |
4615 |
+ }; |
4616 |
+ |
4617 |
+-static struct pinctrl_desc serval_desc = { |
4618 |
+- .name = "serval-pinctrl", |
4619 |
+- .pins = serval_pins, |
4620 |
+- .npins = ARRAY_SIZE(serval_pins), |
4621 |
+- .pctlops = &ocelot_pctl_ops, |
4622 |
+- .pmxops = &ocelot_pmx_ops, |
4623 |
+- .owner = THIS_MODULE, |
4624 |
++static struct ocelot_match_data serval_desc = { |
4625 |
++ .desc = { |
4626 |
++ .name = "serval-pinctrl", |
4627 |
++ .pins = serval_pins, |
4628 |
++ .npins = ARRAY_SIZE(serval_pins), |
4629 |
++ .pctlops = &ocelot_pctl_ops, |
4630 |
++ .pmxops = &ocelot_pmx_ops, |
4631 |
++ .owner = THIS_MODULE, |
4632 |
++ }, |
4633 |
+ }; |
4634 |
+ |
4635 |
+-static struct pinctrl_desc ocelot_desc = { |
4636 |
+- .name = "ocelot-pinctrl", |
4637 |
+- .pins = ocelot_pins, |
4638 |
+- .npins = ARRAY_SIZE(ocelot_pins), |
4639 |
+- .pctlops = &ocelot_pctl_ops, |
4640 |
+- .pmxops = &ocelot_pmx_ops, |
4641 |
+- .owner = THIS_MODULE, |
4642 |
++static struct ocelot_match_data ocelot_desc = { |
4643 |
++ .desc = { |
4644 |
++ .name = "ocelot-pinctrl", |
4645 |
++ .pins = ocelot_pins, |
4646 |
++ .npins = ARRAY_SIZE(ocelot_pins), |
4647 |
++ .pctlops = &ocelot_pctl_ops, |
4648 |
++ .pmxops = &ocelot_pmx_ops, |
4649 |
++ .owner = THIS_MODULE, |
4650 |
++ }, |
4651 |
+ }; |
4652 |
+ |
4653 |
+-static struct pinctrl_desc jaguar2_desc = { |
4654 |
+- .name = "jaguar2-pinctrl", |
4655 |
+- .pins = jaguar2_pins, |
4656 |
+- .npins = ARRAY_SIZE(jaguar2_pins), |
4657 |
+- .pctlops = &ocelot_pctl_ops, |
4658 |
+- .pmxops = &ocelot_pmx_ops, |
4659 |
+- .owner = THIS_MODULE, |
4660 |
++static struct ocelot_match_data jaguar2_desc = { |
4661 |
++ .desc = { |
4662 |
++ .name = "jaguar2-pinctrl", |
4663 |
++ .pins = jaguar2_pins, |
4664 |
++ .npins = ARRAY_SIZE(jaguar2_pins), |
4665 |
++ .pctlops = &ocelot_pctl_ops, |
4666 |
++ .pmxops = &ocelot_pmx_ops, |
4667 |
++ .owner = THIS_MODULE, |
4668 |
++ }, |
4669 |
+ }; |
4670 |
+ |
4671 |
+-static struct pinctrl_desc servalt_desc = { |
4672 |
+- .name = "servalt-pinctrl", |
4673 |
+- .pins = servalt_pins, |
4674 |
+- .npins = ARRAY_SIZE(servalt_pins), |
4675 |
+- .pctlops = &ocelot_pctl_ops, |
4676 |
+- .pmxops = &ocelot_pmx_ops, |
4677 |
+- .owner = THIS_MODULE, |
4678 |
++static struct ocelot_match_data servalt_desc = { |
4679 |
++ .desc = { |
4680 |
++ .name = "servalt-pinctrl", |
4681 |
++ .pins = servalt_pins, |
4682 |
++ .npins = ARRAY_SIZE(servalt_pins), |
4683 |
++ .pctlops = &ocelot_pctl_ops, |
4684 |
++ .pmxops = &ocelot_pmx_ops, |
4685 |
++ .owner = THIS_MODULE, |
4686 |
++ }, |
4687 |
+ }; |
4688 |
+ |
4689 |
+-static struct pinctrl_desc sparx5_desc = { |
4690 |
+- .name = "sparx5-pinctrl", |
4691 |
+- .pins = sparx5_pins, |
4692 |
+- .npins = ARRAY_SIZE(sparx5_pins), |
4693 |
+- .pctlops = &ocelot_pctl_ops, |
4694 |
+- .pmxops = &ocelot_pmx_ops, |
4695 |
+- .confops = &ocelot_confops, |
4696 |
+- .owner = THIS_MODULE, |
4697 |
++static struct ocelot_match_data sparx5_desc = { |
4698 |
++ .desc = { |
4699 |
++ .name = "sparx5-pinctrl", |
4700 |
++ .pins = sparx5_pins, |
4701 |
++ .npins = ARRAY_SIZE(sparx5_pins), |
4702 |
++ .pctlops = &ocelot_pctl_ops, |
4703 |
++ .pmxops = &ocelot_pmx_ops, |
4704 |
++ .confops = &ocelot_confops, |
4705 |
++ .owner = THIS_MODULE, |
4706 |
++ }, |
4707 |
++ .pincfg_data = { |
4708 |
++ .pd_bit = BIT(4), |
4709 |
++ .pu_bit = BIT(3), |
4710 |
++ .drive_bits = GENMASK(1, 0), |
4711 |
++ .schmitt_bit = BIT(2), |
4712 |
++ }, |
4713 |
+ }; |
4714 |
+ |
4715 |
+-static struct pinctrl_desc lan966x_desc = { |
4716 |
+- .name = "lan966x-pinctrl", |
4717 |
+- .pins = lan966x_pins, |
4718 |
+- .npins = ARRAY_SIZE(lan966x_pins), |
4719 |
+- .pctlops = &ocelot_pctl_ops, |
4720 |
+- .pmxops = &lan966x_pmx_ops, |
4721 |
+- .confops = &ocelot_confops, |
4722 |
+- .owner = THIS_MODULE, |
4723 |
++static struct ocelot_match_data lan966x_desc = { |
4724 |
++ .desc = { |
4725 |
++ .name = "lan966x-pinctrl", |
4726 |
++ .pins = lan966x_pins, |
4727 |
++ .npins = ARRAY_SIZE(lan966x_pins), |
4728 |
++ .pctlops = &ocelot_pctl_ops, |
4729 |
++ .pmxops = &lan966x_pmx_ops, |
4730 |
++ .confops = &ocelot_confops, |
4731 |
++ .owner = THIS_MODULE, |
4732 |
++ }, |
4733 |
++ .pincfg_data = { |
4734 |
++ .pd_bit = BIT(3), |
4735 |
++ .pu_bit = BIT(2), |
4736 |
++ .drive_bits = GENMASK(1, 0), |
4737 |
++ }, |
4738 |
+ }; |
4739 |
+ |
4740 |
+ static int ocelot_create_group_func_map(struct device *dev, |
4741 |
+@@ -1883,7 +1932,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = { |
4742 |
+ {}, |
4743 |
+ }; |
4744 |
+ |
4745 |
+-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) |
4746 |
++static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev, |
4747 |
++ const struct ocelot_pinctrl *info) |
4748 |
+ { |
4749 |
+ void __iomem *base; |
4750 |
+ |
4751 |
+@@ -1891,7 +1941,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) |
4752 |
+ .reg_bits = 32, |
4753 |
+ .val_bits = 32, |
4754 |
+ .reg_stride = 4, |
4755 |
+- .max_register = 32, |
4756 |
++ .max_register = info->desc->npins * 4, |
4757 |
+ .name = "pincfg", |
4758 |
+ }; |
4759 |
+ |
4760 |
+@@ -1906,6 +1956,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) |
4761 |
+ |
4762 |
+ static int ocelot_pinctrl_probe(struct platform_device *pdev) |
4763 |
+ { |
4764 |
++ const struct ocelot_match_data *data; |
4765 |
+ struct device *dev = &pdev->dev; |
4766 |
+ struct ocelot_pinctrl *info; |
4767 |
+ struct regmap *pincfg; |
4768 |
+@@ -1921,7 +1972,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) |
4769 |
+ if (!info) |
4770 |
+ return -ENOMEM; |
4771 |
+ |
4772 |
+- info->desc = (struct pinctrl_desc *)device_get_match_data(dev); |
4773 |
++ data = device_get_match_data(dev); |
4774 |
++ if (!data) |
4775 |
++ return -EINVAL; |
4776 |
++ |
4777 |
++ info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc), |
4778 |
++ GFP_KERNEL); |
4779 |
++ if (!info->desc) |
4780 |
++ return -ENOMEM; |
4781 |
++ |
4782 |
++ info->pincfg_data = &data->pincfg_data; |
4783 |
+ |
4784 |
+ base = devm_ioremap_resource(dev, |
4785 |
+ platform_get_resource(pdev, IORESOURCE_MEM, 0)); |
4786 |
+@@ -1942,7 +2002,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) |
4787 |
+ |
4788 |
+ /* Pinconf registers */ |
4789 |
+ if (info->desc->confops) { |
4790 |
+- pincfg = ocelot_pinctrl_create_pincfg(pdev); |
4791 |
++ pincfg = ocelot_pinctrl_create_pincfg(pdev, info); |
4792 |
+ if (IS_ERR(pincfg)) |
4793 |
+ dev_dbg(dev, "Failed to create pincfg regmap\n"); |
4794 |
+ else |
4795 |
+diff --git a/drivers/pinctrl/ralink/Kconfig b/drivers/pinctrl/ralink/Kconfig |
4796 |
+index a76ee3deb8c31..d0f0a8f2b9b7d 100644 |
4797 |
+--- a/drivers/pinctrl/ralink/Kconfig |
4798 |
++++ b/drivers/pinctrl/ralink/Kconfig |
4799 |
+@@ -3,37 +3,33 @@ menu "Ralink pinctrl drivers" |
4800 |
+ depends on RALINK |
4801 |
+ |
4802 |
+ config PINCTRL_RALINK |
4803 |
+- bool "Ralink pin control support" |
4804 |
+- default y if RALINK |
4805 |
+- |
4806 |
+-config PINCTRL_RT2880 |
4807 |
+- bool "RT2880 pinctrl driver for RALINK/Mediatek SOCs" |
4808 |
++ bool "Ralink pinctrl driver" |
4809 |
+ select PINMUX |
4810 |
+ select GENERIC_PINCONF |
4811 |
+ |
4812 |
+ config PINCTRL_MT7620 |
4813 |
+ bool "mt7620 pinctrl driver for RALINK/Mediatek SOCs" |
4814 |
+ depends on RALINK && SOC_MT7620 |
4815 |
+- select PINCTRL_RT2880 |
4816 |
++ select PINCTRL_RALINK |
4817 |
+ |
4818 |
+ config PINCTRL_MT7621 |
4819 |
+ bool "mt7621 pinctrl driver for RALINK/Mediatek SOCs" |
4820 |
+ depends on RALINK && SOC_MT7621 |
4821 |
+- select PINCTRL_RT2880 |
4822 |
++ select PINCTRL_RALINK |
4823 |
+ |
4824 |
+ config PINCTRL_RT288X |
4825 |
+ bool "RT288X pinctrl driver for RALINK/Mediatek SOCs" |
4826 |
+ depends on RALINK && SOC_RT288X |
4827 |
+- select PINCTRL_RT2880 |
4828 |
++ select PINCTRL_RALINK |
4829 |
+ |
4830 |
+ config PINCTRL_RT305X |
4831 |
+ bool "RT305X pinctrl driver for RALINK/Mediatek SOCs" |
4832 |
+ depends on RALINK && SOC_RT305X |
4833 |
+- select PINCTRL_RT2880 |
4834 |
++ select PINCTRL_RALINK |
4835 |
+ |
4836 |
+ config PINCTRL_RT3883 |
4837 |
+ bool "RT3883 pinctrl driver for RALINK/Mediatek SOCs" |
4838 |
+ depends on RALINK && SOC_RT3883 |
4839 |
+- select PINCTRL_RT2880 |
4840 |
++ select PINCTRL_RALINK |
4841 |
+ |
4842 |
+ endmenu |
4843 |
+diff --git a/drivers/pinctrl/ralink/Makefile b/drivers/pinctrl/ralink/Makefile |
4844 |
+index a15610206ced4..2c1323b74e96f 100644 |
4845 |
+--- a/drivers/pinctrl/ralink/Makefile |
4846 |
++++ b/drivers/pinctrl/ralink/Makefile |
4847 |
+@@ -1,5 +1,5 @@ |
4848 |
+ # SPDX-License-Identifier: GPL-2.0 |
4849 |
+-obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o |
4850 |
++obj-$(CONFIG_PINCTRL_RALINK) += pinctrl-ralink.o |
4851 |
+ |
4852 |
+ obj-$(CONFIG_PINCTRL_MT7620) += pinctrl-mt7620.o |
4853 |
+ obj-$(CONFIG_PINCTRL_MT7621) += pinctrl-mt7621.o |
4854 |
+diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c |
4855 |
+index 6853b5b8b0fe7..51b863d85c51e 100644 |
4856 |
+--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c |
4857 |
++++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c |
4858 |
+@@ -5,7 +5,7 @@ |
4859 |
+ #include <linux/module.h> |
4860 |
+ #include <linux/platform_device.h> |
4861 |
+ #include <linux/of.h> |
4862 |
+-#include "pinmux.h" |
4863 |
++#include "pinctrl-ralink.h" |
4864 |
+ |
4865 |
+ #define MT7620_GPIO_MODE_UART0_SHIFT 2 |
4866 |
+ #define MT7620_GPIO_MODE_UART0_MASK 0x7 |
4867 |
+@@ -54,20 +54,20 @@ |
4868 |
+ #define MT7620_GPIO_MODE_EPHY 15 |
4869 |
+ #define MT7620_GPIO_MODE_PA 20 |
4870 |
+ |
4871 |
+-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; |
4872 |
+-static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; |
4873 |
+-static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; |
4874 |
+-static struct rt2880_pmx_func mdio_grp[] = { |
4875 |
++static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; |
4876 |
++static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; |
4877 |
++static struct ralink_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; |
4878 |
++static struct ralink_pmx_func mdio_grp[] = { |
4879 |
+ FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2), |
4880 |
+ FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2), |
4881 |
+ }; |
4882 |
+-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; |
4883 |
+-static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; |
4884 |
+-static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; |
4885 |
+-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; |
4886 |
+-static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; |
4887 |
+-static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; |
4888 |
+-static struct rt2880_pmx_func uartf_grp[] = { |
4889 |
++static struct ralink_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; |
4890 |
++static struct ralink_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; |
4891 |
++static struct ralink_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; |
4892 |
++static struct ralink_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; |
4893 |
++static struct ralink_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; |
4894 |
++static struct ralink_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; |
4895 |
++static struct ralink_pmx_func uartf_grp[] = { |
4896 |
+ FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8), |
4897 |
+ FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8), |
4898 |
+ FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8), |
4899 |
+@@ -76,20 +76,20 @@ static struct rt2880_pmx_func uartf_grp[] = { |
4900 |
+ FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4), |
4901 |
+ FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4), |
4902 |
+ }; |
4903 |
+-static struct rt2880_pmx_func wdt_grp[] = { |
4904 |
++static struct ralink_pmx_func wdt_grp[] = { |
4905 |
+ FUNC("wdt rst", 0, 17, 1), |
4906 |
+ FUNC("wdt refclk", 0, 17, 1), |
4907 |
+ }; |
4908 |
+-static struct rt2880_pmx_func pcie_rst_grp[] = { |
4909 |
++static struct ralink_pmx_func pcie_rst_grp[] = { |
4910 |
+ FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1), |
4911 |
+ FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1) |
4912 |
+ }; |
4913 |
+-static struct rt2880_pmx_func nd_sd_grp[] = { |
4914 |
++static struct ralink_pmx_func nd_sd_grp[] = { |
4915 |
+ FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), |
4916 |
+ FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13) |
4917 |
+ }; |
4918 |
+ |
4919 |
+-static struct rt2880_pmx_group mt7620a_pinmux_data[] = { |
4920 |
++static struct ralink_pmx_group mt7620a_pinmux_data[] = { |
4921 |
+ GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C), |
4922 |
+ GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK, |
4923 |
+ MT7620_GPIO_MODE_UART0_SHIFT), |
4924 |
+@@ -112,262 +112,262 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = { |
4925 |
+ { 0 } |
4926 |
+ }; |
4927 |
+ |
4928 |
+-static struct rt2880_pmx_func pwm1_grp_mt7628[] = { |
4929 |
++static struct ralink_pmx_func pwm1_grp_mt76x8[] = { |
4930 |
+ FUNC("sdxc d6", 3, 19, 1), |
4931 |
+ FUNC("utif", 2, 19, 1), |
4932 |
+ FUNC("gpio", 1, 19, 1), |
4933 |
+ FUNC("pwm1", 0, 19, 1), |
4934 |
+ }; |
4935 |
+ |
4936 |
+-static struct rt2880_pmx_func pwm0_grp_mt7628[] = { |
4937 |
++static struct ralink_pmx_func pwm0_grp_mt76x8[] = { |
4938 |
+ FUNC("sdxc d7", 3, 18, 1), |
4939 |
+ FUNC("utif", 2, 18, 1), |
4940 |
+ FUNC("gpio", 1, 18, 1), |
4941 |
+ FUNC("pwm0", 0, 18, 1), |
4942 |
+ }; |
4943 |
+ |
4944 |
+-static struct rt2880_pmx_func uart2_grp_mt7628[] = { |
4945 |
++static struct ralink_pmx_func uart2_grp_mt76x8[] = { |
4946 |
+ FUNC("sdxc d5 d4", 3, 20, 2), |
4947 |
+ FUNC("pwm", 2, 20, 2), |
4948 |
+ FUNC("gpio", 1, 20, 2), |
4949 |
+ FUNC("uart2", 0, 20, 2), |
4950 |
+ }; |
4951 |
+ |
4952 |
+-static struct rt2880_pmx_func uart1_grp_mt7628[] = { |
4953 |
++static struct ralink_pmx_func uart1_grp_mt76x8[] = { |
4954 |
+ FUNC("sw_r", 3, 45, 2), |
4955 |
+ FUNC("pwm", 2, 45, 2), |
4956 |
+ FUNC("gpio", 1, 45, 2), |
4957 |
+ FUNC("uart1", 0, 45, 2), |
4958 |
+ }; |
4959 |
+ |
4960 |
+-static struct rt2880_pmx_func i2c_grp_mt7628[] = { |
4961 |
++static struct ralink_pmx_func i2c_grp_mt76x8[] = { |
4962 |
+ FUNC("-", 3, 4, 2), |
4963 |
+ FUNC("debug", 2, 4, 2), |
4964 |
+ FUNC("gpio", 1, 4, 2), |
4965 |
+ FUNC("i2c", 0, 4, 2), |
4966 |
+ }; |
4967 |
+ |
4968 |
+-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; |
4969 |
+-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; |
4970 |
+-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; |
4971 |
+-static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; |
4972 |
++static struct ralink_pmx_func refclk_grp_mt76x8[] = { FUNC("refclk", 0, 37, 1) }; |
4973 |
++static struct ralink_pmx_func perst_grp_mt76x8[] = { FUNC("perst", 0, 36, 1) }; |
4974 |
++static struct ralink_pmx_func wdt_grp_mt76x8[] = { FUNC("wdt", 0, 38, 1) }; |
4975 |
++static struct ralink_pmx_func spi_grp_mt76x8[] = { FUNC("spi", 0, 7, 4) }; |
4976 |
+ |
4977 |
+-static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { |
4978 |
++static struct ralink_pmx_func sd_mode_grp_mt76x8[] = { |
4979 |
+ FUNC("jtag", 3, 22, 8), |
4980 |
+ FUNC("utif", 2, 22, 8), |
4981 |
+ FUNC("gpio", 1, 22, 8), |
4982 |
+ FUNC("sdxc", 0, 22, 8), |
4983 |
+ }; |
4984 |
+ |
4985 |
+-static struct rt2880_pmx_func uart0_grp_mt7628[] = { |
4986 |
++static struct ralink_pmx_func uart0_grp_mt76x8[] = { |
4987 |
+ FUNC("-", 3, 12, 2), |
4988 |
+ FUNC("-", 2, 12, 2), |
4989 |
+ FUNC("gpio", 1, 12, 2), |
4990 |
+ FUNC("uart0", 0, 12, 2), |
4991 |
+ }; |
4992 |
+ |
4993 |
+-static struct rt2880_pmx_func i2s_grp_mt7628[] = { |
4994 |
++static struct ralink_pmx_func i2s_grp_mt76x8[] = { |
4995 |
+ FUNC("antenna", 3, 0, 4), |
4996 |
+ FUNC("pcm", 2, 0, 4), |
4997 |
+ FUNC("gpio", 1, 0, 4), |
4998 |
+ FUNC("i2s", 0, 0, 4), |
4999 |
+ }; |
5000 |
+ |
5001 |
+-static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { |
5002 |
++static struct ralink_pmx_func spi_cs1_grp_mt76x8[] = { |
5003 |
+ FUNC("-", 3, 6, 1), |
5004 |
+ FUNC("refclk", 2, 6, 1), |
5005 |
+ FUNC("gpio", 1, 6, 1), |
5006 |
+ FUNC("spi cs1", 0, 6, 1), |
5007 |
+ }; |
5008 |
+ |
5009 |
+-static struct rt2880_pmx_func spis_grp_mt7628[] = { |
5010 |
++static struct ralink_pmx_func spis_grp_mt76x8[] = { |
5011 |
+ FUNC("pwm_uart2", 3, 14, 4), |
5012 |
+ FUNC("utif", 2, 14, 4), |
5013 |
+ FUNC("gpio", 1, 14, 4), |
5014 |
+ FUNC("spis", 0, 14, 4), |
5015 |
+ }; |
5016 |
+ |
5017 |
+-static struct rt2880_pmx_func gpio_grp_mt7628[] = { |
5018 |
++static struct ralink_pmx_func gpio_grp_mt76x8[] = { |
5019 |
+ FUNC("pcie", 3, 11, 1), |
5020 |
+ FUNC("refclk", 2, 11, 1), |
5021 |
+ FUNC("gpio", 1, 11, 1), |
5022 |
+ FUNC("gpio", 0, 11, 1), |
5023 |
+ }; |
5024 |
+ |
5025 |
+-static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = { |
5026 |
++static struct ralink_pmx_func p4led_kn_grp_mt76x8[] = { |
5027 |
+ FUNC("jtag", 3, 30, 1), |
5028 |
+ FUNC("utif", 2, 30, 1), |
5029 |
+ FUNC("gpio", 1, 30, 1), |
5030 |
+ FUNC("p4led_kn", 0, 30, 1), |
5031 |
+ }; |
5032 |
+ |
5033 |
+-static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = { |
5034 |
++static struct ralink_pmx_func p3led_kn_grp_mt76x8[] = { |
5035 |
+ FUNC("jtag", 3, 31, 1), |
5036 |
+ FUNC("utif", 2, 31, 1), |
5037 |
+ FUNC("gpio", 1, 31, 1), |
5038 |
+ FUNC("p3led_kn", 0, 31, 1), |
5039 |
+ }; |
5040 |
+ |
5041 |
+-static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = { |
5042 |
++static struct ralink_pmx_func p2led_kn_grp_mt76x8[] = { |
5043 |
+ FUNC("jtag", 3, 32, 1), |
5044 |
+ FUNC("utif", 2, 32, 1), |
5045 |
+ FUNC("gpio", 1, 32, 1), |
5046 |
+ FUNC("p2led_kn", 0, 32, 1), |
5047 |
+ }; |
5048 |
+ |
5049 |
+-static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = { |
5050 |
++static struct ralink_pmx_func p1led_kn_grp_mt76x8[] = { |
5051 |
+ FUNC("jtag", 3, 33, 1), |
5052 |
+ FUNC("utif", 2, 33, 1), |
5053 |
+ FUNC("gpio", 1, 33, 1), |
5054 |
+ FUNC("p1led_kn", 0, 33, 1), |
5055 |
+ }; |
5056 |
+ |
5057 |
+-static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = { |
5058 |
++static struct ralink_pmx_func p0led_kn_grp_mt76x8[] = { |
5059 |
+ FUNC("jtag", 3, 34, 1), |
5060 |
+ FUNC("rsvd", 2, 34, 1), |
5061 |
+ FUNC("gpio", 1, 34, 1), |
5062 |
+ FUNC("p0led_kn", 0, 34, 1), |
5063 |
+ }; |
5064 |
+ |
5065 |
+-static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { |
5066 |
++static struct ralink_pmx_func wled_kn_grp_mt76x8[] = { |
5067 |
+ FUNC("rsvd", 3, 35, 1), |
5068 |
+ FUNC("rsvd", 2, 35, 1), |
5069 |
+ FUNC("gpio", 1, 35, 1), |
5070 |
+ FUNC("wled_kn", 0, 35, 1), |
5071 |
+ }; |
5072 |
+ |
5073 |
+-static struct rt2880_pmx_func p4led_an_grp_mt7628[] = { |
5074 |
++static struct ralink_pmx_func p4led_an_grp_mt76x8[] = { |
5075 |
+ FUNC("jtag", 3, 39, 1), |
5076 |
+ FUNC("utif", 2, 39, 1), |
5077 |
+ FUNC("gpio", 1, 39, 1), |
5078 |
+ FUNC("p4led_an", 0, 39, 1), |
5079 |
+ }; |
5080 |
+ |
5081 |
+-static struct rt2880_pmx_func p3led_an_grp_mt7628[] = { |
5082 |
++static struct ralink_pmx_func p3led_an_grp_mt76x8[] = { |
5083 |
+ FUNC("jtag", 3, 40, 1), |
5084 |
+ FUNC("utif", 2, 40, 1), |
5085 |
+ FUNC("gpio", 1, 40, 1), |
5086 |
+ FUNC("p3led_an", 0, 40, 1), |
5087 |
+ }; |
5088 |
+ |
5089 |
+-static struct rt2880_pmx_func p2led_an_grp_mt7628[] = { |
5090 |
++static struct ralink_pmx_func p2led_an_grp_mt76x8[] = { |
5091 |
+ FUNC("jtag", 3, 41, 1), |
5092 |
+ FUNC("utif", 2, 41, 1), |
5093 |
+ FUNC("gpio", 1, 41, 1), |
5094 |
+ FUNC("p2led_an", 0, 41, 1), |
5095 |
+ }; |
5096 |
+ |
5097 |
+-static struct rt2880_pmx_func p1led_an_grp_mt7628[] = { |
5098 |
++static struct ralink_pmx_func p1led_an_grp_mt76x8[] = { |
5099 |
+ FUNC("jtag", 3, 42, 1), |
5100 |
+ FUNC("utif", 2, 42, 1), |
5101 |
+ FUNC("gpio", 1, 42, 1), |
5102 |
+ FUNC("p1led_an", 0, 42, 1), |
5103 |
+ }; |
5104 |
+ |
5105 |
+-static struct rt2880_pmx_func p0led_an_grp_mt7628[] = { |
5106 |
++static struct ralink_pmx_func p0led_an_grp_mt76x8[] = { |
5107 |
+ FUNC("jtag", 3, 43, 1), |
5108 |
+ FUNC("rsvd", 2, 43, 1), |
5109 |
+ FUNC("gpio", 1, 43, 1), |
5110 |
+ FUNC("p0led_an", 0, 43, 1), |
5111 |
+ }; |
5112 |
+ |
5113 |
+-static struct rt2880_pmx_func wled_an_grp_mt7628[] = { |
5114 |
++static struct ralink_pmx_func wled_an_grp_mt76x8[] = { |
5115 |
+ FUNC("rsvd", 3, 44, 1), |
5116 |
+ FUNC("rsvd", 2, 44, 1), |
5117 |
+ FUNC("gpio", 1, 44, 1), |
5118 |
+ FUNC("wled_an", 0, 44, 1), |
5119 |
+ }; |
5120 |
+ |
5121 |
+-#define MT7628_GPIO_MODE_MASK 0x3 |
5122 |
+- |
5123 |
+-#define MT7628_GPIO_MODE_P4LED_KN 58 |
5124 |
+-#define MT7628_GPIO_MODE_P3LED_KN 56 |
5125 |
+-#define MT7628_GPIO_MODE_P2LED_KN 54 |
5126 |
+-#define MT7628_GPIO_MODE_P1LED_KN 52 |
5127 |
+-#define MT7628_GPIO_MODE_P0LED_KN 50 |
5128 |
+-#define MT7628_GPIO_MODE_WLED_KN 48 |
5129 |
+-#define MT7628_GPIO_MODE_P4LED_AN 42 |
5130 |
+-#define MT7628_GPIO_MODE_P3LED_AN 40 |
5131 |
+-#define MT7628_GPIO_MODE_P2LED_AN 38 |
5132 |
+-#define MT7628_GPIO_MODE_P1LED_AN 36 |
5133 |
+-#define MT7628_GPIO_MODE_P0LED_AN 34 |
5134 |
+-#define MT7628_GPIO_MODE_WLED_AN 32 |
5135 |
+-#define MT7628_GPIO_MODE_PWM1 30 |
5136 |
+-#define MT7628_GPIO_MODE_PWM0 28 |
5137 |
+-#define MT7628_GPIO_MODE_UART2 26 |
5138 |
+-#define MT7628_GPIO_MODE_UART1 24 |
5139 |
+-#define MT7628_GPIO_MODE_I2C 20 |
5140 |
+-#define MT7628_GPIO_MODE_REFCLK 18 |
5141 |
+-#define MT7628_GPIO_MODE_PERST 16 |
5142 |
+-#define MT7628_GPIO_MODE_WDT 14 |
5143 |
+-#define MT7628_GPIO_MODE_SPI 12 |
5144 |
+-#define MT7628_GPIO_MODE_SDMODE 10 |
5145 |
+-#define MT7628_GPIO_MODE_UART0 8 |
5146 |
+-#define MT7628_GPIO_MODE_I2S 6 |
5147 |
+-#define MT7628_GPIO_MODE_CS1 4 |
5148 |
+-#define MT7628_GPIO_MODE_SPIS 2 |
5149 |
+-#define MT7628_GPIO_MODE_GPIO 0 |
5150 |
+- |
5151 |
+-static struct rt2880_pmx_group mt7628an_pinmux_data[] = { |
5152 |
+- GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5153 |
+- 1, MT7628_GPIO_MODE_PWM1), |
5154 |
+- GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5155 |
+- 1, MT7628_GPIO_MODE_PWM0), |
5156 |
+- GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5157 |
+- 1, MT7628_GPIO_MODE_UART2), |
5158 |
+- GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5159 |
+- 1, MT7628_GPIO_MODE_UART1), |
5160 |
+- GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5161 |
+- 1, MT7628_GPIO_MODE_I2C), |
5162 |
+- GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK), |
5163 |
+- GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST), |
5164 |
+- GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT), |
5165 |
+- GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI), |
5166 |
+- GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5167 |
+- 1, MT7628_GPIO_MODE_SDMODE), |
5168 |
+- GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5169 |
+- 1, MT7628_GPIO_MODE_UART0), |
5170 |
+- GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5171 |
+- 1, MT7628_GPIO_MODE_I2S), |
5172 |
+- GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5173 |
+- 1, MT7628_GPIO_MODE_CS1), |
5174 |
+- GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5175 |
+- 1, MT7628_GPIO_MODE_SPIS), |
5176 |
+- GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5177 |
+- 1, MT7628_GPIO_MODE_GPIO), |
5178 |
+- GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5179 |
+- 1, MT7628_GPIO_MODE_WLED_AN), |
5180 |
+- GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5181 |
+- 1, MT7628_GPIO_MODE_P0LED_AN), |
5182 |
+- GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5183 |
+- 1, MT7628_GPIO_MODE_P1LED_AN), |
5184 |
+- GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5185 |
+- 1, MT7628_GPIO_MODE_P2LED_AN), |
5186 |
+- GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5187 |
+- 1, MT7628_GPIO_MODE_P3LED_AN), |
5188 |
+- GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5189 |
+- 1, MT7628_GPIO_MODE_P4LED_AN), |
5190 |
+- GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5191 |
+- 1, MT7628_GPIO_MODE_WLED_KN), |
5192 |
+- GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5193 |
+- 1, MT7628_GPIO_MODE_P0LED_KN), |
5194 |
+- GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5195 |
+- 1, MT7628_GPIO_MODE_P1LED_KN), |
5196 |
+- GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5197 |
+- 1, MT7628_GPIO_MODE_P2LED_KN), |
5198 |
+- GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5199 |
+- 1, MT7628_GPIO_MODE_P3LED_KN), |
5200 |
+- GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, |
5201 |
+- 1, MT7628_GPIO_MODE_P4LED_KN), |
5202 |
++#define MT76X8_GPIO_MODE_MASK 0x3 |
5203 |
++ |
5204 |
++#define MT76X8_GPIO_MODE_P4LED_KN 58 |
5205 |
++#define MT76X8_GPIO_MODE_P3LED_KN 56 |
5206 |
++#define MT76X8_GPIO_MODE_P2LED_KN 54 |
5207 |
++#define MT76X8_GPIO_MODE_P1LED_KN 52 |
5208 |
++#define MT76X8_GPIO_MODE_P0LED_KN 50 |
5209 |
++#define MT76X8_GPIO_MODE_WLED_KN 48 |
5210 |
++#define MT76X8_GPIO_MODE_P4LED_AN 42 |
5211 |
++#define MT76X8_GPIO_MODE_P3LED_AN 40 |
5212 |
++#define MT76X8_GPIO_MODE_P2LED_AN 38 |
5213 |
++#define MT76X8_GPIO_MODE_P1LED_AN 36 |
5214 |
++#define MT76X8_GPIO_MODE_P0LED_AN 34 |
5215 |
++#define MT76X8_GPIO_MODE_WLED_AN 32 |
5216 |
++#define MT76X8_GPIO_MODE_PWM1 30 |
5217 |
++#define MT76X8_GPIO_MODE_PWM0 28 |
5218 |
++#define MT76X8_GPIO_MODE_UART2 26 |
5219 |
++#define MT76X8_GPIO_MODE_UART1 24 |
5220 |
++#define MT76X8_GPIO_MODE_I2C 20 |
5221 |
++#define MT76X8_GPIO_MODE_REFCLK 18 |
5222 |
++#define MT76X8_GPIO_MODE_PERST 16 |
5223 |
++#define MT76X8_GPIO_MODE_WDT 14 |
5224 |
++#define MT76X8_GPIO_MODE_SPI 12 |
5225 |
++#define MT76X8_GPIO_MODE_SDMODE 10 |
5226 |
++#define MT76X8_GPIO_MODE_UART0 8 |
5227 |
++#define MT76X8_GPIO_MODE_I2S 6 |
5228 |
++#define MT76X8_GPIO_MODE_CS1 4 |
5229 |
++#define MT76X8_GPIO_MODE_SPIS 2 |
5230 |
++#define MT76X8_GPIO_MODE_GPIO 0 |
5231 |
++ |
5232 |
++static struct ralink_pmx_group mt76x8_pinmux_data[] = { |
5233 |
++ GRP_G("pwm1", pwm1_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5234 |
++ 1, MT76X8_GPIO_MODE_PWM1), |
5235 |
++ GRP_G("pwm0", pwm0_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5236 |
++ 1, MT76X8_GPIO_MODE_PWM0), |
5237 |
++ GRP_G("uart2", uart2_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5238 |
++ 1, MT76X8_GPIO_MODE_UART2), |
5239 |
++ GRP_G("uart1", uart1_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5240 |
++ 1, MT76X8_GPIO_MODE_UART1), |
5241 |
++ GRP_G("i2c", i2c_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5242 |
++ 1, MT76X8_GPIO_MODE_I2C), |
5243 |
++ GRP("refclk", refclk_grp_mt76x8, 1, MT76X8_GPIO_MODE_REFCLK), |
5244 |
++ GRP("perst", perst_grp_mt76x8, 1, MT76X8_GPIO_MODE_PERST), |
5245 |
++ GRP("wdt", wdt_grp_mt76x8, 1, MT76X8_GPIO_MODE_WDT), |
5246 |
++ GRP("spi", spi_grp_mt76x8, 1, MT76X8_GPIO_MODE_SPI), |
5247 |
++ GRP_G("sdmode", sd_mode_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5248 |
++ 1, MT76X8_GPIO_MODE_SDMODE), |
5249 |
++ GRP_G("uart0", uart0_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5250 |
++ 1, MT76X8_GPIO_MODE_UART0), |
5251 |
++ GRP_G("i2s", i2s_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5252 |
++ 1, MT76X8_GPIO_MODE_I2S), |
5253 |
++ GRP_G("spi cs1", spi_cs1_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5254 |
++ 1, MT76X8_GPIO_MODE_CS1), |
5255 |
++ GRP_G("spis", spis_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5256 |
++ 1, MT76X8_GPIO_MODE_SPIS), |
5257 |
++ GRP_G("gpio", gpio_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5258 |
++ 1, MT76X8_GPIO_MODE_GPIO), |
5259 |
++ GRP_G("wled_an", wled_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5260 |
++ 1, MT76X8_GPIO_MODE_WLED_AN), |
5261 |
++ GRP_G("p0led_an", p0led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5262 |
++ 1, MT76X8_GPIO_MODE_P0LED_AN), |
5263 |
++ GRP_G("p1led_an", p1led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5264 |
++ 1, MT76X8_GPIO_MODE_P1LED_AN), |
5265 |
++ GRP_G("p2led_an", p2led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5266 |
++ 1, MT76X8_GPIO_MODE_P2LED_AN), |
5267 |
++ GRP_G("p3led_an", p3led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5268 |
++ 1, MT76X8_GPIO_MODE_P3LED_AN), |
5269 |
++ GRP_G("p4led_an", p4led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5270 |
++ 1, MT76X8_GPIO_MODE_P4LED_AN), |
5271 |
++ GRP_G("wled_kn", wled_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5272 |
++ 1, MT76X8_GPIO_MODE_WLED_KN), |
5273 |
++ GRP_G("p0led_kn", p0led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5274 |
++ 1, MT76X8_GPIO_MODE_P0LED_KN), |
5275 |
++ GRP_G("p1led_kn", p1led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5276 |
++ 1, MT76X8_GPIO_MODE_P1LED_KN), |
5277 |
++ GRP_G("p2led_kn", p2led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5278 |
++ 1, MT76X8_GPIO_MODE_P2LED_KN), |
5279 |
++ GRP_G("p3led_kn", p3led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5280 |
++ 1, MT76X8_GPIO_MODE_P3LED_KN), |
5281 |
++ GRP_G("p4led_kn", p4led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, |
5282 |
++ 1, MT76X8_GPIO_MODE_P4LED_KN), |
5283 |
+ { 0 } |
5284 |
+ }; |
5285 |
+ |
5286 |
+ static int mt7620_pinmux_probe(struct platform_device *pdev) |
5287 |
+ { |
5288 |
+ if (is_mt76x8()) |
5289 |
+- return rt2880_pinmux_init(pdev, mt7628an_pinmux_data); |
5290 |
++ return ralink_pinmux_init(pdev, mt76x8_pinmux_data); |
5291 |
+ else |
5292 |
+- return rt2880_pinmux_init(pdev, mt7620a_pinmux_data); |
5293 |
++ return ralink_pinmux_init(pdev, mt7620a_pinmux_data); |
5294 |
+ } |
5295 |
+ |
5296 |
+ static const struct of_device_id mt7620_pinmux_match[] = { |
5297 |
+diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c
+index 7d96144c474e7..14b89cb43d4cb 100644
+--- a/drivers/pinctrl/ralink/pinctrl-mt7621.c
++++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c
+@@ -3,7 +3,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include "pinmux.h"
++#include "pinctrl-ralink.h"
+
+ #define MT7621_GPIO_MODE_UART1 1
+ #define MT7621_GPIO_MODE_I2C 2
+@@ -34,40 +34,40 @@
+ #define MT7621_GPIO_MODE_SDHCI_SHIFT 18
+ #define MT7621_GPIO_MODE_SDHCI_GPIO 1
+
+-static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
+-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
+-static struct rt2880_pmx_func uart3_grp[] = {
++static struct ralink_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
++static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
++static struct ralink_pmx_func uart3_grp[] = {
+ FUNC("uart3", 0, 5, 4),
+ FUNC("i2s", 2, 5, 4),
+ FUNC("spdif3", 3, 5, 4),
+ };
+-static struct rt2880_pmx_func uart2_grp[] = {
++static struct ralink_pmx_func uart2_grp[] = {
+ FUNC("uart2", 0, 9, 4),
+ FUNC("pcm", 2, 9, 4),
+ FUNC("spdif2", 3, 9, 4),
+ };
+-static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
+-static struct rt2880_pmx_func wdt_grp[] = {
++static struct ralink_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
++static struct ralink_pmx_func wdt_grp[] = {
+ FUNC("wdt rst", 0, 18, 1),
+ FUNC("wdt refclk", 2, 18, 1),
+ };
+-static struct rt2880_pmx_func pcie_rst_grp[] = {
++static struct ralink_pmx_func pcie_rst_grp[] = {
+ FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
+ FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
+ };
+-static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
+-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
+-static struct rt2880_pmx_func spi_grp[] = {
++static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
++static struct ralink_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
++static struct ralink_pmx_func spi_grp[] = {
+ FUNC("spi", 0, 34, 7),
+ FUNC("nand1", 2, 34, 7),
+ };
+-static struct rt2880_pmx_func sdhci_grp[] = {
++static struct ralink_pmx_func sdhci_grp[] = {
+ FUNC("sdhci", 0, 41, 8),
+ FUNC("nand2", 2, 41, 8),
+ };
+-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
++static struct ralink_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
+
+-static struct rt2880_pmx_group mt7621_pinmux_data[] = {
++static struct ralink_pmx_group mt7621_pinmux_data[] = {
+ GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
+ GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
+ GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
+@@ -92,7 +92,7 @@ static struct rt2880_pmx_group mt7621_pinmux_data[] = {
+
+ static int mt7621_pinmux_probe(struct platform_device *pdev)
+ {
+- return rt2880_pinmux_init(pdev, mt7621_pinmux_data);
++ return ralink_pinmux_init(pdev, mt7621_pinmux_data);
+ }
+
+ static const struct of_device_id mt7621_pinmux_match[] = {
+diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c
+new file mode 100644
+index 0000000000000..3a8268a43d74a
+--- /dev/null
++++ b/drivers/pinctrl/ralink/pinctrl-ralink.c
+@@ -0,0 +1,351 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2013 John Crispin <blogic@×××××××.org>
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/of.h>
++#include <linux/pinctrl/pinctrl.h>
++#include <linux/pinctrl/pinconf.h>
++#include <linux/pinctrl/pinconf-generic.h>
++#include <linux/pinctrl/pinmux.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/pinctrl/machine.h>
++
++#include <asm/mach-ralink/ralink_regs.h>
++#include <asm/mach-ralink/mt7620.h>
++
++#include "pinctrl-ralink.h"
++#include "../core.h"
++#include "../pinctrl-utils.h"
++
++#define SYSC_REG_GPIO_MODE 0x60
++#define SYSC_REG_GPIO_MODE2 0x64
++
++struct ralink_priv {
++ struct device *dev;
++
++ struct pinctrl_pin_desc *pads;
++ struct pinctrl_desc *desc;
++
++ struct ralink_pmx_func **func;
++ int func_count;
++
++ struct ralink_pmx_group *groups;
++ const char **group_names;
++ int group_count;
++
++ u8 *gpio;
++ int max_pins;
++};
++
++static int ralink_get_group_count(struct pinctrl_dev *pctrldev)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return p->group_count;
++}
++
++static const char *ralink_get_group_name(struct pinctrl_dev *pctrldev,
++ unsigned int group)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return (group >= p->group_count) ? NULL : p->group_names[group];
++}
++
++static int ralink_get_group_pins(struct pinctrl_dev *pctrldev,
++ unsigned int group,
++ const unsigned int **pins,
++ unsigned int *num_pins)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (group >= p->group_count)
++ return -EINVAL;
++
++ *pins = p->groups[group].func[0].pins;
++ *num_pins = p->groups[group].func[0].pin_count;
++
++ return 0;
++}
++
++static const struct pinctrl_ops ralink_pctrl_ops = {
++ .get_groups_count = ralink_get_group_count,
++ .get_group_name = ralink_get_group_name,
++ .get_group_pins = ralink_get_group_pins,
++ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
++ .dt_free_map = pinconf_generic_dt_free_map,
++};
++
++static int ralink_pmx_func_count(struct pinctrl_dev *pctrldev)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return p->func_count;
++}
++
++static const char *ralink_pmx_func_name(struct pinctrl_dev *pctrldev,
++ unsigned int func)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return p->func[func]->name;
++}
++
++static int ralink_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
++ unsigned int func,
++ const char * const **groups,
++ unsigned int * const num_groups)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (p->func[func]->group_count == 1)
++ *groups = &p->group_names[p->func[func]->groups[0]];
++ else
++ *groups = p->group_names;
++
++ *num_groups = p->func[func]->group_count;
++
++ return 0;
++}
++
++static int ralink_pmx_group_enable(struct pinctrl_dev *pctrldev,
++ unsigned int func, unsigned int group)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++ u32 mode = 0;
++ u32 reg = SYSC_REG_GPIO_MODE;
++ int i;
++ int shift;
++
++ /* dont allow double use */
++ if (p->groups[group].enabled) {
++ dev_err(p->dev, "%s is already enabled\n",
++ p->groups[group].name);
++ return 0;
++ }
++
++ p->groups[group].enabled = 1;
++ p->func[func]->enabled = 1;
++
++ shift = p->groups[group].shift;
++ if (shift >= 32) {
++ shift -= 32;
++ reg = SYSC_REG_GPIO_MODE2;
++ }
++ mode = rt_sysc_r32(reg);
++ mode &= ~(p->groups[group].mask << shift);
++
++ /* mark the pins as gpio */
++ for (i = 0; i < p->groups[group].func[0].pin_count; i++)
++ p->gpio[p->groups[group].func[0].pins[i]] = 1;
++
++ /* function 0 is gpio and needs special handling */
++ if (func == 0) {
++ mode |= p->groups[group].gpio << shift;
++ } else {
++ for (i = 0; i < p->func[func]->pin_count; i++)
++ p->gpio[p->func[func]->pins[i]] = 0;
++ mode |= p->func[func]->value << shift;
++ }
++ rt_sysc_w32(mode, reg);
++
++ return 0;
++}
++
++static int ralink_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
++ struct pinctrl_gpio_range *range,
++ unsigned int pin)
++{
++ struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (!p->gpio[pin]) {
++ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static const struct pinmux_ops ralink_pmx_group_ops = {
++ .get_functions_count = ralink_pmx_func_count,
++ .get_function_name = ralink_pmx_func_name,
++ .get_function_groups = ralink_pmx_group_get_groups,
++ .set_mux = ralink_pmx_group_enable,
++ .gpio_request_enable = ralink_pmx_group_gpio_request_enable,
++};
++
++static struct pinctrl_desc ralink_pctrl_desc = {
++ .owner = THIS_MODULE,
++ .name = "ralink-pinmux",
++ .pctlops = &ralink_pctrl_ops,
++ .pmxops = &ralink_pmx_group_ops,
++};
++
++static struct ralink_pmx_func gpio_func = {
++ .name = "gpio",
++};
++
++static int ralink_pinmux_index(struct ralink_priv *p)
++{
++ struct ralink_pmx_group *mux = p->groups;
++ int i, j, c = 0;
++
++ /* count the mux functions */
++ while (mux->name) {
++ p->group_count++;
++ mux++;
++ }
++
++ /* allocate the group names array needed by the gpio function */
++ p->group_names = devm_kcalloc(p->dev, p->group_count,
++ sizeof(char *), GFP_KERNEL);
++ if (!p->group_names)
++ return -ENOMEM;
++
++ for (i = 0; i < p->group_count; i++) {
++ p->group_names[i] = p->groups[i].name;
++ p->func_count += p->groups[i].func_count;
++ }
++
++ /* we have a dummy function[0] for gpio */
++ p->func_count++;
++
++ /* allocate our function and group mapping index buffers */
++ p->func = devm_kcalloc(p->dev, p->func_count,
++ sizeof(*p->func), GFP_KERNEL);
++ gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
++ GFP_KERNEL);
++ if (!p->func || !gpio_func.groups)
++ return -ENOMEM;
++
++ /* add a backpointer to the function so it knows its group */
++ gpio_func.group_count = p->group_count;
++ for (i = 0; i < gpio_func.group_count; i++)
++ gpio_func.groups[i] = i;
++
++ p->func[c] = &gpio_func;
++ c++;
++
++ /* add remaining functions */
++ for (i = 0; i < p->group_count; i++) {
++ for (j = 0; j < p->groups[i].func_count; j++) {
++ p->func[c] = &p->groups[i].func[j];
++ p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int),
++ GFP_KERNEL);
++ if (!p->func[c]->groups)
++ return -ENOMEM;
++ p->func[c]->groups[0] = i;
++ p->func[c]->group_count = 1;
++ c++;
++ }
++ }
++ return 0;
++}
++
++static int ralink_pinmux_pins(struct ralink_priv *p)
++{
++ int i, j;
++
++ /*
++ * loop over the functions and initialize the pins array.
++ * also work out the highest pin used.
++ */
++ for (i = 0; i < p->func_count; i++) {
++ int pin;
++
++ if (!p->func[i]->pin_count)
++ continue;
++
++ p->func[i]->pins = devm_kcalloc(p->dev,
++ p->func[i]->pin_count,
++ sizeof(int),
++ GFP_KERNEL);
++ if (!p->func[i]->pins)
++ return -ENOMEM;
++ for (j = 0; j < p->func[i]->pin_count; j++)
++ p->func[i]->pins[j] = p->func[i]->pin_first + j;
++
++ pin = p->func[i]->pin_first + p->func[i]->pin_count;
++ if (pin > p->max_pins)
++ p->max_pins = pin;
++ }
++
++ /* the buffer that tells us which pins are gpio */
++ p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
++ /* the pads needed to tell pinctrl about our pins */
++ p->pads = devm_kcalloc(p->dev, p->max_pins,
++ sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
++ if (!p->pads || !p->gpio)
++ return -ENOMEM;
++
++ memset(p->gpio, 1, sizeof(u8) * p->max_pins);
++ for (i = 0; i < p->func_count; i++) {
++ if (!p->func[i]->pin_count)
++ continue;
++
++ for (j = 0; j < p->func[i]->pin_count; j++)
++ p->gpio[p->func[i]->pins[j]] = 0;
++ }
++
++ /* pin 0 is always a gpio */
++ p->gpio[0] = 1;
++
++ /* set the pads */
++ for (i = 0; i < p->max_pins; i++) {
++ /* strlen("ioXY") + 1 = 5 */
++ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
++
++ if (!name)
++ return -ENOMEM;
++ snprintf(name, 5, "io%d", i);
++ p->pads[i].number = i;
++ p->pads[i].name = name;
++ }
++ p->desc->pins = p->pads;
++ p->desc->npins = p->max_pins;
++
++ return 0;
++}
++
++int ralink_pinmux_init(struct platform_device *pdev,
++ struct ralink_pmx_group *data)
++{
++ struct ralink_priv *p;
++ struct pinctrl_dev *dev;
++ int err;
++
++ if (!data)
++ return -ENOTSUPP;
++
++ /* setup the private data */
++ p = devm_kzalloc(&pdev->dev, sizeof(struct ralink_priv), GFP_KERNEL);
++ if (!p)
++ return -ENOMEM;
++
++ p->dev = &pdev->dev;
++ p->desc = &ralink_pctrl_desc;
++ p->groups = data;
++ platform_set_drvdata(pdev, p);
++
++ /* init the device */
++ err = ralink_pinmux_index(p);
++ if (err) {
++ dev_err(&pdev->dev, "failed to load index\n");
++ return err;
++ }
++
++ err = ralink_pinmux_pins(p);
++ if (err) {
++ dev_err(&pdev->dev, "failed to load pins\n");
++ return err;
++ }
++ dev = pinctrl_register(p->desc, &pdev->dev, p);
++
++ return PTR_ERR_OR_ZERO(dev);
++}
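
The heart of the new core file is ralink_pmx_group_enable() above: selecting a function is a read-modify-write of one field in SYSC_REG_GPIO_MODE (or in SYSC_REG_GPIO_MODE2 once the shift passes 32). A minimal standalone model of that field update, with stand-in accessors instead of the real rt_sysc_r32()/rt_sysc_w32():

	#include <stdint.h>

	static uint32_t gpio_mode;	/* models the SYSC GPIO_MODE register */

	static uint32_t reg_read(void)        { return gpio_mode; }
	static void     reg_write(uint32_t v) { gpio_mode = v; }

	/* Clear mask << shift, then install value << shift, as the
	 * driver does for the chosen group/function pair. */
	static void set_mux_field(uint32_t mask, uint32_t shift, uint32_t value)
	{
		uint32_t mode = reg_read();

		mode &= ~(mask << shift);
		mode |= value << shift;
		reg_write(mode);
	}

For groups whose shift is 32 or more, the driver subtracts 32 and targets the second register before running this sequence.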
+diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.h b/drivers/pinctrl/ralink/pinctrl-ralink.h
+new file mode 100644
+index 0000000000000..1349694095852
+--- /dev/null
++++ b/drivers/pinctrl/ralink/pinctrl-ralink.h
+@@ -0,0 +1,53 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2012 John Crispin <john@×××××××.org>
++ */
++
++#ifndef _PINCTRL_RALINK_H__
++#define _PINCTRL_RALINK_H__
++
++#define FUNC(name, value, pin_first, pin_count) \
++ { name, value, pin_first, pin_count }
++
++#define GRP(_name, _func, _mask, _shift) \
++ { .name = _name, .mask = _mask, .shift = _shift, \
++ .func = _func, .gpio = _mask, \
++ .func_count = ARRAY_SIZE(_func) }
++
++#define GRP_G(_name, _func, _mask, _gpio, _shift) \
++ { .name = _name, .mask = _mask, .shift = _shift, \
++ .func = _func, .gpio = _gpio, \
++ .func_count = ARRAY_SIZE(_func) }
++
++struct ralink_pmx_group;
++
++struct ralink_pmx_func {
++ const char *name;
++ const char value;
++
++ int pin_first;
++ int pin_count;
++ int *pins;
++
++ int *groups;
++ int group_count;
++
++ int enabled;
++};
++
++struct ralink_pmx_group {
++ const char *name;
++ int enabled;
++
++ const u32 shift;
++ const char mask;
++ const char gpio;
++
++ struct ralink_pmx_func *func;
++ int func_count;
++};
++
++int ralink_pinmux_init(struct platform_device *pdev,
++ struct ralink_pmx_group *data);
++
++#endif
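
This header is the whole contract between the core and the per-SoC drivers: FUNC(name, value, pin_first, pin_count) describes one mux setting over a contiguous pin range, GRP()/GRP_G() wrap a function array together with its register mask and shift, and a { 0 } sentinel terminates the group table (ralink_pinmux_index() stops at the first NULL name). A hypothetical SoC driver, names invented for illustration, but shaped like the rt288x/mt7621 drivers in this patch:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/of.h>
	#include "pinctrl-ralink.h"

	#define FOO_GPIO_MODE_I2C	1	/* hypothetical shift */

	static struct ralink_pmx_func foo_i2c_func[] = { FUNC("i2c", 0, 1, 2) };

	static struct ralink_pmx_group foo_pinmux_data[] = {
		GRP("i2c", foo_i2c_func, 1, FOO_GPIO_MODE_I2C),
		{ 0 }	/* sentinel ends the table */
	};

	static int foo_pinmux_probe(struct platform_device *pdev)
	{
		return ralink_pinmux_init(pdev, foo_pinmux_data);
	}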
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+deleted file mode 100644
+index 96fc06d1b8b92..0000000000000
+--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
++++ /dev/null
+@@ -1,349 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Copyright (C) 2013 John Crispin <blogic@×××××××.org>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/device.h>
+-#include <linux/io.h>
+-#include <linux/platform_device.h>
+-#include <linux/slab.h>
+-#include <linux/of.h>
+-#include <linux/pinctrl/pinctrl.h>
+-#include <linux/pinctrl/pinconf.h>
+-#include <linux/pinctrl/pinconf-generic.h>
+-#include <linux/pinctrl/pinmux.h>
+-#include <linux/pinctrl/consumer.h>
+-#include <linux/pinctrl/machine.h>
+-
+-#include <asm/mach-ralink/ralink_regs.h>
+-#include <asm/mach-ralink/mt7620.h>
+-
+-#include "pinmux.h"
+-#include "../core.h"
+-#include "../pinctrl-utils.h"
+-
+-#define SYSC_REG_GPIO_MODE 0x60
+-#define SYSC_REG_GPIO_MODE2 0x64
+-
+-struct rt2880_priv {
+- struct device *dev;
+-
+- struct pinctrl_pin_desc *pads;
+- struct pinctrl_desc *desc;
+-
+- struct rt2880_pmx_func **func;
+- int func_count;
+-
+- struct rt2880_pmx_group *groups;
+- const char **group_names;
+- int group_count;
+-
+- u8 *gpio;
+- int max_pins;
+-};
+-
+-static int rt2880_get_group_count(struct pinctrl_dev *pctrldev)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- return p->group_count;
+-}
+-
+-static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev,
+- unsigned int group)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- return (group >= p->group_count) ? NULL : p->group_names[group];
+-}
+-
+-static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
+- unsigned int group,
+- const unsigned int **pins,
+- unsigned int *num_pins)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- if (group >= p->group_count)
+- return -EINVAL;
+-
+- *pins = p->groups[group].func[0].pins;
+- *num_pins = p->groups[group].func[0].pin_count;
+-
+- return 0;
+-}
+-
+-static const struct pinctrl_ops rt2880_pctrl_ops = {
+- .get_groups_count = rt2880_get_group_count,
+- .get_group_name = rt2880_get_group_name,
+- .get_group_pins = rt2880_get_group_pins,
+- .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+- .dt_free_map = pinconf_generic_dt_free_map,
+-};
+-
+-static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- return p->func_count;
+-}
+-
+-static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev,
+- unsigned int func)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- return p->func[func]->name;
+-}
+-
+-static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
+- unsigned int func,
+- const char * const **groups,
+- unsigned int * const num_groups)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- if (p->func[func]->group_count == 1)
+- *groups = &p->group_names[p->func[func]->groups[0]];
+- else
+- *groups = p->group_names;
+-
+- *num_groups = p->func[func]->group_count;
+-
+- return 0;
+-}
+-
+-static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
+- unsigned int func, unsigned int group)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+- u32 mode = 0;
+- u32 reg = SYSC_REG_GPIO_MODE;
+- int i;
+- int shift;
+-
+- /* dont allow double use */
+- if (p->groups[group].enabled) {
+- dev_err(p->dev, "%s is already enabled\n",
+- p->groups[group].name);
+- return 0;
+- }
+-
+- p->groups[group].enabled = 1;
+- p->func[func]->enabled = 1;
+-
+- shift = p->groups[group].shift;
+- if (shift >= 32) {
+- shift -= 32;
+- reg = SYSC_REG_GPIO_MODE2;
+- }
+- mode = rt_sysc_r32(reg);
+- mode &= ~(p->groups[group].mask << shift);
+-
+- /* mark the pins as gpio */
+- for (i = 0; i < p->groups[group].func[0].pin_count; i++)
+- p->gpio[p->groups[group].func[0].pins[i]] = 1;
+-
+- /* function 0 is gpio and needs special handling */
+- if (func == 0) {
+- mode |= p->groups[group].gpio << shift;
+- } else {
+- for (i = 0; i < p->func[func]->pin_count; i++)
+- p->gpio[p->func[func]->pins[i]] = 0;
+- mode |= p->func[func]->value << shift;
+- }
+- rt_sysc_w32(mode, reg);
+-
+- return 0;
+-}
+-
+-static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
+- struct pinctrl_gpio_range *range,
+- unsigned int pin)
+-{
+- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+-
+- if (!p->gpio[pin]) {
+- dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
+- return -EINVAL;
+- }
+-
+- return 0;
+-}
+-
+-static const struct pinmux_ops rt2880_pmx_group_ops = {
+- .get_functions_count = rt2880_pmx_func_count,
+- .get_function_name = rt2880_pmx_func_name,
+- .get_function_groups = rt2880_pmx_group_get_groups,
+- .set_mux = rt2880_pmx_group_enable,
+- .gpio_request_enable = rt2880_pmx_group_gpio_request_enable,
+-};
+-
+-static struct pinctrl_desc rt2880_pctrl_desc = {
+- .owner = THIS_MODULE,
+- .name = "rt2880-pinmux",
+- .pctlops = &rt2880_pctrl_ops,
+- .pmxops = &rt2880_pmx_group_ops,
+-};
+-
+-static struct rt2880_pmx_func gpio_func = {
+- .name = "gpio",
+-};
+-
+-static int rt2880_pinmux_index(struct rt2880_priv *p)
+-{
+- struct rt2880_pmx_group *mux = p->groups;
+- int i, j, c = 0;
+-
+- /* count the mux functions */
+- while (mux->name) {
+- p->group_count++;
+- mux++;
+- }
+-
+- /* allocate the group names array needed by the gpio function */
+- p->group_names = devm_kcalloc(p->dev, p->group_count,
+- sizeof(char *), GFP_KERNEL);
+- if (!p->group_names)
+- return -ENOMEM;
+-
+- for (i = 0; i < p->group_count; i++) {
+- p->group_names[i] = p->groups[i].name;
+- p->func_count += p->groups[i].func_count;
+- }
+-
+- /* we have a dummy function[0] for gpio */
+- p->func_count++;
+-
+- /* allocate our function and group mapping index buffers */
+- p->func = devm_kcalloc(p->dev, p->func_count,
+- sizeof(*p->func), GFP_KERNEL);
+- gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
+- GFP_KERNEL);
+- if (!p->func || !gpio_func.groups)
+- return -ENOMEM;
+-
+- /* add a backpointer to the function so it knows its group */
+- gpio_func.group_count = p->group_count;
+- for (i = 0; i < gpio_func.group_count; i++)
+- gpio_func.groups[i] = i;
+-
+- p->func[c] = &gpio_func;
+- c++;
+-
+- /* add remaining functions */
+- for (i = 0; i < p->group_count; i++) {
+- for (j = 0; j < p->groups[i].func_count; j++) {
+- p->func[c] = &p->groups[i].func[j];
+- p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int),
+- GFP_KERNEL);
+- if (!p->func[c]->groups)
+- return -ENOMEM;
+- p->func[c]->groups[0] = i;
+- p->func[c]->group_count = 1;
+- c++;
+- }
+- }
+- return 0;
+-}
+-
+-static int rt2880_pinmux_pins(struct rt2880_priv *p)
+-{
+- int i, j;
+-
+- /*
+- * loop over the functions and initialize the pins array.
+- * also work out the highest pin used.
+- */
+- for (i = 0; i < p->func_count; i++) {
+- int pin;
+-
+- if (!p->func[i]->pin_count)
+- continue;
+-
+- p->func[i]->pins = devm_kcalloc(p->dev,
+- p->func[i]->pin_count,
+- sizeof(int),
+- GFP_KERNEL);
+- for (j = 0; j < p->func[i]->pin_count; j++)
+- p->func[i]->pins[j] = p->func[i]->pin_first + j;
+-
+- pin = p->func[i]->pin_first + p->func[i]->pin_count;
+- if (pin > p->max_pins)
+- p->max_pins = pin;
+- }
+-
+- /* the buffer that tells us which pins are gpio */
+- p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
+- /* the pads needed to tell pinctrl about our pins */
+- p->pads = devm_kcalloc(p->dev, p->max_pins,
+- sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+- if (!p->pads || !p->gpio)
+- return -ENOMEM;
+-
+- memset(p->gpio, 1, sizeof(u8) * p->max_pins);
+- for (i = 0; i < p->func_count; i++) {
+- if (!p->func[i]->pin_count)
+- continue;
+-
+- for (j = 0; j < p->func[i]->pin_count; j++)
+- p->gpio[p->func[i]->pins[j]] = 0;
+- }
+-
+- /* pin 0 is always a gpio */
+- p->gpio[0] = 1;
+-
+- /* set the pads */
+- for (i = 0; i < p->max_pins; i++) {
+- /* strlen("ioXY") + 1 = 5 */
+- char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
+-
+- if (!name)
+- return -ENOMEM;
+- snprintf(name, 5, "io%d", i);
+- p->pads[i].number = i;
+- p->pads[i].name = name;
+- }
+- p->desc->pins = p->pads;
+- p->desc->npins = p->max_pins;
+-
+- return 0;
+-}
+-
+-int rt2880_pinmux_init(struct platform_device *pdev,
+- struct rt2880_pmx_group *data)
+-{
+- struct rt2880_priv *p;
+- struct pinctrl_dev *dev;
+- int err;
+-
+- if (!data)
+- return -ENOTSUPP;
+-
+- /* setup the private data */
+- p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL);
+- if (!p)
+- return -ENOMEM;
+-
+- p->dev = &pdev->dev;
+- p->desc = &rt2880_pctrl_desc;
+- p->groups = data;
+- platform_set_drvdata(pdev, p);
+-
+- /* init the device */
+- err = rt2880_pinmux_index(p);
+- if (err) {
+- dev_err(&pdev->dev, "failed to load index\n");
+- return err;
+- }
+-
+- err = rt2880_pinmux_pins(p);
+- if (err) {
+- dev_err(&pdev->dev, "failed to load pins\n");
+- return err;
+- }
+- dev = pinctrl_register(p->desc, &pdev->dev, p);
+-
+- return PTR_ERR_OR_ZERO(dev);
+-}
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt288x.c b/drivers/pinctrl/ralink/pinctrl-rt288x.c
+index 0744aebbace52..40c45140ff8a3 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt288x.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt288x.c
+@@ -4,7 +4,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include "pinmux.h"
++#include "pinctrl-ralink.h"
+
+ #define RT2880_GPIO_MODE_I2C BIT(0)
+ #define RT2880_GPIO_MODE_UART0 BIT(1)
+@@ -15,15 +15,15 @@
+ #define RT2880_GPIO_MODE_SDRAM BIT(6)
+ #define RT2880_GPIO_MODE_PCI BIT(7)
+
+-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
+-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+-static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
++static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
++static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
++static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
++static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
++static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
++static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
++static struct ralink_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
+
+-static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
++static struct ralink_pmx_group rt2880_pinmux_data_act[] = {
+ GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
+@@ -36,7 +36,7 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
+
+ static int rt288x_pinmux_probe(struct platform_device *pdev)
+ {
+- return rt2880_pinmux_init(pdev, rt2880_pinmux_data_act);
++ return ralink_pinmux_init(pdev, rt2880_pinmux_data_act);
+ }
+
+ static const struct of_device_id rt288x_pinmux_match[] = {
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c
+index 5d8fa156c0037..25527ca1ccaae 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt305x.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c
+@@ -5,7 +5,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include "pinmux.h"
++#include "pinctrl-ralink.h"
+
+ #define RT305X_GPIO_MODE_UART0_SHIFT 2
+ #define RT305X_GPIO_MODE_UART0_MASK 0x7
+@@ -31,9 +31,9 @@
+ #define RT3352_GPIO_MODE_LNA 18
+ #define RT3352_GPIO_MODE_PA 20
+
+-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+-static struct rt2880_pmx_func uartf_func[] = {
++static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
++static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
++static struct ralink_pmx_func uartf_func[] = {
+ FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
+@@ -42,28 +42,28 @@ static struct rt2880_pmx_func uartf_func[] = {
+ FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
+ };
+-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+-static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
+-static struct rt2880_pmx_func rt5350_cs1_func[] = {
++static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
++static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
++static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
++static struct ralink_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
++static struct ralink_pmx_func rt5350_cs1_func[] = {
+ FUNC("spi_cs1", 0, 27, 1),
+ FUNC("wdg_cs1", 1, 27, 1),
+ };
+-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+-static struct rt2880_pmx_func rt3352_rgmii_func[] = {
++static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
++static struct ralink_pmx_func rt3352_rgmii_func[] = {
+ FUNC("rgmii", 0, 24, 12)
+ };
+-static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
+-static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
+-static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
+-static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
+-static struct rt2880_pmx_func rt3352_cs1_func[] = {
++static struct ralink_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
++static struct ralink_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
++static struct ralink_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
++static struct ralink_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
++static struct ralink_pmx_func rt3352_cs1_func[] = {
+ FUNC("spi_cs1", 0, 45, 1),
+ FUNC("wdg_cs1", 1, 45, 1),
+ };
+
+-static struct rt2880_pmx_group rt3050_pinmux_data[] = {
++static struct ralink_pmx_group rt3050_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+@@ -76,7 +76,7 @@ static struct rt2880_pmx_group rt3050_pinmux_data[] = {
+ { 0 }
+ };
+
+-static struct rt2880_pmx_group rt3352_pinmux_data[] = {
++static struct ralink_pmx_group rt3352_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+@@ -92,7 +92,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] = {
+ { 0 }
+ };
+
+-static struct rt2880_pmx_group rt5350_pinmux_data[] = {
++static struct ralink_pmx_group rt5350_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+@@ -107,11 +107,11 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = {
+ static int rt305x_pinmux_probe(struct platform_device *pdev)
+ {
+ if (soc_is_rt5350())
+- return rt2880_pinmux_init(pdev, rt5350_pinmux_data);
++ return ralink_pinmux_init(pdev, rt5350_pinmux_data);
+ else if (soc_is_rt305x() || soc_is_rt3350())
+- return rt2880_pinmux_init(pdev, rt3050_pinmux_data);
++ return ralink_pinmux_init(pdev, rt3050_pinmux_data);
+ else if (soc_is_rt3352())
+- return rt2880_pinmux_init(pdev, rt3352_pinmux_data);
++ return ralink_pinmux_init(pdev, rt3352_pinmux_data);
+ else
+ return -EINVAL;
+ }
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c
+index 3e0e1b4caa647..0b8674dbe1880 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt3883.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c
+@@ -3,7 +3,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include "pinmux.h"
++#include "pinctrl-ralink.h"
+
+ #define RT3883_GPIO_MODE_UART0_SHIFT 2
+ #define RT3883_GPIO_MODE_UART0_MASK 0x7
+@@ -39,9 +39,9 @@
+ #define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
+ #define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+-static struct rt2880_pmx_func uartf_func[] = {
++static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
++static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
++static struct ralink_pmx_func uartf_func[] = {
+ FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
+@@ -50,21 +50,21 @@ static struct rt2880_pmx_func uartf_func[] = {
+ FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
+ };
+-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+-static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+-static struct rt2880_pmx_func pci_func[] = {
++static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
++static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
++static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
++static struct ralink_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
++static struct ralink_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
++static struct ralink_pmx_func pci_func[] = {
+ FUNC("pci-dev", 0, 40, 32),
+ FUNC("pci-host2", 1, 40, 32),
+ FUNC("pci-host1", 2, 40, 32),
+ FUNC("pci-fnc", 3, 40, 32)
+ };
+-static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
++static struct ralink_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
++static struct ralink_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+
+-static struct rt2880_pmx_group rt3883_pinmux_data[] = {
++static struct ralink_pmx_group rt3883_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
+@@ -83,7 +83,7 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+
+ static int rt3883_pinmux_probe(struct platform_device *pdev)
+ {
+- return rt2880_pinmux_init(pdev, rt3883_pinmux_data);
++ return ralink_pinmux_init(pdev, rt3883_pinmux_data);
+ }
+
+ static const struct of_device_id rt3883_pinmux_match[] = {
+diff --git a/drivers/pinctrl/ralink/pinmux.h b/drivers/pinctrl/ralink/pinmux.h
+deleted file mode 100644
+index 0046abe3bcc79..0000000000000
+--- a/drivers/pinctrl/ralink/pinmux.h
++++ /dev/null
+@@ -1,53 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * Copyright (C) 2012 John Crispin <john@×××××××.org>
+- */
+-
+-#ifndef _RT288X_PINMUX_H__
+-#define _RT288X_PINMUX_H__
+-
+-#define FUNC(name, value, pin_first, pin_count) \
+- { name, value, pin_first, pin_count }
+-
+-#define GRP(_name, _func, _mask, _shift) \
+- { .name = _name, .mask = _mask, .shift = _shift, \
+- .func = _func, .gpio = _mask, \
+- .func_count = ARRAY_SIZE(_func) }
+-
+-#define GRP_G(_name, _func, _mask, _gpio, _shift) \
+- { .name = _name, .mask = _mask, .shift = _shift, \
+- .func = _func, .gpio = _gpio, \
+- .func_count = ARRAY_SIZE(_func) }
+-
+-struct rt2880_pmx_group;
+-
+-struct rt2880_pmx_func {
+- const char *name;
+- const char value;
+-
+- int pin_first;
+- int pin_count;
+- int *pins;
+-
+- int *groups;
+- int group_count;
+-
+- int enabled;
+-};
+-
+-struct rt2880_pmx_group {
+- const char *name;
+- int enabled;
+-
+- const u32 shift;
+- const char mask;
+- const char gpio;
+-
+- struct rt2880_pmx_func *func;
+- int func_count;
+-};
+-
+-int rt2880_pinmux_init(struct platform_device *pdev,
+- struct rt2880_pmx_group *data);
+-
+-#endif
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index f7c9459f66283..edd0d0af5c147 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1299,15 +1299,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
+ bank->bank_ioport_nr = bank_ioport_nr;
+ spin_lock_init(&bank->lock);
+
+- /* create irq hierarchical domain */
+- bank->fwnode = of_node_to_fwnode(np);
++ if (pctl->domain) {
++ /* create irq hierarchical domain */
++ bank->fwnode = of_node_to_fwnode(np);
+
+- bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
+- STM32_GPIO_IRQ_LINE, bank->fwnode,
+- &stm32_gpio_domain_ops, bank);
++ bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE,
++ bank->fwnode, &stm32_gpio_domain_ops,
++ bank);
+
+- if (!bank->domain)
+- return -ENODEV;
++ if (!bank->domain)
++ return -ENODEV;
++ }
+
+ err = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (err) {
+@@ -1466,6 +1468,8 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ pctl->domain = stm32_pctrl_get_irq_domain(np);
+ if (IS_ERR(pctl->domain))
+ return PTR_ERR(pctl->domain);
++ if (!pctl->domain)
++ dev_warn(dev, "pinctrl without interrupt support\n");
+
+ /* hwspinlock is optional */
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
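
These stm32 hunks make the parent IRQ domain optional: stm32_pctrl_get_irq_domain() can now report three outcomes, an ERR_PTR() for a real failure, NULL for "no interrupt support", or a valid domain. Condensed from the hunks above (not the full functions), the probe path keeps this skeleton:

	domain = stm32_pctrl_get_irq_domain(np);
	if (IS_ERR(domain))
		return PTR_ERR(domain);		/* hard error: abort probe */
	if (!domain)
		dev_warn(dev, "pinctrl without interrupt support\n");

	/* per bank: only build the hierarchy when a parent domain exists */
	if (domain) {
		bank->domain = irq_domain_create_hierarchy(domain, 0,
							   STM32_GPIO_IRQ_LINE,
							   bank->fwnode,
							   &stm32_gpio_domain_ops,
							   bank);
		if (!bank->domain)
			return -ENODEV;
	}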
+diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
+index 3ba47040ac423..2b3335ab56c66 100644
+--- a/drivers/pinctrl/sunplus/sppctl.c
++++ b/drivers/pinctrl/sunplus/sppctl.c
+@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ }
+
+ *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
++ if (*map == NULL)
++ return -ENOMEM;
++
+ for (i = 0; i < (*num_maps); i++) {
+ dt_pin = be32_to_cpu(list[i]);
+ pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
+diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
+index 08d0a07b58ef2..c7624d7611a7e 100644
+--- a/drivers/power/reset/arm-versatile-reboot.c
++++ b/drivers/power/reset/arm-versatile-reboot.c
+@@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void)
+ versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
+
+ syscon_regmap = syscon_node_to_regmap(np);
++ of_node_put(np);
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
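
The one-line arm-versatile-reboot fix follows the usual OF refcount rule: the node lookup returns np with an elevated refcount, and syscon_node_to_regmap() takes its own reference internally, so the caller must drop its reference as soon as it is done with np, on the success and the error path alike. A condensed sketch of the shape (the lookup call and 'matches' table are my paraphrase of the surrounding probe, not quoted from this hunk):

	np = of_find_matching_node_and_match(NULL, matches, &reboot_id);
	if (!np)
		return -ENODEV;

	syscon_regmap = syscon_node_to_regmap(np);
	of_node_put(np);		/* done with np either way */
	if (IS_ERR(syscon_regmap))
		return PTR_ERR(syscon_regmap);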
+diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
+index ec8a404d71b44..4339fa9ff0099 100644
+--- a/drivers/power/supply/ab8500_fg.c
++++ b/drivers/power/supply/ab8500_fg.c
+@@ -3148,6 +3148,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+ ret = ab8500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(dev, "failed to initialize registers\n");
++ destroy_workqueue(di->fg_wq);
+ return ret;
+ }
+
+@@ -3159,6 +3160,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+ di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
+ if (IS_ERR(di->fg_psy)) {
+ dev_err(dev, "failed to register FG psy\n");
++ destroy_workqueue(di->fg_wq);
+ return PTR_ERR(di->fg_psy);
+ }
+
+@@ -3174,8 +3176,10 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+ /* Register primary interrupt handlers */
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+- if (irq < 0)
++ if (irq < 0) {
++ destroy_workqueue(di->fg_wq);
+ return irq;
++ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ab8500_fg_irq[i].isr,
+@@ -3185,6 +3189,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+ if (ret != 0) {
+ dev_err(dev, "failed to request %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
++ destroy_workqueue(di->fg_wq);
+ return ret;
+ }
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
+@@ -3200,6 +3205,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+ ret = ab8500_fg_sysfs_init(di);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs entry\n");
++ destroy_workqueue(di->fg_wq);
+ return ret;
+ }
+
+@@ -3207,6 +3213,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(dev, "failed to create FG psy\n");
+ ab8500_fg_sysfs_exit(di);
++ destroy_workqueue(di->fg_wq);
+ return ret;
+ }
+
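
Every early return in ab8500_fg_probe() after the workqueue is created now destroys it, which closes the leak but repeats the cleanup call; the same effect is often written with a single unwind label. A self-contained model of that alternative idiom (plain C, all names hypothetical):

	#include <stdio.h>

	static int  acquire_wq(void) { puts("create workqueue");  return 0; }
	static void release_wq(void) { puts("destroy workqueue"); }
	static int  init_hw(void)    { puts("init hw: fails");    return -1; }

	static int fake_probe(void)
	{
		int ret;

		if (acquire_wq())
			return -1;

		ret = init_hw();
		if (ret)
			goto err_destroy_wq;	/* one unwind path for all failures */

		return 0;

	err_destroy_wq:
		release_wq();
		return ret;
	}

	int main(void) { return fake_probe() ? 1 : 0; }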
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 775c0bf2f923d..0933948d7df3d 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /* if an error occurred and we have an active dma, then terminate */
+- dmaengine_terminate_sync(ctlr->dma_tx);
+- bs->tx_dma_active = false;
+- dmaengine_terminate_sync(ctlr->dma_rx);
+- bs->rx_dma_active = false;
++ if (ctlr->dma_tx) {
++ dmaengine_terminate_sync(ctlr->dma_tx);
++ bs->tx_dma_active = false;
++ }
++ if (ctlr->dma_rx) {
++ dmaengine_terminate_sync(ctlr->dma_rx);
++ bs->rx_dma_active = false;
++ }
+ bcm2835_spi_undo_prologue(bs);
+
+ /* and reset */
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 5b485cd96c931..5298a3a43bc71 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -4085,13 +4085,14 @@ static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
+ rv = _create_message(ls, sizeof(struct dlm_message) + len,
+ dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
+ if (rv)
+- return;
++ goto out;
+
+ memcpy(ms->m_extra, name, len);
+ ms->m_hash = hash;
+
+ send_message(mh, ms);
+
++out:
+ spin_lock(&ls->ls_remove_spin);
+ ls->ls_remove_len = 0;
+ memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
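
The dlm change is more than style: ls_remove_len and ls_remove_name mark a directory-remove as in flight, and the old early return left that marker set whenever _create_message() failed, so the pending-remove state could linger indefinitely. Routing the failure through a common epilogue clears the marker on every path. The general shape (condensed from the hunk above; the function body between the two points is elided):

	rv = _create_message(ls, sizeof(struct dlm_message) + len,
			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
	if (rv)
		goto out;	/* was: return; -- left the marker set */

	send_message(mh, ms);

	out:
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = 0;
		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);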
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index a02a04a993bfa..c6eaf7e9ea743 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -1080,6 +1080,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+
+ exfat_remove_entries(inode, p_dir, oldentry, 0,
+ num_old_entries);
++ ei->dir = *p_dir;
+ ei->entry = newentry;
+ } else {
+ if (exfat_get_entry_type(epold) == TYPE_FILE) {
+@@ -1167,28 +1168,6 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+ return 0;
+ }
+
+-static void exfat_update_parent_info(struct exfat_inode_info *ei,
+- struct inode *parent_inode)
+-{
+- struct exfat_sb_info *sbi = EXFAT_SB(parent_inode->i_sb);
+- struct exfat_inode_info *parent_ei = EXFAT_I(parent_inode);
+- loff_t parent_isize = i_size_read(parent_inode);
+-
+- /*
+- * the problem that struct exfat_inode_info caches wrong parent info.
+- *
+- * because of flag-mismatch of ei->dir,
+- * there is abnormal traversing cluster chain.
+- */
+- if (unlikely(parent_ei->flags != ei->dir.flags ||
+- parent_isize != EXFAT_CLU_TO_B(ei->dir.size, sbi) ||
+- parent_ei->start_clu != ei->dir.dir)) {
+- exfat_chain_set(&ei->dir, parent_ei->start_clu,
+- EXFAT_B_TO_CLU_ROUND_UP(parent_isize, sbi),
+- parent_ei->flags);
+- }
+-}
+-
+ /* rename or move a old file into a new file */
+ static int __exfat_rename(struct inode *old_parent_inode,
+ struct exfat_inode_info *ei, struct inode *new_parent_inode,
+@@ -1219,9 +1198,9 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ return -ENOENT;
+ }
+
+- exfat_update_parent_info(ei, old_parent_inode);
+-
+- exfat_chain_dup(&olddir, &ei->dir);
++ exfat_chain_set(&olddir, EXFAT_I(old_parent_inode)->start_clu,
++ EXFAT_B_TO_CLU_ROUND_UP(i_size_read(old_parent_inode), sbi),
++ EXFAT_I(old_parent_inode)->flags);
+ dentry = ei->entry;
+
+ ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh);
+@@ -1241,8 +1220,6 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ goto out;
+ }
+
+- exfat_update_parent_info(new_ei, new_parent_inode);
+-
+ p_dir = &(new_ei->dir);
+ new_entry = new_ei->entry;
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh);
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index 944f83ef9f2ef..dcd15e0249767 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -28,7 +28,7 @@
+ #include <linux/dma-fence.h>
+ #include <linux/completion.h>
+ #include <linux/xarray.h>
+-#include <linux/irq_work.h>
++#include <linux/workqueue.h>
+
+ #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
+@@ -294,7 +294,7 @@ struct drm_sched_job {
+ */
+ union {
+ struct dma_fence_cb finish_cb;
+- struct irq_work work;
++ struct work_struct work;
+ };
+
+ uint64_t id;
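
The drm_sched_job change swaps struct irq_work for struct work_struct inside the union with finish_cb: once the finished fence has signalled, the callback slot can be reused to defer the remaining processing to a workqueue, where sleeping is allowed (irq_work callbacks run in hard-interrupt context). The general shape of that hand-off, as a hedged sketch rather than the scheduler's actual internals:

	static void job_done_deferred(struct work_struct *w)
	{
		struct drm_sched_job *job =
			container_of(w, struct drm_sched_job, work);

		/* workqueue context: may take mutexes, may sleep */
	}

	static void job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
	{
		struct drm_sched_job *job =
			container_of(cb, struct drm_sched_job, finish_cb);

		/* fence-callback context: reuse the union slot and punt */
		INIT_WORK(&job->work, job_done_deferred);
		schedule_work(&job->work);
	}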
+diff --git a/include/net/amt.h b/include/net/amt.h
+index 7a4db8b903eed..44acadf3a69e3 100644
+--- a/include/net/amt.h
++++ b/include/net/amt.h
+@@ -78,6 +78,15 @@ enum amt_status {
+
+ #define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+
++/* Gateway events only */
++enum amt_event {
++ AMT_EVENT_NONE,
++ AMT_EVENT_RECEIVE,
++ AMT_EVENT_SEND_DISCOVERY,
++ AMT_EVENT_SEND_REQUEST,
++ __AMT_EVENT_MAX,
++};
++
+ struct amt_header {
+ #if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 type:4,
+@@ -292,6 +301,12 @@ struct amt_group_node {
+ struct hlist_head sources[];
+ };
+
++#define AMT_MAX_EVENTS 16
++struct amt_events {
++ enum amt_event event;
++ struct sk_buff *skb;
++};
++
+ struct amt_dev {
+ struct net_device *dev;
+ struct net_device *stream_dev;
+@@ -308,6 +323,7 @@ struct amt_dev {
+ struct delayed_work req_wq;
+ /* Protected by RTNL */
+ struct delayed_work secret_wq;
++ struct work_struct event_wq;
+ /* AMT status */
+ enum amt_status status;
+ /* Generated key */
+@@ -345,6 +361,10 @@ struct amt_dev {
+ /* Used only in gateway mode */
+ u64 mac:48,
+ reserved:16;
++ /* AMT gateway side message handler queue */
++ struct amt_events events[AMT_MAX_EVENTS];
++ u8 event_idx;
++ u8 nr_events;
+ };
+
+ #define AMT_TOS 0xc0
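
The new amt_dev fields describe a small fixed-capacity FIFO consumed by event_wq: event_idx is the next slot to read, nr_events the number of queued entries, and AMT_MAX_EVENTS bounds the array. The handler logic itself lives in drivers/net/amt.c, outside this hunk, so the following is an inferred, self-contained model of how such a ring is typically driven, not the driver's code:

	#include <stdio.h>

	#define MAX_EVENTS 16

	struct ring {
		int events[MAX_EVENTS];
		unsigned int idx;	/* next slot to consume */
		unsigned int nr;	/* entries currently queued */
	};

	static int ring_push(struct ring *r, int ev)
	{
		if (r->nr >= MAX_EVENTS)
			return -1;	/* full: caller drops the event */
		r->events[(r->idx + r->nr) % MAX_EVENTS] = ev;
		r->nr++;
		return 0;
	}

	static int ring_pop(struct ring *r, int *ev)
	{
		if (!r->nr)
			return -1;	/* empty */
		*ev = r->events[r->idx];
		r->idx = (r->idx + 1) % MAX_EVENTS;
		r->nr--;
		return 0;
	}

	int main(void)
	{
		struct ring r = { { 0 }, 0, 0 };
		int ev;

		ring_push(&r, 1);
		ring_push(&r, 2);
		while (!ring_pop(&r, &ev))
			printf("event %d\n", ev);
		return 0;
	}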
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index 98e1ec1a14f03..749bb1e460871 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -207,7 +207,7 @@ static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+ {
+ #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+- return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
++ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
+ bound_dev_if, dif, sdif);
+ #else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
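
This hunk and the run of net/* hunks that follow share one fix: these sysctls can be rewritten at any time by their handlers while readers run locklessly, so each read is wrapped in READ_ONCE() to prevent load tearing and to annotate the intentional data race for tools like KCSAN; the writer side is expected to pair it with WRITE_ONCE(). The pairing, reduced to its two halves (kernel-style sketch; the field is one of the sysctls touched below):

	/* writer, e.g. the sysctl proc handler */
	WRITE_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept, val);

	/* lockless reader on the packet fast path */
	accept = READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept);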
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h |
6730 |
+index 48e4c59d85e24..6395f6b9a5d29 100644 |
6731 |
+--- a/include/net/inet_sock.h |
6732 |
++++ b/include/net/inet_sock.h |
6733 |
+@@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) |
6734 |
+ |
6735 |
+ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) |
6736 |
+ { |
6737 |
+- if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) |
6738 |
++ if (!sk->sk_mark && |
6739 |
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) |
6740 |
+ return skb->mark; |
6741 |
+ |
6742 |
+ return sk->sk_mark; |
6743 |
+@@ -116,14 +117,15 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) |
6744 |
+ static inline int inet_request_bound_dev_if(const struct sock *sk, |
6745 |
+ struct sk_buff *skb) |
6746 |
+ { |
6747 |
++ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); |
6748 |
+ #ifdef CONFIG_NET_L3_MASTER_DEV |
6749 |
+ struct net *net = sock_net(sk); |
6750 |
+ |
6751 |
+- if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) |
6752 |
++ if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) |
6753 |
+ return l3mdev_master_ifindex_by_index(net, skb->skb_iif); |
6754 |
+ #endif |
6755 |
+ |
6756 |
+- return sk->sk_bound_dev_if; |
6757 |
++ return bound_dev_if; |
6758 |
+ } |
6759 |
+ |
6760 |
+ static inline int inet_sk_bound_l3mdev(const struct sock *sk) |
6761 |
+@@ -131,7 +133,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk) |
6762 |
+ #ifdef CONFIG_NET_L3_MASTER_DEV |
6763 |
+ struct net *net = sock_net(sk); |
6764 |
+ |
6765 |
+- if (!net->ipv4.sysctl_tcp_l3mdev_accept) |
6766 |
++ if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) |
6767 |
+ return l3mdev_master_ifindex_by_index(net, |
6768 |
+ sk->sk_bound_dev_if); |
6769 |
+ #endif |
6770 |
+@@ -373,7 +375,7 @@ static inline bool inet_get_convert_csum(struct sock *sk) |
6771 |
+ static inline bool inet_can_nonlocal_bind(struct net *net, |
6772 |
+ struct inet_sock *inet) |
6773 |
+ { |
6774 |
+- return net->ipv4.sysctl_ip_nonlocal_bind || |
6775 |
++ return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || |
6776 |
+ inet->freebind || inet->transparent; |
6777 |
+ } |
6778 |
+ |
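
inet_request_bound_dev_if() above also shows a second idiom: sk->sk_bound_dev_if is read once into a local, so the zero-check and the return value are guaranteed to see the same snapshot even if the field changes concurrently. A hedged sketch of that pattern, with a hypothetical struct and placeholder helpers, assuming the same READ_ONCE() as above:

struct sk_like {
	int bound_if;				/* written concurrently elsewhere */
};

static int fallback_enabled(void) { return 0; }	/* placeholder */
static int fallback_ifindex(void) { return 1; }	/* placeholder */

static int bound_dev(const struct sk_like *sk)
{
	int dev = READ_ONCE(sk->bound_if);	/* one load serves both uses */

	if (!dev && fallback_enabled())
		return fallback_ifindex();
	return dev;
}
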
6779 |
+diff --git a/include/net/ip.h b/include/net/ip.h |
6780 |
+index 26fffda78cca4..1c979fd1904ce 100644 |
6781 |
+--- a/include/net/ip.h |
6782 |
++++ b/include/net/ip.h |
6783 |
+@@ -357,7 +357,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name) |
6784 |
+ |
6785 |
+ static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port) |
6786 |
+ { |
6787 |
+- return port < net->ipv4.sysctl_ip_prot_sock; |
6788 |
++ return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock); |
6789 |
+ } |
6790 |
+ |
6791 |
+ #else |
6792 |
+@@ -384,7 +384,7 @@ void ipfrag_init(void); |
6793 |
+ void ip_static_sysctl_init(void); |
6794 |
+ |
6795 |
+ #define IP4_REPLY_MARK(net, mark) \ |
6796 |
+- ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) |
6797 |
++ (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0) |
6798 |
+ |
6799 |
+ static inline bool ip_is_fragment(const struct iphdr *iph) |
6800 |
+ { |
6801 |
+@@ -446,7 +446,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, |
6802 |
+ struct net *net = dev_net(dst->dev); |
6803 |
+ unsigned int mtu; |
6804 |
+ |
6805 |
+- if (net->ipv4.sysctl_ip_fwd_use_pmtu || |
6806 |
++ if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || |
6807 |
+ ip_mtu_locked(dst) || |
6808 |
+ !forwarding) { |
6809 |
+ mtu = rt->rt_pmtu; |
6810 |
+diff --git a/include/net/protocol.h b/include/net/protocol.h |
6811 |
+index f51c06ae365f5..6aef8cb11cc8c 100644 |
6812 |
+--- a/include/net/protocol.h |
6813 |
++++ b/include/net/protocol.h |
6814 |
+@@ -35,8 +35,6 @@ |
6815 |
+ |
6816 |
+ /* This is used to register protocols. */ |
6817 |
+ struct net_protocol { |
6818 |
+- int (*early_demux)(struct sk_buff *skb); |
6819 |
+- int (*early_demux_handler)(struct sk_buff *skb); |
6820 |
+ int (*handler)(struct sk_buff *skb); |
6821 |
+ |
6822 |
+ /* This returns an error if we weren't able to handle the error. */ |
6823 |
+@@ -52,8 +50,6 @@ struct net_protocol { |
6824 |
+ |
6825 |
+ #if IS_ENABLED(CONFIG_IPV6) |
6826 |
+ struct inet6_protocol { |
6827 |
+- void (*early_demux)(struct sk_buff *skb); |
6828 |
+- void (*early_demux_handler)(struct sk_buff *skb); |
6829 |
+ int (*handler)(struct sk_buff *skb); |
6830 |
+ |
6831 |
+ /* This returns an error if we weren't able to handle the error. */ |
6832 |
+diff --git a/include/net/route.h b/include/net/route.h |
6833 |
+index 25404fc2b4837..08df794364853 100644 |
6834 |
+--- a/include/net/route.h |
6835 |
++++ b/include/net/route.h |
6836 |
+@@ -361,7 +361,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) |
6837 |
+ struct net *net = dev_net(dst->dev); |
6838 |
+ |
6839 |
+ if (hoplimit == 0) |
6840 |
+- hoplimit = net->ipv4.sysctl_ip_default_ttl; |
6841 |
++ hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); |
6842 |
+ return hoplimit; |
6843 |
+ } |
6844 |
+ |
6845 |
+diff --git a/include/net/tcp.h b/include/net/tcp.h |
6846 |
+index 2d9a78b3beaa9..4f5de382e1927 100644 |
6847 |
+--- a/include/net/tcp.h |
6848 |
++++ b/include/net/tcp.h |
6849 |
+@@ -932,7 +932,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific; |
6850 |
+ |
6851 |
+ INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); |
6852 |
+ INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); |
6853 |
+-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); |
6854 |
++void tcp_v6_early_demux(struct sk_buff *skb); |
6855 |
+ |
6856 |
+ #endif |
6857 |
+ |
6858 |
+@@ -1421,8 +1421,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) |
6859 |
+ struct tcp_sock *tp = tcp_sk(sk); |
6860 |
+ s32 delta; |
6861 |
+ |
6862 |
+- if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || |
6863 |
+- ca_ops->cong_control) |
6864 |
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || |
6865 |
++ tp->packets_out || ca_ops->cong_control) |
6866 |
+ return; |
6867 |
+ delta = tcp_jiffies32 - tp->lsndtime; |
6868 |
+ if (delta > inet_csk(sk)->icsk_rto) |
6869 |
+@@ -1511,21 +1511,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp) |
6870 |
+ { |
6871 |
+ struct net *net = sock_net((struct sock *)tp); |
6872 |
+ |
6873 |
+- return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; |
6874 |
++ return tp->keepalive_intvl ? : |
6875 |
++ READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl); |
6876 |
+ } |
6877 |
+ |
6878 |
+ static inline int keepalive_time_when(const struct tcp_sock *tp) |
6879 |
+ { |
6880 |
+ struct net *net = sock_net((struct sock *)tp); |
6881 |
+ |
6882 |
+- return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; |
6883 |
++ return tp->keepalive_time ? : |
6884 |
++ READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); |
6885 |
+ } |
6886 |
+ |
6887 |
+ static inline int keepalive_probes(const struct tcp_sock *tp) |
6888 |
+ { |
6889 |
+ struct net *net = sock_net((struct sock *)tp); |
6890 |
+ |
6891 |
+- return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; |
6892 |
++ return tp->keepalive_probes ? : |
6893 |
++ READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes); |
6894 |
+ } |
6895 |
+ |
6896 |
+ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) |
6897 |
+@@ -1538,7 +1541,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) |
6898 |
+ |
6899 |
+ static inline int tcp_fin_time(const struct sock *sk) |
6900 |
+ { |
6901 |
+- int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; |
6902 |
++ int fin_timeout = tcp_sk(sk)->linger2 ? : |
6903 |
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); |
6904 |
+ const int rto = inet_csk(sk)->icsk_rto; |
6905 |
+ |
6906 |
+ if (fin_timeout < (rto << 2) - (rto >> 1)) |
6907 |
+@@ -2041,7 +2045,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); |
6908 |
+ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) |
6909 |
+ { |
6910 |
+ struct net *net = sock_net((struct sock *)tp); |
6911 |
+- return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; |
6912 |
++ return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat); |
6913 |
+ } |
6914 |
+ |
6915 |
+ bool tcp_stream_memory_free(const struct sock *sk, int wake); |
6916 |
+diff --git a/include/net/udp.h b/include/net/udp.h |
6917 |
+index f1c2a88c9005a..abe91ab9030df 100644 |
6918 |
+--- a/include/net/udp.h |
6919 |
++++ b/include/net/udp.h |
6920 |
+@@ -167,7 +167,7 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) |
6921 |
+ typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport, |
6922 |
+ __be16 dport); |
6923 |
+ |
6924 |
+-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); |
6925 |
++void udp_v6_early_demux(struct sk_buff *skb); |
6926 |
+ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *)); |
6927 |
+ |
6928 |
+ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, |
6929 |
+@@ -238,7 +238,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, |
6930 |
+ int dif, int sdif) |
6931 |
+ { |
6932 |
+ #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) |
6933 |
+- return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept, |
6934 |
++ return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept), |
6935 |
+ bound_dev_if, dif, sdif); |
6936 |
+ #else |
6937 |
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); |
6938 |
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c |
6939 |
+index 1e92b52fc8146..3adff3831c047 100644 |
6940 |
+--- a/kernel/bpf/core.c |
6941 |
++++ b/kernel/bpf/core.c |
6942 |
+@@ -68,11 +68,13 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns |
6943 |
+ { |
6944 |
+ u8 *ptr = NULL; |
6945 |
+ |
6946 |
+- if (k >= SKF_NET_OFF) |
6947 |
++ if (k >= SKF_NET_OFF) { |
6948 |
+ ptr = skb_network_header(skb) + k - SKF_NET_OFF; |
6949 |
+- else if (k >= SKF_LL_OFF) |
6950 |
++ } else if (k >= SKF_LL_OFF) { |
6951 |
++ if (unlikely(!skb_mac_header_was_set(skb))) |
6952 |
++ return NULL; |
6953 |
+ ptr = skb_mac_header(skb) + k - SKF_LL_OFF; |
6954 |
+- |
6955 |
++ } |
6956 |
+ if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) |
6957 |
+ return ptr; |
6958 |
+ |
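
The bpf_internal_load_pointer_neg_helper() fix above adds a validity check before the pointer arithmetic: when no MAC header was ever recorded for the skb, skb_mac_header() does not point at anything meaningful, and an offset computed from it can land inside the buffer and wrongly pass the final bounds check. The shape of the fix as a self-contained sketch, with an illustrative struct standing in for the skb:

#include <stddef.h>

struct buf {
	unsigned char *head, *tail;	/* valid data is [head, tail) */
	unsigned char *base;		/* like skb_mac_header() */
	int base_valid;			/* like skb_mac_header_was_set() */
};

static unsigned char *load_ptr(const struct buf *b, size_t off, size_t size)
{
	unsigned char *p;

	if (!b->base_valid)		/* refuse before doing arithmetic */
		return NULL;
	p = b->base + off;
	if (p >= b->head && p + size <= b->tail)
		return p;
	return NULL;
}
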
6959 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
6960 |
+index 950b25c3f2103..82238406f5f55 100644 |
6961 |
+--- a/kernel/events/core.c |
6962 |
++++ b/kernel/events/core.c |
6963 |
+@@ -6254,10 +6254,10 @@ again: |
6964 |
+ |
6965 |
+ if (!atomic_inc_not_zero(&event->rb->mmap_count)) { |
6966 |
+ /* |
6967 |
+- * Raced against perf_mmap_close() through |
6968 |
+- * perf_event_set_output(). Try again, hope for better |
6969 |
+- * luck. |
6970 |
++ * Raced against perf_mmap_close(); remove the |
6971 |
++ * event and try again. |
6972 |
+ */ |
6973 |
++ ring_buffer_attach(event, NULL); |
6974 |
+ mutex_unlock(&event->mmap_mutex); |
6975 |
+ goto again; |
6976 |
+ } |
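
The hunk above is the reference-count half of the perf race fix: atomic_inc_not_zero() takes a reference only while the count is still nonzero, so a buffer that has already dropped to zero is never revived, and the new ring_buffer_attach(event, NULL) detaches the stale buffer before retrying; without it the loop could spin on the same dead ring forever. A userspace analog of the acquire-only-if-live primitive, using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the count is nonzero; zero means the
 * object is dying and must not be resurrected. */
static bool ref_get_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
	}
	return false;	/* caller drops its stale link and retries */
}
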
6977 |
+@@ -11826,14 +11826,25 @@ err_size: |
6978 |
+ goto out; |
6979 |
+ } |
6980 |
+ |
6981 |
++static void mutex_lock_double(struct mutex *a, struct mutex *b) |
6982 |
++{ |
6983 |
++ if (b < a) |
6984 |
++ swap(a, b); |
6985 |
++ |
6986 |
++ mutex_lock(a); |
6987 |
++ mutex_lock_nested(b, SINGLE_DEPTH_NESTING); |
6988 |
++} |
6989 |
++ |
6990 |
+ static int |
6991 |
+ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
6992 |
+ { |
6993 |
+ struct perf_buffer *rb = NULL; |
6994 |
+ int ret = -EINVAL; |
6995 |
+ |
6996 |
+- if (!output_event) |
6997 |
++ if (!output_event) { |
6998 |
++ mutex_lock(&event->mmap_mutex); |
6999 |
+ goto set; |
7000 |
++ } |
7001 |
+ |
7002 |
+ /* don't allow circular references */ |
7003 |
+ if (event == output_event) |
7004 |
+@@ -11871,8 +11882,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
7005 |
+ event->pmu != output_event->pmu) |
7006 |
+ goto out; |
7007 |
+ |
7008 |
++ /* |
7009 |
++ * Hold both mmap_mutex to serialize against perf_mmap_close(). Since |
7010 |
++ * output_event is already on rb->event_list, and the list iteration |
7011 |
++ * restarts after every removal, it is guaranteed this new event is |
7012 |
++ * observed *OR* if output_event is already removed, it's guaranteed we |
7013 |
++ * observe !rb->mmap_count. |
7014 |
++ */ |
7015 |
++ mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); |
7016 |
+ set: |
7017 |
+- mutex_lock(&event->mmap_mutex); |
7018 |
+ /* Can't redirect output if we've got an active mmap() */ |
7019 |
+ if (atomic_read(&event->mmap_count)) |
7020 |
+ goto unlock; |
7021 |
+@@ -11882,6 +11900,12 @@ set: |
7022 |
+ rb = ring_buffer_get(output_event); |
7023 |
+ if (!rb) |
7024 |
+ goto unlock; |
7025 |
++ |
7026 |
++ /* did we race against perf_mmap_close() */ |
7027 |
++ if (!atomic_read(&rb->mmap_count)) { |
7028 |
++ ring_buffer_put(rb); |
7029 |
++ goto unlock; |
7030 |
++ } |
7031 |
+ } |
7032 |
+ |
7033 |
+ ring_buffer_attach(event, rb); |
7034 |
+@@ -11889,20 +11913,13 @@ set: |
7035 |
+ ret = 0; |
7036 |
+ unlock: |
7037 |
+ mutex_unlock(&event->mmap_mutex); |
7038 |
++ if (output_event) |
7039 |
++ mutex_unlock(&output_event->mmap_mutex); |
7040 |
+ |
7041 |
+ out: |
7042 |
+ return ret; |
7043 |
+ } |
7044 |
+ |
7045 |
+-static void mutex_lock_double(struct mutex *a, struct mutex *b) |
7046 |
+-{ |
7047 |
+- if (b < a) |
7048 |
+- swap(a, b); |
7049 |
+- |
7050 |
+- mutex_lock(a); |
7051 |
+- mutex_lock_nested(b, SINGLE_DEPTH_NESTING); |
7052 |
+-} |
7053 |
+- |
7054 |
+ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) |
7055 |
+ { |
7056 |
+ bool nmi_safe = false; |
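
mutex_lock_double() is only moved above its new caller here, but the idiom is worth spelling out: when two locks of the same class must be held at once, acquiring them in address order makes every task agree on the order, which rules out ABBA deadlock; mutex_lock_nested(b, SINGLE_DEPTH_NESTING) exists purely so lockdep accepts the second same-class acquisition. A pthread sketch of the same discipline, assuming the two mutexes are distinct objects:

#include <pthread.h>
#include <stdint.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((uintptr_t)b < (uintptr_t)a) {	/* order by object address */
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);			/* lower address always first */
	pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);		/* unlock order is irrelevant */
	pthread_mutex_unlock(b);
}
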
7057 |
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c |
7058 |
+index b61281d104584..10a916ec64826 100644 |
7059 |
+--- a/kernel/sched/deadline.c |
7060 |
++++ b/kernel/sched/deadline.c |
7061 |
+@@ -1669,7 +1669,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
7062 |
+ * the throttle. |
7063 |
+ */ |
7064 |
+ p->dl.dl_throttled = 0; |
7065 |
+- BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH); |
7066 |
++ if (!(flags & ENQUEUE_REPLENISH)) |
7067 |
++ printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n", |
7068 |
++ task_pid_nr(p)); |
7069 |
++ |
7070 |
+ return; |
7071 |
+ } |
7072 |
+ |
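
The deadline-scheduler change above downgrades a fatal assertion to a one-shot diagnostic: the state is recoverable, so crashing the box with BUG_ON() is worse than logging and continuing (printk_deferred_once is used because plain printk can deadlock under the runqueue lock). A userspace sketch of the report-once-and-recover shape; the macro name is illustrative and the ##__VA_ARGS__ form is the gcc/clang extension the kernel itself relies on:

#include <stdio.h>

#define warn_once(fmt, ...) do {			\
	static int warned_;				\
	if (!warned_) {					\
		warned_ = 1;				\
		fprintf(stderr, fmt, ##__VA_ARGS__);	\
	}						\
} while (0)

static void enqueue(int flags, int replenish_flag)
{
	if (!(flags & replenish_flag))
		warn_once("missing REPLENISH flag (%d)\n", flags);
	/* keep going on the tolerant path instead of aborting */
}
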
7073 |
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c |
7074 |
+index 230038d4f9081..bb9962b33f95c 100644 |
7075 |
+--- a/kernel/watch_queue.c |
7076 |
++++ b/kernel/watch_queue.c |
7077 |
+@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL"); |
7078 |
+ #define WATCH_QUEUE_NOTE_SIZE 128 |
7079 |
+ #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE) |
7080 |
+ |
7081 |
++/* |
7082 |
++ * This must be called under the RCU read-lock, which makes |
7083 |
++ * sure that the wqueue still exists. It can then take the lock, |
7084 |
++ * and check that the wqueue hasn't been destroyed, which in |
7085 |
++ * turn makes sure that the notification pipe still exists. |
7086 |
++ */ |
7087 |
++static inline bool lock_wqueue(struct watch_queue *wqueue) |
7088 |
++{ |
7089 |
++ spin_lock_bh(&wqueue->lock); |
7090 |
++ if (unlikely(wqueue->defunct)) { |
7091 |
++ spin_unlock_bh(&wqueue->lock); |
7092 |
++ return false; |
7093 |
++ } |
7094 |
++ return true; |
7095 |
++} |
7096 |
++ |
7097 |
++static inline void unlock_wqueue(struct watch_queue *wqueue) |
7098 |
++{ |
7099 |
++ spin_unlock_bh(&wqueue->lock); |
7100 |
++} |
7101 |
++ |
7102 |
+ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, |
7103 |
+ struct pipe_buffer *buf) |
7104 |
+ { |
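
The new lock_wqueue()/unlock_wqueue() pair above packages a lookup-and-revalidate idiom: RCU keeps the watch_queue memory readable after the lookup, but only taking wqueue->lock and re-checking ->defunct proves the queue (and therefore its pipe) is still usable; if the flag is set, the whole operation is abandoned before the pipe is touched. The shape, sketched with an illustrative struct and pthread locking:

#include <pthread.h>
#include <stdbool.h>

struct queue_like {
	pthread_mutex_t lock;
	bool defunct;		/* set during teardown, under 'lock' */
};

/* Returns with 'lock' held only if the queue is still live. */
static bool lock_live(struct queue_like *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->defunct) {	/* torn down since we looked it up */
		pthread_mutex_unlock(&q->lock);
		return false;
	}
	return true;		/* caller must unlock when done */
}

Callers then follow the same pattern the later hunks give __post_watch_notification(): if (lock_live(q)) { ... ; pthread_mutex_unlock(&q->lock); }.
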
7105 |
+@@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = { |
7106 |
+ |
7107 |
+ /* |
7108 |
+ * Post a notification to a watch queue. |
7109 |
++ * |
7110 |
++ * Must be called with the RCU lock for reading, and the |
7111 |
++ * watch_queue lock held, which guarantees that the pipe |
7112 |
++ * hasn't been released. |
7113 |
+ */ |
7114 |
+ static bool post_one_notification(struct watch_queue *wqueue, |
7115 |
+ struct watch_notification *n) |
7116 |
+@@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue, |
7117 |
+ |
7118 |
+ spin_lock_irq(&pipe->rd_wait.lock); |
7119 |
+ |
7120 |
+- if (wqueue->defunct) |
7121 |
+- goto out; |
7122 |
+- |
7123 |
+ mask = pipe->ring_size - 1; |
7124 |
+ head = pipe->head; |
7125 |
+ tail = pipe->tail; |
7126 |
+@@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist, |
7127 |
+ if (security_post_notification(watch->cred, cred, n) < 0) |
7128 |
+ continue; |
7129 |
+ |
7130 |
+- post_one_notification(wqueue, n); |
7131 |
++ if (lock_wqueue(wqueue)) { |
7132 |
++ post_one_notification(wqueue, n); |
7133 |
++ unlock_wqueue(wqueue); |
7134 |
++ } |
7135 |
+ } |
7136 |
+ |
7137 |
+ rcu_read_unlock(); |
7138 |
+@@ -462,11 +487,12 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist) |
7139 |
+ return -EAGAIN; |
7140 |
+ } |
7141 |
+ |
7142 |
+- spin_lock_bh(&wqueue->lock); |
7143 |
+- kref_get(&wqueue->usage); |
7144 |
+- kref_get(&watch->usage); |
7145 |
+- hlist_add_head(&watch->queue_node, &wqueue->watches); |
7146 |
+- spin_unlock_bh(&wqueue->lock); |
7147 |
++ if (lock_wqueue(wqueue)) { |
7148 |
++ kref_get(&wqueue->usage); |
7149 |
++ kref_get(&watch->usage); |
7150 |
++ hlist_add_head(&watch->queue_node, &wqueue->watches); |
7151 |
++ unlock_wqueue(wqueue); |
7152 |
++ } |
7153 |
+ |
7154 |
+ hlist_add_head(&watch->list_node, &wlist->watchers); |
7155 |
+ return 0; |
7156 |
+@@ -520,20 +546,15 @@ found: |
7157 |
+ |
7158 |
+ wqueue = rcu_dereference(watch->queue); |
7159 |
+ |
7160 |
+- /* We don't need the watch list lock for the next bit as RCU is |
7161 |
+- * protecting *wqueue from deallocation. |
7162 |
+- */ |
7163 |
+- if (wqueue) { |
7164 |
++ if (lock_wqueue(wqueue)) { |
7165 |
+ post_one_notification(wqueue, &n.watch); |
7166 |
+ |
7167 |
+- spin_lock_bh(&wqueue->lock); |
7168 |
+- |
7169 |
+ if (!hlist_unhashed(&watch->queue_node)) { |
7170 |
+ hlist_del_init_rcu(&watch->queue_node); |
7171 |
+ put_watch(watch); |
7172 |
+ } |
7173 |
+ |
7174 |
+- spin_unlock_bh(&wqueue->lock); |
7175 |
++ unlock_wqueue(wqueue); |
7176 |
+ } |
7177 |
+ |
7178 |
+ if (wlist->release_watch) { |
7179 |
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
7180 |
+index 8c74107a2b15e..ea6dee61bc9dc 100644 |
7181 |
+--- a/mm/mempolicy.c |
7182 |
++++ b/mm/mempolicy.c |
7183 |
+@@ -350,7 +350,7 @@ static void mpol_rebind_preferred(struct mempolicy *pol, |
7184 |
+ */ |
7185 |
+ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) |
7186 |
+ { |
7187 |
+- if (!pol) |
7188 |
++ if (!pol || pol->mode == MPOL_LOCAL) |
7189 |
+ return; |
7190 |
+ if (!mpol_store_user_nodemask(pol) && |
7191 |
+ nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) |
7192 |
+diff --git a/net/core/filter.c b/net/core/filter.c |
7193 |
+index 6391c1885bca8..d0b0c163d3f34 100644 |
7194 |
+--- a/net/core/filter.c |
7195 |
++++ b/net/core/filter.c |
7196 |
+@@ -7031,7 +7031,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len |
7197 |
+ if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) |
7198 |
+ return -EINVAL; |
7199 |
+ |
7200 |
+- if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) |
7201 |
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) |
7202 |
+ return -EINVAL; |
7203 |
+ |
7204 |
+ if (!th->ack || th->rst || th->syn) |
7205 |
+@@ -7106,7 +7106,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, |
7206 |
+ if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) |
7207 |
+ return -EINVAL; |
7208 |
+ |
7209 |
+- if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) |
7210 |
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) |
7211 |
+ return -ENOENT; |
7212 |
+ |
7213 |
+ if (!th->syn || th->ack || th->fin || th->rst) |
7214 |
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c |
7215 |
+index 5f85e01d4093b..b0ff6153be623 100644 |
7216 |
+--- a/net/core/secure_seq.c |
7217 |
++++ b/net/core/secure_seq.c |
7218 |
+@@ -64,7 +64,7 @@ u32 secure_tcpv6_ts_off(const struct net *net, |
7219 |
+ .daddr = *(struct in6_addr *)daddr, |
7220 |
+ }; |
7221 |
+ |
7222 |
+- if (net->ipv4.sysctl_tcp_timestamps != 1) |
7223 |
++ if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) |
7224 |
+ return 0; |
7225 |
+ |
7226 |
+ ts_secret_init(); |
7227 |
+@@ -120,7 +120,7 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); |
7228 |
+ #ifdef CONFIG_INET |
7229 |
+ u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr) |
7230 |
+ { |
7231 |
+- if (net->ipv4.sysctl_tcp_timestamps != 1) |
7232 |
++ if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) |
7233 |
+ return 0; |
7234 |
+ |
7235 |
+ ts_secret_init(); |
7236 |
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c |
7237 |
+index 3f00a28fe762a..5daa1fa542490 100644 |
7238 |
+--- a/net/core/sock_reuseport.c |
7239 |
++++ b/net/core/sock_reuseport.c |
7240 |
+@@ -387,7 +387,7 @@ void reuseport_stop_listen_sock(struct sock *sk) |
7241 |
+ prog = rcu_dereference_protected(reuse->prog, |
7242 |
+ lockdep_is_held(&reuseport_lock)); |
7243 |
+ |
7244 |
+- if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req || |
7245 |
++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) || |
7246 |
+ (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) { |
7247 |
+ /* Migration capable, move sk from the listening section |
7248 |
+ * to the closed section. |
7249 |
+@@ -545,7 +545,7 @@ struct sock *reuseport_migrate_sock(struct sock *sk, |
7250 |
+ hash = migrating_sk->sk_hash; |
7251 |
+ prog = rcu_dereference(reuse->prog); |
7252 |
+ if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) { |
7253 |
+- if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req) |
7254 |
++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req)) |
7255 |
+ goto select_by_hash; |
7256 |
+ goto failure; |
7257 |
+ } |
7258 |
+diff --git a/net/dsa/port.c b/net/dsa/port.c |
7259 |
+index bdccb613285db..7bc79e28d48ee 100644 |
7260 |
+--- a/net/dsa/port.c |
7261 |
++++ b/net/dsa/port.c |
7262 |
+@@ -242,6 +242,60 @@ void dsa_port_disable(struct dsa_port *dp) |
7263 |
+ rtnl_unlock(); |
7264 |
+ } |
7265 |
+ |
7266 |
++static void dsa_port_reset_vlan_filtering(struct dsa_port *dp, |
7267 |
++ struct dsa_bridge bridge) |
7268 |
++{ |
7269 |
++ struct netlink_ext_ack extack = {0}; |
7270 |
++ bool change_vlan_filtering = false; |
7271 |
++ struct dsa_switch *ds = dp->ds; |
7272 |
++ struct dsa_port *other_dp; |
7273 |
++ bool vlan_filtering; |
7274 |
++ int err; |
7275 |
++ |
7276 |
++ if (ds->needs_standalone_vlan_filtering && |
7277 |
++ !br_vlan_enabled(bridge.dev)) { |
7278 |
++ change_vlan_filtering = true; |
7279 |
++ vlan_filtering = true; |
7280 |
++ } else if (!ds->needs_standalone_vlan_filtering && |
7281 |
++ br_vlan_enabled(bridge.dev)) { |
7282 |
++ change_vlan_filtering = true; |
7283 |
++ vlan_filtering = false; |
7284 |
++ } |
7285 |
++ |
7286 |
++ /* If the bridge was vlan_filtering, the bridge core doesn't trigger an |
7287 |
++ * event for changing vlan_filtering setting upon slave ports leaving |
7288 |
++ * it. That is a good thing, because that lets us handle it and also |
7289 |
++ * handle the case where the switch's vlan_filtering setting is global |
7290 |
++ * (not per port). When that happens, the correct moment to trigger the |
7291 |
++ * vlan_filtering callback is only when the last port leaves the last |
7292 |
++ * VLAN-aware bridge. |
7293 |
++ */ |
7294 |
++ if (change_vlan_filtering && ds->vlan_filtering_is_global) { |
7295 |
++ dsa_switch_for_each_port(other_dp, ds) { |
7296 |
++ struct net_device *br = dsa_port_bridge_dev_get(other_dp); |
7297 |
++ |
7298 |
++ if (br && br_vlan_enabled(br)) { |
7299 |
++ change_vlan_filtering = false; |
7300 |
++ break; |
7301 |
++ } |
7302 |
++ } |
7303 |
++ } |
7304 |
++ |
7305 |
++ if (!change_vlan_filtering) |
7306 |
++ return; |
7307 |
++ |
7308 |
++ err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack); |
7309 |
++ if (extack._msg) { |
7310 |
++ dev_err(ds->dev, "port %d: %s\n", dp->index, |
7311 |
++ extack._msg); |
7312 |
++ } |
7313 |
++ if (err && err != -EOPNOTSUPP) { |
7314 |
++ dev_err(ds->dev, |
7315 |
++ "port %d failed to reset VLAN filtering to %d: %pe\n", |
7316 |
++ dp->index, vlan_filtering, ERR_PTR(err)); |
7317 |
++ } |
7318 |
++} |
7319 |
++ |
7320 |
+ static int dsa_port_inherit_brport_flags(struct dsa_port *dp, |
7321 |
+ struct netlink_ext_ack *extack) |
7322 |
+ { |
7323 |
+@@ -313,7 +367,8 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp, |
7324 |
+ return 0; |
7325 |
+ } |
7326 |
+ |
7327 |
+-static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp) |
7328 |
++static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp, |
7329 |
++ struct dsa_bridge bridge) |
7330 |
+ { |
7331 |
+ /* Configure the port for standalone mode (no address learning, |
7332 |
+ * flood everything). |
7333 |
+@@ -333,7 +388,7 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp) |
7334 |
+ */ |
7335 |
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true); |
7336 |
+ |
7337 |
+- /* VLAN filtering is handled by dsa_switch_bridge_leave */ |
7338 |
++ dsa_port_reset_vlan_filtering(dp, bridge); |
7339 |
+ |
7340 |
+ /* Ageing time may be global to the switch chip, so don't change it |
7341 |
+ * here because we have no good reason (or value) to change it to. |
7342 |
+@@ -502,7 +557,7 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) |
7343 |
+ "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n", |
7344 |
+ dp->index, ERR_PTR(err)); |
7345 |
+ |
7346 |
+- dsa_port_switchdev_unsync_attrs(dp); |
7347 |
++ dsa_port_switchdev_unsync_attrs(dp, info.bridge); |
7348 |
+ } |
7349 |
+ |
7350 |
+ int dsa_port_lag_change(struct dsa_port *dp, |
7351 |
+@@ -752,7 +807,7 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, |
7352 |
+ ds->vlan_filtering = vlan_filtering; |
7353 |
+ |
7354 |
+ dsa_switch_for_each_user_port(other_dp, ds) { |
7355 |
+- struct net_device *slave = dp->slave; |
7356 |
++ struct net_device *slave = other_dp->slave; |
7357 |
+ |
7358 |
+ /* We might be called in the unbind path, so not |
7359 |
+ * all slave devices might still be registered. |
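
Note the one-line fix in the last hunk: the loop iterates with other_dp, but the old body read dp->slave, the loop-invariant variable of the enclosing function, so every iteration acted on the same port instead of the one being visited. The bug class in miniature:

#include <stdio.h>

struct port {
	struct port *next;
	int id;
};

static void visit_all(struct port *first)
{
	struct port *it;

	for (it = first; it; it = it->next)
		printf("port %d\n", it->id);	/* it->id, not first->id */
}
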
7360 |
+diff --git a/net/dsa/switch.c b/net/dsa/switch.c |
7361 |
+index d25cd1da3eb35..d8a80cf9742c0 100644 |
7362 |
+--- a/net/dsa/switch.c |
7363 |
++++ b/net/dsa/switch.c |
7364 |
+@@ -115,62 +115,10 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, |
7365 |
+ return 0; |
7366 |
+ } |
7367 |
+ |
7368 |
+-static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds, |
7369 |
+- struct dsa_notifier_bridge_info *info) |
7370 |
+-{ |
7371 |
+- struct netlink_ext_ack extack = {0}; |
7372 |
+- bool change_vlan_filtering = false; |
7373 |
+- bool vlan_filtering; |
7374 |
+- struct dsa_port *dp; |
7375 |
+- int err; |
7376 |
+- |
7377 |
+- if (ds->needs_standalone_vlan_filtering && |
7378 |
+- !br_vlan_enabled(info->bridge.dev)) { |
7379 |
+- change_vlan_filtering = true; |
7380 |
+- vlan_filtering = true; |
7381 |
+- } else if (!ds->needs_standalone_vlan_filtering && |
7382 |
+- br_vlan_enabled(info->bridge.dev)) { |
7383 |
+- change_vlan_filtering = true; |
7384 |
+- vlan_filtering = false; |
7385 |
+- } |
7386 |
+- |
7387 |
+- /* If the bridge was vlan_filtering, the bridge core doesn't trigger an |
7388 |
+- * event for changing vlan_filtering setting upon slave ports leaving |
7389 |
+- * it. That is a good thing, because that lets us handle it and also |
7390 |
+- * handle the case where the switch's vlan_filtering setting is global |
7391 |
+- * (not per port). When that happens, the correct moment to trigger the |
7392 |
+- * vlan_filtering callback is only when the last port leaves the last |
7393 |
+- * VLAN-aware bridge. |
7394 |
+- */ |
7395 |
+- if (change_vlan_filtering && ds->vlan_filtering_is_global) { |
7396 |
+- dsa_switch_for_each_port(dp, ds) { |
7397 |
+- struct net_device *br = dsa_port_bridge_dev_get(dp); |
7398 |
+- |
7399 |
+- if (br && br_vlan_enabled(br)) { |
7400 |
+- change_vlan_filtering = false; |
7401 |
+- break; |
7402 |
+- } |
7403 |
+- } |
7404 |
+- } |
7405 |
+- |
7406 |
+- if (change_vlan_filtering) { |
7407 |
+- err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port), |
7408 |
+- vlan_filtering, &extack); |
7409 |
+- if (extack._msg) |
7410 |
+- dev_err(ds->dev, "port %d: %s\n", info->port, |
7411 |
+- extack._msg); |
7412 |
+- if (err && err != -EOPNOTSUPP) |
7413 |
+- return err; |
7414 |
+- } |
7415 |
+- |
7416 |
+- return 0; |
7417 |
+-} |
7418 |
+- |
7419 |
+ static int dsa_switch_bridge_leave(struct dsa_switch *ds, |
7420 |
+ struct dsa_notifier_bridge_info *info) |
7421 |
+ { |
7422 |
+ struct dsa_switch_tree *dst = ds->dst; |
7423 |
+- int err; |
7424 |
+ |
7425 |
+ if (dst->index == info->tree_index && ds->index == info->sw_index && |
7426 |
+ ds->ops->port_bridge_leave) |
7427 |
+@@ -182,12 +130,6 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, |
7428 |
+ info->sw_index, info->port, |
7429 |
+ info->bridge); |
7430 |
+ |
7431 |
+- if (ds->dst->index == info->tree_index && ds->index == info->sw_index) { |
7432 |
+- err = dsa_switch_sync_vlan_filtering(ds, info); |
7433 |
+- if (err) |
7434 |
+- return err; |
7435 |
+- } |
7436 |
+- |
7437 |
+ return 0; |
7438 |
+ } |
7439 |
+ |
7440 |
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
7441 |
+index 98bc180563d16..5c207367b3b4f 100644 |
7442 |
+--- a/net/ipv4/af_inet.c |
7443 |
++++ b/net/ipv4/af_inet.c |
7444 |
+@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog) |
7445 |
+ * because the socket was in TCP_LISTEN state previously but |
7446 |
+ * was shutdown() rather than close(). |
7447 |
+ */ |
7448 |
+- tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen; |
7449 |
++ tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen); |
7450 |
+ if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) && |
7451 |
+ (tcp_fastopen & TFO_SERVER_ENABLE) && |
7452 |
+ !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { |
7453 |
+@@ -335,7 +335,7 @@ lookup_protocol: |
7454 |
+ inet->hdrincl = 1; |
7455 |
+ } |
7456 |
+ |
7457 |
+- if (net->ipv4.sysctl_ip_no_pmtu_disc) |
7458 |
++ if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) |
7459 |
+ inet->pmtudisc = IP_PMTUDISC_DONT; |
7460 |
+ else |
7461 |
+ inet->pmtudisc = IP_PMTUDISC_WANT; |
7462 |
+@@ -1711,24 +1711,14 @@ static const struct net_protocol igmp_protocol = { |
7463 |
+ }; |
7464 |
+ #endif |
7465 |
+ |
7466 |
+-/* thinking of making this const? Don't. |
7467 |
+- * early_demux can change based on sysctl. |
7468 |
+- */ |
7469 |
+-static struct net_protocol tcp_protocol = { |
7470 |
+- .early_demux = tcp_v4_early_demux, |
7471 |
+- .early_demux_handler = tcp_v4_early_demux, |
7472 |
++static const struct net_protocol tcp_protocol = { |
7473 |
+ .handler = tcp_v4_rcv, |
7474 |
+ .err_handler = tcp_v4_err, |
7475 |
+ .no_policy = 1, |
7476 |
+ .icmp_strict_tag_validation = 1, |
7477 |
+ }; |
7478 |
+ |
7479 |
+-/* thinking of making this const? Don't. |
7480 |
+- * early_demux can change based on sysctl. |
7481 |
+- */ |
7482 |
+-static struct net_protocol udp_protocol = { |
7483 |
+- .early_demux = udp_v4_early_demux, |
7484 |
+- .early_demux_handler = udp_v4_early_demux, |
7485 |
++static const struct net_protocol udp_protocol = { |
7486 |
+ .handler = udp_rcv, |
7487 |
+ .err_handler = udp_err, |
7488 |
+ .no_policy = 1, |
7489 |
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c |
7490 |
+index 720f65f7bd0b0..9f5c1c26c8f26 100644 |
7491 |
+--- a/net/ipv4/fib_semantics.c |
7492 |
++++ b/net/ipv4/fib_semantics.c |
7493 |
+@@ -2216,7 +2216,7 @@ void fib_select_multipath(struct fib_result *res, int hash) |
7494 |
+ } |
7495 |
+ |
7496 |
+ change_nexthops(fi) { |
7497 |
+- if (net->ipv4.sysctl_fib_multipath_use_neigh) { |
7498 |
++ if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) { |
7499 |
+ if (!fib_good_nh(nexthop_nh)) |
7500 |
+ continue; |
7501 |
+ if (!first) { |
7502 |
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c |
7503 |
+index c13ceda9ce5d8..d8cfa6241c04b 100644 |
7504 |
+--- a/net/ipv4/icmp.c |
7505 |
++++ b/net/ipv4/icmp.c |
7506 |
+@@ -878,7 +878,7 @@ static bool icmp_unreach(struct sk_buff *skb) |
7507 |
+ * values please see |
7508 |
+ * Documentation/networking/ip-sysctl.rst |
7509 |
+ */ |
7510 |
+- switch (net->ipv4.sysctl_ip_no_pmtu_disc) { |
7511 |
++ switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) { |
7512 |
+ default: |
7513 |
+ net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n", |
7514 |
+ &iph->daddr); |
7515 |
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c |
7516 |
+index 1d9e6d5e9a76c..0a0010f896274 100644 |
7517 |
+--- a/net/ipv4/igmp.c |
7518 |
++++ b/net/ipv4/igmp.c |
7519 |
+@@ -467,7 +467,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, |
7520 |
+ |
7521 |
+ if (pmc->multiaddr == IGMP_ALL_HOSTS) |
7522 |
+ return skb; |
7523 |
+- if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) |
7524 |
++ if (ipv4_is_local_multicast(pmc->multiaddr) && |
7525 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7526 |
+ return skb; |
7527 |
+ |
7528 |
+ mtu = READ_ONCE(dev->mtu); |
7529 |
+@@ -593,7 +594,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc) |
7530 |
+ if (pmc->multiaddr == IGMP_ALL_HOSTS) |
7531 |
+ continue; |
7532 |
+ if (ipv4_is_local_multicast(pmc->multiaddr) && |
7533 |
+- !net->ipv4.sysctl_igmp_llm_reports) |
7534 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7535 |
+ continue; |
7536 |
+ spin_lock_bh(&pmc->lock); |
7537 |
+ if (pmc->sfcount[MCAST_EXCLUDE]) |
7538 |
+@@ -736,7 +737,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, |
7539 |
+ if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) |
7540 |
+ return igmpv3_send_report(in_dev, pmc); |
7541 |
+ |
7542 |
+- if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports) |
7543 |
++ if (ipv4_is_local_multicast(group) && |
7544 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7545 |
+ return 0; |
7546 |
+ |
7547 |
+ if (type == IGMP_HOST_LEAVE_MESSAGE) |
7548 |
+@@ -825,7 +827,7 @@ static void igmp_ifc_event(struct in_device *in_dev) |
7549 |
+ struct net *net = dev_net(in_dev->dev); |
7550 |
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) |
7551 |
+ return; |
7552 |
+- WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv); |
7553 |
++ WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv)); |
7554 |
+ igmp_ifc_start_timer(in_dev, 1); |
7555 |
+ } |
7556 |
+ |
7557 |
+@@ -920,7 +922,8 @@ static bool igmp_heard_report(struct in_device *in_dev, __be32 group) |
7558 |
+ |
7559 |
+ if (group == IGMP_ALL_HOSTS) |
7560 |
+ return false; |
7561 |
+- if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports) |
7562 |
++ if (ipv4_is_local_multicast(group) && |
7563 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7564 |
+ return false; |
7565 |
+ |
7566 |
+ rcu_read_lock(); |
7567 |
+@@ -1006,7 +1009,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, |
7568 |
+ * received value was zero, use the default or statically |
7569 |
+ * configured value. |
7570 |
+ */ |
7571 |
+- in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv; |
7572 |
++ in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7573 |
+ in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL; |
7574 |
+ |
7575 |
+ /* RFC3376, 8.3. Query Response Interval: |
7576 |
+@@ -1045,7 +1048,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, |
7577 |
+ if (im->multiaddr == IGMP_ALL_HOSTS) |
7578 |
+ continue; |
7579 |
+ if (ipv4_is_local_multicast(im->multiaddr) && |
7580 |
+- !net->ipv4.sysctl_igmp_llm_reports) |
7581 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7582 |
+ continue; |
7583 |
+ spin_lock_bh(&im->lock); |
7584 |
+ if (im->tm_running) |
7585 |
+@@ -1186,7 +1189,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im, |
7586 |
+ pmc->interface = im->interface; |
7587 |
+ in_dev_hold(in_dev); |
7588 |
+ pmc->multiaddr = im->multiaddr; |
7589 |
+- pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7590 |
++ pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7591 |
+ pmc->sfmode = im->sfmode; |
7592 |
+ if (pmc->sfmode == MCAST_INCLUDE) { |
7593 |
+ struct ip_sf_list *psf; |
7594 |
+@@ -1237,9 +1240,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) |
7595 |
+ swap(im->tomb, pmc->tomb); |
7596 |
+ swap(im->sources, pmc->sources); |
7597 |
+ for (psf = im->sources; psf; psf = psf->sf_next) |
7598 |
+- psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7599 |
++ psf->sf_crcount = in_dev->mr_qrv ?: |
7600 |
++ READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7601 |
+ } else { |
7602 |
+- im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7603 |
++ im->crcount = in_dev->mr_qrv ?: |
7604 |
++ READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7605 |
+ } |
7606 |
+ in_dev_put(pmc->interface); |
7607 |
+ kfree_pmc(pmc); |
7608 |
+@@ -1296,7 +1301,8 @@ static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp) |
7609 |
+ #ifdef CONFIG_IP_MULTICAST |
7610 |
+ if (im->multiaddr == IGMP_ALL_HOSTS) |
7611 |
+ return; |
7612 |
+- if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) |
7613 |
++ if (ipv4_is_local_multicast(im->multiaddr) && |
7614 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7615 |
+ return; |
7616 |
+ |
7617 |
+ reporter = im->reporter; |
7618 |
+@@ -1338,13 +1344,14 @@ static void igmp_group_added(struct ip_mc_list *im) |
7619 |
+ #ifdef CONFIG_IP_MULTICAST |
7620 |
+ if (im->multiaddr == IGMP_ALL_HOSTS) |
7621 |
+ return; |
7622 |
+- if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) |
7623 |
++ if (ipv4_is_local_multicast(im->multiaddr) && |
7624 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7625 |
+ return; |
7626 |
+ |
7627 |
+ if (in_dev->dead) |
7628 |
+ return; |
7629 |
+ |
7630 |
+- im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; |
7631 |
++ im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7632 |
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { |
7633 |
+ spin_lock_bh(&im->lock); |
7634 |
+ igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); |
7635 |
+@@ -1358,7 +1365,7 @@ static void igmp_group_added(struct ip_mc_list *im) |
7636 |
+ * IN() to IN(A). |
7637 |
+ */ |
7638 |
+ if (im->sfmode == MCAST_EXCLUDE) |
7639 |
+- im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7640 |
++ im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7641 |
+ |
7642 |
+ igmp_ifc_event(in_dev); |
7643 |
+ #endif |
7644 |
+@@ -1642,7 +1649,7 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev) |
7645 |
+ if (im->multiaddr == IGMP_ALL_HOSTS) |
7646 |
+ continue; |
7647 |
+ if (ipv4_is_local_multicast(im->multiaddr) && |
7648 |
+- !net->ipv4.sysctl_igmp_llm_reports) |
7649 |
++ !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) |
7650 |
+ continue; |
7651 |
+ |
7652 |
+ /* a failover is happening and switches |
7653 |
+@@ -1749,7 +1756,7 @@ static void ip_mc_reset(struct in_device *in_dev) |
7654 |
+ |
7655 |
+ in_dev->mr_qi = IGMP_QUERY_INTERVAL; |
7656 |
+ in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL; |
7657 |
+- in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv; |
7658 |
++ in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7659 |
+ } |
7660 |
+ #else |
7661 |
+ static void ip_mc_reset(struct in_device *in_dev) |
7662 |
+@@ -1883,7 +1890,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, |
7663 |
+ #ifdef CONFIG_IP_MULTICAST |
7664 |
+ if (psf->sf_oldin && |
7665 |
+ !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { |
7666 |
+- psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7667 |
++ psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7668 |
+ psf->sf_next = pmc->tomb; |
7669 |
+ pmc->tomb = psf; |
7670 |
+ rv = 1; |
7671 |
+@@ -1947,7 +1954,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, |
7672 |
+ /* filter mode change */ |
7673 |
+ pmc->sfmode = MCAST_INCLUDE; |
7674 |
+ #ifdef CONFIG_IP_MULTICAST |
7675 |
+- pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7676 |
++ pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7677 |
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount); |
7678 |
+ for (psf = pmc->sources; psf; psf = psf->sf_next) |
7679 |
+ psf->sf_crcount = 0; |
7680 |
+@@ -2126,7 +2133,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, |
7681 |
+ #ifdef CONFIG_IP_MULTICAST |
7682 |
+ /* else no filters; keep old mode for reports */ |
7683 |
+ |
7684 |
+- pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
7685 |
++ pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); |
7686 |
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount); |
7687 |
+ for (psf = pmc->sources; psf; psf = psf->sf_next) |
7688 |
+ psf->sf_crcount = 0; |
7689 |
+@@ -2192,7 +2199,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr, |
7690 |
+ count++; |
7691 |
+ } |
7692 |
+ err = -ENOBUFS; |
7693 |
+- if (count >= net->ipv4.sysctl_igmp_max_memberships) |
7694 |
++ if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships)) |
7695 |
+ goto done; |
7696 |
+ iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); |
7697 |
+ if (!iml) |
7698 |
+@@ -2379,7 +2386,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct |
7699 |
+ } |
7700 |
+ /* else, add a new source to the filter */ |
7701 |
+ |
7702 |
+- if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) { |
7703 |
++ if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) { |
7704 |
+ err = -ENOBUFS; |
7705 |
+ goto done; |
7706 |
+ } |
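
Every igmp.c hunk above follows one template: override ?: READ_ONCE(sysctl). GNU C's two-operand ?: evaluates the left side once and yields it when nonzero, so a per-interface setting (in_dev->mr_qrv and friends) always wins and the sysctl is only the fallback, now loaded exactly once. A sketch, assuming gcc/clang and the kernel's READ_ONCE():

/* Binary ?: yields 'override' when nonzero, else the default;
 * 'override' is evaluated only once. */
static int effective_qrv(int override, const int *sysctl_qrv)
{
	return override ?: READ_ONCE(*sysctl_qrv);
}
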
7707 |
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c |
7708 |
+index 1e5b53c2bb267..cdc750ced5255 100644 |
7709 |
+--- a/net/ipv4/inet_connection_sock.c |
7710 |
++++ b/net/ipv4/inet_connection_sock.c |
7711 |
+@@ -259,7 +259,7 @@ next_port: |
7712 |
+ goto other_half_scan; |
7713 |
+ } |
7714 |
+ |
7715 |
+- if (net->ipv4.sysctl_ip_autobind_reuse && !relax) { |
7716 |
++ if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) { |
7717 |
+ /* We still have a chance to connect to different destinations */ |
7718 |
+ relax = true; |
7719 |
+ goto ports_exhausted; |
7720 |
+@@ -829,7 +829,8 @@ static void reqsk_timer_handler(struct timer_list *t) |
7721 |
+ |
7722 |
+ icsk = inet_csk(sk_listener); |
7723 |
+ net = sock_net(sk_listener); |
7724 |
+- max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries; |
7725 |
++ max_syn_ack_retries = icsk->icsk_syn_retries ? : |
7726 |
++ READ_ONCE(net->ipv4.sysctl_tcp_synack_retries); |
7727 |
+ /* Normally all the openreqs are young and become mature |
7728 |
+ * (i.e. converted to established socket) for first timeout. |
7729 |
+ * If synack was not acknowledged for 1 second, it means |
7730 |
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c |
7731 |
+index 92ba3350274bc..03bb7c51b6182 100644 |
7732 |
+--- a/net/ipv4/ip_forward.c |
7733 |
++++ b/net/ipv4/ip_forward.c |
7734 |
+@@ -151,7 +151,7 @@ int ip_forward(struct sk_buff *skb) |
7735 |
+ !skb_sec_path(skb)) |
7736 |
+ ip_rt_send_redirect(skb); |
7737 |
+ |
7738 |
+- if (net->ipv4.sysctl_ip_fwd_update_priority) |
7739 |
++ if (READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority)) |
7740 |
+ skb->priority = rt_tos2priority(iph->tos); |
7741 |
+ |
7742 |
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, |
7743 |
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c |
7744 |
+index 95f7bb052784e..f3fd6c3983090 100644 |
7745 |
+--- a/net/ipv4/ip_input.c |
7746 |
++++ b/net/ipv4/ip_input.c |
7747 |
+@@ -312,14 +312,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, |
7748 |
+ ip_hdr(hint)->tos == iph->tos; |
7749 |
+ } |
7750 |
+ |
7751 |
+-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *)); |
7752 |
+-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *)); |
7753 |
++int tcp_v4_early_demux(struct sk_buff *skb); |
7754 |
++int udp_v4_early_demux(struct sk_buff *skb); |
7755 |
+ static int ip_rcv_finish_core(struct net *net, struct sock *sk, |
7756 |
+ struct sk_buff *skb, struct net_device *dev, |
7757 |
+ const struct sk_buff *hint) |
7758 |
+ { |
7759 |
+ const struct iphdr *iph = ip_hdr(skb); |
7760 |
+- int (*edemux)(struct sk_buff *skb); |
7761 |
+ int err, drop_reason; |
7762 |
+ struct rtable *rt; |
7763 |
+ |
7764 |
+@@ -332,21 +331,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, |
7765 |
+ goto drop_error; |
7766 |
+ } |
7767 |
+ |
7768 |
+- if (net->ipv4.sysctl_ip_early_demux && |
7769 |
++ if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && |
7770 |
+ !skb_dst(skb) && |
7771 |
+ !skb->sk && |
7772 |
+ !ip_is_fragment(iph)) { |
7773 |
+- const struct net_protocol *ipprot; |
7774 |
+- int protocol = iph->protocol; |
7775 |
+- |
7776 |
+- ipprot = rcu_dereference(inet_protos[protocol]); |
7777 |
+- if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { |
7778 |
+- err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux, |
7779 |
+- udp_v4_early_demux, skb); |
7780 |
+- if (unlikely(err)) |
7781 |
+- goto drop_error; |
7782 |
+- /* must reload iph, skb->head might have changed */ |
7783 |
+- iph = ip_hdr(skb); |
7784 |
++ switch (iph->protocol) { |
7785 |
++ case IPPROTO_TCP: |
7786 |
++ if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) { |
7787 |
++ tcp_v4_early_demux(skb); |
7788 |
++ |
7789 |
++ /* must reload iph, skb->head might have changed */ |
7790 |
++ iph = ip_hdr(skb); |
7791 |
++ } |
7792 |
++ break; |
7793 |
++ case IPPROTO_UDP: |
7794 |
++ if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) { |
7795 |
++ err = udp_v4_early_demux(skb); |
7796 |
++ if (unlikely(err)) |
7797 |
++ goto drop_error; |
7798 |
++ |
7799 |
++ /* must reload iph, skb->head might have changed */ |
7800 |
++ iph = ip_hdr(skb); |
7801 |
++ } |
7802 |
++ break; |
7803 |
+ } |
7804 |
+ } |
7805 |
+ |
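
This ip_input.c hunk is the centerpiece of the early-demux rework visible in protocol.h, tcp.h, udp.h, af_inet.c and sysctl_net_ipv4.c: instead of chasing a writable ->early_demux function pointer that the sysctl handler used to flip at runtime, the protocol number is switched on directly and the per-netns sysctl is tested inline with READ_ONCE(). That removes an indirect call from the hot path and lets the net_protocol structs become const. The dispatch shape, with illustrative placeholder handlers:

enum { P_TCP = 6, P_UDP = 17 };			/* IP protocol numbers */

static void tcp_demux(void *pkt) { (void)pkt; }	/* placeholder handlers */
static void udp_demux(void *pkt) { (void)pkt; }

static void early_demux(int proto, int tcp_on, int udp_on, void *pkt)
{
	switch (proto) {
	case P_TCP:
		if (tcp_on)			/* sysctl read via READ_ONCE() */
			tcp_demux(pkt);
		break;
	case P_UDP:
		if (udp_on)
			udp_demux(pkt);
		break;
	}
}
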
7806 |
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
7807 |
+index 445a9ecaefa19..a8a323ecbb54b 100644 |
7808 |
+--- a/net/ipv4/ip_sockglue.c |
7809 |
++++ b/net/ipv4/ip_sockglue.c |
7810 |
+@@ -782,7 +782,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) |
7811 |
+ /* numsrc >= (4G-140)/128 overflow in 32 bits */ |
7812 |
+ err = -ENOBUFS; |
7813 |
+ if (gsf->gf_numsrc >= 0x1ffffff || |
7814 |
+- gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf) |
7815 |
++ gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf)) |
7816 |
+ goto out_free_gsf; |
7817 |
+ |
7818 |
+ err = -EINVAL; |
7819 |
+@@ -832,7 +832,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, |
7820 |
+ |
7821 |
+ /* numsrc >= (4G-140)/128 overflow in 32 bits */ |
7822 |
+ err = -ENOBUFS; |
7823 |
+- if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf) |
7824 |
++ if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf)) |
7825 |
+ goto out_free_gsf; |
7826 |
+ err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode, |
7827 |
+ &gf32->gf_group, gf32->gf_slist_flex); |
7828 |
+@@ -1244,7 +1244,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, |
7829 |
+ } |
7830 |
+ /* numsrc >= (1G-4) overflow in 32 bits */ |
7831 |
+ if (msf->imsf_numsrc >= 0x3ffffffcU || |
7832 |
+- msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) { |
7833 |
++ msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) { |
7834 |
+ kfree(msf); |
7835 |
+ err = -ENOBUFS; |
7836 |
+ break; |
7837 |
+@@ -1606,7 +1606,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, |
7838 |
+ { |
7839 |
+ struct net *net = sock_net(sk); |
7840 |
+ val = (inet->uc_ttl == -1 ? |
7841 |
+- net->ipv4.sysctl_ip_default_ttl : |
7842 |
++ READ_ONCE(net->ipv4.sysctl_ip_default_ttl) : |
7843 |
+ inet->uc_ttl); |
7844 |
+ break; |
7845 |
+ } |
7846 |
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c |
7847 |
+index 4eed5afca392e..f2edb40c0db00 100644 |
7848 |
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c |
7849 |
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c |
7850 |
+@@ -62,7 +62,7 @@ struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net, |
7851 |
+ |
7852 |
+ skb_reserve(nskb, LL_MAX_HEADER); |
7853 |
+ niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, |
7854 |
+- net->ipv4.sysctl_ip_default_ttl); |
7855 |
++ READ_ONCE(net->ipv4.sysctl_ip_default_ttl)); |
7856 |
+ nf_reject_ip_tcphdr_put(nskb, oldskb, oth); |
7857 |
+ niph->tot_len = htons(nskb->len); |
7858 |
+ ip_send_check(niph); |
7859 |
+@@ -115,7 +115,7 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net, |
7860 |
+ |
7861 |
+ skb_reserve(nskb, LL_MAX_HEADER); |
7862 |
+ niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP, |
7863 |
+- net->ipv4.sysctl_ip_default_ttl); |
7864 |
++ READ_ONCE(net->ipv4.sysctl_ip_default_ttl)); |
7865 |
+ |
7866 |
+ skb_reset_transport_header(nskb); |
7867 |
+ icmph = skb_put_zero(nskb, sizeof(struct icmphdr)); |
7868 |
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c |
7869 |
+index 28836071f0a69..0088a4c64d77e 100644 |
7870 |
+--- a/net/ipv4/proc.c |
7871 |
++++ b/net/ipv4/proc.c |
7872 |
+@@ -387,7 +387,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v) |
7873 |
+ |
7874 |
+ seq_printf(seq, "\nIp: %d %d", |
7875 |
+ IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2, |
7876 |
+- net->ipv4.sysctl_ip_default_ttl); |
7877 |
++ READ_ONCE(net->ipv4.sysctl_ip_default_ttl)); |
7878 |
+ |
7879 |
+ BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0); |
7880 |
+ snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list, |
7881 |
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
7882 |
+index ed01063d8f303..02a0a397a2f38 100644 |
7883 |
+--- a/net/ipv4/route.c |
7884 |
++++ b/net/ipv4/route.c |
7885 |
+@@ -1397,7 +1397,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr) |
7886 |
+ struct fib_info *fi = res->fi; |
7887 |
+ u32 mtu = 0; |
7888 |
+ |
7889 |
+- if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu || |
7890 |
++ if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) || |
7891 |
+ fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU)) |
7892 |
+ mtu = fi->fib_mtu; |
7893 |
+ |
7894 |
+@@ -1928,7 +1928,7 @@ static u32 fib_multipath_custom_hash_outer(const struct net *net, |
7895 |
+ const struct sk_buff *skb, |
7896 |
+ bool *p_has_inner) |
7897 |
+ { |
7898 |
+- u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; |
7899 |
++ u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields); |
7900 |
+ struct flow_keys keys, hash_keys; |
7901 |
+ |
7902 |
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) |
7903 |
+@@ -1957,7 +1957,7 @@ static u32 fib_multipath_custom_hash_inner(const struct net *net, |
7904 |
+ const struct sk_buff *skb, |
7905 |
+ bool has_inner) |
7906 |
+ { |
7907 |
+- u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; |
7908 |
++ u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields); |
7909 |
+ struct flow_keys keys, hash_keys; |
7910 |
+ |
7911 |
+ /* We assume the packet carries an encapsulation, but if none was |
7912 |
+@@ -2017,7 +2017,7 @@ static u32 fib_multipath_custom_hash_skb(const struct net *net, |
7913 |
+ static u32 fib_multipath_custom_hash_fl4(const struct net *net, |
7914 |
+ const struct flowi4 *fl4) |
7915 |
+ { |
7916 |
+- u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; |
7917 |
++ u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields); |
7918 |
+ struct flow_keys hash_keys; |
7919 |
+ |
7920 |
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) |
7921 |
+@@ -2047,7 +2047,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, |
7922 |
+ struct flow_keys hash_keys; |
7923 |
+ u32 mhash = 0; |
7924 |
+ |
7925 |
+- switch (net->ipv4.sysctl_fib_multipath_hash_policy) { |
7926 |
++ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) { |
7927 |
+ case 0: |
7928 |
+ memset(&hash_keys, 0, sizeof(hash_keys)); |
7929 |
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; |
7930 |
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c |
7931 |
+index b387c48351559..942d2dfa11151 100644 |
7932 |
+--- a/net/ipv4/syncookies.c |
7933 |
++++ b/net/ipv4/syncookies.c |
7934 |
+@@ -247,12 +247,12 @@ bool cookie_timestamp_decode(const struct net *net, |
7935 |
+ return true; |
7936 |
+ } |
7937 |
+ |
7938 |
+- if (!net->ipv4.sysctl_tcp_timestamps) |
7939 |
++ if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps)) |
7940 |
+ return false; |
7941 |
+ |
7942 |
+ tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0; |
7943 |
+ |
7944 |
+- if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack) |
7945 |
++ if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack)) |
7946 |
+ return false; |
7947 |
+ |
7948 |
+ if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK) |
7949 |
+@@ -261,7 +261,7 @@ bool cookie_timestamp_decode(const struct net *net, |
7950 |
+ tcp_opt->wscale_ok = 1; |
7951 |
+ tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK; |
7952 |
+ |
7953 |
+- return net->ipv4.sysctl_tcp_window_scaling != 0; |
7954 |
++ return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0; |
7955 |
+ } |
7956 |
+ EXPORT_SYMBOL(cookie_timestamp_decode); |
7957 |
+ |
7958 |
+@@ -340,7 +340,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) |
7959 |
+ struct flowi4 fl4; |
7960 |
+ u32 tsoff = 0; |
7961 |
+ |
7962 |
+- if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst) |
7963 |
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) || |
7964 |
++ !th->ack || th->rst) |
7965 |
+ goto out; |
7966 |
+ |
7967 |
+ if (tcp_synq_no_recent_overflow(sk)) |
7968 |
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c |
7969 |
+index ffe0264a51b8c..344cdcd5a7d5c 100644 |
7970 |
+--- a/net/ipv4/sysctl_net_ipv4.c |
7971 |
++++ b/net/ipv4/sysctl_net_ipv4.c |
7972 |
+@@ -88,7 +88,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write, |
7973 |
+ * port limit. |
7974 |
+ */ |
7975 |
+ if ((range[1] < range[0]) || |
7976 |
+- (range[0] < net->ipv4.sysctl_ip_prot_sock)) |
7977 |
++ (range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock))) |
7978 |
+ ret = -EINVAL; |
7979 |
+ else |
7980 |
+ set_local_port_range(net, range); |
7981 |
+@@ -114,7 +114,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write, |
7982 |
+ .extra2 = &ip_privileged_port_max, |
7983 |
+ }; |
7984 |
+ |
7985 |
+- pports = net->ipv4.sysctl_ip_prot_sock; |
7986 |
++ pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock); |
7987 |
+ |
7988 |
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); |
7989 |
+ |
7990 |
+@@ -126,7 +126,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write, |
7991 |
+ if (range[0] < pports) |
7992 |
+ ret = -EINVAL; |
7993 |
+ else |
7994 |
+- net->ipv4.sysctl_ip_prot_sock = pports; |
7995 |
++ WRITE_ONCE(net->ipv4.sysctl_ip_prot_sock, pports); |
7996 |
+ } |
7997 |
+ |
7998 |
+ return ret; |
7999 |
+@@ -354,61 +354,6 @@ bad_key: |
8000 |
+ return ret; |
8001 |
+ } |
8002 |
+ |
8003 |
+-static void proc_configure_early_demux(int enabled, int protocol) |
8004 |
+-{ |
8005 |
+- struct net_protocol *ipprot; |
8006 |
+-#if IS_ENABLED(CONFIG_IPV6) |
8007 |
+- struct inet6_protocol *ip6prot; |
8008 |
+-#endif |
8009 |
+- |
8010 |
+- rcu_read_lock(); |
8011 |
+- |
8012 |
+- ipprot = rcu_dereference(inet_protos[protocol]); |
8013 |
+- if (ipprot) |
8014 |
+- ipprot->early_demux = enabled ? ipprot->early_demux_handler : |
8015 |
+- NULL; |
8016 |
+- |
8017 |
+-#if IS_ENABLED(CONFIG_IPV6) |
8018 |
+- ip6prot = rcu_dereference(inet6_protos[protocol]); |
8019 |
+- if (ip6prot) |
8020 |
+- ip6prot->early_demux = enabled ? ip6prot->early_demux_handler : |
8021 |
+- NULL; |
8022 |
+-#endif |
8023 |
+- rcu_read_unlock(); |
8024 |
+-} |
8025 |
+- |
8026 |
+-static int proc_tcp_early_demux(struct ctl_table *table, int write, |
8027 |
+- void *buffer, size_t *lenp, loff_t *ppos) |
8028 |
+-{ |
8029 |
+- int ret = 0; |
8030 |
+- |
8031 |
+- ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos); |
8032 |
+- |
8033 |
+- if (write && !ret) { |
8034 |
+- int enabled = init_net.ipv4.sysctl_tcp_early_demux; |
8035 |
+- |
8036 |
+- proc_configure_early_demux(enabled, IPPROTO_TCP); |
8037 |
+- } |
8038 |
+- |
8039 |
+- return ret; |
8040 |
+-} |
8041 |
+- |
8042 |
+-static int proc_udp_early_demux(struct ctl_table *table, int write, |
8043 |
+- void *buffer, size_t *lenp, loff_t *ppos) |
8044 |
+-{ |
8045 |
+- int ret = 0; |
8046 |
+- |
8047 |
+- ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos); |
8048 |
+- |
8049 |
+- if (write && !ret) { |
8050 |
+- int enabled = init_net.ipv4.sysctl_udp_early_demux; |
8051 |
+- |
8052 |
+- proc_configure_early_demux(enabled, IPPROTO_UDP); |
8053 |
+- } |
8054 |
+- |
8055 |
+- return ret; |
8056 |
+-} |
8057 |
+- |
8058 |
+ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, |
8059 |
+ int write, void *buffer, |
8060 |
+ size_t *lenp, loff_t *ppos) |
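
With the function pointers gone there is nothing left for the custom handlers deleted above to do; they only ever copied init_net's value into the global protocol structs, so they never worked per-namespace anyway. The next hunk switches both table entries to the stock proc_dou8vec_minmax, which simply stores the u8, and readers observe the new value through the READ_ONCE() in ip_rcv_finish_core(). For reference, a side-effect-free entry has this shape (mirroring the table below; kernel-internal types such as struct ctl_table, u8 and init_net are assumed):

static struct ctl_table demux_table[] = {
	{
		.procname	= "tcp_early_demux",
		.data		= &init_net.ipv4.sysctl_tcp_early_demux,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler	= proc_dou8vec_minmax,
	},
	{ }	/* sentinel */
};
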
8061 |
+@@ -711,14 +656,14 @@ static struct ctl_table ipv4_net_table[] = { |
8062 |
+ .data = &init_net.ipv4.sysctl_udp_early_demux, |
8063 |
+ .maxlen = sizeof(u8), |
8064 |
+ .mode = 0644, |
8065 |
+- .proc_handler = proc_udp_early_demux |
8066 |
++ .proc_handler = proc_dou8vec_minmax, |
8067 |
+ }, |
8068 |
+ { |
8069 |
+ .procname = "tcp_early_demux", |
8070 |
+ .data = &init_net.ipv4.sysctl_tcp_early_demux, |
8071 |
+ .maxlen = sizeof(u8), |
8072 |
+ .mode = 0644, |
8073 |
+- .proc_handler = proc_tcp_early_demux |
8074 |
++ .proc_handler = proc_dou8vec_minmax, |
8075 |
+ }, |
8076 |
+ { |
8077 |
+ .procname = "nexthop_compat_mode", |
8078 |
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index f2fd1779d9251..28db838e604ad 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -441,7 +441,7 @@ void tcp_init_sock(struct sock *sk)
+ tp->snd_cwnd_clamp = ~0;
+ tp->mss_cache = TCP_MSS_DEFAULT;
+
+- tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
++ tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
+ tcp_assign_congestion_control(sk);
+
+ tp->tsoffset = 0;
+@@ -1151,7 +1151,8 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+ struct sockaddr *uaddr = msg->msg_name;
+ int err, flags;
+
+- if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
++ if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
++ TFO_CLIENT_ENABLE) ||
+ (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+ uaddr->sa_family == AF_UNSPEC))
+ return -EOPNOTSUPP;
+@@ -3638,7 +3639,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ case TCP_FASTOPEN_CONNECT:
+ if (val > 1 || val < 0) {
+ err = -EINVAL;
+- } else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
++ } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
++ TFO_CLIENT_ENABLE) {
+ if (sk->sk_state == TCP_CLOSE)
+ tp->fastopen_connect = val;
+ else
+@@ -3988,12 +3990,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ val = keepalive_probes(tp);
+ break;
+ case TCP_SYNCNT:
+- val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
++ val = icsk->icsk_syn_retries ? :
++ READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
+ break;
+ case TCP_LINGER2:
+ val = tp->linger2;
+ if (val >= 0)
+- val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
++ val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
+ break;
+ case TCP_DEFER_ACCEPT:
+ val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index fdbcf2a6d08ef..825b216d11f52 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -332,7 +332,7 @@ static bool tcp_fastopen_no_cookie(const struct sock *sk,
+ const struct dst_entry *dst,
+ int flag)
+ {
+- return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
++ return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
+ tcp_sk(sk)->fastopen_no_cookie ||
+ (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
+ }
+@@ -347,7 +347,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+ const struct dst_entry *dst)
+ {
+ bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+- int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
++ int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
+ struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+ struct sock *child;
+ int ret = 0;
+@@ -489,7 +489,7 @@ void tcp_fastopen_active_disable(struct sock *sk)
+ {
+ struct net *net = sock_net(sk);
+
+- if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
+ return;
+
+ /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
+@@ -510,7 +510,8 @@ void tcp_fastopen_active_disable(struct sock *sk)
+ */
+ bool tcp_fastopen_active_should_disable(struct sock *sk)
+ {
+- unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
++ unsigned int tfo_bh_timeout =
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
+ unsigned long timeout;
+ int tfo_da_times;
+ int multiplier;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2d71bcfcc7592..19b186a4a8e8c 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1051,7 +1051,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
+ tp->undo_marker ? tp->undo_retrans : 0);
+ #endif
+ tp->reordering = min_t(u32, (metric + mss - 1) / mss,
+- sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
+ }
+
+ /* This exciting event is worth to be remembered. 8) */
+@@ -2030,7 +2030,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+ return;
+
+ tp->reordering = min_t(u32, tp->packets_out + addend,
+- sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
+ tp->reord_seen++;
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
+ }
+@@ -2095,7 +2095,8 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
+
+ static bool tcp_is_rack(const struct sock *sk)
+ {
+- return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
++ return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
++ TCP_RACK_LOSS_DETECTION;
+ }
+
+ /* If we detect SACK reneging, forget all SACK information
+@@ -2139,6 +2140,7 @@ void tcp_enter_loss(struct sock *sk)
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+ bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
++ u8 reordering;
+
+ tcp_timeout_mark_lost(sk);
+
+@@ -2159,10 +2161,12 @@ void tcp_enter_loss(struct sock *sk)
+ /* Timeout in disordered state after receiving substantial DUPACKs
+ * suggests that the degree of reordering is over-estimated.
+ */
++ reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
+ if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
+- tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
++ tp->sacked_out >= reordering)
+ tp->reordering = min_t(unsigned int, tp->reordering,
+- net->ipv4.sysctl_tcp_reordering);
++ reordering);
++
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ tp->high_seq = tp->snd_nxt;
+ tcp_ecn_queue_cwr(tp);
+@@ -3464,7 +3468,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+ * new SACK or ECE mark may first advance cwnd here and later reduce
+ * cwnd in tcp_fastretrans_alert() based on more states.
+ */
+- if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
++ if (tcp_sk(sk)->reordering >
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return flag & FLAG_DATA_ACKED;
+@@ -4056,7 +4061,7 @@ void tcp_parse_options(const struct net *net,
+ break;
+ case TCPOPT_WINDOW:
+ if (opsize == TCPOLEN_WINDOW && th->syn &&
+- !estab && net->ipv4.sysctl_tcp_window_scaling) {
++ !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
+ __u8 snd_wscale = *(__u8 *)ptr;
+ opt_rx->wscale_ok = 1;
+ if (snd_wscale > TCP_MAX_WSCALE) {
+@@ -4072,7 +4077,7 @@ void tcp_parse_options(const struct net *net,
+ case TCPOPT_TIMESTAMP:
+ if ((opsize == TCPOLEN_TIMESTAMP) &&
+ ((estab && opt_rx->tstamp_ok) ||
+- (!estab && net->ipv4.sysctl_tcp_timestamps))) {
++ (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
+ opt_rx->saw_tstamp = 1;
+ opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+ opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
+@@ -4080,7 +4085,7 @@ void tcp_parse_options(const struct net *net,
+ break;
+ case TCPOPT_SACK_PERM:
+ if (opsize == TCPOLEN_SACK_PERM && th->syn &&
+- !estab && net->ipv4.sysctl_tcp_sack) {
++ !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
+ opt_rx->sack_ok = TCP_SACK_SEEN;
+ tcp_sack_reset(opt_rx);
+ }
+@@ -5571,7 +5576,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 ptr = ntohs(th->urg_ptr);
+
+- if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
++ if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
+ ptr--;
+ ptr += ntohl(th->seq);
+
+@@ -6780,11 +6785,14 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
+ {
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ const char *msg = "Dropping request";
+- bool want_cookie = false;
+ struct net *net = sock_net(sk);
++ bool want_cookie = false;
++ u8 syncookies;
++
++ syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
+
+ #ifdef CONFIG_SYN_COOKIES
+- if (net->ipv4.sysctl_tcp_syncookies) {
++ if (syncookies) {
+ msg = "Sending cookies";
+ want_cookie = true;
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+@@ -6792,8 +6800,7 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
+ #endif
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+
+- if (!queue->synflood_warned &&
+- net->ipv4.sysctl_tcp_syncookies != 2 &&
++ if (!queue->synflood_warned && syncookies != 2 &&
+ xchg(&queue->synflood_warned, 1) == 0)
+ net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
+ proto, sk->sk_num, msg);
+@@ -6842,7 +6849,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
+ struct tcp_sock *tp = tcp_sk(sk);
+ u16 mss;
+
+- if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 &&
++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
+ !inet_csk_reqsk_queue_is_full(sk))
+ return 0;
+
+@@ -6876,13 +6883,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ bool want_cookie = false;
+ struct dst_entry *dst;
+ struct flowi fl;
++ u8 syncookies;
++
++ syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
+
+ /* TW buckets are converted to open requests without
+ * limitations, they conserve resources and peer is
+ * evidently real one.
+ */
+- if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
+- inet_csk_reqsk_queue_is_full(sk)) && !isn) {
++ if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+ want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
+ if (!want_cookie)
+ goto drop;
+@@ -6931,10 +6940,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
+
+ if (!want_cookie && !isn) {
++ int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
++
+ /* Kill the following clause, if you dislike this way. */
+- if (!net->ipv4.sysctl_tcp_syncookies &&
+- (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+- (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
++ if (!syncookies &&
++ (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
++ (max_syn_backlog >> 2)) &&
+ !tcp_peer_is_proven(req, dst)) {
+ /* Without syncookies last quarter of
+ * backlog is filled with destinations,
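Several hunks above (tcp_enter_loss(), tcp_syn_flood_action(), tcp_conn_request()) read a sysctl once into a local instead of dereferencing it at every use. A sketch of why, with hypothetical names:

static void example_clamp_reordering(struct net *net, struct tcp_sock *tp)
{
        /* one snapshot feeds both the test and the assignment */
        u8 reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);

        if (tp->sacked_out >= reordering)
                tp->reordering = min_t(unsigned int, tp->reordering,
                                       reordering);
}

If each use re-read the sysctl, a concurrent write from /proc could make the comparison and the clamp observe different values and disagree.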
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index cd78b4fc334f7..a57f96b868741 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -108,10 +108,10 @@ static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
+
+ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+ {
++ int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
+ const struct inet_timewait_sock *tw = inet_twsk(sktw);
+ const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
+ struct tcp_sock *tp = tcp_sk(sk);
+- int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
+
+ if (reuse == 2) {
+ /* Still does not detect *everything* that goes through
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 7029b0e98edb2..a501150deaa3b 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
+ if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val < tp->reordering &&
+- tp->reordering != net->ipv4.sysctl_tcp_reordering)
++ tp->reordering !=
++ READ_ONCE(net->ipv4.sysctl_tcp_reordering))
+ tcp_metric_set(tm, TCP_METRIC_REORDERING,
+ tp->reordering);
+ }
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 6854bb1fb32b2..cb95d88497aea 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -173,7 +173,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
+ * Oh well... nobody has a sufficient solution to this
+ * protocol bug yet.
+ */
+- if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
++ if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
+ kill:
+ inet_twsk_deschedule_put(tw);
+ return TCP_TW_SUCCESS;
+@@ -781,7 +781,7 @@ listen_overflow:
+ if (sk != req->rsk_listener)
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
+- if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
+ inet_rsk(req)->acked = 1;
+ return NULL;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 34249469e361f..3554a4c1e1b82 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -789,18 +789,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+ opts->mss = tcp_advertise_mss(sk);
+ remaining -= TCPOLEN_MSS_ALIGNED;
+
+- if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
++ if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
+ opts->options |= OPTION_TS;
+ opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
+ opts->tsecr = tp->rx_opt.ts_recent;
+ remaining -= TCPOLEN_TSTAMP_ALIGNED;
+ }
+- if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
++ if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
+ opts->ws = tp->rx_opt.rcv_wscale;
+ opts->options |= OPTION_WSCALE;
+ remaining -= TCPOLEN_WSCALE_ALIGNED;
+ }
+- if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
++ if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
+ opts->options |= OPTION_SACK_ADVERTISE;
+ if (unlikely(!(OPTION_TS & opts->options)))
+ remaining -= TCPOLEN_SACKPERM_ALIGNED;
+@@ -1717,7 +1717,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
++ mss_now = max(mss_now,
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
+ return mss_now;
+ }
+
+@@ -1760,10 +1761,10 @@ void tcp_mtup_init(struct sock *sk)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct net *net = sock_net(sk);
+
+- icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
++ icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
+ icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
+ icsk->icsk_af_ops->net_header_len;
+- icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
++ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
+ icsk->icsk_mtup.probe_size = 0;
+ if (icsk->icsk_mtup.enabled)
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
+@@ -1895,7 +1896,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
+ if (tp->packets_out > tp->snd_cwnd_used)
+ tp->snd_cwnd_used = tp->packets_out;
+
+- if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
+ (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+ !ca_ops->cong_control)
+ tcp_cwnd_application_limited(sk);
+@@ -2280,7 +2281,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
+ u32 interval;
+ s32 delta;
+
+- interval = net->ipv4.sysctl_tcp_probe_interval;
++ interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
+ delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
+ if (unlikely(delta >= interval * HZ)) {
+ int mss = tcp_current_mss(sk);
+@@ -2364,7 +2365,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ * probing process by not resetting search range to its orignal.
+ */
+ if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
+- interval < net->ipv4.sysctl_tcp_probe_threshold) {
++ interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
+ /* Check whether enough time has elaplased for
+ * another round of probing.
+ */
+@@ -2738,7 +2739,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
+ if (rcu_access_pointer(tp->fastopen_rsk))
+ return false;
+
+- early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
++ early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
+ /* Schedule a loss probe in 2*RTT for SACK capable connections
+ * not in loss recovery, that are either limited by cwnd or application.
+ */
+@@ -3102,7 +3103,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *skb = to, *tmp;
+ bool first = true;
+
+- if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
+ return;
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+ return;
+@@ -3644,7 +3645,7 @@ static void tcp_connect_init(struct sock *sk)
+ * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
+ */
+ tp->tcp_header_len = sizeof(struct tcphdr);
+- if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
+ tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
+
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -3680,7 +3681,7 @@ static void tcp_connect_init(struct sock *sk)
+ tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
+ &tp->rcv_wnd,
+ &tp->window_clamp,
+- sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
+ &rcv_wscale,
+ rcv_wnd);
+
+@@ -4087,7 +4088,7 @@ void tcp_send_probe0(struct sock *sk)
+
+ icsk->icsk_probes_out++;
+ if (err <= 0) {
+- if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
++ if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
+ icsk->icsk_backoff++;
+ timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
+ } else {
+diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
+index fd113f6226efc..ac14216f6204f 100644
+--- a/net/ipv4/tcp_recovery.c
++++ b/net/ipv4/tcp_recovery.c
+@@ -19,7 +19,8 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
+ return 0;
+
+ if (tp->sacked_out >= tp->reordering &&
+- !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
++ !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
++ TCP_RACK_NO_DUPTHRESH))
+ return 0;
+ }
+
+@@ -192,7 +193,8 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
++ if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
++ TCP_RACK_STATIC_REO_WND) ||
+ !rs->prior_delivered)
+ return;
+
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 20cf4a98c69d8..50bba370486e8 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -143,7 +143,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ */
+ static int tcp_orphan_retries(struct sock *sk, bool alive)
+ {
+- int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
++ int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
+
+ /* We know from an ICMP that something is wrong. */
+ if (sk->sk_err_soft && !alive)
+@@ -163,7 +163,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ int mss;
+
+ /* Black hole detection */
+- if (!net->ipv4.sysctl_tcp_mtu_probing)
++ if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
+ return;
+
+ if (!icsk->icsk_mtup.enabled) {
+@@ -171,9 +171,9 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
+ } else {
+ mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
+- mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
+- mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
+- mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
++ mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
++ mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
++ mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+ }
+ tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+@@ -239,17 +239,18 @@ static int tcp_write_timeout(struct sock *sk)
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ if (icsk->icsk_retransmits)
+ __dst_negative_advice(sk);
+- retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
++ retry_until = icsk->icsk_syn_retries ? :
++ READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
+ expired = icsk->icsk_retransmits >= retry_until;
+ } else {
+- if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
++ if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
+ /* Black hole detection */
+ tcp_mtu_probing(icsk, sk);
+
+ __dst_negative_advice(sk);
+ }
+
+- retry_until = net->ipv4.sysctl_tcp_retries2;
++ retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
+
+@@ -380,7 +381,7 @@ static void tcp_probe_timer(struct sock *sk)
+ msecs_to_jiffies(icsk->icsk_user_timeout))
+ goto abort;
+
+- max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
++ max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
+
+@@ -406,12 +407,15 @@ abort: tcp_write_err(sk);
+ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+- int max_retries = icsk->icsk_syn_retries ? :
+- sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+ struct tcp_sock *tp = tcp_sk(sk);
++ int max_retries;
+
+ req->rsk_ops->syn_ack_timeout(req);
+
++ /* add one more retry for fastopen */
++ max_retries = icsk->icsk_syn_retries ? :
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;
++
+ if (req->num_timeout >= max_retries) {
+ tcp_write_err(sk);
+ return;
+@@ -574,7 +578,7 @@ out_reset_timer:
+ * linear-timeout retransmissions into a black hole
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
+- (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
++ (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+@@ -585,7 +589,7 @@ out_reset_timer:
+ }
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
+- if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
++ if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
+ __sk_dst_reset(sk);
+
+ out:;
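tcp_timer.c also shows the per-socket-override idiom: a socket-local setting wins when non-zero, otherwise the sysctl default applies, via the GNU `a ?: b` form (which evaluates `a` only once). A small sketch with illustrative names:

static u8 example_syn_retries(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        /* per-socket value if set, else the (racy, hence READ_ONCE) default */
        return icsk->icsk_syn_retries ? :
               READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syn_retries);
}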
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 7d7b7523d1265..ef1e6545d8690 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -226,7 +226,7 @@ lookup_protocol:
+ RCU_INIT_POINTER(inet->mc_list, NULL);
+ inet->rcv_tos = 0;
+
+- if (net->ipv4.sysctl_ip_no_pmtu_disc)
++ if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
+ inet->pmtudisc = IP_PMTUDISC_DONT;
+ else
+ inet->pmtudisc = IP_PMTUDISC_WANT;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 5b5ea35635f9f..caba03e551ef6 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -45,20 +45,23 @@
+ #include <net/inet_ecn.h>
+ #include <net/dst_metadata.h>
+
+-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
+ static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+ {
+- void (*edemux)(struct sk_buff *skb);
+-
+- if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+- const struct inet6_protocol *ipprot;
+-
+- ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
+- if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
+- INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
+- udp_v6_early_demux, skb);
++ if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
++ !skb_dst(skb) && !skb->sk) {
++ switch (ipv6_hdr(skb)->nexthdr) {
++ case IPPROTO_TCP:
++ if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
++ tcp_v6_early_demux(skb);
++ break;
++ case IPPROTO_UDP:
++ if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
++ udp_v6_early_demux(skb);
++ break;
++ }
+ }
++
+ if (!skb_valid_dst(skb))
+ ip6_route_input(skb);
+ }
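The ip6_input.c hunk replaces a call through a writable early_demux function pointer with a direct switch on the next-header value; the sysctls become plain u8 toggles checked per packet. Rough shape of the result, with a made-up wrapper name:

static void example_early_demux(struct net *net, struct sk_buff *skb, u8 proto)
{
        switch (proto) {
        case IPPROTO_TCP:
                if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
                        tcp_v6_early_demux(skb);
                break;
        case IPPROTO_UDP:
                if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
                        udp_v6_early_demux(skb);
                break;
        }
}

This is what lets the tcpv6_protocol and udpv6_protocol structs become const in the later hunks: nothing rewrites their members at runtime anymore.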
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 9cc123f000fbc..5014aa6634527 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -141,7 +141,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ __u8 rcv_wscale;
+ u32 tsoff = 0;
+
+- if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
++ !th->ack || th->rst)
+ goto out;
+
+ if (tcp_synq_no_recent_overflow(sk))
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index cbc5fff3d8466..5185c11dc4447 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1822,7 +1822,7 @@ do_time_wait:
+ goto discard_it;
+ }
+
+-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
++void tcp_v6_early_demux(struct sk_buff *skb)
+ {
+ const struct ipv6hdr *hdr;
+ const struct tcphdr *th;
+@@ -2176,12 +2176,7 @@ struct proto tcpv6_prot = {
+ };
+ EXPORT_SYMBOL_GPL(tcpv6_prot);
+
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct inet6_protocol tcpv6_protocol = {
+- .early_demux = tcp_v6_early_demux,
+- .early_demux_handler = tcp_v6_early_demux,
++static const struct inet6_protocol tcpv6_protocol = {
+ .handler = tcp_v6_rcv,
+ .err_handler = tcp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index a535c3f2e4af4..aea28bf701be4 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1052,7 +1052,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
+ return NULL;
+ }
+
+-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
++void udp_v6_early_demux(struct sk_buff *skb)
+ {
+ struct net *net = dev_net(skb->dev);
+ const struct udphdr *uh;
+@@ -1660,12 +1660,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
+ return ipv6_getsockopt(sk, level, optname, optval, optlen);
+ }
+
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct inet6_protocol udpv6_protocol = {
+- .early_demux = udp_v6_early_demux,
+- .early_demux_handler = udp_v6_early_demux,
++static const struct inet6_protocol udpv6_protocol = {
+ .handler = udpv6_rcv,
+ .err_handler = udpv6_err,
+ .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
+index e479dd0561c54..16915f8eef2b1 100644
+--- a/net/netfilter/nf_synproxy_core.c
++++ b/net/netfilter/nf_synproxy_core.c
+@@ -405,7 +405,7 @@ synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
+ iph->tos = 0;
+ iph->id = 0;
+ iph->frag_off = htons(IP_DF);
+- iph->ttl = net->ipv4.sysctl_ip_default_ttl;
++ iph->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
+ iph->protocol = IPPROTO_TCP;
+ iph->check = 0;
+ iph->saddr = saddr;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 2d4dc1468a9a5..6fd33c75d6bb2 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -3531,7 +3531,7 @@ int tc_setup_action(struct flow_action *flow_action,
+ struct tc_action *actions[],
+ struct netlink_ext_ack *extack)
+ {
+- int i, j, index, err = 0;
++ int i, j, k, index, err = 0;
+ struct tc_action *act;
+
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
+@@ -3551,14 +3551,18 @@ int tc_setup_action(struct flow_action *flow_action,
+ if (err)
+ goto err_out_locked;
+
+- entry->hw_stats = tc_act_hw_stats(act->hw_stats);
+- entry->hw_index = act->tcfa_index;
+ index = 0;
+ err = tc_setup_offload_act(act, entry, &index, extack);
+- if (!err)
+- j += index;
+- else
++ if (err)
+ goto err_out_locked;
++
++ for (k = 0; k < index ; k++) {
++ entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
++ entry[k].hw_index = act->tcfa_index;
++ }
++
++ j += index;
++
+ spin_unlock_bh(&act->tcfa_lock);
+ }
+
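The cls_api.c hunk fixes a fan-out bug: one tc action can expand into several flow_action entries, and the per-action metadata was stamped only on the first one. A self-contained analogue with hypothetical types:

struct slot {
        int stats;
        int id;
};

/* stamp shared metadata on every slot one source item expanded into */
static void stamp_all(struct slot *dst, int n, int stats, int id)
{
        int k;

        for (k = 0; k < n; k++) {       /* previously only dst[0] was set */
                dst[k].stats = stats;
                dst[k].id = id;
        }
}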
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 35928fefae332..1a094b087d88b 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -358,7 +358,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
+ if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
+ ret != RTN_LOCAL &&
+ !sp->inet.freebind &&
+- !net->ipv4.sysctl_ip_nonlocal_bind)
++ !READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind))
+ return 0;
+
+ if (ipv6_only_sock(sctp_opt2sk(sp)))
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index c4d057b2941d5..0bde36b564727 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -2122,7 +2122,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
+ init_waitqueue_head(&lgr->llc_flow_waiter);
+ init_waitqueue_head(&lgr->llc_msg_waiter);
+ mutex_init(&lgr->llc_conf_mutex);
+- lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
++ lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
+ }
+
+ /* called after lgr was removed from lgr_list */
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 3a61bb5945441..9c3933781ad47 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -97,13 +97,16 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tls_device_lock, flags);
++ if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
++ goto unlock;
++
+ list_move_tail(&ctx->list, &tls_device_gc_list);
+
+ /* schedule_work inside the spinlock
+ * to make sure tls_device_down waits for that work.
+ */
+ schedule_work(&tls_device_gc_work);
+-
++unlock:
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+ }
+
+@@ -194,8 +197,7 @@ void tls_device_sk_destruct(struct sock *sk)
+ clean_acked_data_disable(inet_csk(sk));
+ }
+
+- if (refcount_dec_and_test(&tls_ctx->refcount))
+- tls_device_queue_ctx_destruction(tls_ctx);
++ tls_device_queue_ctx_destruction(tls_ctx);
+ }
+ EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
+
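In tls_device.c the final reference drop moves inside tls_device_lock, so "refcount hit zero" and "queued for destruction" become one atomic step with respect to tls_device_down(). A simplified sketch of the shape, with hypothetical names:

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_gc_list);
static struct work_struct example_gc_work;      /* INIT_WORK()ed elsewhere */

struct example_ctx {
        struct list_head list;
        refcount_t refcount;
};

static void example_queue_destruction(struct example_ctx *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        if (!refcount_dec_and_test(&ctx->refcount))
                goto unlock;                    /* not the last reference */

        list_move_tail(&ctx->list, &example_gc_list);
        schedule_work(&example_gc_work);        /* still under the lock */
unlock:
        spin_unlock_irqrestore(&example_lock, flags);
}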
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index f1876ea61fdce..f1a0bab920a55 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2678,8 +2678,10 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+ *num_xfrms = 0;
+ return 0;
+ }
+- if (IS_ERR(pols[0]))
++ if (IS_ERR(pols[0])) {
++ *num_pols = 0;
+ return PTR_ERR(pols[0]);
++ }
+
+ *num_xfrms = pols[0]->xfrm_nr;
+
+@@ -2694,6 +2696,7 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+ if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ xfrm_pols_put(pols, *num_pols);
++ *num_pols = 0;
+ return PTR_ERR(pols[1]);
+ }
+ (*num_pols)++;
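The xfrm_expand_policies() hunks zero *num_pols on each error return. A minimal illustration of the contract, with hypothetical names: the out-parameter counts references the caller will later drop, so it must not be left stale when none were actually taken.

static int example_lookup(struct example_policy **pols, int *num_pols)
{
        pols[0] = example_policy_get();         /* hypothetical helper */
        if (IS_ERR(pols[0])) {
                *num_pols = 0;  /* caller's cleanup must not put pols[0] */
                return PTR_ERR(pols[0]);
        }

        *num_pols = 1;
        return 0;
}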
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index b749935152ba5..b4ce16a934a28 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2620,7 +2620,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
+ int err;
+
+ if (family == AF_INET &&
+- xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
++ READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
+ x->props.flags |= XFRM_STATE_NOPMTUDISC;
+
+ err = -EPROTONOSUPPORT;
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index eea6e92500b8e..50ebad1e3abb1 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -2181,6 +2181,10 @@ bool ima_appraise_signature(enum kernel_read_file_id id)
+ if (id >= READING_MAX_ID)
+ return false;
+
++ if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE)
++ && security_locked_down(LOCKDOWN_KEXEC))
++ return false;
++
+ func = read_idmap[id] ?: FILE_CHECK;
+
+ rcu_read_lock();
+diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
+index 88d23924e1bf2..87313145d14f9 100644
+--- a/sound/soc/sof/intel/hda-loader.c
++++ b/sound/soc/sof/intel/hda-loader.c
+@@ -397,7 +397,8 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
+ struct firmware stripped_firmware;
+ int ret, ret1, i;
+
+- if ((sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT) &&
++ if ((sdev->system_suspend_target < SOF_SUSPEND_S4) &&
++ (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT) &&
+ !(sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT)) &&
+ !sdev->first_boot) {
+ dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index 1c319582ca6f0..76351f7f3243d 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -23,6 +23,9 @@ static u32 snd_sof_dsp_power_target(struct snd_sof_dev *sdev)
+ u32 target_dsp_state;
+
+ switch (sdev->system_suspend_target) {
++ case SOF_SUSPEND_S5:
++ case SOF_SUSPEND_S4:
++ /* DSP should be in D3 if the system is suspending to S3+ */
+ case SOF_SUSPEND_S3:
+ /* DSP should be in D3 if the system is suspending to S3 */
+ target_dsp_state = SOF_DSP_PM_D3;
+@@ -327,8 +330,24 @@ int snd_sof_prepare(struct device *dev)
+ return 0;
+
+ #if defined(CONFIG_ACPI)
+- if (acpi_target_system_state() == ACPI_STATE_S0)
++ switch (acpi_target_system_state()) {
++ case ACPI_STATE_S0:
+ sdev->system_suspend_target = SOF_SUSPEND_S0IX;
++ break;
++ case ACPI_STATE_S1:
++ case ACPI_STATE_S2:
++ case ACPI_STATE_S3:
++ sdev->system_suspend_target = SOF_SUSPEND_S3;
++ break;
++ case ACPI_STATE_S4:
++ sdev->system_suspend_target = SOF_SUSPEND_S4;
++ break;
++ case ACPI_STATE_S5:
++ sdev->system_suspend_target = SOF_SUSPEND_S5;
++ break;
++ default:
++ break;
++ }
+ #endif
+
+ return 0;
+diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
+index 0d9b640ae24cd..c856f0d84e495 100644
+--- a/sound/soc/sof/sof-priv.h
++++ b/sound/soc/sof/sof-priv.h
+@@ -85,6 +85,8 @@ enum sof_system_suspend_state {
+ SOF_SUSPEND_NONE = 0,
+ SOF_SUSPEND_S0IX,
+ SOF_SUSPEND_S3,
++ SOF_SUSPEND_S4,
++ SOF_SUSPEND_S5,
+ };
+
+ enum sof_dfsentry_type {
+diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
+index 4ad0dfbc8b21f..7c7d20fc503ad 100644
+--- a/tools/perf/tests/perf-time-to-tsc.c
++++ b/tools/perf/tests/perf-time-to-tsc.c
+@@ -20,8 +20,6 @@
+ #include "tsc.h"
+ #include "mmap.h"
+ #include "tests.h"
+-#include "pmu.h"
+-#include "pmu-hybrid.h"
+
+ /*
+ * Except x86_64/i386 and Arm64, other archs don't support TSC in perf. Just
+@@ -106,28 +104,21 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
+
+ evlist__config(evlist, &opts, NULL);
+
+- evsel = evlist__first(evlist);
+-
+- evsel->core.attr.comm = 1;
+- evsel->core.attr.disabled = 1;
+- evsel->core.attr.enable_on_exec = 0;
+-
+- /*
+- * For hybrid "cycles:u", it creates two events.
+- * Init the second evsel here.
+- */
+- if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
+- evsel = evsel__next(evsel);
++ /* For hybrid "cycles:u", it creates two events */
++ evlist__for_each_entry(evlist, evsel) {
+ evsel->core.attr.comm = 1;
+ evsel->core.attr.disabled = 1;
+ evsel->core.attr.enable_on_exec = 0;
+ }
+
+- if (evlist__open(evlist) == -ENOENT) {
+- err = TEST_SKIP;
++ ret = evlist__open(evlist);
++ if (ret < 0) {
++ if (ret == -ENOENT)
++ err = TEST_SKIP;
++ else
++ pr_debug("evlist__open() failed\n");
+ goto out_err;
+ }
+- CHECK__(evlist__open(evlist));
+
+ CHECK__(evlist__mmap(evlist, UINT_MAX));
+
+@@ -167,10 +158,12 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
+ goto next_event;
+
+ if (strcmp(event->comm.comm, comm1) == 0) {
++ CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
+ CHECK__(evsel__parse_sample(evsel, event, &sample));
+ comm1_time = sample.time;
+ }
+ if (strcmp(event->comm.comm, comm2) == 0) {
++ CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
+ CHECK__(evsel__parse_sample(evsel, event, &sample));
+ comm2_time = sample.time;
+ }
+diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
+index 71b3066023685..616ed40196554 100644
+--- a/tools/testing/selftests/gpio/Makefile
++++ b/tools/testing/selftests/gpio/Makefile
+@@ -3,6 +3,6 @@
+ TEST_PROGS := gpio-mockup.sh gpio-sim.sh
+ TEST_FILES := gpio-mockup-sysfs.sh
+ TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
+-CFLAGS += -O2 -g -Wall -I../../../../usr/include/
++CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
+
+ include ../lib.mk
+diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
+index 4158da0da2bba..2237d1aac8014 100644
+--- a/tools/testing/selftests/kvm/rseq_test.c
++++ b/tools/testing/selftests/kvm/rseq_test.c
+@@ -82,8 +82,9 @@ static int next_cpu(int cpu)
+ return cpu;
+ }
+
+-static void *migration_worker(void *ign)
++static void *migration_worker(void *__rseq_tid)
+ {
++ pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
+ cpu_set_t allowed_mask;
+ int r, i, cpu;
+
+@@ -106,7 +107,7 @@ static void *migration_worker(void *ign)
+ * stable, i.e. while changing affinity is in-progress.
+ */
+ smp_wmb();
+- r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
++ r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
+ TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
+ errno, strerror(errno));
+ smp_wmb();
+@@ -231,7 +232,8 @@ int main(int argc, char *argv[])
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ ucall_init(vm, NULL);
+
+- pthread_create(&migration_thread, NULL, migration_worker, 0);
++ pthread_create(&migration_thread, NULL, migration_worker,
++ (void *)(unsigned long)gettid());
+
+ for (i = 0; !done; i++) {
+ vcpu_run(vm, VCPU_ID);
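The rseq_test.c change exists because sched_setaffinity() treats pid 0 as "the calling thread": the migration worker was migrating itself instead of the thread under test. A standalone userspace sketch of pinning a specific thread by TID (the helper name is illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

/* pin the thread identified by tid (e.g. from gettid()) to one CPU */
static int pin_thread(pid_t tid, int cpu)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        return sched_setaffinity(tid, sizeof(mask), &mask);
}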
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 5ab12214e18dd..24cb37d19c638 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4299,8 +4299,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+ kvm_put_kvm_no_destroy(kvm);
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
++ if (ops->release)
++ ops->release(dev);
+ mutex_unlock(&kvm->lock);
+- ops->destroy(dev);
++ if (ops->destroy)
++ ops->destroy(dev);
+ return ret;
+ }
+
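The kvm_main.c hunk makes both teardown hooks optional on the failure path and calls ->release() while kvm->lock is still held. A generic sketch of the pattern with hypothetical types:

struct example_dev;

struct example_dev_ops {
        void (*release)(struct example_dev *d); /* must run under the owner lock */
        void (*destroy)(struct example_dev *d); /* must run without the lock */
};

struct example_dev {
        struct list_head node;
        const struct example_dev_ops *ops;
};

static void example_teardown(struct mutex *lock, struct example_dev *d)
{
        mutex_lock(lock);
        list_del(&d->node);
        if (d->ops->release)
                d->ops->release(d);     /* lock-requiring hook, lock held */
        mutex_unlock(lock);
        if (d->ops->destroy)
                d->ops->destroy(d);     /* lock-free hook, lock dropped */
}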