1 |
commit: 2d4b45c54778aa120dd8467beb7e9a3c42005258 |
2 |
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
3 |
AuthorDate: Mon Jun 7 11:22:42 2021 +0000 |
4 |
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
5 |
CommitDate: Mon Jun 7 11:22:42 2021 +0000 |
6 |
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2d4b45c5 |
7 |
|
8 |
Upgrade wireguard patch to v1.0.20210606 |
9 |
|
10 |
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> |
11 |
|
12 |
0000_README | 2 +- |
13 |
... => 2400_wireguard-backport-v1.0.20210606.patch | 497 +++++++++++---------- |
14 |
2 files changed, 271 insertions(+), 228 deletions(-) |
15 |
|
16 |
diff --git a/0000_README b/0000_README |
17 |
index f6d1278..fbcce52 100644 |
18 |
--- a/0000_README |
19 |
+++ b/0000_README |
20 |
@@ -551,7 +551,7 @@ Patch: 2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch |
21 |
From: https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@××××××××.org/raw |
22 |
Desc: Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758 |
23 |
|
24 |
-Patch: 2400_wireguard-backport-v1.0.20210424.patch |
25 |
+Patch: 2400_wireguard-backport-v1.0.20210606.patch |
26 |
From: https://git.zx2c4.com/wireguard-linux/ |
27 |
Desc: Extremely simple yet fast and modern VPN that utilizes state-of-the-art cryptography |
28 |
|
29 |
|
30 |
diff --git a/2400_wireguard-backport-v1.0.20210424.patch b/2400_wireguard-backport-v1.0.20210606.patch |
31 |
similarity index 99% |
32 |
rename from 2400_wireguard-backport-v1.0.20210424.patch |
33 |
rename to 2400_wireguard-backport-v1.0.20210606.patch |
34 |
index 34d7aa5..a5b7b80 100755 |
35 |
--- a/2400_wireguard-backport-v1.0.20210424.patch |
36 |
+++ b/2400_wireguard-backport-v1.0.20210606.patch |
37 |
@@ -3380,7 +3380,7 @@ exit 0 |
38 |
- u32 u[5]; |
39 |
- /* ... silently appended r^3 and r^4 when using AVX2 */ |
40 |
+asmlinkage void poly1305_init_x86_64(void *ctx, |
41 |
-+ const u8 key[POLY1305_KEY_SIZE]); |
42 |
++ const u8 key[POLY1305_BLOCK_SIZE]); |
43 |
+asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, |
44 |
+ const size_t len, const u32 padbit); |
45 |
+asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], |
46 |
@@ -3462,7 +3462,7 @@ exit 0 |
47 |
+} |
48 |
|
49 |
- return crypto_poly1305_init(desc); |
50 |
-+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE]) |
51 |
++static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE]) |
52 |
+{ |
53 |
+ poly1305_init_x86_64(ctx, key); |
54 |
} |
55 |
@@ -3523,7 +3523,7 @@ exit 0 |
56 |
|
57 |
- BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base)); |
58 |
- sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base); |
59 |
-+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) |
60 |
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) |
61 |
+{ |
62 |
+ poly1305_simd_init(&dctx->h, key); |
63 |
+ dctx->s[0] = get_unaligned_le32(&key[16]); |
64 |
@@ -4104,7 +4104,7 @@ exit 0 |
65 |
.digestsize = POLY1305_DIGEST_SIZE, |
66 |
--- b/include/crypto/internal/poly1305.h |
67 |
+++ b/include/crypto/internal/poly1305.h |
68 |
-@@ -0,0 +1,33 @@ |
69 |
+@@ -0,0 +1,34 @@ |
70 |
+/* SPDX-License-Identifier: GPL-2.0 */ |
71 |
+/* |
72 |
+ * Common values for the Poly1305 algorithm |
73 |
@@ -4125,7 +4125,8 @@ exit 0 |
74 |
+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed. |
75 |
+ */ |
76 |
+ |
77 |
-+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); |
78 |
++void poly1305_core_setkey(struct poly1305_core_key *key, |
79 |
++ const u8 raw_key[POLY1305_BLOCK_SIZE]); |
80 |
+static inline void poly1305_core_init(struct poly1305_state *state) |
81 |
+{ |
82 |
+ *state = (struct poly1305_state){}; |
83 |
@@ -4140,7 +4141,7 @@ exit 0 |
84 |
+#endif |
85 |
--- b/include/crypto/poly1305.h |
86 |
+++ b/include/crypto/poly1305.h |
87 |
-@@ -14,51 +14,84 @@ |
88 |
+@@ -14,51 +14,86 @@ |
89 |
#define POLY1305_DIGEST_SIZE 16 |
90 |
|
91 |
+/* The poly1305_key and poly1305_state types are mostly opaque and |
92 |
@@ -4206,8 +4207,10 @@ exit 0 |
93 |
- */ |
94 |
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); |
95 |
-static inline void poly1305_core_init(struct poly1305_state *state) |
96 |
-+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); |
97 |
-+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key); |
98 |
++void poly1305_init_arch(struct poly1305_desc_ctx *desc, |
99 |
++ const u8 key[POLY1305_KEY_SIZE]); |
100 |
++void poly1305_init_generic(struct poly1305_desc_ctx *desc, |
101 |
++ const u8 key[POLY1305_KEY_SIZE]); |
102 |
+ |
103 |
+static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) |
104 |
+{ |
105 |
@@ -4258,7 +4261,7 @@ exit 0 |
106 |
#endif |
107 |
--- b/lib/crypto/poly1305.c |
108 |
+++ b/lib/crypto/poly1305.c |
109 |
-@@ -0,0 +1,77 @@ |
110 |
+@@ -0,0 +1,78 @@ |
111 |
+// SPDX-License-Identifier: GPL-2.0-or-later |
112 |
+/* |
113 |
+ * Poly1305 authenticator algorithm, RFC7539 |
114 |
@@ -4273,7 +4276,8 @@ exit 0 |
115 |
+#include <linux/module.h> |
116 |
+#include <asm/unaligned.h> |
117 |
+ |
118 |
-+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) |
119 |
++void poly1305_init_generic(struct poly1305_desc_ctx *desc, |
120 |
++ const u8 key[POLY1305_KEY_SIZE]) |
121 |
+{ |
122 |
+ poly1305_core_setkey(&desc->core_r, key); |
123 |
+ desc->s[0] = get_unaligned_le32(key + 16); |
124 |
@@ -6150,7 +6154,7 @@ exit 0 |
125 |
+ |
126 |
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); |
127 |
+ |
128 |
-+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) |
129 |
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) |
130 |
+{ |
131 |
+ poly1305_init_arm64(&dctx->h, key); |
132 |
+ dctx->s[0] = get_unaligned_le32(key + 16); |
133 |
@@ -8788,7 +8792,7 @@ exit 0 |
134 |
+ |
135 |
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); |
136 |
+ |
137 |
-+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) |
138 |
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) |
139 |
+{ |
140 |
+ poly1305_init_arm(&dctx->h, key); |
141 |
+ dctx->s[0] = get_unaligned_le32(key + 16); |
142 |
@@ -9052,7 +9056,7 @@ exit 0 |
143 |
+asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); |
144 |
+asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); |
145 |
+ |
146 |
-+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) |
147 |
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) |
148 |
+{ |
149 |
+ poly1305_init_mips(&dctx->h, key); |
150 |
+ dctx->s[0] = get_unaligned_le32(key + 16); |
151 |
@@ -30620,9 +30624,9 @@ exit 0 |
152 |
u32 nh_key[NH_KEY_WORDS]; |
153 |
}; |
154 |
|
155 |
---- /dev/null |
156 |
+--- b/lib/crypto/poly1305-donna32.c |
157 |
+++ b/lib/crypto/poly1305-donna32.c |
158 |
-@@ -0,0 +1,204 @@ |
159 |
+@@ -0,0 +1,205 @@ |
160 |
+// SPDX-License-Identifier: GPL-2.0 OR MIT |
161 |
+/* |
162 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
163 |
@@ -30635,7 +30639,8 @@ exit 0 |
164 |
+#include <asm/unaligned.h> |
165 |
+#include <crypto/internal/poly1305.h> |
166 |
+ |
167 |
-+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) |
168 |
++void poly1305_core_setkey(struct poly1305_core_key *key, |
169 |
++ const u8 raw_key[POLY1305_BLOCK_SIZE]) |
170 |
+{ |
171 |
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ |
172 |
+ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff; |
173 |
@@ -30827,9 +30832,9 @@ exit 0 |
174 |
+ put_unaligned_le32(h3, &mac[12]); |
175 |
+} |
176 |
+EXPORT_SYMBOL(poly1305_core_emit); |
177 |
---- /dev/null |
178 |
+--- b/lib/crypto/poly1305-donna64.c |
179 |
+++ b/lib/crypto/poly1305-donna64.c |
180 |
-@@ -0,0 +1,185 @@ |
181 |
+@@ -0,0 +1,186 @@ |
182 |
+// SPDX-License-Identifier: GPL-2.0 OR MIT |
183 |
+/* |
184 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
185 |
@@ -30844,7 +30849,8 @@ exit 0 |
186 |
+ |
187 |
+typedef __uint128_t u128; |
188 |
+ |
189 |
-+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) |
190 |
++void poly1305_core_setkey(struct poly1305_core_key *key, |
191 |
++ const u8 raw_key[POLY1305_BLOCK_SIZE]) |
192 |
+{ |
193 |
+ u64 t0, t1; |
194 |
+ |
195 |
@@ -35909,7 +35915,7 @@ exit 0 |
196 |
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@×××××.com>"); |
197 |
--- a/arch/x86/Makefile |
198 |
+++ b/arch/x86/Makefile |
199 |
-@@ -197,9 +197,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) |
200 |
+@@ -198,9 +198,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) |
201 |
avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) |
202 |
sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1) |
203 |
sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) |
204 |
@@ -36513,11 +36519,10 @@ exit 0 |
205 |
obj-$(CONFIG_EQUALIZER) += eql.o |
206 |
obj-$(CONFIG_IFB) += ifb.o |
207 |
obj-$(CONFIG_MACSEC) += macsec.o |
208 |
---- /dev/null |
209 |
+--- b/drivers/net/wireguard/Makefile |
210 |
+++ b/drivers/net/wireguard/Makefile |
211 |
-@@ -0,0 +1,18 @@ |
212 |
-+ccflags-y := -O3 |
213 |
-+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' |
214 |
+@@ -0,0 +1,17 @@ |
215 |
++ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' |
216 |
+ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG |
217 |
+wireguard-y := main.o |
218 |
+wireguard-y += noise.o |
219 |
@@ -36536,7 +36541,7 @@ exit 0 |
220 |
+obj-$(CONFIG_WIREGUARD) := wireguard.o |
221 |
--- b/drivers/net/wireguard/allowedips.c |
222 |
+++ b/drivers/net/wireguard/allowedips.c |
223 |
-@@ -0,0 +1,377 @@ |
224 |
+@@ -0,0 +1,386 @@ |
225 |
+// SPDX-License-Identifier: GPL-2.0 |
226 |
+/* |
227 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
228 |
@@ -36545,6 +36550,8 @@ exit 0 |
229 |
+#include "allowedips.h" |
230 |
+#include "peer.h" |
231 |
+ |
232 |
++static struct kmem_cache *node_cache; |
233 |
++ |
234 |
+static void swap_endian(u8 *dst, const u8 *src, u8 bits) |
235 |
+{ |
236 |
+ if (bits == 32) { |
237 |
@@ -36567,8 +36574,11 @@ exit 0 |
238 |
+ node->bitlen = bits; |
239 |
+ memcpy(node->bits, src, bits / 8U); |
240 |
+} |
241 |
-+#define CHOOSE_NODE(parent, key) \ |
242 |
-+ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] |
243 |
++ |
244 |
++static inline u8 choose(struct allowedips_node *node, const u8 *key) |
245 |
++{ |
246 |
++ return (key[node->bit_at_a] >> node->bit_at_b) & 1; |
247 |
++} |
248 |
+ |
249 |
+static void push_rcu(struct allowedips_node **stack, |
250 |
+ struct allowedips_node __rcu *p, unsigned int *len) |
251 |
@@ -36579,6 +36589,11 @@ exit 0 |
252 |
+ } |
253 |
+} |
254 |
+ |
255 |
++static void node_free_rcu(struct rcu_head *rcu) |
256 |
++{ |
257 |
++ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); |
258 |
++} |
259 |
++ |
260 |
+static void root_free_rcu(struct rcu_head *rcu) |
261 |
+{ |
262 |
+ struct allowedips_node *node, *stack[128] = { |
263 |
@@ -36588,7 +36603,7 @@ exit 0 |
264 |
+ while (len > 0 && (node = stack[--len])) { |
265 |
+ push_rcu(stack, node->bit[0], &len); |
266 |
+ push_rcu(stack, node->bit[1], &len); |
267 |
-+ kfree(node); |
268 |
++ kmem_cache_free(node_cache, node); |
269 |
+ } |
270 |
+} |
271 |
+ |
272 |
@@ -36605,60 +36620,6 @@ exit 0 |
273 |
+ } |
274 |
+} |
275 |
+ |
276 |
-+static void walk_remove_by_peer(struct allowedips_node __rcu **top, |
277 |
-+ struct wg_peer *peer, struct mutex *lock) |
278 |
-+{ |
279 |
-+#define REF(p) rcu_access_pointer(p) |
280 |
-+#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) |
281 |
-+#define PUSH(p) ({ \ |
282 |
-+ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ |
283 |
-+ stack[len++] = p; \ |
284 |
-+ }) |
285 |
-+ |
286 |
-+ struct allowedips_node __rcu **stack[128], **nptr; |
287 |
-+ struct allowedips_node *node, *prev; |
288 |
-+ unsigned int len; |
289 |
-+ |
290 |
-+ if (unlikely(!peer || !REF(*top))) |
291 |
-+ return; |
292 |
-+ |
293 |
-+ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { |
294 |
-+ nptr = stack[len - 1]; |
295 |
-+ node = DEREF(nptr); |
296 |
-+ if (!node) { |
297 |
-+ --len; |
298 |
-+ continue; |
299 |
-+ } |
300 |
-+ if (!prev || REF(prev->bit[0]) == node || |
301 |
-+ REF(prev->bit[1]) == node) { |
302 |
-+ if (REF(node->bit[0])) |
303 |
-+ PUSH(&node->bit[0]); |
304 |
-+ else if (REF(node->bit[1])) |
305 |
-+ PUSH(&node->bit[1]); |
306 |
-+ } else if (REF(node->bit[0]) == prev) { |
307 |
-+ if (REF(node->bit[1])) |
308 |
-+ PUSH(&node->bit[1]); |
309 |
-+ } else { |
310 |
-+ if (rcu_dereference_protected(node->peer, |
311 |
-+ lockdep_is_held(lock)) == peer) { |
312 |
-+ RCU_INIT_POINTER(node->peer, NULL); |
313 |
-+ list_del_init(&node->peer_list); |
314 |
-+ if (!node->bit[0] || !node->bit[1]) { |
315 |
-+ rcu_assign_pointer(*nptr, DEREF( |
316 |
-+ &node->bit[!REF(node->bit[0])])); |
317 |
-+ kfree_rcu(node, rcu); |
318 |
-+ node = DEREF(nptr); |
319 |
-+ } |
320 |
-+ } |
321 |
-+ --len; |
322 |
-+ } |
323 |
-+ } |
324 |
-+ |
325 |
-+#undef REF |
326 |
-+#undef DEREF |
327 |
-+#undef PUSH |
328 |
-+} |
329 |
-+ |
330 |
+static unsigned int fls128(u64 a, u64 b) |
331 |
+{ |
332 |
+ return a ? fls64(a) + 64U : fls64(b); |
333 |
@@ -36698,7 +36659,7 @@ exit 0 |
334 |
+ found = node; |
335 |
+ if (node->cidr == bits) |
336 |
+ break; |
337 |
-+ node = rcu_dereference_bh(CHOOSE_NODE(node, key)); |
338 |
++ node = rcu_dereference_bh(node->bit[choose(node, key)]); |
339 |
+ } |
340 |
+ return found; |
341 |
+} |
342 |
@@ -36730,8 +36691,7 @@ exit 0 |
343 |
+ u8 cidr, u8 bits, struct allowedips_node **rnode, |
344 |
+ struct mutex *lock) |
345 |
+{ |
346 |
-+ struct allowedips_node *node = rcu_dereference_protected(trie, |
347 |
-+ lockdep_is_held(lock)); |
348 |
++ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); |
349 |
+ struct allowedips_node *parent = NULL; |
350 |
+ bool exact = false; |
351 |
+ |
352 |
@@ -36741,13 +36701,24 @@ exit 0 |
353 |
+ exact = true; |
354 |
+ break; |
355 |
+ } |
356 |
-+ node = rcu_dereference_protected(CHOOSE_NODE(parent, key), |
357 |
-+ lockdep_is_held(lock)); |
358 |
++ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); |
359 |
+ } |
360 |
+ *rnode = parent; |
361 |
+ return exact; |
362 |
+} |
363 |
+ |
364 |
++static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) |
365 |
++{ |
366 |
++ node->parent_bit_packed = (unsigned long)parent | bit; |
367 |
++ rcu_assign_pointer(*parent, node); |
368 |
++} |
369 |
++ |
370 |
++static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) |
371 |
++{ |
372 |
++ u8 bit = choose(parent, node->bits); |
373 |
++ connect_node(&parent->bit[bit], bit, node); |
374 |
++} |
375 |
++ |
376 |
+static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
377 |
+ u8 cidr, struct wg_peer *peer, struct mutex *lock) |
378 |
+{ |
379 |
@@ -36757,13 +36728,13 @@ exit 0 |
380 |
+ return -EINVAL; |
381 |
+ |
382 |
+ if (!rcu_access_pointer(*trie)) { |
383 |
-+ node = kzalloc(sizeof(*node), GFP_KERNEL); |
384 |
++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
385 |
+ if (unlikely(!node)) |
386 |
+ return -ENOMEM; |
387 |
+ RCU_INIT_POINTER(node->peer, peer); |
388 |
+ list_add_tail(&node->peer_list, &peer->allowedips_list); |
389 |
+ copy_and_assign_cidr(node, key, cidr, bits); |
390 |
-+ rcu_assign_pointer(*trie, node); |
391 |
++ connect_node(trie, 2, node); |
392 |
+ return 0; |
393 |
+ } |
394 |
+ if (node_placement(*trie, key, cidr, bits, &node, lock)) { |
395 |
@@ -36772,7 +36743,7 @@ exit 0 |
396 |
+ return 0; |
397 |
+ } |
398 |
+ |
399 |
-+ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); |
400 |
++ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
401 |
+ if (unlikely(!newnode)) |
402 |
+ return -ENOMEM; |
403 |
+ RCU_INIT_POINTER(newnode->peer, peer); |
404 |
@@ -36782,10 +36753,10 @@ exit 0 |
405 |
+ if (!node) { |
406 |
+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); |
407 |
+ } else { |
408 |
-+ down = rcu_dereference_protected(CHOOSE_NODE(node, key), |
409 |
-+ lockdep_is_held(lock)); |
410 |
++ const u8 bit = choose(node, key); |
411 |
++ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); |
412 |
+ if (!down) { |
413 |
-+ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); |
414 |
++ connect_node(&node->bit[bit], bit, newnode); |
415 |
+ return 0; |
416 |
+ } |
417 |
+ } |
418 |
@@ -36793,30 +36764,29 @@ exit 0 |
419 |
+ parent = node; |
420 |
+ |
421 |
+ if (newnode->cidr == cidr) { |
422 |
-+ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); |
423 |
++ choose_and_connect_node(newnode, down); |
424 |
+ if (!parent) |
425 |
-+ rcu_assign_pointer(*trie, newnode); |
426 |
++ connect_node(trie, 2, newnode); |
427 |
+ else |
428 |
-+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), |
429 |
-+ newnode); |
430 |
-+ } else { |
431 |
-+ node = kzalloc(sizeof(*node), GFP_KERNEL); |
432 |
-+ if (unlikely(!node)) { |
433 |
-+ list_del(&newnode->peer_list); |
434 |
-+ kfree(newnode); |
435 |
-+ return -ENOMEM; |
436 |
-+ } |
437 |
-+ INIT_LIST_HEAD(&node->peer_list); |
438 |
-+ copy_and_assign_cidr(node, newnode->bits, cidr, bits); |
439 |
++ choose_and_connect_node(parent, newnode); |
440 |
++ return 0; |
441 |
++ } |
442 |
+ |
443 |
-+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); |
444 |
-+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); |
445 |
-+ if (!parent) |
446 |
-+ rcu_assign_pointer(*trie, node); |
447 |
-+ else |
448 |
-+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), |
449 |
-+ node); |
450 |
++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
451 |
++ if (unlikely(!node)) { |
452 |
++ list_del(&newnode->peer_list); |
453 |
++ kmem_cache_free(node_cache, newnode); |
454 |
++ return -ENOMEM; |
455 |
+ } |
456 |
++ INIT_LIST_HEAD(&node->peer_list); |
457 |
++ copy_and_assign_cidr(node, newnode->bits, cidr, bits); |
458 |
++ |
459 |
++ choose_and_connect_node(node, down); |
460 |
++ choose_and_connect_node(node, newnode); |
461 |
++ if (!parent) |
462 |
++ connect_node(trie, 2, node); |
463 |
++ else |
464 |
++ choose_and_connect_node(parent, node); |
465 |
+ return 0; |
466 |
+} |
467 |
+ |
468 |
@@ -36874,9 +36844,41 @@ exit 0 |
469 |
+void wg_allowedips_remove_by_peer(struct allowedips *table, |
470 |
+ struct wg_peer *peer, struct mutex *lock) |
471 |
+{ |
472 |
++ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; |
473 |
++ bool free_parent; |
474 |
++ |
475 |
++ if (list_empty(&peer->allowedips_list)) |
476 |
++ return; |
477 |
+ ++table->seq; |
478 |
-+ walk_remove_by_peer(&table->root4, peer, lock); |
479 |
-+ walk_remove_by_peer(&table->root6, peer, lock); |
480 |
++ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { |
481 |
++ list_del_init(&node->peer_list); |
482 |
++ RCU_INIT_POINTER(node->peer, NULL); |
483 |
++ if (node->bit[0] && node->bit[1]) |
484 |
++ continue; |
485 |
++ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], |
486 |
++ lockdep_is_held(lock)); |
487 |
++ if (child) |
488 |
++ child->parent_bit_packed = node->parent_bit_packed; |
489 |
++ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); |
490 |
++ *parent_bit = child; |
491 |
++ parent = (void *)parent_bit - |
492 |
++ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); |
493 |
++ free_parent = !rcu_access_pointer(node->bit[0]) && |
494 |
++ !rcu_access_pointer(node->bit[1]) && |
495 |
++ (node->parent_bit_packed & 3) <= 1 && |
496 |
++ !rcu_access_pointer(parent->peer); |
497 |
++ if (free_parent) |
498 |
++ child = rcu_dereference_protected( |
499 |
++ parent->bit[!(node->parent_bit_packed & 1)], |
500 |
++ lockdep_is_held(lock)); |
501 |
++ call_rcu(&node->rcu, node_free_rcu); |
502 |
++ if (!free_parent) |
503 |
++ continue; |
504 |
++ if (child) |
505 |
++ child->parent_bit_packed = parent->parent_bit_packed; |
506 |
++ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; |
507 |
++ call_rcu(&parent->rcu, node_free_rcu); |
508 |
++ } |
509 |
+} |
510 |
+ |
511 |
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) |
512 |
@@ -36913,8 +36915,20 @@ exit 0 |
513 |
+ return NULL; |
514 |
+} |
515 |
+ |
516 |
++int __init wg_allowedips_slab_init(void) |
517 |
++{ |
518 |
++ node_cache = KMEM_CACHE(allowedips_node, 0); |
519 |
++ return node_cache ? 0 : -ENOMEM; |
520 |
++} |
521 |
++ |
522 |
++void wg_allowedips_slab_uninit(void) |
523 |
++{ |
524 |
++ rcu_barrier(); |
525 |
++ kmem_cache_destroy(node_cache); |
526 |
++} |
527 |
++ |
528 |
+#include "selftest/allowedips.c" |
529 |
---- /dev/null |
530 |
+--- b/drivers/net/wireguard/allowedips.h |
531 |
+++ b/drivers/net/wireguard/allowedips.h |
532 |
@@ -0,0 +1,59 @@ |
533 |
+/* SPDX-License-Identifier: GPL-2.0 */ |
534 |
@@ -36934,14 +36948,11 @@ exit 0 |
535 |
+struct allowedips_node { |
536 |
+ struct wg_peer __rcu *peer; |
537 |
+ struct allowedips_node __rcu *bit[2]; |
538 |
-+ /* While it may seem scandalous that we waste space for v4, |
539 |
-+ * we're alloc'ing to the nearest power of 2 anyway, so this |
540 |
-+ * doesn't actually make a difference. |
541 |
-+ */ |
542 |
-+ u8 bits[16] __aligned(__alignof(u64)); |
543 |
+ u8 cidr, bit_at_a, bit_at_b, bitlen; |
544 |
++ u8 bits[16] __aligned(__alignof(u64)); |
545 |
+ |
546 |
-+ /* Keep rarely used list at bottom to be beyond cache line. */ |
547 |
++ /* Keep rarely used members at bottom to be beyond cache line. */ |
548 |
++ unsigned long parent_bit_packed; |
549 |
+ union { |
550 |
+ struct list_head peer_list; |
551 |
+ struct rcu_head rcu; |
552 |
@@ -36952,7 +36963,7 @@ exit 0 |
553 |
+ struct allowedips_node __rcu *root4; |
554 |
+ struct allowedips_node __rcu *root6; |
555 |
+ u64 seq; |
556 |
-+}; |
557 |
++} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */ |
558 |
+ |
559 |
+void wg_allowedips_init(struct allowedips *table); |
560 |
+void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); |
561 |
@@ -36975,6 +36986,9 @@ exit 0 |
562 |
+bool wg_allowedips_selftest(void); |
563 |
+#endif |
564 |
+ |
565 |
++int wg_allowedips_slab_init(void); |
566 |
++void wg_allowedips_slab_uninit(void); |
567 |
++ |
568 |
+#endif /* _WG_ALLOWEDIPS_H */ |
569 |
--- /dev/null |
570 |
+++ b/drivers/net/wireguard/cookie.c |
571 |
@@ -37807,7 +37821,7 @@ exit 0 |
572 |
+#endif /* _WG_DEVICE_H */ |
573 |
--- b/drivers/net/wireguard/main.c |
574 |
+++ b/drivers/net/wireguard/main.c |
575 |
-@@ -0,0 +1,63 @@ |
576 |
+@@ -0,0 +1,78 @@ |
577 |
+// SPDX-License-Identifier: GPL-2.0 |
578 |
+/* |
579 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
580 |
@@ -37831,13 +37845,22 @@ exit 0 |
581 |
+{ |
582 |
+ int ret; |
583 |
+ |
584 |
++ ret = wg_allowedips_slab_init(); |
585 |
++ if (ret < 0) |
586 |
++ goto err_allowedips; |
587 |
++ |
588 |
+#ifdef DEBUG |
589 |
++ ret = -ENOTRECOVERABLE; |
590 |
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || |
591 |
+ !wg_ratelimiter_selftest()) |
592 |
-+ return -ENOTRECOVERABLE; |
593 |
++ goto err_peer; |
594 |
+#endif |
595 |
+ wg_noise_init(); |
596 |
+ |
597 |
++ ret = wg_peer_init(); |
598 |
++ if (ret < 0) |
599 |
++ goto err_peer; |
600 |
++ |
601 |
+ ret = wg_device_init(); |
602 |
+ if (ret < 0) |
603 |
+ goto err_device; |
604 |
@@ -37854,6 +37877,10 @@ exit 0 |
605 |
+err_netlink: |
606 |
+ wg_device_uninit(); |
607 |
+err_device: |
608 |
++ wg_peer_uninit(); |
609 |
++err_peer: |
610 |
++ wg_allowedips_slab_uninit(); |
611 |
++err_allowedips: |
612 |
+ return ret; |
613 |
+} |
614 |
+ |
615 |
@@ -37861,6 +37888,8 @@ exit 0 |
616 |
+{ |
617 |
+ wg_genetlink_uninit(); |
618 |
+ wg_device_uninit(); |
619 |
++ wg_peer_uninit(); |
620 |
++ wg_allowedips_slab_uninit(); |
621 |
+} |
622 |
+ |
623 |
+module_init(mod_init); |
624 |
@@ -39637,7 +39666,7 @@ exit 0 |
625 |
+#endif /* _WG_NOISE_H */ |
626 |
--- b/drivers/net/wireguard/peer.c |
627 |
+++ b/drivers/net/wireguard/peer.c |
628 |
-@@ -0,0 +1,227 @@ |
629 |
+@@ -0,0 +1,240 @@ |
630 |
+// SPDX-License-Identifier: GPL-2.0 |
631 |
+/* |
632 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
633 |
@@ -39655,6 +39684,7 @@ exit 0 |
634 |
+#include <linux/rcupdate.h> |
635 |
+#include <linux/list.h> |
636 |
+ |
637 |
++static struct kmem_cache *peer_cache; |
638 |
+static atomic64_t peer_counter = ATOMIC64_INIT(0); |
639 |
+ |
640 |
+struct wg_peer *wg_peer_create(struct wg_device *wg, |
641 |
@@ -39669,10 +39699,10 @@ exit 0 |
642 |
+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE) |
643 |
+ return ERR_PTR(ret); |
644 |
+ |
645 |
-+ peer = kzalloc(sizeof(*peer), GFP_KERNEL); |
646 |
++ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); |
647 |
+ if (unlikely(!peer)) |
648 |
+ return ERR_PTR(ret); |
649 |
-+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) |
650 |
++ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) |
651 |
+ goto err; |
652 |
+ |
653 |
+ peer->device = wg; |
654 |
@@ -39704,7 +39734,7 @@ exit 0 |
655 |
+ return peer; |
656 |
+ |
657 |
+err: |
658 |
-+ kfree(peer); |
659 |
++ kmem_cache_free(peer_cache, peer); |
660 |
+ return ERR_PTR(ret); |
661 |
+} |
662 |
+ |
663 |
@@ -39728,7 +39758,7 @@ exit 0 |
664 |
+ /* Mark as dead, so that we don't allow jumping contexts after. */ |
665 |
+ WRITE_ONCE(peer->is_dead, true); |
666 |
+ |
667 |
-+ /* The caller must now synchronize_rcu() for this to take effect. */ |
668 |
++ /* The caller must now synchronize_net() for this to take effect. */ |
669 |
+} |
670 |
+ |
671 |
+static void peer_remove_after_dead(struct wg_peer *peer) |
672 |
@@ -39800,7 +39830,7 @@ exit 0 |
673 |
+ lockdep_assert_held(&peer->device->device_update_lock); |
674 |
+ |
675 |
+ peer_make_dead(peer); |
676 |
-+ synchronize_rcu(); |
677 |
++ synchronize_net(); |
678 |
+ peer_remove_after_dead(peer); |
679 |
+} |
680 |
+ |
681 |
@@ -39818,7 +39848,7 @@ exit 0 |
682 |
+ peer_make_dead(peer); |
683 |
+ list_add_tail(&peer->peer_list, &dead_peers); |
684 |
+ } |
685 |
-+ synchronize_rcu(); |
686 |
++ synchronize_net(); |
687 |
+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) |
688 |
+ peer_remove_after_dead(peer); |
689 |
+} |
690 |
@@ -39833,7 +39863,8 @@ exit 0 |
691 |
+ /* The final zeroing takes care of clearing any remaining handshake key |
692 |
+ * material and other potentially sensitive information. |
693 |
+ */ |
694 |
-+ kzfree(peer); |
695 |
++ memzero_explicit(peer, sizeof(*peer)); |
696 |
++ kmem_cache_free(peer_cache, peer); |
697 |
+} |
698 |
+ |
699 |
+static void kref_release(struct kref *refcount) |
700 |
@@ -39865,9 +39896,20 @@ exit 0 |
701 |
+ return; |
702 |
+ kref_put(&peer->refcount, kref_release); |
703 |
+} |
704 |
++ |
705 |
++int __init wg_peer_init(void) |
706 |
++{ |
707 |
++ peer_cache = KMEM_CACHE(wg_peer, 0); |
708 |
++ return peer_cache ? 0 : -ENOMEM; |
709 |
++} |
710 |
++ |
711 |
++void wg_peer_uninit(void) |
712 |
++{ |
713 |
++ kmem_cache_destroy(peer_cache); |
714 |
++} |
715 |
--- b/drivers/net/wireguard/peer.h |
716 |
+++ b/drivers/net/wireguard/peer.h |
717 |
-@@ -0,0 +1,83 @@ |
718 |
+@@ -0,0 +1,86 @@ |
719 |
+/* SPDX-License-Identifier: GPL-2.0 */ |
720 |
+/* |
721 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
722 |
@@ -39950,6 +39992,9 @@ exit 0 |
723 |
+void wg_peer_remove(struct wg_peer *peer); |
724 |
+void wg_peer_remove_all(struct wg_device *wg); |
725 |
+ |
726 |
++int wg_peer_init(void); |
727 |
++void wg_peer_uninit(void); |
728 |
++ |
729 |
+#endif /* _WG_PEER_H */ |
730 |
--- b/drivers/net/wireguard/peerlookup.c |
731 |
+++ b/drivers/net/wireguard/peerlookup.c |
732 |
@@ -41411,9 +41456,9 @@ exit 0 |
733 |
+err: |
734 |
+ dev_kfree_skb(skb); |
735 |
+} |
736 |
---- /dev/null |
737 |
+--- b/drivers/net/wireguard/selftest/allowedips.c |
738 |
+++ b/drivers/net/wireguard/selftest/allowedips.c |
739 |
-@@ -0,0 +1,683 @@ |
740 |
+@@ -0,0 +1,676 @@ |
741 |
+// SPDX-License-Identifier: GPL-2.0 |
742 |
+/* |
743 |
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved. |
744 |
@@ -41435,32 +41480,22 @@ exit 0 |
745 |
+ |
746 |
+#include <linux/siphash.h> |
747 |
+ |
748 |
-+static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, |
749 |
-+ u8 cidr) |
750 |
-+{ |
751 |
-+ swap_endian(dst, src, bits); |
752 |
-+ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); |
753 |
-+ if (cidr) |
754 |
-+ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); |
755 |
-+} |
756 |
-+ |
757 |
+static __init void print_node(struct allowedips_node *node, u8 bits) |
758 |
+{ |
759 |
+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; |
760 |
-+ char *fmt_declaration = KERN_DEBUG |
761 |
-+ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; |
762 |
++ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; |
763 |
++ u8 ip1[16], ip2[16], cidr1, cidr2; |
764 |
+ char *style = "dotted"; |
765 |
-+ u8 ip1[16], ip2[16]; |
766 |
+ u32 color = 0; |
767 |
+ |
768 |
++ if (node == NULL) |
769 |
++ return; |
770 |
+ if (bits == 32) { |
771 |
+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; |
772 |
-+ fmt_declaration = KERN_DEBUG |
773 |
-+ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; |
774 |
++ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; |
775 |
+ } else if (bits == 128) { |
776 |
+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; |
777 |
-+ fmt_declaration = KERN_DEBUG |
778 |
-+ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; |
779 |
++ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; |
780 |
+ } |
781 |
+ if (node->peer) { |
782 |
+ hsiphash_key_t key = { { 0 } }; |
783 |
@@ -41471,24 +41506,20 @@ exit 0 |
784 |
+ hsiphash_1u32(0xabad1dea, &key) % 200; |
785 |
+ style = "bold"; |
786 |
+ } |
787 |
-+ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); |
788 |
-+ printk(fmt_declaration, ip1, node->cidr, style, color); |
789 |
++ wg_allowedips_read_node(node, ip1, &cidr1); |
790 |
++ printk(fmt_declaration, ip1, cidr1, style, color); |
791 |
+ if (node->bit[0]) { |
792 |
-+ swap_endian_and_apply_cidr(ip2, |
793 |
-+ rcu_dereference_raw(node->bit[0])->bits, bits, |
794 |
-+ node->cidr); |
795 |
-+ printk(fmt_connection, ip1, node->cidr, ip2, |
796 |
-+ rcu_dereference_raw(node->bit[0])->cidr); |
797 |
-+ print_node(rcu_dereference_raw(node->bit[0]), bits); |
798 |
++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); |
799 |
++ printk(fmt_connection, ip1, cidr1, ip2, cidr2); |
800 |
+ } |
801 |
+ if (node->bit[1]) { |
802 |
-+ swap_endian_and_apply_cidr(ip2, |
803 |
-+ rcu_dereference_raw(node->bit[1])->bits, |
804 |
-+ bits, node->cidr); |
805 |
-+ printk(fmt_connection, ip1, node->cidr, ip2, |
806 |
-+ rcu_dereference_raw(node->bit[1])->cidr); |
807 |
-+ print_node(rcu_dereference_raw(node->bit[1]), bits); |
808 |
++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); |
809 |
++ printk(fmt_connection, ip1, cidr1, ip2, cidr2); |
810 |
+ } |
811 |
++ if (node->bit[0]) |
812 |
++ print_node(rcu_dereference_raw(node->bit[0]), bits); |
813 |
++ if (node->bit[1]) |
814 |
++ print_node(rcu_dereference_raw(node->bit[1]), bits); |
815 |
+} |
816 |
+ |
817 |
+static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) |
818 |
@@ -41537,8 +41568,8 @@ exit 0 |
819 |
+{ |
820 |
+ union nf_inet_addr mask; |
821 |
+ |
822 |
-+ memset(&mask, 0x00, 128 / 8); |
823 |
-+ memset(&mask, 0xff, cidr / 8); |
824 |
++ memset(&mask, 0, sizeof(mask)); |
825 |
++ memset(&mask.all, 0xff, cidr / 8); |
826 |
+ if (cidr % 32) |
827 |
+ mask.all[cidr / 32] = (__force u32)htonl( |
828 |
+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); |
829 |
@@ -41565,42 +41596,36 @@ exit 0 |
830 |
+} |
831 |
+ |
832 |
+static __init inline bool |
833 |
-+horrible_match_v4(const struct horrible_allowedips_node *node, |
834 |
-+ struct in_addr *ip) |
835 |
++horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) |
836 |
+{ |
837 |
+ return (ip->s_addr & node->mask.ip) == node->ip.ip; |
838 |
+} |
839 |
+ |
840 |
+static __init inline bool |
841 |
-+horrible_match_v6(const struct horrible_allowedips_node *node, |
842 |
-+ struct in6_addr *ip) |
843 |
-+{ |
844 |
-+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == |
845 |
-+ node->ip.ip6[0] && |
846 |
-+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == |
847 |
-+ node->ip.ip6[1] && |
848 |
-+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == |
849 |
-+ node->ip.ip6[2] && |
850 |
++horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) |
851 |
++{ |
852 |
++ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && |
853 |
++ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && |
854 |
++ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && |
855 |
+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; |
856 |
+} |
857 |
+ |
858 |
+static __init void |
859 |
-+horrible_insert_ordered(struct horrible_allowedips *table, |
860 |
-+ struct horrible_allowedips_node *node) |
861 |
++horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) |
862 |
+{ |
863 |
+ struct horrible_allowedips_node *other = NULL, *where = NULL; |
864 |
+ u8 my_cidr = horrible_mask_to_cidr(node->mask); |
865 |
+ |
866 |
+ hlist_for_each_entry(other, &table->head, table) { |
867 |
-+ if (!memcmp(&other->mask, &node->mask, |
868 |
-+ sizeof(union nf_inet_addr)) && |
869 |
-+ !memcmp(&other->ip, &node->ip, |
870 |
-+ sizeof(union nf_inet_addr)) && |
871 |
-+ other->ip_version == node->ip_version) { |
872 |
++ if (other->ip_version == node->ip_version && |
873 |
++ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && |
874 |
++ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { |
875 |
+ other->value = node->value; |
876 |
+ kfree(node); |
877 |
+ return; |
878 |
+ } |
879 |
++ } |
880 |
++ hlist_for_each_entry(other, &table->head, table) { |
881 |
+ where = other; |
882 |
+ if (horrible_mask_to_cidr(other->mask) <= my_cidr) |
883 |
+ break; |
884 |
@@ -41617,8 +41642,7 @@ exit 0 |
885 |
+horrible_allowedips_insert_v4(struct horrible_allowedips *table, |
886 |
+ struct in_addr *ip, u8 cidr, void *value) |
887 |
+{ |
888 |
-+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), |
889 |
-+ GFP_KERNEL); |
890 |
++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); |
891 |
+ |
892 |
+ if (unlikely(!node)) |
893 |
+ return -ENOMEM; |
894 |
@@ -41635,8 +41659,7 @@ exit 0 |
895 |
+horrible_allowedips_insert_v6(struct horrible_allowedips *table, |
896 |
+ struct in6_addr *ip, u8 cidr, void *value) |
897 |
+{ |
898 |
-+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), |
899 |
-+ GFP_KERNEL); |
900 |
++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); |
901 |
+ |
902 |
+ if (unlikely(!node)) |
903 |
+ return -ENOMEM; |
904 |
@@ -41650,39 +41673,43 @@ exit 0 |
905 |
+} |
906 |
+ |
907 |
+static __init void * |
908 |
-+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, |
909 |
-+ struct in_addr *ip) |
910 |
++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) |
911 |
+{ |
912 |
+ struct horrible_allowedips_node *node; |
913 |
-+ void *ret = NULL; |
914 |
+ |
915 |
+ hlist_for_each_entry(node, &table->head, table) { |
916 |
-+ if (node->ip_version != 4) |
917 |
-+ continue; |
918 |
-+ if (horrible_match_v4(node, ip)) { |
919 |
-+ ret = node->value; |
920 |
-+ break; |
921 |
-+ } |
922 |
++ if (node->ip_version == 4 && horrible_match_v4(node, ip)) |
923 |
++ return node->value; |
924 |
+ } |
925 |
-+ return ret; |
926 |
++ return NULL; |
927 |
+} |
928 |
+ |
929 |
+static __init void * |
930 |
-+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, |
931 |
-+ struct in6_addr *ip) |
932 |
++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) |
933 |
+{ |
934 |
+ struct horrible_allowedips_node *node; |
935 |
-+ void *ret = NULL; |
936 |
+ |
937 |
+ hlist_for_each_entry(node, &table->head, table) { |
938 |
-+ if (node->ip_version != 6) |
939 |
++ if (node->ip_version == 6 && horrible_match_v6(node, ip)) |
940 |
++ return node->value; |
941 |
++ } |
942 |
++ return NULL; |
943 |
++} |
944 |
++ |
945 |
++ |
946 |
++static __init void |
947 |
++horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) |
948 |
++{ |
949 |
++ struct horrible_allowedips_node *node; |
950 |
++ struct hlist_node *h; |
951 |
++ |
952 |
++ hlist_for_each_entry_safe(node, h, &table->head, table) { |
953 |
++ if (node->value != value) |
954 |
+ continue; |
955 |
-+ if (horrible_match_v6(node, ip)) { |
956 |
-+ ret = node->value; |
957 |
-+ break; |
958 |
-+ } |
959 |
++ hlist_del(&node->table); |
960 |
++ kfree(node); |
961 |
+ } |
962 |
-+ return ret; |
963 |
++ |
964 |
+} |
965 |
+ |
966 |
+static __init bool randomized_test(void) |
967 |
@@ -41712,6 +41739,7 @@ exit 0 |
968 |
+ goto free; |
969 |
+ } |
970 |
+ kref_init(&peers[i]->refcount); |
971 |
++ INIT_LIST_HEAD(&peers[i]->allowedips_list); |
972 |
+ } |
973 |
+ |
974 |
+ mutex_lock(&mutex); |
975 |
@@ -41749,7 +41777,7 @@ exit 0 |
976 |
+ if (wg_allowedips_insert_v4(&t, |
977 |
+ (struct in_addr *)mutated, |
978 |
+ cidr, peer, &mutex) < 0) { |
979 |
-+ pr_err("allowedips random malloc: FAIL\n"); |
980 |
++ pr_err("allowedips random self-test malloc: FAIL\n"); |
981 |
+ goto free_locked; |
982 |
+ } |
983 |
+ if (horrible_allowedips_insert_v4(&h, |
984 |
@@ -41812,23 +41840,33 @@ exit 0 |
985 |
+ print_tree(t.root6, 128); |
986 |
+ } |
987 |
+ |
988 |
-+ for (i = 0; i < NUM_QUERIES; ++i) { |
989 |
-+ prandom_bytes(ip, 4); |
990 |
-+ if (lookup(t.root4, 32, ip) != |
991 |
-+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { |
992 |
-+ pr_err("allowedips random self-test: FAIL\n"); |
993 |
-+ goto free; |
994 |
++ for (j = 0;; ++j) { |
995 |
++ for (i = 0; i < NUM_QUERIES; ++i) { |
996 |
++ prandom_bytes(ip, 4); |
997 |
++ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { |
998 |
++ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); |
999 |
++ pr_err("allowedips random v4 self-test: FAIL\n"); |
1000 |
++ goto free; |
1001 |
++ } |
1002 |
++ prandom_bytes(ip, 16); |
1003 |
++ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { |
1004 |
++ pr_err("allowedips random v6 self-test: FAIL\n"); |
1005 |
++ goto free; |
1006 |
++ } |
1007 |
+ } |
1008 |
++ if (j >= NUM_PEERS) |
1009 |
++ break; |
1010 |
++ mutex_lock(&mutex); |
1011 |
++ wg_allowedips_remove_by_peer(&t, peers[j], &mutex); |
1012 |
++ mutex_unlock(&mutex); |
1013 |
++ horrible_allowedips_remove_by_value(&h, peers[j]); |
1014 |
+ } |
1015 |
+ |
1016 |
-+ for (i = 0; i < NUM_QUERIES; ++i) { |
1017 |
-+ prandom_bytes(ip, 16); |
1018 |
-+ if (lookup(t.root6, 128, ip) != |
1019 |
-+ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { |
1020 |
-+ pr_err("allowedips random self-test: FAIL\n"); |
1021 |
-+ goto free; |
1022 |
-+ } |
1023 |
++ if (t.root4 || t.root6) { |
1024 |
++ pr_err("allowedips random self-test removal: FAIL\n"); |
1025 |
++ goto free; |
1026 |
+ } |
1027 |
++ |
1028 |
+ ret = true; |
1029 |
+ |
1030 |
+free: |
1031 |
@@ -43291,7 +43329,7 @@ exit 0 |
1032 |
+ if (new4) |
1033 |
+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); |
1034 |
+ mutex_unlock(&wg->socket_update_lock); |
1035 |
-+ synchronize_rcu(); |
1036 |
++ synchronize_net(); |
1037 |
+ sock_free(old4); |
1038 |
+ sock_free(old6); |
1039 |
+} |
1040 |
@@ -43827,7 +43865,7 @@ exit 0 |
1041 |
+#endif /* _WG_UAPI_WIREGUARD_H */ |
1042 |
--- b/tools/testing/selftests/wireguard/netns.sh |
1043 |
+++ b/tools/testing/selftests/wireguard/netns.sh |
1044 |
-@@ -0,0 +1,635 @@ |
1045 |
+@@ -0,0 +1,636 @@ |
1046 |
+#!/bin/bash |
1047 |
+# SPDX-License-Identifier: GPL-2.0 |
1048 |
+# |
1049 |
@@ -44193,6 +44231,7 @@ exit 0 |
1050 |
+ip1 -4 route add default dev wg0 table 51820 |
1051 |
+ip1 -4 rule add not fwmark 51820 table 51820 |
1052 |
+ip1 -4 rule add table main suppress_prefixlength 0 |
1053 |
++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter' |
1054 |
+# Flood the pings instead of sending just one, to trigger routing table reference counting bugs. |
1055 |
+n1 ping -W 1 -c 100 -f 192.168.99.7 |
1056 |
+n1 ping -W 1 -c 100 -f abab::1111 |
1057 |
@@ -45370,7 +45409,7 @@ exit 0 |
1058 |
+} |
1059 |
--- b/tools/testing/selftests/wireguard/qemu/kernel.config |
1060 |
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config |
1061 |
-@@ -0,0 +1,90 @@ |
1062 |
+@@ -0,0 +1,89 @@ |
1063 |
+CONFIG_LOCALVERSION="" |
1064 |
+CONFIG_NET=y |
1065 |
+CONFIG_NETDEVICES=y |
1066 |
@@ -45392,7 +45431,6 @@ exit 0 |
1067 |
+CONFIG_NETFILTER_XT_NAT=y |
1068 |
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y |
1069 |
+CONFIG_NETFILTER_XT_MARK=y |
1070 |
-+CONFIG_NF_CONNTRACK_IPV4=y |
1071 |
+CONFIG_NF_NAT_IPV4=y |
1072 |
+CONFIG_IP_NF_IPTABLES=y |
1073 |
+CONFIG_IP_NF_FILTER=y |
1074 |
@@ -45497,3 +45535,8 @@ exit 0 |
1075 |
+ |
1076 |
+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; |
1077 |
+EXPORT_SYMBOL(ip_tunnel_header_ops); |
1078 |
+--- /dev/null |
1079 |
++++ b/arch/mips/crypto/.gitignore |
1080 |
+@@ -0,0 +1,2 @@ |
1081 |
++# SPDX-License-Identifier: GPL-2.0-only |
1082 |
++poly1305-core.S |