
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Thu, 26 Apr 2018 10:22:27
Message-Id: 1524738134.3da94fc1c80fd1720445ecdcc890ccd938c7dd75.mpagano@gentoo

commit: 3da94fc1c80fd1720445ecdcc890ccd938c7dd75
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 26 10:22:14 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 26 10:22:14 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3da94fc1

Linux patch 4.16.5

0000_README | 4 +
1004_linux-4.16.5.patch | 995 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 999 insertions(+)

diff --git a/0000_README b/0000_README
index c127441..344c387 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.16.4.patch
From: http://www.kernel.org
Desc: Linux 4.16.4

+Patch: 1004_linux-4.16.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.16.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.16.5.patch b/1004_linux-4.16.5.patch
new file mode 100644
index 0000000..4a84bff
--- /dev/null
+++ b/1004_linux-4.16.5.patch
@@ -0,0 +1,995 @@
+diff --git a/Makefile b/Makefile
+index d51175192ac1..6678a90f355b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 5ee33a6e33bb..9bf2a1a4bd22 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+ apic_id = processor->local_apic_id;
+ enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
+
++ /* Ignore invalid ID */
++ if (apic_id == 0xffffffff)
++ return 0;
++
+ /*
+ * We need to register disabled CPU as well to permit
+ * counting disabled CPUs. This allows us to size
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index fb4302738410..3615c0f255e9 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
+ hpet2 -= hpet1;
+ tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
+ do_div(tmp, 1000000);
+- do_div(deltatsc, tmp);
++ deltatsc = div64_u64(deltatsc, tmp);
+
+ return (unsigned long) deltatsc;
+ }
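
A note on the tsc.c hunk above: the kernel's do_div() takes a 32-bit divisor, so a divisor wider than 32 bits is silently truncated and the quotient comes out wrong; div64_u64() performs a full 64-by-64 division. A minimal userspace sketch of the same truncation, with made-up values and plain C division standing in for the kernel helpers:

    /* Model of the do_div() truncation fixed above (illustrative only). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t deltatsc = 10000000000ULL;
        uint64_t tmp = 0x100000001ULL;  /* divisor wider than 32 bits */

        /* do_div()-style: the divisor is truncated to its low 32 bits */
        uint64_t wrong = deltatsc / (uint32_t)tmp;  /* divides by 1 */

        /* div64_u64()-style: full 64-bit division */
        uint64_t right = deltatsc / tmp;

        printf("truncated: %llu, full: %llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
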
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 763bb3bade63..8494dbae41b9 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3031,7 +3031,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+ return RET_PF_RETRY;
+ }
+
+- return RET_PF_EMULATE;
++ return -EFAULT;
+ }
+
+ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index 21bffdcb2f20..557ed25b42e3 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -105,7 +105,7 @@ static int tpm_set_next_event(unsigned long delta,
+ * of writing CNT registers which may cause the min_delta event got
+ * missed, so we need add a ETIME check here in case it happened.
+ */
+- return (int)((next - now) <= 0) ? -ETIME : 0;
++ return (int)(next - now) <= 0 ? -ETIME : 0;
+ }
+
+ static int tpm_set_state_oneshot(struct clock_event_device *evt)
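
A note on the timer-imx-tpm.c hunk above: in the old expression the (int) cast bound to the comparison result, and since next - now is unsigned it can never be negative, so "(next - now) <= 0" only caught exact equality; the fix casts the difference itself to a signed int, so a deadline that has already slipped past shows up as negative. A small sketch of the difference, with arbitrary counter values:

    /* Why casting the difference (not the comparison) matters. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t next = 100, now = 150;  /* the deadline already passed */

        /* old: cast applies to the boolean; unsigned diff is huge, not <= 0 */
        int old_expired = (int)((next - now) <= 0);   /* 0: event missed */

        /* new: cast the difference, then compare as signed */
        int new_expired = (int32_t)(next - now) <= 0; /* 1: -ETIME returned */

        printf("old=%d new=%d\n", old_expired, new_expired);
        return 0;
    }
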
+diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+index 02a50929af67..e7f4fe2848a5 100644
+--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
++++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ {
+ uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
+ ssize_t ret;
++ int retry;
+
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return 0;
+
+- ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+- &tmds_oen, sizeof(tmds_oen));
+- if (ret) {
+- DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
+- enable ? "enable" : "disable");
+- return ret;
++ /*
++ * LSPCON adapters in low-power state may ignore the first write, so
++ * read back and verify the written value a few times.
++ */
++ for (retry = 0; retry < 3; retry++) {
++ uint8_t tmp;
++
++ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
++ &tmds_oen, sizeof(tmds_oen));
++ if (ret) {
++ DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
++ enable ? "enable" : "disable",
++ retry + 1);
++ return ret;
++ }
++
++ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
++ &tmp, sizeof(tmp));
++ if (ret) {
++ DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
++ enable ? "enabling" : "disabling",
++ retry + 1);
++ return ret;
++ }
++
++ if (tmp == tmds_oen)
++ return 0;
+ }
+
+- return 0;
++ DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
++ enable ? "enabling" : "disabling");
++
++ return -EIO;
+ }
+ EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
+
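The drm_dp_dual_mode_helper.c hunk above turns a single register write into a write-then-read-back loop, because an LSPCON adapter waking from a low-power state may silently drop the first write. The same shape in a self-contained sketch; reg_write()/reg_read() and the flaky-device model are hypothetical stand-ins for the drm_dp_dual_mode_write()/read() calls:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical device that ignores its first write while "asleep". */
    static uint8_t dev_reg;
    static int dev_writes;

    static int reg_write(uint8_t val)
    {
        if (dev_writes++ == 0)
            return 0;      /* write silently dropped, no bus error */
        dev_reg = val;
        return 0;
    }

    static int reg_read(uint8_t *val)
    {
        *val = dev_reg;
        return 0;
    }

    /* Same structure as the retry loop added in the hunk above. */
    static int write_verified(uint8_t val)
    {
        for (int retry = 0; retry < 3; retry++) {
            uint8_t tmp;
            int ret = reg_write(val);
            if (ret)
                return ret;          /* real bus error: give up */
            ret = reg_read(&tmp);
            if (ret)
                return ret;
            if (tmp == val)
                return 0;            /* device accepted the write */
        }
        return -EIO;                 /* device kept ignoring us */
    }

    int main(void)
    {
        printf("write_verified -> %d\n", write_verified(0x80));
        return 0;
    }
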
+diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
+index 2fb7b34ef561..82cd2fbe2cb3 100644
+--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
++++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
+@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
+ struct intel_vgpu_fb_info *fb_info)
+ {
+ gvt_dmabuf->drm_format = fb_info->drm_format;
++ gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
+ gvt_dmabuf->width = fb_info->width;
+ gvt_dmabuf->height = fb_info->height;
+ gvt_dmabuf->stride = fb_info->stride;
+diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
+index 021f722e2481..f34d7f1e6c4e 100644
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
+@@ -1284,7 +1284,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+
+ }
+
+- return 0;
++ return -ENOTTY;
+ }
+
+ static ssize_t
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 3ab1ace2a6bd..df505868d65a 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
+
+ err = radix_tree_insert(handles_vma, handle, vma);
+ if (unlikely(err)) {
+- kfree(lut);
++ kmem_cache_free(eb->i915->luts, lut);
+ goto err_obj;
+ }
+
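The i915_gem_execbuffer.c fix above is an instance of the general rule that memory must be freed through the allocator that produced it: the lut object comes from a dedicated slab cache (eb->i915->luts), so the error path must use kmem_cache_free() on that same cache rather than kfree(). A userspace analogue, with a hypothetical recycling pool in place of the slab cache:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy object pool standing in for a kmem_cache. */
    struct pool { void *slots[16]; int top; };

    static void *pool_alloc(struct pool *p)
    {
        return p->top ? p->slots[--p->top] : malloc(64);
    }

    static void pool_free(struct pool *p, void *obj)
    {
        if (p->top < 16)
            p->slots[p->top++] = obj;  /* recycled, pool bookkeeping intact */
        else
            free(obj);
    }

    int main(void)
    {
        struct pool p = { .top = 0 };
        void *obj = pool_alloc(&p);

        /* Calling free(obj) here (the old code's kfree()) would bypass
         * the pool's accounting; returning it via pool_free() (the
         * fix's kmem_cache_free()) keeps the allocator state consistent. */
        pool_free(&p, obj);
        printf("pool holds %d recycled object(s)\n", p.top);
        return 0;
    }
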
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index 4a01f62a392d..0ef7856d8155 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ u32 tmp;
+
+- if (!IS_GEN9_BC(dev_priv))
++ if (!IS_GEN9(dev_priv))
+ return;
+
+ i915_audio_component_get_power(kdev);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index b49a2df44430..9b992e1b5996 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1255,7 +1255,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ return;
+
+ aux_channel = child->aux_channel;
+- ddc_pin = child->ddc_pin;
+
+ is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+@@ -1302,9 +1301,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+ if (is_dvi) {
+- info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
+-
+- sanitize_ddc_pin(dev_priv, port);
++ ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
++ if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
++ info->alternate_ddc_pin = ddc_pin;
++ sanitize_ddc_pin(dev_priv, port);
++ } else {
++ DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
++ "sticking to defaults\n",
++ port_name(port), ddc_pin);
++ }
+ }
+
+ if (is_dp) {
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 2decc8e2c79f..add9cc97a3b6 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
+ vc4_bo_set_label(obj, -1);
+
+ if (bo->validated_shader) {
++ kfree(bo->validated_shader->uniform_addr_offsets);
+ kfree(bo->validated_shader->texture_samples);
+ kfree(bo->validated_shader);
+ bo->validated_shader = NULL;
+@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
+ }
+
+ if (bo->validated_shader) {
++ kfree(bo->validated_shader->uniform_addr_offsets);
+ kfree(bo->validated_shader->texture_samples);
+ kfree(bo->validated_shader);
+ bo->validated_shader = NULL;
+diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+index d3f15bf60900..7cf82b071de2 100644
+--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+ fail:
+ kfree(validation_state.branch_targets);
+ if (validated_shader) {
++ kfree(validated_shader->uniform_addr_offsets);
+ kfree(validated_shader->texture_samples);
+ kfree(validated_shader);
+ }
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index a2e1aa86e133..6c424afea25f 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3157,7 +3157,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
+ * If we moved a kernel QP to RESET, clean up all old CQ
+ * entries and reinitialize the QP.
+ */
+- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
++ if (new_state == IB_QPS_RESET &&
++ !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
+ mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
+ ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ if (send_cq != recv_cq)
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 35b21f8152bb..20af54378cc0 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3484,8 +3484,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
+ list_del(&data->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
+- INIT_WORK(&data->destroy_work, destroy_radio);
+- queue_work(hwsim_wq, &data->destroy_work);
++ spin_unlock_bh(&hwsim_radio_lock);
++ mac80211_hwsim_del_radio(data,
++ wiphy_name(data->hw->wiphy),
++ NULL);
++ spin_lock_bh(&hwsim_radio_lock);
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+ }
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 7ab5e0128f0c..1e9a20a4c06c 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -553,8 +553,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
+- int action, int is_data, int *qrecord_inserted_ret,
++ int action, int is_data, int is_system,
++ int *qrecord_inserted_ret,
+ int *old_ref_mod, int *new_ref_mod)
++
+ {
+ struct btrfs_delayed_ref_head *existing;
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -598,6 +600,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ head_ref->ref_mod = count_mod;
+ head_ref->must_insert_reserved = must_insert_reserved;
+ head_ref->is_data = is_data;
++ head_ref->is_system = is_system;
+ head_ref->ref_tree = RB_ROOT;
+ INIT_LIST_HEAD(&head_ref->ref_add_list);
+ RB_CLEAR_NODE(&head_ref->href_node);
+@@ -785,6 +788,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
++ int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+@@ -813,8 +817,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ bytenr, num_bytes, 0, 0, action, 0,
+- &qrecord_inserted, old_ref_mod,
+- new_ref_mod);
++ is_system, &qrecord_inserted,
++ old_ref_mod, new_ref_mod);
+
+ add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+ num_bytes, parent, ref_root, level, action);
+@@ -881,7 +885,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ bytenr, num_bytes, ref_root, reserved,
+- action, 1, &qrecord_inserted,
++ action, 1, 0, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+ add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+@@ -911,9 +915,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+
++ /*
++ * extent_ops just modify the flags of an extent and they don't result
++ * in ref count changes, hence it's safe to pass false/0 for is_system
++ * argument
++ */
+ add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
+ num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
+- extent_op->is_data, NULL, NULL, NULL);
++ extent_op->is_data, 0, NULL, NULL, NULL);
+
+ spin_unlock(&delayed_refs->lock);
+ return 0;
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index c4f625e5a691..ba97d18cc168 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -139,6 +139,7 @@ struct btrfs_delayed_ref_head {
+ */
+ unsigned int must_insert_reserved:1;
+ unsigned int is_data:1;
++ unsigned int is_system:1;
+ unsigned int processing:1;
+ };
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index c1618ab9fecf..16b54b1ff20e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2615,13 +2615,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ trace_run_delayed_ref_head(fs_info, head, 0);
+
+ if (head->total_ref_mod < 0) {
+- struct btrfs_block_group_cache *cache;
++ struct btrfs_space_info *space_info;
++ u64 flags;
+
+- cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+- ASSERT(cache);
+- percpu_counter_add(&cache->space_info->total_bytes_pinned,
++ if (head->is_data)
++ flags = BTRFS_BLOCK_GROUP_DATA;
++ else if (head->is_system)
++ flags = BTRFS_BLOCK_GROUP_SYSTEM;
++ else
++ flags = BTRFS_BLOCK_GROUP_METADATA;
++ space_info = __find_space_info(fs_info, flags);
++ ASSERT(space_info);
++ percpu_counter_add(&space_info->total_bytes_pinned,
+ -head->num_bytes);
+- btrfs_put_block_group(cache);
+
+ if (head->is_data) {
+ spin_lock(&delayed_refs->lock);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c7b75dd58fad..ef1cf323832a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -44,6 +44,7 @@
+ #include <linux/uio.h>
+ #include <linux/magic.h>
+ #include <linux/iversion.h>
++#include <asm/unaligned.h>
+ #include "ctree.h"
+ #include "disk-io.h"
+ #include "transaction.h"
+@@ -5951,11 +5952,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
+ struct dir_entry *entry = addr;
+ char *name = (char *)(entry + 1);
+
+- ctx->pos = entry->offset;
+- if (!dir_emit(ctx, name, entry->name_len, entry->ino,
+- entry->type))
++ ctx->pos = get_unaligned(&entry->offset);
++ if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
++ get_unaligned(&entry->ino),
++ get_unaligned(&entry->type)))
+ return 1;
+- addr += sizeof(struct dir_entry) + entry->name_len;
++ addr += sizeof(struct dir_entry) +
++ get_unaligned(&entry->name_len);
+ ctx->pos++;
+ }
+ return 0;
+@@ -6045,14 +6048,15 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ }
+
+ entry = addr;
+- entry->name_len = name_len;
++ put_unaligned(name_len, &entry->name_len);
+ name_ptr = (char *)(entry + 1);
+ read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
+ name_len);
+- entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
++ put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
++ &entry->type);
+ btrfs_dir_item_key_to_cpu(leaf, di, &location);
+- entry->ino = location.objectid;
+- entry->offset = found_key.offset;
++ put_unaligned(location.objectid, &entry->ino);
++ put_unaligned(found_key.offset, &entry->offset);
+ entries++;
+ addr += sizeof(struct dir_entry) + name_len;
+ total_len += sizeof(struct dir_entry) + name_len;
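
The btrfs/inode.c hunks above exist because readdir packs each struct dir_entry header directly behind a variable-length name in one buffer, so a header can start at any byte offset; on architectures that fault on unaligned loads, plain member access crashes, and the {get,put}_unaligned() helpers perform byte-safe accesses instead. A userspace model of the same idea, with a simplified hypothetical entry layout; memcpy() plays the role of the kernel helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct entry_hdr { uint64_t ino; uint16_t name_len; };

    /* get_unaligned()-style read: safe at any byte offset. */
    static uint64_t get_unaligned_u64(const void *p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    int main(void)
    {
        unsigned char buf[64] = {0};
        unsigned char *p = buf + 3;   /* deliberately misaligned start */
        struct entry_hdr h = { .ino = 257, .name_len = 4 };

        memcpy(p, &h, sizeof(h));     /* put_unaligned()-style store */
        printf("ino=%llu\n", (unsigned long long)get_unaligned_u64(p));
        return 0;
    }
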
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 81ba6e0d88d8..925844343038 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ goto mknod_out;
+ }
+
++ if (!S_ISCHR(mode) && !S_ISBLK(mode))
++ goto mknod_out;
++
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ goto mknod_out;
+
+@@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+
+ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (buf == NULL) {
+- kfree(full_path);
+ rc = -ENOMEM;
+- free_xid(xid);
+- return rc;
++ goto mknod_out;
+ }
+
+ if (backup_cred(cifs_sb))
+@@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ pdev->minor = cpu_to_le64(MINOR(device_number));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
+- } /* else if (S_ISFIFO) */
++ }
+ tcon->ses->server->ops->close(xid, tcon, &fid);
+ d_drop(direntry);
+
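Two things happen in the cifs/dir.c hunks above: mknod bails out early for anything that is not a character or block device, and the duplicated inline cleanup (kfree(), free_xid(), return) collapses into the existing mknod_out label. The second part is the classic single-exit cleanup pattern; a small sketch with hypothetical resources:

    #include <stdlib.h>

    static int do_op(void)
    {
        int rc = 0;
        char *path = malloc(256);
        char *buf = NULL;

        if (!path)
            return -1;

        buf = malloc(1024);
        if (!buf) {
            rc = -1;
            goto out;      /* one exit path, no duplicated frees */
        }

        /* ... work that may also 'goto out' on failure ... */

    out:
        free(buf);         /* free(NULL) is a no-op, so this is safe */
        free(path);
        return rc;
    }

    int main(void)
    {
        return do_op() ? 1 : 0;
    }
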
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 52cccdbb7e14..34be5c5d027f 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -2194,6 +2194,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+ goto done;
+ }
+ i++;
++ if (i == rqst->rq_nvec)
++ break;
+ }
+ start = i;
+ buflen = 0;
+diff --git a/fs/super.c b/fs/super.c
+index 672538ca9831..afbf4d220c27 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -166,6 +166,7 @@ static void destroy_unused_super(struct super_block *s)
+ security_sb_free(s);
+ put_user_ns(s->s_user_ns);
+ kfree(s->s_subtype);
++ free_prealloced_shrinker(&s->s_shrink);
+ /* no delays needed */
+ destroy_super_work(&s->destroy_work);
+ }
+@@ -251,6 +252,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
+ s->s_shrink.count_objects = super_cache_count;
+ s->s_shrink.batch = 1024;
+ s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
++ if (prealloc_shrinker(&s->s_shrink))
++ goto fail;
+ return s;
+
+ fail:
+@@ -517,11 +520,7 @@ struct super_block *sget_userns(struct file_system_type *type,
+ hlist_add_head(&s->s_instances, &type->fs_supers);
+ spin_unlock(&sb_lock);
+ get_filesystem(type);
+- err = register_shrinker(&s->s_shrink);
+- if (err) {
+- deactivate_locked_super(s);
+- s = ERR_PTR(err);
+- }
++ register_shrinker_prepared(&s->s_shrink);
+ return s;
+ }
+
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 14529511c4b8..065d605adea0 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -301,6 +301,7 @@ int xt_data_to_user(void __user *dst, const void *src,
+
+ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ struct xt_counters_info *info, bool compat);
++struct xt_counters *xt_counters_alloc(unsigned int counters);
+
+ struct xt_table *xt_register_table(struct net *net,
+ const struct xt_table *table,
+@@ -509,7 +510,7 @@ void xt_compat_unlock(u_int8_t af);
+
+ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+ void xt_compat_flush_offsets(u_int8_t af);
+-void xt_compat_init_offsets(u_int8_t af, unsigned int number);
++int xt_compat_init_offsets(u8 af, unsigned int number);
+ int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+ int xt_compat_match_offset(const struct xt_match *match);
+diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
+index 388ff2936a87..6794490f25b2 100644
+--- a/include/linux/shrinker.h
++++ b/include/linux/shrinker.h
+@@ -75,6 +75,9 @@ struct shrinker {
+ #define SHRINKER_NUMA_AWARE (1 << 0)
+ #define SHRINKER_MEMCG_AWARE (1 << 1)
+
+-extern int register_shrinker(struct shrinker *);
+-extern void unregister_shrinker(struct shrinker *);
++extern int prealloc_shrinker(struct shrinker *shrinker);
++extern void register_shrinker_prepared(struct shrinker *shrinker);
++extern int register_shrinker(struct shrinker *shrinker);
++extern void unregister_shrinker(struct shrinker *shrinker);
++extern void free_prealloced_shrinker(struct shrinker *shrinker);
+ #endif
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index 772a43fea825..73cc26e321de 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -119,19 +119,22 @@ int get_callchain_buffers(int event_max_stack)
+ goto exit;
+ }
+
++ /*
++ * If requesting per event more than the global cap,
++ * return a different error to help userspace figure
++ * this out.
++ *
++ * And also do it here so that we have &callchain_mutex held.
++ */
++ if (event_max_stack > sysctl_perf_event_max_stack) {
++ err = -EOVERFLOW;
++ goto exit;
++ }
++
+ if (count > 1) {
+ /* If the allocation failed, give up */
+ if (!callchain_cpus_entries)
+ err = -ENOMEM;
+- /*
+- * If requesting per event more than the global cap,
+- * return a different error to help userspace figure
+- * this out.
+- *
+- * And also do it here so that we have &callchain_mutex held.
+- */
+- if (event_max_stack > sysctl_perf_event_max_stack)
+- err = -EOVERFLOW;
+ goto exit;
+ }
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b32bc0698a2a..ca7298760c83 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9730,9 +9730,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+ * __u16 sample size limit.
+ */
+ if (attr->sample_stack_user >= USHRT_MAX)
+- ret = -EINVAL;
++ return -EINVAL;
+ else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+- ret = -EINVAL;
++ return -EINVAL;
+ }
+
+ if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
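
The events/core.c hunk above fixes an error that could be lost: the old code recorded -EINVAL in ret and fell through, and a later check (the PERF_SAMPLE_REGS_INTR validation visible in the trailing context line) could reassign ret, clobbering the earlier failure; returning at the failure site makes the error stick. The same failure mode in miniature, with hypothetical validators:

    #include <stdio.h>

    #define EINVAL 22

    static int validate_a(int v) { return v < 0 ? -EINVAL : 0; }
    static int validate_b(int v) { return v > 100 ? -EINVAL : 0; }

    static int check_buggy(int a, int b)
    {
        int ret = 0;
        if (validate_a(a))
            ret = -EINVAL;       /* recorded ... */
        ret = validate_b(b);     /* ... then silently overwritten */
        return ret;
    }

    static int check_fixed(int a, int b)
    {
        if (validate_a(a))
            return -EINVAL;      /* reported immediately */
        return validate_b(b);
    }

    int main(void)
    {
        printf("buggy=%d fixed=%d\n", check_buggy(-1, 5), check_fixed(-1, 5));
        return 0;
    }
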
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index ec09ce9a6012..639321bf2e39 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -326,6 +326,17 @@ static int alarmtimer_resume(struct device *dev)
+ }
+ #endif
+
++static void
++__alarm_init(struct alarm *alarm, enum alarmtimer_type type,
++ enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
++{
++ timerqueue_init(&alarm->node);
++ alarm->timer.function = alarmtimer_fired;
++ alarm->function = function;
++ alarm->type = type;
++ alarm->state = ALARMTIMER_STATE_INACTIVE;
++}
++
+ /**
+ * alarm_init - Initialize an alarm structure
+ * @alarm: ptr to alarm to be initialized
+@@ -335,13 +346,9 @@ static int alarmtimer_resume(struct device *dev)
+ void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
+ {
+- timerqueue_init(&alarm->node);
+ hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,
+- HRTIMER_MODE_ABS);
+- alarm->timer.function = alarmtimer_fired;
+- alarm->function = function;
+- alarm->type = type;
+- alarm->state = ALARMTIMER_STATE_INACTIVE;
++ HRTIMER_MODE_ABS);
++ __alarm_init(alarm, type, function);
+ }
+ EXPORT_SYMBOL_GPL(alarm_init);
+
+@@ -719,6 +726,8 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
+
+ __set_current_state(TASK_RUNNING);
+
++ destroy_hrtimer_on_stack(&alarm->timer);
++
+ if (!alarm->data)
+ return 0;
+
+@@ -740,6 +749,15 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
+ return -ERESTART_RESTARTBLOCK;
+ }
+
++static void
++alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type,
++ enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
++{
++ hrtimer_init_on_stack(&alarm->timer, alarm_bases[type].base_clockid,
++ HRTIMER_MODE_ABS);
++ __alarm_init(alarm, type, function);
++}
++
+ /**
+ * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep
+ * @restart: ptr to restart block
+@@ -752,7 +770,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
+ ktime_t exp = restart->nanosleep.expires;
+ struct alarm alarm;
+
+- alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
++ alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
+
+ return alarmtimer_do_nsleep(&alarm, exp, type);
+ }
+@@ -784,7 +802,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ if (!capable(CAP_WAKE_ALARM))
+ return -EPERM;
+
+- alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
++ alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
+
+ exp = timespec64_to_ktime(*tsreq);
+ /* Convert (if necessary) to absolute time */
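
The alarmtimer.c hunks above introduce on-stack initialization for the nanosleep path: hrtimer_init_on_stack() registers the timer with the debug-objects machinery as a stack object, and destroy_hrtimer_on_stack() deregisters it before the frame is torn down, so the tracker is never left pointing at a dead stack slot. A minimal userspace model of that bookkeeping; the tracker and timer types here are invented for illustration:

    #include <stdio.h>

    #define MAX_TRACKED 8
    static const void *tracked[MAX_TRACKED];

    static void track(const void *obj)
    {
        for (int i = 0; i < MAX_TRACKED; i++)
            if (!tracked[i]) { tracked[i] = obj; return; }
    }

    static void untrack(const void *obj)
    {
        for (int i = 0; i < MAX_TRACKED; i++)
            if (tracked[i] == obj) tracked[i] = NULL;
    }

    struct timer { int armed; };

    static void timer_init_on_stack(struct timer *t)
    {
        t->armed = 0;
        track(t);          /* hrtimer_init_on_stack() analogue */
    }

    static void timer_destroy_on_stack(struct timer *t)
    {
        untrack(t);        /* destroy_hrtimer_on_stack() analogue */
    }

    static void nsleep(void)
    {
        struct timer t;    /* lives only in this stack frame */

        timer_init_on_stack(&t);
        /* ... sleep ... */
        timer_destroy_on_stack(&t);   /* must run before the frame dies */
    }

    int main(void)
    {
        nsleep();
        return 0;
    }
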
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 2541bd89f20e..5a6251ac6f7a 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
+ u64 *newval, u64 *oldval)
+ {
+ u64 now;
++ int ret;
+
+ WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
++ ret = cpu_timer_sample_group(clock_idx, tsk, &now);
+
+- if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
++ if (oldval && ret != -EINVAL) {
+ /*
+ * We are setting itimer. The *oldval is absolute and we update
+ * it to be relative, *newval argument is relative and we update
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index cd5dc3faaa57..f6a1587f9f31 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -258,7 +258,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
+ /*
+ * Add a shrinker callback to be called from the vm.
+ */
+-int register_shrinker(struct shrinker *shrinker)
++int prealloc_shrinker(struct shrinker *shrinker)
+ {
+ size_t size = sizeof(*shrinker->nr_deferred);
+
+@@ -268,10 +268,29 @@ int register_shrinker(struct shrinker *shrinker)
+ shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
+ if (!shrinker->nr_deferred)
+ return -ENOMEM;
++ return 0;
++}
++
++void free_prealloced_shrinker(struct shrinker *shrinker)
++{
++ kfree(shrinker->nr_deferred);
++ shrinker->nr_deferred = NULL;
++}
+
++void register_shrinker_prepared(struct shrinker *shrinker)
++{
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ up_write(&shrinker_rwsem);
++}
++
++int register_shrinker(struct shrinker *shrinker)
++{
++ int err = prealloc_shrinker(shrinker);
++
++ if (err)
++ return err;
++ register_shrinker_prepared(shrinker);
+ return 0;
+ }
+ EXPORT_SYMBOL(register_shrinker);
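
The vmscan.c hunk above splits registration into a fallible allocation step and an unfailable publication step, which is what lets fs/super.c (earlier in this patch) allocate the shrinker state before the superblock becomes visible and only link it into the shrinker list afterwards. A sketch of the intended calling sequence, with a hypothetical setup function and -ENOMEM spelled as a literal:

    #include <stdio.h>
    #include <stdlib.h>

    #define ENOMEM 12

    struct shrinker { long *nr_deferred; int registered; };

    static int prealloc_shrinker(struct shrinker *s)
    {
        s->nr_deferred = calloc(1, sizeof(*s->nr_deferred));
        return s->nr_deferred ? 0 : -ENOMEM;  /* the only step that can fail */
    }

    static void register_shrinker_prepared(struct shrinker *s)
    {
        s->registered = 1;                    /* list insertion, cannot fail */
    }

    static void free_prealloced_shrinker(struct shrinker *s)
    {
        free(s->nr_deferred);
        s->nr_deferred = NULL;
    }

    static int setup_object(struct shrinker *s)
    {
        if (prealloc_shrinker(s)) {
            free_prealloced_shrinker(s);  /* still private, trivial unwind */
            return -ENOMEM;
        }
        /* ... publish the owning object (e.g. add the sb to fs_supers) ... */
        register_shrinker_prepared(s);    /* no failure after publication */
        return 0;
    }

    int main(void)
    {
        struct shrinker s = {0};
        printf("setup -> %d\n", setup_object(&s));
        return 0;
    }
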
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index a94d23b0a9af..752112539753 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1821,10 +1821,14 @@ static int compat_table_info(const struct ebt_table_info *info,
+ {
+ unsigned int size = info->entries_size;
+ const void *entries = info->entries;
++ int ret;
+
+ newinfo->entries_size = size;
+
+- xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
++ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
++ if (ret)
++ return ret;
++
+ return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+ entries, newinfo);
+ }
+@@ -2268,7 +2272,9 @@ static int compat_do_replace(struct net *net, void __user *user,
+
+ xt_compat_lock(NFPROTO_BRIDGE);
+
+- xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
++ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
++ if (ret < 0)
++ goto out_unlock;
+ ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+ if (ret < 0)
+ goto out_unlock;
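
From here on, the same caller-side change repeats through arp_tables, ip_tables and ip6_tables: xt_compat_init_offsets() now performs a bounded allocation up front and returns an error that every call site must check, and the open-coded vzalloc() of counters moves behind xt_counters_alloc(), which rejects oversized requests. The core of that hardening is overflow-checked sizing; a standalone sketch mirroring the checks added in x_tables.c below (the cap constant mirrors XT_MAX_TABLE_SIZE):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_TABLE_SIZE (512 * 1024 * 1024)

    struct delta { unsigned int offset; int delta; };

    static void *alloc_offsets(unsigned int number)
    {
        size_t mem;

        if (number == 0 || number > INT_MAX / sizeof(struct delta))
            return NULL;                  /* reject multiplication overflow */

        mem = sizeof(struct delta) * (size_t)number;
        if (mem > MAX_TABLE_SIZE)
            return NULL;                  /* cap the total allocation */

        return malloc(mem);
    }

    int main(void)
    {
        void *tab = alloc_offsets(1024);
        printf("bounded alloc: %s\n", tab ? "ok" : "refused");
        free(tab);
        return 0;
    }
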
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index e3e420f3ba7b..b940d6aaa94f 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -781,7 +781,9 @@ static int compat_table_info(const struct xt_table_info *info,
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ newinfo->initial_entries = 0;
+ loc_cpu_entry = info->entries;
+- xt_compat_init_offsets(NFPROTO_ARP, info->number);
++ ret = xt_compat_init_offsets(NFPROTO_ARP, info->number);
++ if (ret)
++ return ret;
+ xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ if (ret != 0)
+@@ -895,7 +897,7 @@ static int __do_replace(struct net *net, const char *name,
+ struct arpt_entry *iter;
+
+ ret = 0;
+- counters = vzalloc(num_counters * sizeof(struct xt_counters));
++ counters = xt_counters_alloc(num_counters);
+ if (!counters) {
+ ret = -ENOMEM;
+ goto out;
+@@ -1167,7 +1169,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+ struct compat_arpt_entry *iter0;
+ struct arpt_replace repl;
+ unsigned int size;
+- int ret = 0;
++ int ret;
+
+ info = *pinfo;
+ entry0 = *pentry0;
+@@ -1176,7 +1178,9 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+
+ j = 0;
+ xt_compat_lock(NFPROTO_ARP);
+- xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
++ ret = xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
++ if (ret)
++ goto out_unlock;
+ /* Walk through entries, checking offsets. */
+ xt_entry_foreach(iter0, entry0, compatr->size) {
+ ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index e38395a8dcf2..34f22450da5b 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -945,7 +945,9 @@ static int compat_table_info(const struct xt_table_info *info,
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ newinfo->initial_entries = 0;
+ loc_cpu_entry = info->entries;
+- xt_compat_init_offsets(AF_INET, info->number);
++ ret = xt_compat_init_offsets(AF_INET, info->number);
++ if (ret)
++ return ret;
+ xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ if (ret != 0)
+@@ -1057,7 +1059,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ struct ipt_entry *iter;
+
+ ret = 0;
+- counters = vzalloc(num_counters * sizeof(struct xt_counters));
++ counters = xt_counters_alloc(num_counters);
+ if (!counters) {
+ ret = -ENOMEM;
+ goto out;
+@@ -1418,7 +1420,9 @@ translate_compat_table(struct net *net,
+
+ j = 0;
+ xt_compat_lock(AF_INET);
+- xt_compat_init_offsets(AF_INET, compatr->num_entries);
++ ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
++ if (ret)
++ goto out_unlock;
+ /* Walk through entries, checking offsets. */
+ xt_entry_foreach(iter0, entry0, compatr->size) {
+ ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 62358b93bbac..41db3c8f469f 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -962,7 +962,9 @@ static int compat_table_info(const struct xt_table_info *info,
+ memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ newinfo->initial_entries = 0;
+ loc_cpu_entry = info->entries;
+- xt_compat_init_offsets(AF_INET6, info->number);
++ ret = xt_compat_init_offsets(AF_INET6, info->number);
++ if (ret)
++ return ret;
+ xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ if (ret != 0)
+@@ -1075,7 +1077,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ struct ip6t_entry *iter;
+
+ ret = 0;
+- counters = vzalloc(num_counters * sizeof(struct xt_counters));
++ counters = xt_counters_alloc(num_counters);
+ if (!counters) {
+ ret = -ENOMEM;
+ goto out;
+@@ -1425,7 +1427,7 @@ translate_compat_table(struct net *net,
+ struct compat_ip6t_entry *iter0;
+ struct ip6t_replace repl;
+ unsigned int size;
+- int ret = 0;
++ int ret;
+
+ info = *pinfo;
+ entry0 = *pentry0;
+@@ -1434,7 +1436,9 @@ translate_compat_table(struct net *net,
+
+ j = 0;
+ xt_compat_lock(AF_INET6);
+- xt_compat_init_offsets(AF_INET6, compatr->num_entries);
++ ret = xt_compat_init_offsets(AF_INET6, compatr->num_entries);
++ if (ret)
++ goto out_unlock;
+ /* Walk through entries, checking offsets. */
+ xt_entry_foreach(iter0, entry0, compatr->size) {
+ ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 4aa01c90e9d1..a94c0e3cdcf0 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -40,6 +40,7 @@ MODULE_AUTHOR("Harald Welte <laforge@×××××××××.org>");
+ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
+
+ #define XT_PCPU_BLOCK_SIZE 4096
++#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
+
+ struct compat_delta {
+ unsigned int offset; /* offset in kernel */
+@@ -553,14 +554,8 @@ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
+ {
+ struct xt_af *xp = &xt[af];
+
+- if (!xp->compat_tab) {
+- if (!xp->number)
+- return -EINVAL;
+- xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+- if (!xp->compat_tab)
+- return -ENOMEM;
+- xp->cur = 0;
+- }
++ if (WARN_ON(!xp->compat_tab))
++ return -ENOMEM;
+
+ if (xp->cur >= xp->number)
+ return -EINVAL;
+@@ -603,10 +598,28 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
+
+-void xt_compat_init_offsets(u_int8_t af, unsigned int number)
++int xt_compat_init_offsets(u8 af, unsigned int number)
+ {
++ size_t mem;
++
++ if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
++ return -EINVAL;
++
++ if (WARN_ON(xt[af].compat_tab))
++ return -EINVAL;
++
++ mem = sizeof(struct compat_delta) * number;
++ if (mem > XT_MAX_TABLE_SIZE)
++ return -ENOMEM;
++
++ xt[af].compat_tab = vmalloc(mem);
++ if (!xt[af].compat_tab)
++ return -ENOMEM;
++
+ xt[af].number = number;
+ xt[af].cur = 0;
++
++ return 0;
+ }
+ EXPORT_SYMBOL(xt_compat_init_offsets);
+
+@@ -805,6 +818,9 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
+ */
+ unsigned int *xt_alloc_entry_offsets(unsigned int size)
+ {
++ if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
++ return NULL;
++
+ return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
+
+ }
+@@ -1029,7 +1045,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
+ struct xt_table_info *info = NULL;
+ size_t sz = sizeof(*info) + size;
+
+- if (sz < sizeof(*info))
++ if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
+ return NULL;
+
+ /* __GFP_NORETRY is not fully supported by kvmalloc but it should
+@@ -1198,6 +1214,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
+ return 0;
+ }
+
++struct xt_counters *xt_counters_alloc(unsigned int counters)
++{
++ struct xt_counters *mem;
++
++ if (counters == 0 || counters > INT_MAX / sizeof(*mem))
++ return NULL;
++
++ counters *= sizeof(*mem);
++ if (counters > XT_MAX_TABLE_SIZE)
++ return NULL;
++
++ return vzalloc(counters);
++}
++EXPORT_SYMBOL(xt_counters_alloc);
++
+ struct xt_table_info *
+ xt_replace_table(struct xt_table *table,
+ unsigned int num_counters,