Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Tue, 11 Jan 2022 14:34:33
Message-Id: 1641911652.a83c34f38a907533b7adff715936d94f4ab825df.mpagano@gentoo
commit: a83c34f38a907533b7adff715936d94f4ab825df
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 11 14:34:12 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jan 11 14:34:12 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a83c34f3

Linux patch 5.4.171

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1170_linux-5.4.171.patch | 909 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 913 insertions(+)

diff --git a/0000_README b/0000_README
index 75707cd8..5d64761b 100644
--- a/0000_README
+++ b/0000_README
@@ -723,6 +723,10 @@ Patch: 1169_linux-5.4.170.patch
From: http://www.kernel.org
Desc: Linux 5.4.170

+Patch: 1170_linux-5.4.171.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.171
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1170_linux-5.4.171.patch b/1170_linux-5.4.171.patch
new file mode 100644
index 00000000..a57bed9c
--- /dev/null
+++ b/1170_linux-5.4.171.patch
@@ -0,0 +1,909 @@
+diff --git a/Makefile b/Makefile
+index 7380354e49513..062052f71a976 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 170
++SUBLEVEL = 171
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
+index b8d715c68ca44..11a0806469162 100644
+--- a/drivers/infiniband/core/uverbs_marshall.c
++++ b/drivers/infiniband/core/uverbs_marshall.c
+@@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
+ struct rdma_ah_attr *src = ah_attr;
+ struct rdma_ah_attr conv_ah;
+
+- memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
++ memset(&dst->grh, 0, sizeof(dst->grh));
+
+ if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
+ (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
+diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
+index 00c5478871322..818699b855c5c 100644
+--- a/drivers/infiniband/core/uverbs_uapi.c
++++ b/drivers/infiniband/core/uverbs_uapi.c
+@@ -450,6 +450,9 @@ static int uapi_finalize(struct uverbs_api *uapi)
+ uapi->num_write_ex = max_write_ex + 1;
+ data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
+ sizeof(*uapi->write_methods), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
+ for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
+ data[i] = &uapi->notsupp_method;
+ uapi->write_methods = data;
+diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
+index 2962c3747adc3..ed5fbcb40e3f0 100644
+--- a/drivers/input/touchscreen/of_touchscreen.c
++++ b/drivers/input/touchscreen/of_touchscreen.c
+@@ -77,8 +77,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
+ axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
+ data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
+ input_abs_get_min(input, axis),
+- &minimum) |
+- touchscreen_get_prop_u32(dev, "touchscreen-size-x",
++ &minimum);
++ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+ input_abs_get_max(input,
+ axis) + 1,
+ &maximum);
+@@ -91,8 +91,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
+ axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
+ data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
+ input_abs_get_min(input, axis),
+- &minimum) |
+- touchscreen_get_prop_u32(dev, "touchscreen-size-y",
++ &minimum);
++ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+ input_abs_get_max(input,
+ axis) + 1,
+ &maximum);
+diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
+index 55891e4204460..a41b4b2645941 100644
+--- a/drivers/isdn/mISDN/core.c
++++ b/drivers/isdn/mISDN/core.c
+@@ -381,7 +381,7 @@ mISDNInit(void)
+ err = mISDN_inittimer(&debug);
+ if (err)
+ goto error2;
+- err = l1_init(&debug);
++ err = Isdnl1_Init(&debug);
+ if (err)
+ goto error3;
+ err = Isdnl2_Init(&debug);
+@@ -395,7 +395,7 @@ mISDNInit(void)
+ error5:
+ Isdnl2_cleanup();
+ error4:
+- l1_cleanup();
++ Isdnl1_cleanup();
+ error3:
+ mISDN_timer_cleanup();
+ error2:
+@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
+ {
+ misdn_sock_cleanup();
+ Isdnl2_cleanup();
+- l1_cleanup();
++ Isdnl1_cleanup();
+ mISDN_timer_cleanup();
+ class_unregister(&mISDN_class);
+
+diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
+index 23b44d3033279..42599f49c189d 100644
+--- a/drivers/isdn/mISDN/core.h
++++ b/drivers/isdn/mISDN/core.h
+@@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
+ extern int mISDN_inittimer(u_int *);
+ extern void mISDN_timer_cleanup(void);
+
+-extern int l1_init(u_int *);
+-extern void l1_cleanup(void);
++extern int Isdnl1_Init(u_int *);
++extern void Isdnl1_cleanup(void);
+ extern int Isdnl2_Init(u_int *);
+ extern void Isdnl2_cleanup(void);
+
+diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
+index 98a3bc6c17009..7b31c25a550e3 100644
+--- a/drivers/isdn/mISDN/layer1.c
++++ b/drivers/isdn/mISDN/layer1.c
+@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
+ EXPORT_SYMBOL(create_l1);
+
+ int
+-l1_init(u_int *deb)
++Isdnl1_Init(u_int *deb)
+ {
+ debug = deb;
+ l1fsm_s.state_count = L1S_STATE_COUNT;
+@@ -409,7 +409,7 @@ l1_init(u_int *deb)
+ }
+
+ void
+-l1_cleanup(void)
++Isdnl1_cleanup(void)
+ {
+ mISDN_FsmFree(&l1fsm_s);
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 03821b46a8cb4..4c22f119ac62f 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -305,6 +305,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
++ if (buff_->next >= self->size) {
++ err = -EIO;
++ goto err_exit;
++ }
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+ is_rsc_completed =
+@@ -327,6 +331,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ if (buff->is_error || buff->is_cso_err) {
+ buff_ = buff;
+ do {
++ if (buff_->next >= self->size) {
++ err = -EIO;
++ goto err_exit;
++ }
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index ce237da003ddb..a2326683be170 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -107,6 +107,24 @@ MODULE_VERSION(DRV_VERSION);
+
+ static struct workqueue_struct *i40e_wq;
+
++static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
++ struct net_device *netdev, int delta)
++{
++ struct netdev_hw_addr *ha;
++
++ if (!f || !netdev)
++ return;
++
++ netdev_for_each_mc_addr(ha, netdev) {
++ if (ether_addr_equal(ha->addr, f->macaddr)) {
++ ha->refcount += delta;
++ if (ha->refcount <= 0)
++ ha->refcount = 1;
++ break;
++ }
++ }
++}
++
+ /**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+@@ -2022,6 +2040,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
+ hlist_for_each_entry_safe(new, h, from, hlist) {
+ /* We can simply free the wrapper structure */
+ hlist_del(&new->hlist);
++ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
+ kfree(new);
+ }
+ }
+@@ -2369,6 +2388,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ &tmp_add_list,
+ &tmp_del_list,
+ vlan_filters);
++
++ hlist_for_each_entry(new, &tmp_add_list, hlist)
++ netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
++
+ if (retval)
+ goto err_no_memory_locked;
+
+@@ -2501,6 +2524,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ if (new->f->state == I40E_FILTER_NEW)
+ new->f->state = new->state;
+ hlist_del(&new->hlist);
++ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
+ kfree(new);
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+@@ -8302,6 +8326,27 @@ int i40e_open(struct net_device *netdev)
+ return 0;
+ }
+
++/**
++ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
++ * @vsi: vsi structure
++ *
++ * This updates netdev's number of tx/rx queues
++ *
++ * Returns status of setting tx/rx queues
++ **/
++static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
++{
++ int ret;
++
++ ret = netif_set_real_num_rx_queues(vsi->netdev,
++ vsi->num_queue_pairs);
++ if (ret)
++ return ret;
++
++ return netif_set_real_num_tx_queues(vsi->netdev,
++ vsi->num_queue_pairs);
++}
++
+ /**
+ * i40e_vsi_open -
+ * @vsi: the VSI to open
+@@ -8338,13 +8383,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
+ goto err_setup_rx;
+
+ /* Notify the stack of the actual queue counts. */
+- err = netif_set_real_num_tx_queues(vsi->netdev,
+- vsi->num_queue_pairs);
+- if (err)
+- goto err_set_queues;
+-
+- err = netif_set_real_num_rx_queues(vsi->netdev,
+- vsi->num_queue_pairs);
++ err = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (err)
+ goto err_set_queues;
+
+@@ -13766,6 +13805,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ case I40E_VSI_MAIN:
+ case I40E_VSI_VMDQ2:
+ ret = i40e_config_netdev(vsi);
++ if (ret)
++ goto err_netdev;
++ ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (ret)
+ goto err_netdev;
+ ret = register_netdev(vsi->netdev);
+@@ -15012,8 +15054,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
+- dev_info(&pdev->dev,
+- "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
++ dev_dbg(&pdev->dev,
++ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 449eb06e2c7da..309e953ed1e44 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2604,8 +2604,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
+ total_max_rate += tx_rate;
+ num_qps += mqprio_qopt->qopt.count[i];
+ }
+- if (num_qps > IAVF_MAX_REQ_QUEUES)
++ if (num_qps > adapter->num_active_queues) {
++ dev_err(&adapter->pdev->dev,
++ "Cannot support requested number of queues\n");
+ return -EINVAL;
++ }
+
+ ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
+ return ret;
+diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
+index 23ee0b14cbfa1..2f5e7b31032aa 100644
+--- a/drivers/net/ieee802154/atusb.c
++++ b/drivers/net/ieee802154/atusb.c
+@@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+
+ ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+ value, index, data, size, timeout);
+- if (ret < 0) {
++ if (ret < size) {
++ ret = ret < 0 ? ret : -ENODATA;
++
+ atusb->err = ret;
+ dev_err(&usb_dev->dev,
+ "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+@@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
+ if (!build)
+ return -ENOMEM;
+
+- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+- ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+- build, ATUSB_BUILD_SIZE, 1000);
++ /* We cannot call atusb_control_msg() here, since this request may read various length data */
++ ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
++ ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
+ if (ret >= 0) {
+ build[ret] = 0;
+ dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 0b61d80ea3f8c..18cc5e4280e83 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1096,6 +1096,7 @@ static struct phy_driver ksphy_driver[] = {
+ .probe = kszphy_probe,
+ .config_init = ksz8081_config_init,
+ .ack_interrupt = kszphy_ack_interrupt,
++ .soft_reset = genphy_soft_reset,
+ .config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index f9b359d4e2939..1505fe3f87ed3 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -608,6 +608,11 @@ static const struct usb_device_id products [] = {
+ USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long) &rndis_poll_status_info,
++}, {
++ /* Hytera Communications DMR radios' "Radio to PC Network" */
++ USB_VENDOR_AND_INTERFACE_INFO(0x238b,
++ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
++ .driver_info = (unsigned long)&rndis_info,
+ }, {
+ /* RNDIS is MSFT's un-official variant of CDC ACM */
+ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
+index e4a0cc45b3d11..ec613bcc0a302 100644
+--- a/drivers/power/reset/ltc2952-poweroff.c
++++ b/drivers/power/reset/ltc2952-poweroff.c
+@@ -160,8 +160,8 @@ static void ltc2952_poweroff_kill(void)
+
+ static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
+ {
+- data->wde_interval = 300L * 1E6L;
+- data->trigger_delay = ktime_set(2, 500L*1E6L);
++ data->wde_interval = 300L * NSEC_PER_MSEC;
++ data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
+
+ hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 5c36c430ce8b6..a2f56a68c50d6 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -742,6 +742,10 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
+ return NULL;
+
+ for (i = 0; i < POWER_SUPPLY_OCV_TEMP_MAX; i++) {
++ /* Out of capacity tables */
++ if (!info->ocv_table[i])
++ break;
++
+ temp_diff = abs(info->ocv_temp[i] - temp);
+
+ if (temp_diff < best_temp_diff) {
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index eeba6180711cd..f3cee64c6d12f 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -2948,6 +2948,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
++ char *tmp_persistent_address = conn->persistent_address;
++ char *tmp_local_ipaddr = conn->local_ipaddr;
+
+ del_timer_sync(&conn->transport_timer);
+
+@@ -2969,8 +2971,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->frwd_lock);
+ free_pages((unsigned long) conn->data,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+- kfree(conn->persistent_address);
+- kfree(conn->local_ipaddr);
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
+@@ -2982,6 +2982,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ mutex_unlock(&session->eh_mutex);
+
+ iscsi_destroy_conn(cls_conn);
++ kfree(tmp_persistent_address);
++ kfree(tmp_local_ipaddr);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
+
+diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
+index 253c8b71d3c49..061da9b82b967 100644
+--- a/drivers/usb/mtu3/mtu3_gadget.c
++++ b/drivers/usb/mtu3/mtu3_gadget.c
+@@ -85,7 +85,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ interval = desc->bInterval;
+- interval = clamp_val(interval, 1, 16) - 1;
++ interval = clamp_val(interval, 1, 16);
+ if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+ mult = comp_desc->bmAttributes;
+ }
+@@ -97,7 +97,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
+ if (usb_endpoint_xfer_isoc(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ interval = desc->bInterval;
+- interval = clamp_val(interval, 1, 16) - 1;
++ interval = clamp_val(interval, 1, 16);
+ mult = usb_endpoint_maxp_mult(desc) - 1;
+ }
+ break;
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index f7d27cbbeb860..03dce3980d90a 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -1144,7 +1144,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
+ if (!is_journalled_quota(sbi))
+ return false;
+
+- down_write(&sbi->quota_sem);
++ if (!down_write_trylock(&sbi->quota_sem))
++ return true;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
+ ret = false;
+ } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index b3021d9b34a5e..7b7a009425e21 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -714,7 +714,8 @@ xfs_ioc_space(
+ flags |= XFS_PREALLOC_CLEAR;
+ if (bf->l_start > XFS_ISIZE(ip)) {
+ error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+- bf->l_start - XFS_ISIZE(ip), 0);
++ bf->l_start - XFS_ISIZE(ip),
++ XFS_BMAPI_PREALLOC);
+ if (error)
+ goto out_unlock;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5240ba9a82db8..54f5b2f080f53 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3007,7 +3007,7 @@ struct trace_buffer_struct {
+ char buffer[4][TRACE_BUF_SIZE];
+ };
+
+-static struct trace_buffer_struct *trace_percpu_buffer;
++static struct trace_buffer_struct __percpu *trace_percpu_buffer;
+
+ /*
+ * Thise allows for lockless recording. If we're nested too deeply, then
+@@ -3017,7 +3017,7 @@ static char *get_trace_buf(void)
+ {
+ struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
+
+- if (!buffer || buffer->nesting >= 4)
++ if (!trace_percpu_buffer || buffer->nesting >= 4)
+ return NULL;
+
+ buffer->nesting++;
+@@ -3036,7 +3036,7 @@ static void put_trace_buf(void)
+
+ static int alloc_percpu_trace_buffer(void)
+ {
+- struct trace_buffer_struct *buffers;
++ struct trace_buffer_struct __percpu *buffers;
+
+ buffers = alloc_percpu(struct trace_buffer_struct);
+ if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index f5bf931252c4b..09d81f9c2a649 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -1373,6 +1373,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: The multicast packet to check
+ * @orig: an originator to be set to forward the skb to
++ * @is_routable: stores whether the destination is routable
+ *
+ * Return: the forwarding mode as enum batadv_forw_mode and in case of
+ * BATADV_FORW_SINGLE set the orig to the single originator the skb
+@@ -1380,17 +1381,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
+ */
+ enum batadv_forw_mode
+ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- struct batadv_orig_node **orig)
++ struct batadv_orig_node **orig, int *is_routable)
+ {
+ int ret, tt_count, ip_count, unsnoop_count, total_count;
+ bool is_unsnoopable = false;
+ unsigned int mcast_fanout;
+ struct ethhdr *ethhdr;
+- int is_routable = 0;
+ int rtr_count = 0;
+
+ ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
+- &is_routable);
++ is_routable);
+ if (ret == -ENOMEM)
+ return BATADV_FORW_NONE;
+ else if (ret < 0)
+@@ -1403,7 +1403,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
+ unsnoop_count = !is_unsnoopable ? 0 :
+ atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
+- rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
++ rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
+
+ total_count = tt_count + ip_count + unsnoop_count + rtr_count;
+
+@@ -1723,6 +1723,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
++ * @is_routable: stores whether the destination is routable
+ *
+ * Sends copies of a frame with multicast destination to any node that signaled
+ * interest in it, that is either via the translation table or the according
+@@ -1735,7 +1736,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
+ * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
+ */
+ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- unsigned short vid)
++ unsigned short vid, int is_routable)
+ {
+ int ret;
+
+@@ -1751,12 +1752,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ return ret;
+ }
+
++ if (!is_routable)
++ goto skip_mc_router;
++
+ ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
+ if (ret != NET_XMIT_SUCCESS) {
+ kfree_skb(skb);
+ return ret;
+ }
+
++skip_mc_router:
+ consume_skb(skb);
+ return ret;
+ }
+diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
+index 403929013ac47..fc1ffd22a6715 100644
+--- a/net/batman-adv/multicast.h
++++ b/net/batman-adv/multicast.h
+@@ -44,7 +44,8 @@ enum batadv_forw_mode {
+
+ enum batadv_forw_mode
+ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- struct batadv_orig_node **mcast_single_orig);
++ struct batadv_orig_node **mcast_single_orig,
++ int *is_routable);
+
+ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+@@ -52,7 +53,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+
+ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- unsigned short vid);
++ unsigned short vid, int is_routable);
+
+ void batadv_mcast_init(struct batadv_priv *bat_priv);
+
+@@ -71,7 +72,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
+
+ static inline enum batadv_forw_mode
+ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- struct batadv_orig_node **mcast_single_orig)
++ struct batadv_orig_node **mcast_single_orig,
++ int *is_routable)
+ {
+ return BATADV_FORW_ALL;
+ }
+@@ -88,7 +90,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+
+ static inline int
+ batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+- unsigned short vid)
++ unsigned short vid, int is_routable)
+ {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 7f209390069ea..504e3cb67bed4 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -200,6 +200,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
+ int gw_mode;
+ enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
+ struct batadv_orig_node *mcast_single_orig = NULL;
++ int mcast_is_routable = 0;
+ int network_offset = ETH_HLEN;
+ __be16 proto;
+
+@@ -302,7 +303,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
+ send:
+ if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
+ forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
+- &mcast_single_orig);
++ &mcast_single_orig,
++ &mcast_is_routable);
+ if (forw_mode == BATADV_FORW_NONE)
+ goto dropped;
+
+@@ -367,7 +369,8 @@ send:
+ ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
+ mcast_single_orig);
+ } else if (forw_mode == BATADV_FORW_SOME) {
+- ret = batadv_mcast_forw_send(bat_priv, skb, vid);
++ ret = batadv_mcast_forw_send(bat_priv, skb, vid,
++ mcast_is_routable);
+ } else {
+ if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
+ skb))
+diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
+index 2f9c0de533c75..0b64f015b3b0b 100644
+--- a/net/core/lwtunnel.c
++++ b/net/core/lwtunnel.c
+@@ -190,6 +190,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
+ nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+ if (nla_entype) {
++ if (nla_len(nla_entype) < sizeof(u16)) {
++ NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
++ return -EINVAL;
++ }
+ encap_type = nla_get_u16(nla_entype);
+
+ if (lwtunnel_valid_encap_type(encap_type,
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index c0b8154205237..ce4b28f011485 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -654,6 +654,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
+ return nhs;
+ }
+
++static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
++ struct netlink_ext_ack *extack)
++{
++ if (nla_len(nla) < sizeof(*gw)) {
++ NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
++ return -EINVAL;
++ }
++
++ *gw = nla_get_in_addr(nla);
++
++ return 0;
++}
++
+ /* only called when fib_nh is integrated into fib_info */
+ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
+ int remaining, struct fib_config *cfg,
+@@ -696,7 +709,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
+ return -EINVAL;
+ }
+ if (nla) {
+- fib_cfg.fc_gw4 = nla_get_in_addr(nla);
++ ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
++ extack);
++ if (ret)
++ goto errout;
++
+ if (fib_cfg.fc_gw4)
+ fib_cfg.fc_gw_family = AF_INET;
+ } else if (nlav) {
+@@ -706,10 +723,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
+ }
+
+ nla = nla_find(attrs, attrlen, RTA_FLOW);
+- if (nla)
++ if (nla) {
++ if (nla_len(nla) < sizeof(u32)) {
++ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
++ return -EINVAL;
++ }
+ fib_cfg.fc_flow = nla_get_u32(nla);
++ }
+
+ fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
++ /* RTA_ENCAP_TYPE length checked in
++ * lwtunnel_valid_encap_type_attr
++ */
+ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+ if (nla)
+ fib_cfg.fc_encap_type = nla_get_u16(nla);
+@@ -894,6 +919,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
++ int err;
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+ nlav = nla_find(attrs, attrlen, RTA_VIA);
+@@ -904,12 +930,17 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+ }
+
+ if (nla) {
++ __be32 gw;
++
++ err = fib_gw_from_attr(&gw, nla, extack);
++ if (err)
++ return err;
++
+ if (nh->fib_nh_gw_family != AF_INET ||
+- nla_get_in_addr(nla) != nh->fib_nh_gw4)
++ gw != nh->fib_nh_gw4)
+ return 1;
+ } else if (nlav) {
+ struct fib_config cfg2;
+- int err;
+
+ err = fib_gw_from_via(&cfg2, nlav, extack);
+ if (err)
+@@ -932,8 +963,14 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ nla = nla_find(attrs, attrlen, RTA_FLOW);
+- if (nla && nla_get_u32(nla) != nh->nh_tclassid)
+- return 1;
++ if (nla) {
++ if (nla_len(nla) < sizeof(u32)) {
++ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
++ return -EINVAL;
++ }
++ if (nla_get_u32(nla) != nh->nh_tclassid)
++ return 1;
++ }
+ #endif
+ }
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index a0016f05c4f86..3cdf011a8dd8d 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2943,7 +2943,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
+ {
+ seq_setwidth(seq, 127);
+ if (v == SEQ_START_TOKEN)
+- seq_puts(seq, " sl local_address rem_address st tx_queue "
++ seq_puts(seq, " sl local_address rem_address st tx_queue "
+ "rx_queue tr tm->when retrnsmt uid timeout "
+ "inode ref pointer drops");
+ else {
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 12ab6605d9617..8b44d3b53844e 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -795,6 +795,8 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ struct net *net = dev_net(dev);
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
++ memset(&p1, 0, sizeof(p1));
++
+ switch (cmd) {
+ case SIOCGETTUNNEL:
+ if (dev == ip6n->fb_tnl_dev) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f36db3dd97346..5ef6e27e026e9 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5092,6 +5092,19 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+ }
+
++static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
++ struct netlink_ext_ack *extack)
++{
++ if (nla_len(nla) < sizeof(*gw)) {
++ NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
++ return -EINVAL;
++ }
++
++ *gw = nla_get_in6_addr(nla);
++
++ return 0;
++}
++
+ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
+ {
+@@ -5133,10 +5146,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+ if (nla) {
+- r_cfg.fc_gateway = nla_get_in6_addr(nla);
++ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
++ extack);
++ if (err)
++ goto cleanup;
++
+ r_cfg.fc_flags |= RTF_GATEWAY;
+ }
+ r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
++
++ /* RTA_ENCAP_TYPE length checked in
++ * lwtunnel_valid_encap_type_attr
++ */
+ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+ if (nla)
+ r_cfg.fc_encap_type = nla_get_u16(nla);
+@@ -5288,7 +5309,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+ if (nla) {
+- nla_memcpy(&r_cfg.fc_gateway, nla, 16);
++ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
++ extack);
++ if (err) {
++ last_err = err;
++ goto next_rtnh;
++ }
++
+ r_cfg.fc_flags |= RTF_GATEWAY;
+ }
+ }
+@@ -5296,6 +5323,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
+ if (err)
+ last_err = err;
+
++next_rtnh:
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 5c727af01143f..ad00f31e20023 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4953,7 +4953,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+ */
+ if (new_sta) {
+ u32 rates = 0, basic_rates = 0;
+- bool have_higher_than_11mbit;
++ bool have_higher_than_11mbit = false;
+ int min_rate = INT_MAX, min_rate_index = -1;
+ const struct cfg80211_bss_ies *ies;
+ int shift = ieee80211_vif_get_shift(&sdata->vif);
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index a07e13f63332c..0c5d0f7b8b4bb 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+
+ err = pep_accept_conn(newsk, skb);
+ if (err) {
++ __sock_put(sk);
+ sock_put(newsk);
+ newsk = NULL;
+ goto drop;
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index b046fd3cac2cf..1eb339d224ae5 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+ if (err < 0)
+ return err;
+
+- if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
+- max_classes = QFQ_MAX_AGG_CLASSES;
+- else
+- max_classes = qdisc_dev(sch)->tx_queue_len + 1;
++ max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
++ QFQ_MAX_AGG_CLASSES);
+ /* max_cl_shift = floor(log_2(max_classes)) */
+ max_cl_shift = __fls(max_classes);
+ q->max_agg_classes = 1<<max_cl_shift;
+diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
+index a4f4d4cf22c3b..d0752a0a8f362 100644
+--- a/tools/testing/selftests/x86/test_vsyscall.c
++++ b/tools/testing/selftests/x86/test_vsyscall.c
+@@ -480,7 +480,7 @@ static int test_process_vm_readv(void)
+ }
+
+ if (vsyscall_map_r) {
+- if (!memcmp(buf, (const void *)0xffffffffff600000, 4096)) {
++ if (!memcmp(buf, remote.iov_base, sizeof(buf))) {
+ printf("[OK]\tIt worked and read correct data\n");
+ } else {
+ printf("[FAIL]\tIt worked but returned incorrect data\n");