commit:     6e0d0b04ebaf8be6ef1adff3d563b4c16b465af0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 25 19:44:50 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 25 19:44:50 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6e0d0b04

Linux patch 5.10.125

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |   4 +
 1124_linux-5.10.125.patch | 493 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 497 insertions(+)

diff --git a/0000_README b/0000_README
index aedaaf1a..fb42ce16 100644
--- a/0000_README
+++ b/0000_README
@@ -539,6 +539,10 @@ Patch: 1123_linux-5.10.124.patch
From: http://www.kernel.org
Desc: Linux 5.10.124

+Patch: 1124_linux-5.10.125.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.125
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1124_linux-5.10.125.patch b/1124_linux-5.10.125.patch
new file mode 100644
index 00000000..0af8c810
--- /dev/null
+++ b/1124_linux-5.10.125.patch
@@ -0,0 +1,493 @@
+diff --git a/Makefile b/Makefile
+index 9ed79a05a9725..da5b28931e5cb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 124
++SUBLEVEL = 125
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
+index 2d881f34dd9d5..7b8158ae36ecc 100644
+--- a/arch/arm64/mm/cache.S
++++ b/arch/arm64/mm/cache.S
+@@ -228,8 +228,6 @@ SYM_FUNC_END_PI(__dma_flush_area)
+ * - dir - DMA direction
+ */
+ SYM_FUNC_START_PI(__dma_map_area)
+- cmp w2, #DMA_FROM_DEVICE
+- b.eq __dma_inv_area
+ b __dma_clean_area
+ SYM_FUNC_END_PI(__dma_map_area)
+
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index fabaedddc90cb..1c05caf68e7d8 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -734,7 +734,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
+ ptev = pte_val(*ptep);
+ if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+- page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
++ page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
+ pgste_set_unlock(ptep, pgste);
+ preempt_enable();
+ }
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 19f0c5db11e33..32d09d024f6c9 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -144,6 +144,11 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
+ unsigned long flags;
+ unsigned int old;
+
++ if (port->rs485.flags & SER_RS485_ENABLED) {
++ set &= ~TIOCM_RTS;
++ clear &= ~TIOCM_RTS;
++ }
++
+ spin_lock_irqsave(&port->lock, flags);
+ old = port->mctrl;
+ port->mctrl = (old & ~clear) | set;
+@@ -157,23 +162,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
+
+ static void uart_port_dtr_rts(struct uart_port *uport, int raise)
+ {
+- int rs485_on = uport->rs485_config &&
+- (uport->rs485.flags & SER_RS485_ENABLED);
+- int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND);
+-
+- if (raise) {
+- if (rs485_on && RTS_after_send) {
+- uart_set_mctrl(uport, TIOCM_DTR);
+- uart_clear_mctrl(uport, TIOCM_RTS);
+- } else {
+- uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+- }
+- } else {
+- unsigned int clear = TIOCM_DTR;
+-
+- clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0;
+- uart_clear_mctrl(uport, clear);
+- }
++ if (raise)
++ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
++ else
++ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ /*
+@@ -1116,11 +1108,6 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
+ goto out;
+
+ if (!tty_io_error(tty)) {
+- if (uport->rs485.flags & SER_RS485_ENABLED) {
+- set &= ~TIOCM_RTS;
+- clear &= ~TIOCM_RTS;
+- }
+-
+ uart_update_mctrl(uport, set, clear);
+ ret = 0;
+ }
+@@ -2429,6 +2416,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl &= TIOCM_DTR;
++ if (port->rs485.flags & SER_RS485_ENABLED &&
++ !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
++ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index a40be8b448c24..64ef97ab9274a 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
+ dev->qmult = qmult;
+ snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+- if (get_ether_addr(dev_addr, net->dev_addr))
++ if (get_ether_addr(dev_addr, net->dev_addr)) {
++ net->addr_assign_type = NET_ADDR_RANDOM;
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "self");
++ } else {
++ net->addr_assign_type = NET_ADDR_SET;
++ }
+ if (get_ether_addr(host_addr, dev->host_mac))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "host");
+@@ -831,6 +835,9 @@ struct net_device *gether_setup_name_default(const char *netname)
+ INIT_LIST_HEAD(&dev->tx_reqs);
+ INIT_LIST_HEAD(&dev->rx_reqs);
+
++ /* by default we always have a random MAC address */
++ net->addr_assign_type = NET_ADDR_RANDOM;
++
+ skb_queue_head_init(&dev->rx_frames);
+
+ /* network device setup */
+@@ -868,7 +875,6 @@ int gether_register_netdev(struct net_device *net)
+ g = dev->gadget;
+
+ memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
+- net->addr_assign_type = NET_ADDR_RANDOM;
+
+ status = register_netdev(net);
+ if (status < 0) {
+@@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
+ if (get_ether_addr(dev_addr, new_addr))
+ return -EINVAL;
+ memcpy(dev->dev_mac, new_addr, ETH_ALEN);
++ net->addr_assign_type = NET_ADDR_SET;
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_dev_addr);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 871475d3fca2c..40ac37beca47d 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -773,7 +773,8 @@ static const struct io_op_def io_op_defs[] = {
+ .buffer_select = 1,
+ .needs_async_data = 1,
+ .async_size = sizeof(struct io_async_rw),
+- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
++ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
++ IO_WQ_WORK_FILES,
+ },
+ [IORING_OP_WRITEV] = {
+ .needs_file = 1,
+@@ -783,7 +784,7 @@ static const struct io_op_def io_op_defs[] = {
+ .needs_async_data = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+- IO_WQ_WORK_FSIZE,
++ IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES,
+ },
+ [IORING_OP_FSYNC] = {
+ .needs_file = 1,
+@@ -794,7 +795,8 @@ static const struct io_op_def io_op_defs[] = {
+ .unbound_nonreg_file = 1,
+ .pollin = 1,
+ .async_size = sizeof(struct io_async_rw),
+- .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
++ .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM |
++ IO_WQ_WORK_FILES,
+ },
+ [IORING_OP_WRITE_FIXED] = {
+ .needs_file = 1,
+@@ -803,7 +805,7 @@ static const struct io_op_def io_op_defs[] = {
+ .pollout = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
+- IO_WQ_WORK_MM,
++ IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
+ },
+ [IORING_OP_POLL_ADD] = {
+ .needs_file = 1,
+@@ -857,7 +859,7 @@ static const struct io_op_def io_op_defs[] = {
+ .pollout = 1,
+ .needs_async_data = 1,
+ .async_size = sizeof(struct io_async_connect),
+- .work_flags = IO_WQ_WORK_MM,
++ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FS,
+ },
+ [IORING_OP_FALLOCATE] = {
+ .needs_file = 1,
+@@ -885,7 +887,8 @@ static const struct io_op_def io_op_defs[] = {
+ .pollin = 1,
+ .buffer_select = 1,
+ .async_size = sizeof(struct io_async_rw),
+- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
++ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
++ IO_WQ_WORK_FILES,
+ },
+ [IORING_OP_WRITE] = {
+ .needs_file = 1,
+@@ -894,7 +897,7 @@ static const struct io_op_def io_op_defs[] = {
+ .pollout = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+- IO_WQ_WORK_FSIZE,
++ IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES,
+ },
+ [IORING_OP_FADVISE] = {
+ .needs_file = 1,
+@@ -907,14 +910,16 @@ static const struct io_op_def io_op_defs[] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollout = 1,
+- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
++ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
++ IO_WQ_WORK_FS,
+ },
+ [IORING_OP_RECV] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollin = 1,
+ .buffer_select = 1,
+- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
++ .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
++ IO_WQ_WORK_FS,
+ },
+ [IORING_OP_OPENAT2] = {
+ .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 8c7d01e907a31..bf5cb6efb8c09 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -68,15 +68,49 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
+ zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+ }
+
+-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+- unsigned int flags, struct iomap *iomap,
+- struct iomap *srcmap)
++static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
++ loff_t length, unsigned int flags,
++ struct iomap *iomap, struct iomap *srcmap)
+ {
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+
+- /* All I/Os should always be within the file maximum size */
++ /*
++ * All blocks are always mapped below EOF. If reading past EOF,
++ * act as if there is a hole up to the file maximum size.
++ */
++ mutex_lock(&zi->i_truncate_mutex);
++ iomap->bdev = inode->i_sb->s_bdev;
++ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++ isize = i_size_read(inode);
++ if (iomap->offset >= isize) {
++ iomap->type = IOMAP_HOLE;
++ iomap->addr = IOMAP_NULL_ADDR;
++ iomap->length = length;
++ } else {
++ iomap->type = IOMAP_MAPPED;
++ iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
++ iomap->length = isize - iomap->offset;
++ }
++ mutex_unlock(&zi->i_truncate_mutex);
++
++ return 0;
++}
++
++static const struct iomap_ops zonefs_read_iomap_ops = {
++ .iomap_begin = zonefs_read_iomap_begin,
++};
++
++static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
++ loff_t length, unsigned int flags,
++ struct iomap *iomap, struct iomap *srcmap)
++{
++ struct zonefs_inode_info *zi = ZONEFS_I(inode);
++ struct super_block *sb = inode->i_sb;
++ loff_t isize;
++
++ /* All write I/Os should always be within the file maximum size */
+ if (WARN_ON_ONCE(offset + length > zi->i_max_size))
+ return -EIO;
+
+@@ -86,7 +120,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ * operation.
+ */
+ if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+- (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
++ !(flags & IOMAP_DIRECT)))
+ return -EIO;
+
+ /*
+@@ -95,45 +129,42 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ * write pointer) and unwriten beyond.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
++ iomap->bdev = inode->i_sb->s_bdev;
++ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++ iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+ isize = i_size_read(inode);
+- if (offset >= isize)
++ if (iomap->offset >= isize) {
+ iomap->type = IOMAP_UNWRITTEN;
+- else
++ iomap->length = zi->i_max_size - iomap->offset;
++ } else {
+ iomap->type = IOMAP_MAPPED;
+- if (flags & IOMAP_WRITE)
+- length = zi->i_max_size - offset;
+- else
+- length = min(length, isize - offset);
++ iomap->length = isize - iomap->offset;
++ }
+ mutex_unlock(&zi->i_truncate_mutex);
+
+- iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+- iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
+- iomap->bdev = inode->i_sb->s_bdev;
+- iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-
+ return 0;
+ }
+
+-static const struct iomap_ops zonefs_iomap_ops = {
+- .iomap_begin = zonefs_iomap_begin,
++static const struct iomap_ops zonefs_write_iomap_ops = {
++ .iomap_begin = zonefs_write_iomap_begin,
+ };
+
+ static int zonefs_readpage(struct file *unused, struct page *page)
+ {
+- return iomap_readpage(page, &zonefs_iomap_ops);
++ return iomap_readpage(page, &zonefs_read_iomap_ops);
+ }
+
+ static void zonefs_readahead(struct readahead_control *rac)
+ {
+- iomap_readahead(rac, &zonefs_iomap_ops);
++ iomap_readahead(rac, &zonefs_read_iomap_ops);
+ }
+
+ /*
+ * Map blocks for page writeback. This is used only on conventional zone files,
+ * which implies that the page range can only be within the fixed inode size.
+ */
+-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
+- struct inode *inode, loff_t offset)
++static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
++ struct inode *inode, loff_t offset)
+ {
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+@@ -147,12 +178,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
+ offset < wpc->iomap.offset + wpc->iomap.length)
+ return 0;
+
+- return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
+- IOMAP_WRITE, &wpc->iomap, NULL);
++ return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
++ IOMAP_WRITE, &wpc->iomap, NULL);
+ }
+
+ static const struct iomap_writeback_ops zonefs_writeback_ops = {
+- .map_blocks = zonefs_map_blocks,
++ .map_blocks = zonefs_write_map_blocks,
+ };
+
+ static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
+@@ -182,7 +213,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
+ return -EINVAL;
+ }
+
+- return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
++ return iomap_swapfile_activate(sis, swap_file, span,
++ &zonefs_read_iomap_ops);
+ }
+
+ static const struct address_space_operations zonefs_file_aops = {
+@@ -612,7 +644,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
+
+ /* Serialize against truncates */
+ down_read(&zi->i_mmap_sem);
+- ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
++ ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+ up_read(&zi->i_mmap_sem);
+
+ sb_end_pagefault(inode->i_sb);
+@@ -869,7 +901,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ if (append)
+ ret = zonefs_file_dio_append(iocb, from);
+ else
+- ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
++ ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+ &zonefs_write_dio_ops, sync);
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+ (ret > 0 || ret == -EIOCBQUEUED)) {
+@@ -911,7 +943,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+ if (ret <= 0)
+ goto inode_unlock;
+
+- ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
++ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+ if (ret > 0)
+ iocb->ki_pos += ret;
+ else if (ret == -EIO)
+@@ -1004,7 +1036,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ goto inode_unlock;
+ }
+ file_accessed(iocb->ki_filp);
+- ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
++ ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+ &zonefs_read_dio_ops, is_sync_kiocb(iocb));
+ } else {
+ ret = generic_file_read_iter(iocb, to);
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 44b524136f953..f38b71cc3edbe 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -726,12 +726,14 @@ EXPORT_SYMBOL_GPL(inet_unhash);
+ * Note that we use 32bit integers (vs RFC 'short integers')
+ * because 2^16 is not a multiple of num_ephemeral and this
+ * property might be used by clever attacker.
+- * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
+- * we use 256 instead to really give more isolation and
+- * privacy, this only consumes 1 KB of kernel memory.
++ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
++ * attacks were since demonstrated, thus we use 65536 instead to really
++ * give more isolation and privacy, at the expense of 256kB of kernel
++ * memory.
+ */
+-#define INET_TABLE_PERTURB_SHIFT 8
+-static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
++#define INET_TABLE_PERTURB_SHIFT 16
++#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
++static u32 *table_perturb;
+
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk, u64 port_offset,
+@@ -774,10 +776,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ if (likely(remaining > 1))
+ remaining &= ~1U;
+
+- net_get_random_once(table_perturb, sizeof(table_perturb));
+- index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
++ net_get_random_once(table_perturb,
++ INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
++ index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
+
+- offset = READ_ONCE(table_perturb[index]) + port_offset;
++ offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
+ offset %= remaining;
+
+ /* In first pass we try ports of @low parity.
+@@ -833,6 +836,12 @@ next_port:
+ return -EADDRNOTAVAIL;
+
+ ok:
++ /* Here we want to add a little bit of randomness to the next source
++ * port that will be chosen. We use a max() with a random here so that
++ * on low contention the randomness is maximal and on high contention
++ * it may be inexistent.
++ */
++ i = max_t(int, i, (prandom_u32() & 7) * 2);
+ WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
+
+ /* Head lock still held and bh's disabled */
+@@ -906,6 +915,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+ low_limit,
+ high_limit);
+ init_hashinfo_lhash2(h);
++
++ /* this one is used for source ports of outgoing connections */
++ table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
++ sizeof(*table_perturb), GFP_KERNEL);
++ if (!table_perturb)
++ panic("TCP: failed to alloc table_perturb");
+ }
+
+ int inet_hashinfo2_init_mod(struct inet_hashinfo *h)