commit:     0cf9a96e20f1df4bba833a1d18501d54a897acdf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 28 14:09:52 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 28 14:09:52 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0cf9a96e

Linux patch 3.12.48

 1047_linux-3.12.48.patch | 1502 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1502 insertions(+)

diff --git a/1047_linux-3.12.48.patch b/1047_linux-3.12.48.patch
new file mode 100644
index 0000000..c0f1bf4
--- /dev/null
+++ b/1047_linux-3.12.48.patch
@@ -0,0 +1,1502 @@
+diff --git a/Makefile b/Makefile
+index c45298b8b2d5..a01f2573731d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 560227b817fe..01c7270b5e84 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -2810,34 +2810,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf)
+ static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
+ 					  size_t len, loff_t *offset)
+ {
++	struct driver_data *dd = (struct driver_data *)f->private_data;
+ 	int size = *offset;
+-	char buf[MTIP_DFS_MAX_BUF_SIZE];
++	char *buf;
++	int rv = 0;
+
+ 	if (!len || *offset)
+ 		return 0;
+
++	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
++	if (!buf) {
++		dev_err(&dd->pdev->dev,
++			"Memory allocation: status buffer\n");
++		return -ENOMEM;
++	}
++
+ 	size += show_device_status(NULL, buf);
+
+ 	*offset = size <= len ? size : len;
+ 	size = copy_to_user(ubuf, buf, *offset);
+ 	if (size)
+-		return -EFAULT;
++		rv = -EFAULT;
+
+-	return *offset;
++	kfree(buf);
++	return rv ? rv : *offset;
+ }
+
+ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ 				  size_t len, loff_t *offset)
+ {
+ 	struct driver_data *dd = (struct driver_data *)f->private_data;
+-	char buf[MTIP_DFS_MAX_BUF_SIZE];
++	char *buf;
+ 	u32 group_allocated;
+ 	int size = *offset;
+-	int n;
++	int n, rv = 0;
+
+ 	if (!len || size)
+ 		return 0;
+
++	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
++	if (!buf) {
++		dev_err(&dd->pdev->dev,
++			"Memory allocation: register buffer\n");
++		return -ENOMEM;
++	}
++
+ 	size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
+
+ 	for (n = dd->slot_groups-1; n >= 0; n--)
+@@ -2892,21 +2909,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ 	*offset = size <= len ? size : len;
+ 	size = copy_to_user(ubuf, buf, *offset);
+ 	if (size)
+-		return -EFAULT;
++		rv = -EFAULT;
+
+-	return *offset;
++	kfree(buf);
++	return rv ? rv : *offset;
+ }
+
+ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ 				  size_t len, loff_t *offset)
+ {
+ 	struct driver_data *dd = (struct driver_data *)f->private_data;
+-	char buf[MTIP_DFS_MAX_BUF_SIZE];
++	char *buf;
+ 	int size = *offset;
++	int rv = 0;
+
+ 	if (!len || size)
+ 		return 0;
+
++	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
++	if (!buf) {
++		dev_err(&dd->pdev->dev,
++			"Memory allocation: flag buffer\n");
++		return -ENOMEM;
++	}
++
+ 	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
+ 							dd->port->flags);
+ 	size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
+@@ -2915,9 +2941,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ 	*offset = size <= len ? size : len;
+ 	size = copy_to_user(ubuf, buf, *offset);
+ 	if (size)
+-		return -EFAULT;
++		rv = -EFAULT;
+
+-	return *offset;
++	kfree(buf);
++	return rv ? rv : *offset;
+ }
+
+ static const struct file_operations mtip_device_status_fops = {
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 4a2d91536a8d..c843cf0aa623 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -73,6 +73,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct drm_connector *connector;
+
++	/* we can race here at startup, some boards seem to trigger
++	 * hotplug irqs when they shouldn't. */
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	mutex_lock(&mode_config->mutex);
+ 	if (mode_config->num_connector) {
+ 		list_for_each_entry(connector, &mode_config->connector_list, head)
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index 8c91fd5eb6fd..3ac9c4194814 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
+ 	cs->hw.ser->tty = tty;
+ 	atomic_set(&cs->hw.ser->refcnt, 1);
+ 	init_completion(&cs->hw.ser->dead_cmp);
+-
+ 	tty->disc_data = cs;
+
++	/* Set the amount of data we're willing to receive per call
++	 * from the hardware driver to half of the input buffer size
++	 * to leave some reserve.
++	 * Note: We don't do flow control towards the hardware driver.
++	 * If more data is received than will fit into the input buffer,
++	 * it will be dropped and an error will be logged. This should
++	 * never happen as the device is slow and the buffer size ample.
++	 */
++	tty->receive_room = RBUFSIZE/2;
++
+ 	/* OK.. Initialization of the datastructures and the HW is done.. Now
+ 	 * startup system and notify the LL that we are ready to run
+ 	 */
+diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
+index 4296155090b2..afcd18428945 100644
+--- a/drivers/md/dm-cache-policy-mq.c
++++ b/drivers/md/dm-cache-policy-mq.c
+@@ -855,7 +855,7 @@ static void mq_destroy(struct dm_cache_policy *p)
+ 	struct mq_policy *mq = to_mq_policy(p);
+
+ 	free_bitset(mq->allocation_bitset);
+-	kfree(mq->table);
++	vfree(mq->table);
+ 	free_entries(mq);
+ 	kfree(mq);
+ }
+@@ -1106,7 +1106,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+
+ 	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+ 	mq->hash_bits = ffs(mq->nr_buckets) - 1;
+-	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
++	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
+ 	if (!mq->table)
+ 		goto bad_alloc_table;
+
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index e775bfbc5e6e..5f187294c85a 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -872,8 +872,8 @@ gpe0_done:
+ 	lpc_ich_enable_gpio_space(dev);
+
+ 	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+-	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+-			      1, NULL, 0, NULL);
++	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++			      &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
+
+ gpio_done:
+ 	if (acpi_conflict)
+@@ -932,8 +932,8 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
+ 	}
+
+ 	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+-	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+-			      1, NULL, 0, NULL);
++	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++			      &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
+
+ wdt_done:
+ 	return ret;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 5f95537d4896..b3892b0d2e61 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -671,6 +671,22 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
+ 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+ }
+
++static struct slave *bond_get_old_active(struct bonding *bond,
++					 struct slave *new_active)
++{
++	struct slave *slave;
++
++	bond_for_each_slave(bond, slave) {
++		if (slave == new_active)
++			continue;
++
++		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++			return slave;
++	}
++
++	return NULL;
++}
++
+ /*
+  * bond_do_fail_over_mac
+  *
+@@ -712,6 +728,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
+ 		write_unlock_bh(&bond->curr_slave_lock);
+ 		read_unlock(&bond->lock);
+
++		if (!old_active)
++			old_active = bond_get_old_active(bond, new_active);
++
+ 		if (old_active) {
+ 			memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
+ 			memcpy(saddr.sa_data, old_active->dev->dev_addr,
+@@ -1917,6 +1936,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
+ 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ 		pr_info("%s: destroying bond %s.\n",
+ 			bond_dev->name, bond_dev->name);
++		bond_remove_proc_entry(bond);
+ 		unregister_netdevice(bond_dev);
+ 	}
+ 	return ret;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 0416c5b3b35c..3990b435a081 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -558,7 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ 							 " to slave: %d, port:%d\n",
+ 							 __func__, i, port);
+-						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ 							mlx4_slave_event(dev, i, eqe);
+ 					} else {  /* IB port */
+@@ -584,7 +584,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 					for (i = 0; i < dev->num_slaves; i++) {
+ 						if (i == mlx4_master_func_num(dev))
+ 							continue;
+-						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ 							mlx4_slave_event(dev, i, eqe);
+ 					}
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index c913e8cc3b26..ed7759980c47 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3423,7 +3423,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+ 		 */
+
+ 		nseg = scsi_dma_map(scsi_cmnd);
+-		if (unlikely(!nseg))
++		if (unlikely(nseg <= 0))
+ 			return 1;
+ 		sgl += 1;
+ 		/* clear the last flag in the fcp_rsp map entry */
+diff --git a/fs/aio.c b/fs/aio.c
+index 329e6c1f3a43..31a5cb74ae1f 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -146,6 +146,7 @@ struct kioctx {
+
+ 	struct {
+ 		unsigned	tail;
++		unsigned	completed_events;
+ 		spinlock_t	completion_lock;
+ 	} ____cacheline_aligned_in_smp;
+
+@@ -899,6 +900,68 @@ out:
+ 	return ret;
+ }
+
++/* refill_reqs_available
++ *	Updates the reqs_available reference counts used for tracking the
++ *	number of free slots in the completion ring.  This can be called
++ *	from aio_complete() (to optimistically update reqs_available) or
++ *	from aio_get_req() (the we're out of events case).  It must be
++ *	called holding ctx->completion_lock.
++ */
++static void refill_reqs_available(struct kioctx *ctx, unsigned head,
++                                  unsigned tail)
++{
++	unsigned events_in_ring, completed;
++
++	/* Clamp head since userland can write to it. */
++	head %= ctx->nr_events;
++	if (head <= tail)
++		events_in_ring = tail - head;
++	else
++		events_in_ring = ctx->nr_events - (head - tail);
++
++	completed = ctx->completed_events;
++	if (events_in_ring < completed)
++		completed -= events_in_ring;
++	else
++		completed = 0;
++
++	if (!completed)
++		return;
++
++	ctx->completed_events -= completed;
++	put_reqs_available(ctx, completed);
++}
++
++/* user_refill_reqs_available
++ *	Called to refill reqs_available when aio_get_req() encounters an
++ *	out of space in the completion ring.
++ */
++static void user_refill_reqs_available(struct kioctx *ctx)
++{
++	spin_lock_irq(&ctx->completion_lock);
++	if (ctx->completed_events) {
++		struct aio_ring *ring;
++		unsigned head;
++
++		/* Access of ring->head may race with aio_read_events_ring()
++		 * here, but that's okay since whether we read the old version
++		 * or the new version, and either will be valid.  The important
++		 * part is that head cannot pass tail since we prevent
++		 * aio_complete() from updating tail by holding
++		 * ctx->completion_lock.  Even if head is invalid, the check
++		 * against ctx->completed_events below will make sure we do the
++		 * safe/right thing.
++		 */
++		ring = kmap_atomic(ctx->ring_pages[0]);
++		head = ring->head;
++		kunmap_atomic(ring);
++
++		refill_reqs_available(ctx, head, ctx->tail);
++	}
++
++	spin_unlock_irq(&ctx->completion_lock);
++}
++
+ /* aio_get_req
+  *	Allocate a slot for an aio request.
+  * Returns NULL if no requests are free.
+@@ -907,8 +970,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+ {
+ 	struct kiocb *req;
+
+-	if (!get_reqs_available(ctx))
+-		return NULL;
++	if (!get_reqs_available(ctx)) {
++		user_refill_reqs_available(ctx);
++		if (!get_reqs_available(ctx))
++			return NULL;
++	}
+
+ 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
+ 	if (unlikely(!req))
+@@ -967,8 +1033,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ 	struct kioctx	*ctx = iocb->ki_ctx;
+ 	struct aio_ring	*ring;
+ 	struct io_event	*ev_page, *event;
++	unsigned tail, pos, head;
+ 	unsigned long	flags;
+-	unsigned tail, pos;
+
+ 	/*
+ 	 * Special case handling for sync iocbs:
+@@ -1029,10 +1095,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ 	ctx->tail = tail;
+
+ 	ring = kmap_atomic(ctx->ring_pages[0]);
++	head = ring->head;
+ 	ring->tail = tail;
+ 	kunmap_atomic(ring);
+ 	flush_dcache_page(ctx->ring_pages[0]);
+
++	ctx->completed_events++;
++	if (ctx->completed_events > 1)
++		refill_reqs_available(ctx, head, tail);
+ 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ 	pr_debug("added to ring %p at [%u]\n", iocb, tail);
+@@ -1047,7 +1117,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+
+ 	/* everything turned out well, dispose of the aiocb. */
+ 	kiocb_free(iocb);
+-	put_reqs_available(ctx, 1);
+
+ 	/*
+ 	 * We have to order our ring_info tail store above and test
+diff --git a/fs/bio.c b/fs/bio.c
+index ea5035da4d9a..e7fb3f82f5f5 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
+
+ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
+ 			  *page, unsigned int len, unsigned int offset,
+-			  unsigned short max_sectors)
++			  unsigned int max_sectors)
+ {
+ 	int retried_segments = 0;
+ 	struct bio_vec *bvec;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 89b5519085c2..ebad721656f3 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2245,6 +2245,8 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
+
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		if (ses->status == CifsExiting)
++			continue;
+ 		if (!match_session(ses, vol))
+ 			continue;
+ 		++ses->ses_count;
+@@ -2258,24 +2260,37 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
+ static void
+ cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+-	unsigned int xid;
++	unsigned int rc, xid;
+ 	struct TCP_Server_Info *server = ses->server;
+
+ 	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
++
+ 	spin_lock(&cifs_tcp_ses_lock);
++	if (ses->status == CifsExiting) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return;
++	}
+ 	if (--ses->ses_count > 0) {
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 		return;
+ 	}
+-
+-	list_del_init(&ses->smb_ses_list);
++	if (ses->status == CifsGood)
++		ses->status = CifsExiting;
+ 	spin_unlock(&cifs_tcp_ses_lock);
+
+-	if (ses->status == CifsGood && server->ops->logoff) {
++	if (ses->status == CifsExiting && server->ops->logoff) {
+ 		xid = get_xid();
+-		server->ops->logoff(xid, ses);
++		rc = server->ops->logoff(xid, ses);
++		if (rc)
++			cifs_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
++				__func__, rc);
+ 		_free_xid(xid);
+ 	}
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_del_init(&ses->smb_ses_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
+ 	sesInfoFree(ses);
+ 	cifs_put_tcp_session(server);
+ }
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index 340abca3aa52..ee1963b2e5a7 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -516,13 +516,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_hdr *buf,
+ 		return -EAGAIN;
+ 	}
+
+-	if (ses->status != CifsGood) {
+-		/* check if SMB2 session is bad because we are setting it up */
++	if (ses->status == CifsNew) {
+ 		if ((buf->Command != SMB2_SESSION_SETUP) &&
+ 		    (buf->Command != SMB2_NEGOTIATE))
+ 			return -EAGAIN;
+ 		/* else ok - we are setting up session */
+ 	}
++
++	if (ses->status == CifsExiting) {
++		if (buf->Command != SMB2_LOGOFF)
++			return -EAGAIN;
++		/* else ok - we are shutting down the session */
++	}
++
+ 	*mid = smb2_mid_entry_alloc(buf, ses->server);
+ 	if (*mid == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 800b938e4061..ebb46e311e0b 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -431,13 +431,20 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
+ 		return -EAGAIN;
+ 	}
+
+-	if (ses->status != CifsGood) {
+-		/* check if SMB session is bad because we are setting it up */
++	if (ses->status == CifsNew) {
+ 		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
+ 			(in_buf->Command != SMB_COM_NEGOTIATE))
+ 			return -EAGAIN;
+ 		/* else ok - we are setting up session */
+ 	}
++
++	if (ses->status == CifsExiting) {
++		/* check if SMB session is bad because we are setting it up */
++		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
++			return -EAGAIN;
++		/* else ok - we are shutting down session */
++	}
++
+ 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
+ 	if (*ppmidQ == NULL)
+ 		return -ENOMEM;
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 1b1269e13596..553c07514a05 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -141,6 +141,7 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
+ }
+
+ /* datagram.c */
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ extern int		ip4_datagram_connect(struct sock *sk,
+ 					     struct sockaddr *uaddr, int addr_len);
+
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 0c1288a50e8b..a68a061882f4 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -293,6 +293,8 @@ extern unsigned int nf_conntrack_max;
+ extern unsigned int nf_conntrack_hash_rnd;
+ void init_nf_conntrack_hash_rnd(void);
+
++void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
++
+ #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
+ #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index b7b1914dfa25..27cf128ebc15 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -347,7 +347,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
+ 		return -ENOMEM;
+ 	rcu_assign_pointer(*pp, p);
+
+-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+ 	return 0;
+ }
+
+@@ -370,6 +369,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
+ 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ 		return -EINVAL;
+
++	memset(&ip, 0, sizeof(ip));
+ 	ip.proto = entry->addr.proto;
+ 	if (ip.proto == htons(ETH_P_IP))
+ 		ip.u.ip4 = entry->addr.u.ip4;
+@@ -416,6 +416,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+ 	if (!netif_running(br->dev) || br->multicast_disabled)
+ 		return -EINVAL;
+
++	memset(&ip, 0, sizeof(ip));
+ 	ip.proto = entry->addr.proto;
+ 	if (ip.proto == htons(ETH_P_IP)) {
+ 		if (timer_pending(&br->ip4_querier.timer))
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index af814e764206..98e3d61e7476 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -130,6 +130,35 @@ out_noerr:
+ 	goto out;
+ }
+
++static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
++{
++	struct sk_buff *nskb;
++
++	if (skb->peeked)
++		return skb;
++
++	/* We have to unshare an skb before modifying it. */
++	if (!skb_shared(skb))
++		goto done;
++
++	nskb = skb_clone(skb, GFP_ATOMIC);
++	if (!nskb)
++		return ERR_PTR(-ENOMEM);
++
++	skb->prev->next = nskb;
++	skb->next->prev = nskb;
++	nskb->prev = skb->prev;
++	nskb->next = skb->next;
++
++	consume_skb(skb);
++	skb = nskb;
++
++done:
++	skb->peeked = 1;
++
++	return skb;
++}
++
+ /**
+  *	__skb_recv_datagram - Receive a datagram skbuff
+  *	@sk: socket
+@@ -164,7 +193,9 @@ out_noerr:
+ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 				    int *peeked, int *off, int *err)
+ {
++	struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 	struct sk_buff *skb, *last;
++	unsigned long cpu_flags;
+ 	long timeo;
+ 	/*
+ 	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
+@@ -183,8 +214,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 		 * Look at current nfs client by the way...
+ 		 * However, this function was correct in any case. 8)
+ 		 */
+-		unsigned long cpu_flags;
+-		struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 		int _off = *off;
+
+ 		last = (struct sk_buff *)queue;
+@@ -198,7 +227,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 					_off -= skb->len;
+ 					continue;
+ 				}
+-				skb->peeked = 1;
++
++				skb = skb_set_peeked(skb);
++				error = PTR_ERR(skb);
++				if (IS_ERR(skb))
++					goto unlock_err;
++
+ 				atomic_inc(&skb->users);
+ 			} else
+ 				__skb_unlink(skb, queue);
+@@ -222,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+
+ 	return NULL;
+
++unlock_err:
++	spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ no_packet:
+ 	*err = error;
+ 	return NULL;
+@@ -742,7 +778,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+ 	if (likely(!sum)) {
+ 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
+ 			netdev_rx_csum_fault(skb->dev);
+-		skb->ip_summed = CHECKSUM_UNNECESSARY;
++		if (!skb_shared(skb))
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 	}
+ 	return sum;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3ca487e14080..f991f5d3371d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3193,6 +3193,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ 	local_irq_save(flags);
+
+ 	rps_lock(sd);
++	if (!netif_running(skb->dev))
++		goto drop;
+ 	qlen = skb_queue_len(&sd->input_pkt_queue);
+ 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+ 		if (skb_queue_len(&sd->input_pkt_queue)) {
+@@ -3214,6 +3216,7 @@ enqueue:
+ 		goto enqueue;
+ 	}
+
++drop:
+ 	sd->dropped++;
+ 	rps_unlock(sd);
+
+@@ -3518,8 +3521,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+
+ 	pt_prev = NULL;
+
+-	rcu_read_lock();
+-
+ another_round:
+ 	skb->skb_iif = skb->dev->ifindex;
+
+@@ -3529,7 +3530,7 @@ another_round:
+ 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+ 		skb = skb_vlan_untag(skb);
+ 		if (unlikely(!skb))
+-			goto unlock;
++			goto out;
+ 	}
+
+ #ifdef CONFIG_NET_CLS_ACT
+@@ -3554,7 +3555,7 @@ skip_taps:
+ #ifdef CONFIG_NET_CLS_ACT
+ 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+ 	if (!skb)
+-		goto unlock;
++		goto out;
+ ncls:
+ #endif
+
+@@ -3569,7 +3570,7 @@ ncls:
+ 		if (vlan_do_receive(&skb))
+ 			goto another_round;
+ 		else if (unlikely(!skb))
+-			goto unlock;
++			goto out;
+ 	}
+
+ 	rx_handler = rcu_dereference(skb->dev->rx_handler);
+@@ -3581,7 +3582,7 @@ ncls:
+ 		switch (rx_handler(&skb)) {
+ 		case RX_HANDLER_CONSUMED:
+ 			ret = NET_RX_SUCCESS;
+-			goto unlock;
++			goto out;
+ 		case RX_HANDLER_ANOTHER:
+ 			goto another_round;
+ 		case RX_HANDLER_EXACT:
+@@ -3633,8 +3634,6 @@ drop:
+ 		ret = NET_RX_DROP;
+ 	}
+
+-unlock:
+-	rcu_read_unlock();
+ out:
+ 	return ret;
+ }
+@@ -3681,29 +3680,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
+  */
+ int netif_receive_skb(struct sk_buff *skb)
+ {
++	int ret;
++
+ 	net_timestamp_check(netdev_tstamp_prequeue, skb);
+
+ 	if (skb_defer_rx_timestamp(skb))
+ 		return NET_RX_SUCCESS;
+
++	rcu_read_lock();
++
+ #ifdef CONFIG_RPS
+ 	if (static_key_false(&rps_needed)) {
+ 		struct rps_dev_flow voidflow, *rflow = &voidflow;
+-		int cpu, ret;
+-
+-		rcu_read_lock();
+-
+-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
++		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+
+ 		if (cpu >= 0) {
+ 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ 			rcu_read_unlock();
+ 			return ret;
+ 		}
+-		rcu_read_unlock();
+ 	}
+ #endif
+-	return __netif_receive_skb(skb);
++	ret = __netif_receive_skb(skb);
++	rcu_read_unlock();
++	return ret;
+ }
+ EXPORT_SYMBOL(netif_receive_skb);
+
+@@ -4113,8 +4113,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ 		unsigned int qlen;
+
+ 		while ((skb = __skb_dequeue(&sd->process_queue))) {
++			rcu_read_lock();
+ 			local_irq_enable();
+ 			__netif_receive_skb(skb);
++			rcu_read_unlock();
+ 			local_irq_disable();
+ 			input_queue_head_incr(sd);
+ 			if (++work >= quota) {
+@@ -5302,6 +5304,7 @@ static void rollback_registered_many(struct list_head *head)
+ 		unlist_netdevice(dev);
+
+ 		dev->reg_state = NETREG_UNREGISTERING;
++		on_each_cpu(flush_backlog, dev, 1);
+ 	}
+
+ 	synchronize_net();
+@@ -5559,7 +5562,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
+ 	struct netdev_queue *tx;
+ 	size_t sz = count * sizeof(*tx);
+
+-	BUG_ON(count < 1 || count > 0xffff);
++	if (count < 1 || count > 0xffff)
++		return -EINVAL;
+
+ 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ 	if (!tx) {
+@@ -5917,8 +5921,6 @@ void netdev_run_todo(void)
+
+ 		dev->reg_state = NETREG_UNREGISTERED;
+
+-		on_each_cpu(flush_backlog, dev, 1);
+-
+ 		netdev_wait_allrefs(dev);
+
+ 		/* paranoia */
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index a104ba3c5768..cea47344d535 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3423,8 +3423,10 @@ static int pktgen_thread_worker(void *arg)
+ 	pktgen_rem_thread(t);
+
+ 	/* Wait for kthread_stop */
+-	while (!kthread_should_stop()) {
++	for (;;) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (kthread_should_stop())
++			break;
+ 		schedule();
+ 	}
+ 	__set_current_state(TASK_RUNNING);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 76cc27f3f991..fd3a16e45dd9 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1197,10 +1197,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
+ };
+
+-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+-	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
+-};
+-
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+ 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
+ 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
+@@ -1274,67 +1270,66 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+ 	return 0;
+ }
+
+-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
++static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
+ {
+-	int rem, err = -EINVAL;
+-	struct nlattr *vf;
+ 	const struct net_device_ops *ops = dev->netdev_ops;
++	int err = -EINVAL;
+
+-	nla_for_each_nested(vf, attr, rem) {
+-		switch (nla_type(vf)) {
+-		case IFLA_VF_MAC: {
+-			struct ifla_vf_mac *ivm;
+-			ivm = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_mac)
+-				err = ops->ndo_set_vf_mac(dev, ivm->vf,
+-							  ivm->mac);
+-			break;
+-		}
+-		case IFLA_VF_VLAN: {
+-			struct ifla_vf_vlan *ivv;
+-			ivv = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_vlan)
+-				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+-							   ivv->vlan,
+-							   ivv->qos);
+-			break;
+-		}
+-		case IFLA_VF_TX_RATE: {
+-			struct ifla_vf_tx_rate *ivt;
+-			ivt = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_tx_rate)
+-				err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+-							      ivt->rate);
+-			break;
+-		}
+-		case IFLA_VF_SPOOFCHK: {
+-			struct ifla_vf_spoofchk *ivs;
+-			ivs = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_spoofchk)
+-				err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+-							       ivs->setting);
+-			break;
+-		}
+-		case IFLA_VF_LINK_STATE: {
+-			struct ifla_vf_link_state *ivl;
+-			ivl = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_link_state)
+-				err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+-								 ivl->link_state);
+-			break;
+-		}
+-		default:
+-			err = -EINVAL;
+-			break;
+-		}
+-		if (err)
+-			break;
++	if (tb[IFLA_VF_MAC]) {
++		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_mac)
++			err = ops->ndo_set_vf_mac(dev, ivm->vf,
++						  ivm->mac);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_VLAN]) {
++		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_vlan)
++			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
++						   ivv->qos);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_TX_RATE]) {
++		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_tx_rate)
++			err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
++						      ivt->rate);
++		if (err < 0)
++			return err;
+ 	}
++
++	if (tb[IFLA_VF_SPOOFCHK]) {
++		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_spoofchk)
++			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
++						       ivs->setting);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_LINK_STATE]) {
++		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_link_state)
++			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
++							 ivl->link_state);
++		if (err < 0)
++			return err;
++	}
++
+ 	return err;
+ }
+
+@@ -1517,14 +1512,21 @@ static int do_setlink(const struct sk_buff *skb,
+ 	}
+
+ 	if (tb[IFLA_VFINFO_LIST]) {
++		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
+ 		struct nlattr *attr;
+ 		int rem;
++
+ 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+-			if (nla_type(attr) != IFLA_VF_INFO) {
++			if (nla_type(attr) != IFLA_VF_INFO ||
++			    nla_len(attr) < NLA_HDRLEN) {
+ 				err = -EINVAL;
+ 				goto errout;
+ 			}
+-			err = do_setvfinfo(dev, attr);
++			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
++					       ifla_vf_policy);
++			if (err < 0)
++				goto errout;
++			err = do_setvfinfo(dev, vfinfo);
+ 			if (err < 0)
+ 				goto errout;
+ 			modified = 1;
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 5f3dc1df04bf..291b0821d1ac 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -20,7 +20,7 @@
+ #include <net/route.h>
+ #include <net/tcp_states.h>
+
+-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+
+ 	sk_dst_reset(sk);
+
+-	lock_sock(sk);
+-
+ 	oif = sk->sk_bound_dev_if;
+ 	saddr = inet->inet_saddr;
+ 	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
+@@ -81,9 +79,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	sk_dst_set(sk, &rt->dst);
+ 	err = 0;
+ out:
+-	release_sock(sk);
+ 	return err;
+ }
++EXPORT_SYMBOL(__ip4_datagram_connect);
++
++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++	int res;
++
++	lock_sock(sk);
++	res = __ip4_datagram_connect(sk, uaddr, addr_len);
++	release_sock(sk);
++	return res;
++}
+ EXPORT_SYMBOL(ip4_datagram_connect);
+
+ /* Because UDP xmit path can manipulate sk_dst_cache without holding
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 4c1884fed548..4d98a6b80b04 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -356,7 +356,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 	ihl = ip_hdrlen(skb);
+
+ 	/* Determine the position of this fragment. */
+-	end = offset + skb->len - ihl;
++	end = offset + skb->len - skb_network_offset(skb) - ihl;
+ 	err = -EINVAL;
+
+ 	/* Is this the final fragment? */
+@@ -386,7 +386,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 		goto err;
+
+ 	err = -ENOMEM;
+-	if (pskb_pull(skb, ihl) == NULL)
++	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+ 		goto err;
+
+ 	err = pskb_trim_rcsum(skb, end - offset);
+@@ -627,6 +627,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ 	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
+ 	iph->tot_len = htons(len);
+ 	iph->tos |= ecn;
++
++	ip_send_check(iph);
++
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+ 	qp->q.fragments = NULL;
+ 	qp->q.fragments_tail = NULL;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index edd5a8171357..6913e2fdc12c 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -476,7 +476,8 @@ drop:
+ EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
+
+ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+-			    struct rtable *rt, __be16 df)
++			    struct rtable *rt, __be16 df,
++			    const struct iphdr *inner_iph)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+@@ -493,7 +494,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+
+ 	if (skb->protocol == htons(ETH_P_IP)) {
+ 		if (!skb_is_gso(skb) &&
+-		    (df & htons(IP_DF)) && mtu < pkt_size) {
++		    (inner_iph->frag_off & htons(IP_DF)) &&
++		    mtu < pkt_size) {
+ 			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ 			return -E2BIG;
+@@ -611,7 +613,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		goto tx_error;
+ 	}
+
+-	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
++	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+ 		ip_rt_put(rt);
+ 		goto tx_error;
+ 	}
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 9f9ad99fcfdd..da44cb4f51d1 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
+ 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
+ }
+
+-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
+ 	struct inet_sock	*inet = inet_sk(sk);
+@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	if (usin->sin6_family == AF_INET) {
+ 		if (__ipv6_only_sock(sk))
+ 			return -EAFNOSUPPORT;
+-		err = ip4_datagram_connect(sk, uaddr, addr_len);
++		err = __ip4_datagram_connect(sk, uaddr, addr_len);
+ 		goto ipv4_connected;
+ 	}
+
+@@ -99,9 +99,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 		sin.sin_addr.s_addr = daddr->s6_addr32[3];
+ 		sin.sin_port = usin->sin6_port;
+
+-		err = ip4_datagram_connect(sk,
+-					   (struct sockaddr *) &sin,
+-					   sizeof(sin));
++		err = __ip4_datagram_connect(sk,
++					     (struct sockaddr *) &sin,
++					     sizeof(sin));
+
+ ipv4_connected:
+ 		if (err)
+@@ -204,6 +204,16 @@ out:
+ 	fl6_sock_release(flowlabel);
+ 	return err;
+ }
++
++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++	int res;
++
++	lock_sock(sk);
++	res = __ip6_datagram_connect(sk, uaddr, addr_len);
++	release_sock(sk);
++	return res;
++}
+ EXPORT_SYMBOL_GPL(ip6_datagram_connect);
+
+ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 51d54dc376f3..05c94d9c3776 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -329,10 +329,10 @@ int ip6_mc_input(struct sk_buff *skb)
+ 				if (offset < 0)
+ 					goto out;
+
+-				if (!ipv6_is_mld(skb, nexthdr, offset))
+-					goto out;
++				if (ipv6_is_mld(skb, nexthdr, offset))
++					deliver = true;
+
+-				deliver = true;
++				goto out;
+ 			}
+ 			/* unknown RA - process it normally */
+ 		}
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 5d892febd64c..cf9bfc5ddb34 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -318,6 +318,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
+ 	nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
+ }
+
++static inline bool
++nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
++		const struct nf_conntrack_tuple *tuple,
++		u16 zone)
++{
++	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
++
++	/* A conntrack can be recreated with the equal tuple,
++	 * so we need to check that the conntrack is confirmed
++	 */
++	return nf_ct_tuple_equal(tuple, &h->tuple) &&
++		nf_ct_zone(ct) == zone &&
++		nf_ct_is_confirmed(ct);
++}
++
+ /*
+  * Warning :
+  * - Caller must take a reference on returned object
+@@ -339,8 +354,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
+ 	local_bh_disable();
+ begin:
+ 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
+-		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
+-		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
++		if (nf_ct_key_equal(h, tuple, zone)) {
+ 			NF_CT_STAT_INC(net, found);
+ 			local_bh_enable();
+ 			return h;
+@@ -387,8 +401,7 @@ begin:
+ 			     !atomic_inc_not_zero(&ct->ct_general.use)))
+ 			h = NULL;
+ 		else {
+-			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
+-				     nf_ct_zone(ct) != zone)) {
++			if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
+ 				nf_ct_put(ct);
+ 				goto begin;
+ 			}
+@@ -450,7 +463,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 			goto out;
+
+ 	add_timer(&ct->timeout);
+-	nf_conntrack_get(&ct->ct_general);
++	smp_wmb();
++	/* The caller holds a reference to this object */
++	atomic_set(&ct->ct_general.use, 2);
+ 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+ 	NF_CT_STAT_INC(net, insert);
+ 	spin_unlock_bh(&nf_conntrack_lock);
+@@ -464,6 +479,21 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+
++/* deletion from this larval template list happens via nf_ct_put() */
++void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
++{
++	__set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
++	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
++	nf_conntrack_get(&tmpl->ct_general);
++
++	spin_lock_bh(&nf_conntrack_lock);
++	/* Overload tuple linked list to put us in template list. */
++	hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
++				 &net->ct.tmpl);
++	spin_unlock_bh(&nf_conntrack_lock);
++}
++EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
++
+ /* Confirm a connection given skb; places it in hash table */
+ int
+ __nf_conntrack_confirm(struct sk_buff *skb)
+@@ -735,11 +765,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
+ 		nf_ct_zone->id = zone;
+ 	}
+ #endif
+-	/*
+-	 * changes to lookup keys must be done before setting refcnt to 1
++	/* Because we use RCU lookups, we set ct_general.use to zero before
++	 * this is inserted in any list.
+ 	 */
+-	smp_wmb();
+-	atomic_set(&ct->ct_general.use, 1);
++	atomic_set(&ct->ct_general.use, 0);
+ 	return ct;
+
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+@@ -763,6 +792,11 @@ void nf_conntrack_free(struct nf_conn *ct)
+ {
+ 	struct net *net = nf_ct_net(ct);
+
++	/* A freed object has refcnt == 0, that's
++	 * the golden rule for SLAB_DESTROY_BY_RCU
++	 */
++	NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
++
+ 	nf_ct_ext_destroy(ct);
+ 	atomic_dec(&net->ct.count);
+ 	nf_ct_ext_free(ct);
+@@ -857,6 +891,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
+ 		NF_CT_STAT_INC(net, new);
+ 	}
+
++	/* Now it is inserted into the unconfirmed list, bump refcount */
++	nf_conntrack_get(&ct->ct_general);
++
+ 	/* Overload tuple linked list to put us in unconfirmed list. */
+ 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+ 		       &net->ct.unconfirmed);
+diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
+index cdf4567ba9b3..bf6e9a144dac 100644
+--- a/net/netfilter/nf_synproxy_core.c
++++ b/net/netfilter/nf_synproxy_core.c
+@@ -362,9 +362,8 @@ static int __net_init synproxy_net_init(struct net *net)
+ 		goto err2;
+ 	if (!nfct_synproxy_ext_add(ct))
+ 		goto err2;
+-	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
+-	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
++	nf_conntrack_tmpl_insert(net, ct);
+ 	snet->tmpl = ct;
+
+ 	snet->stats = alloc_percpu(struct synproxy_stats);
+@@ -389,7 +388,7 @@ static void __net_exit synproxy_net_exit(struct net *net)
+ {
+ 	struct synproxy_net *snet = synproxy_pernet(net);
+
+-	nf_conntrack_free(snet->tmpl);
++	nf_ct_put(snet->tmpl);
+ 	synproxy_proc_exit(net);
+ 	free_percpu(snet->stats);
+ }
+diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
+index da35ac06a975..889960193544 100644
+--- a/net/netfilter/xt_CT.c
++++ b/net/netfilter/xt_CT.c
+@@ -226,12 +226,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
+ 			goto err3;
+ 	}
+
+-	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
+-	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+-
+-	/* Overload tuple linked list to put us in template list. */
+-	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+-				 &par->net->ct.tmpl);
++	nf_conntrack_tmpl_insert(par->net, ct);
+ out:
+ 	info->ct = ct;
+ 	return 0;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 5a75a1eb3ae7..22e0f478a2a3 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -342,25 +342,52 @@ err1:
+ 	return NULL;
+ }
+
++
++static void
++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
++		   unsigned int order)
++{
++	struct netlink_sock *nlk = nlk_sk(sk);
++	struct sk_buff_head *queue;
++	struct netlink_ring *ring;
++
++	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
++	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
++
++	spin_lock_bh(&queue->lock);
++
++	ring->frame_max		= req->nm_frame_nr - 1;
++	ring->head		= 0;
++	ring->frame_size	= req->nm_frame_size;
++	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
++
++	swap(ring->pg_vec_len, req->nm_block_nr);
++	swap(ring->pg_vec_order, order);
++	swap(ring->pg_vec, pg_vec);
++
++	__skb_queue_purge(queue);
++	spin_unlock_bh(&queue->lock);
++
++	WARN_ON(atomic_read(&nlk->mapped));
++
++	if (pg_vec)
++		free_pg_vec(pg_vec, order, req->nm_block_nr);
++}
++
+ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+-			    bool closing, bool tx_ring)
++			    bool tx_ring)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	struct netlink_ring *ring;
+-	struct sk_buff_head *queue;
+ 	void **pg_vec = NULL;
+ 	unsigned int order = 0;
+-	int err;
+
+ 	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+
+-	if (!closing) {
+-		if (atomic_read(&nlk->mapped))
+-			return -EBUSY;
+-		if (atomic_read(&ring->pending))
+-			return -EBUSY;
+-	}
++	if (atomic_read(&nlk->mapped))
++		return -EBUSY;
++	if (atomic_read(&ring->pending))
++		return -EBUSY;
+
+ 	if (req->nm_block_nr) {
+ 		if (ring->pg_vec != NULL)
+@@ -392,31 +419,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+ 			return -EINVAL;
+ 	}
+
+-	err = -EBUSY;
+ 	mutex_lock(&nlk->pg_vec_lock);
+-	if (closing || atomic_read(&nlk->mapped) == 0) {
+-		err = 0;
+-		spin_lock_bh(&queue->lock);
+-
+-		ring->frame_max		= req->nm_frame_nr - 1;
+-		ring->head		= 0;
+-		ring->frame_size	= req->nm_frame_size;
+-		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
+-
+-		swap(ring->pg_vec_len, req->nm_block_nr);
+-		swap(ring->pg_vec_order, order);
+-		swap(ring->pg_vec, pg_vec);
+-
+-		__skb_queue_purge(queue);
+-		spin_unlock_bh(&queue->lock);
+-
+-		WARN_ON(atomic_read(&nlk->mapped));
++	if (atomic_read(&nlk->mapped) == 0) {
++		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
++		mutex_unlock(&nlk->pg_vec_lock);
++		return 0;
+ 	}
++
+ 	mutex_unlock(&nlk->pg_vec_lock);
+
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->nm_block_nr);
+-	return err;
++
++	return -EBUSY;
+ }
+
+ static void netlink_mm_open(struct vm_area_struct *vma)
+@@ -885,10 +900,10 @@ static void netlink_sock_destruct(struct sock *sk)
+
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->rx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, false);
++			__netlink_set_ring(sk, &req, false, NULL, 0);
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->tx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, true);
++			__netlink_set_ring(sk, &req, true, NULL, 0);
+ 	}
+ #endif /* CONFIG_NETLINK_MMAP */
+
+@@ -2182,7 +2197,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 			return -EINVAL;
+ 		if (copy_from_user(&req, optval, sizeof(req)))
+ 			return -EFAULT;
+-		err = netlink_set_ring(sk, &req, false,
++		err = netlink_set_ring(sk, &req,
+ 				       optname == NETLINK_TX_RING);
+ 		break;
+ 	}
+diff --git a/net/rds/info.c b/net/rds/info.c
+index 9a6b4f66187c..140a44a5f7b7 100644
+--- a/net/rds/info.c
++++ b/net/rds/info.c
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
+
+ 	/* check for all kinds of wrapping and the like */
+ 	start = (unsigned long)optval;
+-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
++	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index dffdbeac18ca..d1233088f953 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1607,6 +1607,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
+ 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
+ 	if (res)
+ 		goto exit;
++	security_sk_clone(sock->sk, new_sock->sk);
+
+ 	new_sk = new_sock->sk;
+ 	new_tsock = tipc_sk(new_sk);