From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Mon, 28 Jul 2014 19:41:40
Message-Id: 1406576372.bb6262518ee99eb5e444a8cd8c6067fe52f55864.mpagano@gentoo
1 commit: bb6262518ee99eb5e444a8cd8c6067fe52f55864
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Jul 28 19:39:32 2014 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Jul 28 19:39:32 2014 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=bb626251
7
8 Linux patch 3.10.50
9
10 ---
11 0000_README | 4 +
12 1049_linux-3.10.50.patch | 1637 ++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 1641 insertions(+)
14
15 diff --git a/0000_README b/0000_README
16 index db70185..e86111a 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -238,6 +238,10 @@ Patch: 1048_linux-3.10.49.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.10.49
22
23 +Patch: 1049_linux-3.10.50.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.10.50
26 +
27 Patch: 1500_XATTR_USER_PREFIX.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
29 Desc: Support for namespace user.pax.* on tmpfs.
30
31 diff --git a/1049_linux-3.10.50.patch b/1049_linux-3.10.50.patch
32 new file mode 100644
33 index 0000000..643e891
34 --- /dev/null
35 +++ b/1049_linux-3.10.50.patch
36 @@ -0,0 +1,1637 @@
37 +diff --git a/Makefile b/Makefile
38 +index b8b8d33eab55..8d891c66803c 100644
39 +--- a/Makefile
40 ++++ b/Makefile
41 +@@ -1,6 +1,6 @@
42 + VERSION = 3
43 + PATCHLEVEL = 10
44 +-SUBLEVEL = 49
45 ++SUBLEVEL = 50
46 + EXTRAVERSION =
47 + NAME = TOSSUG Baby Fish
48 +
49 +diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
50 +index 30333cec0fef..ef9d79a3db25 100644
51 +--- a/arch/arc/include/uapi/asm/ptrace.h
52 ++++ b/arch/arc/include/uapi/asm/ptrace.h
53 +@@ -11,6 +11,7 @@
54 + #ifndef _UAPI__ASM_ARC_PTRACE_H
55 + #define _UAPI__ASM_ARC_PTRACE_H
56 +
57 ++#define PTRACE_GET_THREAD_AREA 25
58 +
59 + #ifndef __ASSEMBLY__
60 + /*
61 +diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
62 +index 0851604bb9cd..f8a36ed9e0d5 100644
63 +--- a/arch/arc/kernel/ptrace.c
64 ++++ b/arch/arc/kernel/ptrace.c
65 +@@ -136,6 +136,10 @@ long arch_ptrace(struct task_struct *child, long request,
66 + pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
67 +
68 + switch (request) {
69 ++ case PTRACE_GET_THREAD_AREA:
70 ++ ret = put_user(task_thread_info(child)->thr_ptr,
71 ++ (unsigned long __user *)data);
72 ++ break;
73 + default:
74 + ret = ptrace_request(child, request, addr, data);
75 + break;
76 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
77 +index 18a9f5ef643a..d41951246cd6 100644
78 +--- a/arch/arm/Kconfig
79 ++++ b/arch/arm/Kconfig
80 +@@ -4,6 +4,7 @@ config ARM
81 + select ARCH_BINFMT_ELF_RANDOMIZE_PIE
82 + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
83 + select ARCH_HAVE_CUSTOM_GPIO_H
84 ++ select ARCH_SUPPORTS_ATOMIC_RMW
85 + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
86 + select ARCH_WANT_IPC_PARSE_VERSION
87 + select BUILDTIME_EXTABLE_SORT if MMU
88 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
89 +index 56b3f6d447ae..0677ff4814fa 100644
90 +--- a/arch/arm64/Kconfig
91 ++++ b/arch/arm64/Kconfig
92 +@@ -1,6 +1,7 @@
93 + config ARM64
94 + def_bool y
95 + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
96 ++ select ARCH_SUPPORTS_ATOMIC_RMW
97 + select ARCH_WANT_OPTIONAL_GPIOLIB
98 + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
99 + select ARCH_WANT_FRAME_POINTERS
100 +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
101 +index fe404e77246e..7f656f119ea6 100644
102 +--- a/arch/powerpc/Kconfig
103 ++++ b/arch/powerpc/Kconfig
104 +@@ -138,6 +138,7 @@ config PPC
105 + select ARCH_USE_BUILTIN_BSWAP
106 + select OLD_SIGSUSPEND
107 + select OLD_SIGACTION if PPC32
108 ++ select ARCH_SUPPORTS_ATOMIC_RMW
109 +
110 + config EARLY_PRINTK
111 + bool
112 +diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
113 +index 2668b3142fa2..03a1bc3c3dde 100644
114 +--- a/arch/sparc/Kconfig
115 ++++ b/arch/sparc/Kconfig
116 +@@ -77,6 +77,7 @@ config SPARC64
117 + select ARCH_HAVE_NMI_SAFE_CMPXCHG
118 + select HAVE_C_RECORDMCOUNT
119 + select NO_BOOTMEM
120 ++ select ARCH_SUPPORTS_ATOMIC_RMW
121 +
122 + config ARCH_DEFCONFIG
123 + string
124 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
125 +index fe120da25625..af88b27ce313 100644
126 +--- a/arch/x86/Kconfig
127 ++++ b/arch/x86/Kconfig
128 +@@ -121,6 +121,7 @@ config X86
129 + select OLD_SIGACTION if X86_32
130 + select COMPAT_OLD_SIGACTION if IA32_EMULATION
131 + select RTC_LIB
132 ++ select ARCH_SUPPORTS_ATOMIC_RMW
133 +
134 + config INSTRUCTION_DECODER
135 + def_bool y
136 +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
137 +index a9e22073bd56..b45ac6affa9c 100644
138 +--- a/arch/x86/kernel/cpu/perf_event_intel.c
139 ++++ b/arch/x86/kernel/cpu/perf_event_intel.c
140 +@@ -1199,6 +1199,15 @@ again:
141 + intel_pmu_lbr_read();
142 +
143 + /*
144 ++ * CondChgd bit 63 doesn't mean any overflow status. Ignore
145 ++ * and clear the bit.
146 ++ */
147 ++ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
148 ++ if (!status)
149 ++ goto done;
150 ++ }
151 ++
152 ++ /*
153 + * PEBS overflow sets bit 62 in the global status register
154 + */
155 + if (__test_and_clear_bit(62, (unsigned long *)&status)) {
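The CondChgd fix above clears bit 63 first and only bails out when nothing else remains set, so a spurious CondChgd assertion no longer counts as an overflow. A runnable userspace sketch of that test-and-clear step (test_and_clear_bit64 here is a stand-in for the kernel's __test_and_clear_bit, not a real API):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool test_and_clear_bit64(int nr, uint64_t *word)
{
	uint64_t mask = 1ULL << nr;
	bool was_set = (*word & mask) != 0;

	*word &= ~mask;
	return was_set;
}

int main(void)
{
	uint64_t status = 1ULL << 63;	/* only CondChgd set: nothing to service */

	if (test_and_clear_bit64(63, &status) && status == 0)
		printf("spurious CondChgd, no real overflow pending\n");
	return 0;
}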
156 +diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
157 +index b6154d5a07a5..db0be2fb05fe 100644
158 +--- a/drivers/bluetooth/hci_h5.c
159 ++++ b/drivers/bluetooth/hci_h5.c
160 +@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
161 + H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
162 + BT_ERR("Non-link packet received in non-active state");
163 + h5_reset_rx(h5);
164 ++ return 0;
165 + }
166 +
167 + h5->rx_func = h5_rx_payload;
168 +diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
169 +index 21393dc4700a..f4b6b89b98f3 100644
170 +--- a/drivers/gpu/drm/qxl/qxl_irq.c
171 ++++ b/drivers/gpu/drm/qxl/qxl_irq.c
172 +@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
173 +
174 + pending = xchg(&qdev->ram_header->int_pending, 0);
175 +
176 ++ if (!pending)
177 ++ return IRQ_NONE;
178 ++
179 + atomic_inc(&qdev->irq_received);
180 +
181 + if (pending & QXL_INTERRUPT_DISPLAY) {
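The qxl change restores the shared-interrupt contract: a handler that finds no work must return IRQ_NONE so the core can tell its device apart from others on the same line and can detect stuck interrupts. A hedged kernel-style sketch of the pattern, with hypothetical mydev names and register offsets rather than qxl's real ones:

#include <linux/interrupt.h>
#include <linux/io.h>

#define MYDEV_INT_STATUS 0x00	/* hypothetical register offsets */
#define MYDEV_INT_ACK    0x04

struct mydev {
	void __iomem *regs;
};

static irqreturn_t mydev_irq_handler(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;
	u32 pending = readl(dev->regs + MYDEV_INT_STATUS);

	if (!pending)
		return IRQ_NONE;	/* not ours; let other handlers on the line run */

	writel(pending, dev->regs + MYDEV_INT_ACK);
	/* ... service the pending bits ... */
	return IRQ_HANDLED;
}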
182 +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
183 +index 5802d7486354..1b564d7e4191 100644
184 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c
185 ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
186 +@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
187 + struct backlight_properties props;
188 + struct radeon_backlight_privdata *pdata;
189 + struct radeon_encoder_atom_dig *dig;
190 +- u8 backlight_level;
191 + char bl_name[16];
192 +
193 + /* Mac laptops with multiple GPUs use the gmux driver for backlight
194 +@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
195 +
196 + pdata->encoder = radeon_encoder;
197 +
198 +- backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
199 +-
200 + dig = radeon_encoder->enc_priv;
201 + dig->bl_dev = bd;
202 +
203 + bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
204 ++ /* Set a reasonable default here if the level is 0 otherwise
205 ++ * fbdev will attempt to turn the backlight on after console
206 ++ * unblanking and it will try and restore 0 which turns the backlight
207 ++ * off again.
208 ++ */
209 ++ if (bd->props.brightness == 0)
210 ++ bd->props.brightness = RADEON_MAX_BL_LEVEL;
211 + bd->props.power = FB_BLANK_UNBLANK;
212 + backlight_update_status(bd);
213 +
214 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
215 +index 06ccfe477650..a84de32a91f5 100644
216 +--- a/drivers/gpu/drm/radeon/radeon_display.c
217 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
218 +@@ -688,6 +688,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
219 + struct radeon_device *rdev = dev->dev_private;
220 + int ret = 0;
221 +
222 ++ /* don't leak the edid if we already fetched it in detect() */
223 ++ if (radeon_connector->edid)
224 ++ goto got_edid;
225 ++
226 + /* on hw with routers, select right port */
227 + if (radeon_connector->router.ddc_valid)
228 + radeon_router_select_ddc_port(radeon_connector);
229 +@@ -727,6 +731,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
230 + radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
231 + }
232 + if (radeon_connector->edid) {
233 ++got_edid:
234 + drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
235 + ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
236 + drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
237 +diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
238 +index ed50e9e83c61..0e8c1ea4dd53 100644
239 +--- a/drivers/hv/hv_kvp.c
240 ++++ b/drivers/hv/hv_kvp.c
241 +@@ -111,6 +111,15 @@ kvp_work_func(struct work_struct *dummy)
242 + kvp_respond_to_host(NULL, HV_E_FAIL);
243 + }
244 +
245 ++static void poll_channel(struct vmbus_channel *channel)
246 ++{
247 ++ unsigned long flags;
248 ++
249 ++ spin_lock_irqsave(&channel->inbound_lock, flags);
250 ++ hv_kvp_onchannelcallback(channel);
251 ++ spin_unlock_irqrestore(&channel->inbound_lock, flags);
252 ++}
253 ++
254 + static int kvp_handle_handshake(struct hv_kvp_msg *msg)
255 + {
256 + int ret = 1;
257 +@@ -139,7 +148,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
258 + kvp_register(dm_reg_value);
259 + kvp_transaction.active = false;
260 + if (kvp_transaction.kvp_context)
261 +- hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
262 ++ poll_channel(kvp_transaction.kvp_context);
263 + }
264 + return ret;
265 + }
266 +@@ -552,6 +561,7 @@ response_done:
267 +
268 + vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
269 + VM_PKT_DATA_INBAND, 0);
270 ++ poll_channel(channel);
271 +
272 + }
273 +
274 +@@ -585,7 +595,7 @@ void hv_kvp_onchannelcallback(void *context)
275 + return;
276 + }
277 +
278 +- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
279 ++ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
280 + &requestid);
281 +
282 + if (recvlen > 0) {
283 +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
284 +index 2f561c5dfe24..64c778f7756f 100644
285 +--- a/drivers/hv/hv_util.c
286 ++++ b/drivers/hv/hv_util.c
287 +@@ -279,7 +279,7 @@ static int util_probe(struct hv_device *dev,
288 + (struct hv_util_service *)dev_id->driver_data;
289 + int ret;
290 +
291 +- srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
292 ++ srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
293 + if (!srv->recv_buffer)
294 + return -ENOMEM;
295 + if (srv->util_init) {
296 +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
297 +index 58637355c1f6..79610bdf1d35 100644
298 +--- a/drivers/hwmon/adt7470.c
299 ++++ b/drivers/hwmon/adt7470.c
300 +@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
301 + return -EINVAL;
302 +
303 + temp = DIV_ROUND_CLOSEST(temp, 1000);
304 +- temp = clamp_val(temp, 0, 255);
305 ++ temp = clamp_val(temp, -128, 127);
306 +
307 + mutex_lock(&data->lock);
308 + data->temp_min[attr->index] = temp;
309 +@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
310 + return -EINVAL;
311 +
312 + temp = DIV_ROUND_CLOSEST(temp, 1000);
313 +- temp = clamp_val(temp, 0, 255);
314 ++ temp = clamp_val(temp, -128, 127);
315 +
316 + mutex_lock(&data->lock);
317 + data->temp_max[attr->index] = temp;
318 +@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
319 + return -EINVAL;
320 +
321 + temp = DIV_ROUND_CLOSEST(temp, 1000);
322 +- temp = clamp_val(temp, 0, 255);
323 ++ temp = clamp_val(temp, -128, 127);
324 +
325 + mutex_lock(&data->lock);
326 + data->pwm_tmin[attr->index] = temp;
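The adt7470 limit registers hold a signed 8-bit value, so clamping the converted temperature to 0..255 silently destroyed negative limits; -128..127 is the representable range. A runnable userspace sketch of the corrected conversion (div_round_closest and clamp_val reimplemented here to mirror the kernel macros):

#include <stdio.h>

/* Round-to-nearest signed division, as the kernel's DIV_ROUND_CLOSEST does. */
static long div_round_closest(long n, long d)
{
	return n >= 0 ? (n + d / 2) / d : (n - d / 2) / d;
}

static long clamp_val(long v, long lo, long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	long millideg = -40000;	/* a -40 degree limit written via sysfs */
	long reg = clamp_val(div_round_closest(millideg, 1000), -128, 127);

	printf("register value: %ld\n", reg);	/* -40, where the old clamp gave 0 */
	return 0;
}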
327 +diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
328 +index 960fac3fb166..48044b044b7a 100644
329 +--- a/drivers/hwmon/da9052-hwmon.c
330 ++++ b/drivers/hwmon/da9052-hwmon.c
331 +@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
332 + struct device_attribute *devattr,
333 + char *buf)
334 + {
335 +- return sprintf(buf, "da9052-hwmon\n");
336 ++ return sprintf(buf, "da9052\n");
337 + }
338 +
339 + static ssize_t show_label(struct device *dev,
340 +diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
341 +index 029ecabc4380..1b275a2881d6 100644
342 +--- a/drivers/hwmon/da9055-hwmon.c
343 ++++ b/drivers/hwmon/da9055-hwmon.c
344 +@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
345 + struct device_attribute *devattr,
346 + char *buf)
347 + {
348 +- return sprintf(buf, "da9055-hwmon\n");
349 ++ return sprintf(buf, "da9055\n");
350 + }
351 +
352 + static ssize_t show_label(struct device *dev,
353 +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
354 +index 4e11218d644e..c8ee1cb023b8 100644
355 +--- a/drivers/irqchip/irq-gic.c
356 ++++ b/drivers/irqchip/irq-gic.c
357 +@@ -42,6 +42,7 @@
358 + #include <linux/irqchip/chained_irq.h>
359 + #include <linux/irqchip/arm-gic.h>
360 +
361 ++#include <asm/cputype.h>
362 + #include <asm/irq.h>
363 + #include <asm/exception.h>
364 + #include <asm/smp_plat.h>
365 +@@ -754,7 +755,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
366 + }
367 +
368 + for_each_possible_cpu(cpu) {
369 +- unsigned long offset = percpu_offset * cpu_logical_map(cpu);
370 ++ u32 mpidr = cpu_logical_map(cpu);
371 ++ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
372 ++ unsigned long offset = percpu_offset * core_id;
373 + *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
374 + *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
375 + }
376 +@@ -858,6 +861,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
377 + }
378 + IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
379 + IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
380 ++IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
381 + IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
382 + IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
383 +
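The GIC offset fix matters on parts where cpu_logical_map() returns a full MPIDR (cluster bits included) rather than a dense index: only affinity level 0 may scale the banked register offset. A small runnable sketch of the extraction, using the 8-bit-per-level MPIDR affinity layout:

#include <stdio.h>
#include <stdint.h>

/* Aff2 [23:16], Aff1 [15:8], Aff0 [7:0] */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) (((mpidr) >> ((level) * 8)) & 0xff)

int main(void)
{
	uint32_t mpidr = 0x0100;	/* cluster 1, core 0, e.g. on a big.LITTLE part */
	uint32_t core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	/* percpu_offset * 0x100 would address far past the GIC; core_id is 0 here */
	printf("core_id = %u\n", core_id);
	return 0;
}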
384 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
385 +index a33e07f4222e..de737ba1d351 100644
386 +--- a/drivers/md/dm-cache-metadata.c
387 ++++ b/drivers/md/dm-cache-metadata.c
388 +@@ -384,6 +384,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
389 +
390 + disk_super = dm_block_data(sblock);
391 +
392 ++ /* Verify the data block size hasn't changed */
393 ++ if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
394 ++ DMERR("changing the data block size (from %u to %llu) is not supported",
395 ++ le32_to_cpu(disk_super->data_block_size),
396 ++ (unsigned long long)cmd->data_block_size);
397 ++ r = -EINVAL;
398 ++ goto bad;
399 ++ }
400 ++
401 + r = __check_incompat_features(disk_super, cmd);
402 + if (r < 0)
403 + goto bad;
404 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
405 +index 5f49d704f275..3b1503dc1f13 100644
406 +--- a/drivers/md/dm-thin-metadata.c
407 ++++ b/drivers/md/dm-thin-metadata.c
408 +@@ -591,6 +591,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
409 +
410 + disk_super = dm_block_data(sblock);
411 +
412 ++ /* Verify the data block size hasn't changed */
413 ++ if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
414 ++ DMERR("changing the data block size (from %u to %llu) is not supported",
415 ++ le32_to_cpu(disk_super->data_block_size),
416 ++ (unsigned long long)pmd->data_block_size);
417 ++ r = -EINVAL;
418 ++ goto bad_unlock_sblock;
419 ++ }
420 ++
421 + r = __check_incompat_features(disk_super, pmd);
422 + if (r < 0)
423 + goto bad_unlock_sblock;
424 +diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
425 +index 6008c8d546a3..20d9c15a305d 100644
426 +--- a/drivers/media/usb/gspca/pac7302.c
427 ++++ b/drivers/media/usb/gspca/pac7302.c
428 +@@ -945,6 +945,7 @@ static const struct usb_device_id device_table[] = {
429 + {USB_DEVICE(0x093a, 0x2620)},
430 + {USB_DEVICE(0x093a, 0x2621)},
431 + {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
432 ++ {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
433 + {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
434 + {USB_DEVICE(0x093a, 0x2625)},
435 + {USB_DEVICE(0x093a, 0x2626)},
436 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
437 +index 70be100feeb4..b04f7f128f49 100644
438 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
439 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
440 +@@ -745,7 +745,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
441 +
442 + return;
443 + }
444 +- bnx2x_frag_free(fp, new_data);
445 ++ if (new_data)
446 ++ bnx2x_frag_free(fp, new_data);
447 + drop:
448 + /* drop the packet and keep the buffer in the bin */
449 + DP(NETIF_MSG_RX_STATUS,
450 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
451 +index 7371626c56a1..d81a7dbfeef6 100644
452 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
453 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
454 +@@ -2663,7 +2663,7 @@ static int be_open(struct net_device *netdev)
455 +
456 + for_all_evt_queues(adapter, eqo, i) {
457 + napi_enable(&eqo->napi);
458 +- be_eq_notify(adapter, eqo->q.id, true, false, 0);
459 ++ be_eq_notify(adapter, eqo->q.id, true, true, 0);
460 + }
461 + adapter->flags |= BE_FLAGS_NAPI_ENABLED;
462 +
463 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
464 +index 64cbe0dfe043..4d3c8122e2aa 100644
465 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
466 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
467 +@@ -7229,6 +7229,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
468 +
469 + if (netif_running(netdev))
470 + igb_close(netdev);
471 ++ else
472 ++ igb_reset(adapter);
473 +
474 + igb_clear_interrupt_scheme(adapter);
475 +
476 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
477 +index a602aeeb3acb..658613021919 100644
478 +--- a/drivers/net/ethernet/marvell/mvneta.c
479 ++++ b/drivers/net/ethernet/marvell/mvneta.c
480 +@@ -1145,7 +1145,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
481 + command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
482 + command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
483 +
484 +- if (l3_proto == swab16(ETH_P_IP))
485 ++ if (l3_proto == htons(ETH_P_IP))
486 + command |= MVNETA_TXD_IP_CSUM;
487 + else
488 + command |= MVNETA_TX_L3_IP6;
489 +@@ -2306,7 +2306,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
490 +
491 + if (phydev->speed == SPEED_1000)
492 + val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
493 +- else
494 ++ else if (phydev->speed == SPEED_100)
495 + val |= MVNETA_GMAC_CONFIG_MII_SPEED;
496 +
497 + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
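The swab16-to-htons change in mvneta is an endianness fix: l3_proto is already in network byte order, and swab16(ETH_P_IP) only happens to equal htons(ETH_P_IP) on little-endian hosts, so the old comparison silently broke big-endian builds. A runnable illustration:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_IP 0x0800
#define swab16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))

int main(void)
{
	uint16_t l3_proto = htons(ETH_P_IP);	/* as carried in the packet */

	/* Correct everywhere: network order compared against network order. */
	printf("htons match:  %d\n", l3_proto == htons(ETH_P_IP));
	/* Only accidentally true on little-endian hosts. */
	printf("swab16 match: %d\n", l3_proto == swab16(ETH_P_IP));
	return 0;
}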
498 +diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
499 +index 3df56840a3b9..398faff8be7a 100644
500 +--- a/drivers/net/ethernet/sun/sunvnet.c
501 ++++ b/drivers/net/ethernet/sun/sunvnet.c
502 +@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
503 + return vp;
504 + }
505 +
506 ++static void vnet_cleanup(void)
507 ++{
508 ++ struct vnet *vp;
509 ++ struct net_device *dev;
510 ++
511 ++ mutex_lock(&vnet_list_mutex);
512 ++ while (!list_empty(&vnet_list)) {
513 ++ vp = list_first_entry(&vnet_list, struct vnet, list);
514 ++ list_del(&vp->list);
515 ++ dev = vp->dev;
516 ++ /* vio_unregister_driver() should have cleaned up port_list */
517 ++ BUG_ON(!list_empty(&vp->port_list));
518 ++ unregister_netdev(dev);
519 ++ free_netdev(dev);
520 ++ }
521 ++ mutex_unlock(&vnet_list_mutex);
522 ++}
523 ++
524 + static const char *local_mac_prop = "local-mac-address";
525 +
526 + static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
527 +@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
528 +
529 + kfree(port);
530 +
531 +- unregister_netdev(vp->dev);
532 + }
533 + return 0;
534 + }
535 +@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
536 + static void __exit vnet_exit(void)
537 + {
538 + vio_unregister_driver(&vnet_port_driver);
539 ++ vnet_cleanup();
540 + }
541 +
542 + module_init(vnet_init);
543 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
544 +index 6839fb07a4c9..becfa3ef7fdc 100644
545 +--- a/drivers/net/ppp/pppoe.c
546 ++++ b/drivers/net/ppp/pppoe.c
547 +@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
548 + po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
549 + dev->hard_header_len);
550 +
551 +- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
552 ++ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
553 + po->chan.private = sk;
554 + po->chan.ops = &pppoe_chan_ops;
555 +
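The extra "- 2" reserves room for the PPP protocol field: every PPP frame tunnelled over PPPoE spends two bytes on the protocol ID inside the PPPoE payload. Worked out for a standard 1500-byte Ethernet MTU: 1500 - 6 (PPPoE header) - 2 (PPP protocol field) = 1492 bytes of channel payload, where the old code over-reported 1494.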
556 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
557 +index 6fb0082b3308..6c584f8a2268 100644
558 +--- a/drivers/net/usb/qmi_wwan.c
559 ++++ b/drivers/net/usb/qmi_wwan.c
560 +@@ -647,6 +647,7 @@ static const struct usb_device_id products[] = {
561 + {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
562 + {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
563 + {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
564 ++ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
565 + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
566 + {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
567 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
568 +@@ -721,6 +722,7 @@ static const struct usb_device_id products[] = {
569 + {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
570 + {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
571 + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
572 ++ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
573 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
574 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
575 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
576 +@@ -733,6 +735,7 @@ static const struct usb_device_id products[] = {
577 + {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
578 + {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
579 + {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
580 ++ {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
581 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
582 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
583 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
584 +diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
585 +index cd1ad0019185..ca17e4c9eca2 100644
586 +--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
587 ++++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
588 +@@ -1072,13 +1072,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
589 + /* recalculate basic rates */
590 + iwl_calc_basic_rates(priv, ctx);
591 +
592 +- /*
593 +- * force CTS-to-self frames protection if RTS-CTS is not preferred
594 +- * one aggregation protection method
595 +- */
596 +- if (!priv->hw_params.use_rts_for_aggregation)
597 +- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
598 +-
599 + if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
600 + !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
601 + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
602 +@@ -1484,11 +1477,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
603 + else
604 + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
605 +
606 +- if (bss_conf->use_cts_prot)
607 +- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
608 +- else
609 +- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
610 +-
611 + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
612 +
613 + if (vif->type == NL80211_IFTYPE_AP ||
614 +diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
615 +index fc3fe8ddcf62..83c61964d082 100644
616 +--- a/drivers/net/wireless/mwifiex/main.c
617 ++++ b/drivers/net/wireless/mwifiex/main.c
618 +@@ -501,6 +501,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
619 + }
620 +
621 + tx_info = MWIFIEX_SKB_TXCB(skb);
622 ++ memset(tx_info, 0, sizeof(*tx_info));
623 + tx_info->bss_num = priv->bss_num;
624 + tx_info->bss_type = priv->bss_type;
625 +
626 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
627 +index 46efdca96952..63c217053668 100644
628 +--- a/drivers/usb/core/hub.c
629 ++++ b/drivers/usb/core/hub.c
630 +@@ -887,6 +887,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
631 + if (!hub_is_superspeed(hub->hdev))
632 + return -EINVAL;
633 +
634 ++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
635 ++ if (ret < 0)
636 ++ return ret;
637 ++
638 ++ /*
639 ++ * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
640 ++ * Controller [1022:7814] will have spurious result making the following
641 ++ * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
642 ++ * as high-speed device if we set the usb 3.0 port link state to
643 ++ * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
644 ++ * check the state here to avoid the bug.
645 ++ */
646 ++ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
647 ++ USB_SS_PORT_LS_RX_DETECT) {
648 ++ dev_dbg(&hub->ports[port1 - 1]->dev,
649 ++ "Not disabling port; link state is RxDetect\n");
650 ++ return ret;
651 ++ }
652 ++
653 + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
654 + if (ret)
655 + return ret;
656 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
657 +index b5718516825b..39a986e1da9e 100644
658 +--- a/fs/fuse/inode.c
659 ++++ b/fs/fuse/inode.c
660 +@@ -461,6 +461,17 @@ static const match_table_t tokens = {
661 + {OPT_ERR, NULL}
662 + };
663 +
664 ++static int fuse_match_uint(substring_t *s, unsigned int *res)
665 ++{
666 ++ int err = -ENOMEM;
667 ++ char *buf = match_strdup(s);
668 ++ if (buf) {
669 ++ err = kstrtouint(buf, 10, res);
670 ++ kfree(buf);
671 ++ }
672 ++ return err;
673 ++}
674 ++
675 + static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
676 + {
677 + char *p;
678 +@@ -471,6 +482,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
679 + while ((p = strsep(&opt, ",")) != NULL) {
680 + int token;
681 + int value;
682 ++ unsigned uv;
683 + substring_t args[MAX_OPT_ARGS];
684 + if (!*p)
685 + continue;
686 +@@ -494,18 +506,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
687 + break;
688 +
689 + case OPT_USER_ID:
690 +- if (match_int(&args[0], &value))
691 ++ if (fuse_match_uint(&args[0], &uv))
692 + return 0;
693 +- d->user_id = make_kuid(current_user_ns(), value);
694 ++ d->user_id = make_kuid(current_user_ns(), uv);
695 + if (!uid_valid(d->user_id))
696 + return 0;
697 + d->user_id_present = 1;
698 + break;
699 +
700 + case OPT_GROUP_ID:
701 +- if (match_int(&args[0], &value))
702 ++ if (fuse_match_uint(&args[0], &uv))
703 + return 0;
704 +- d->group_id = make_kgid(current_user_ns(), value);
705 ++ d->group_id = make_kgid(current_user_ns(), uv);
706 + if (!gid_valid(d->group_id))
707 + return 0;
708 + d->group_id_present = 1;
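fuse_match_uint exists because match_int() parses into a signed int, so user_id/group_id values above INT_MAX (perfectly valid 32-bit IDs) were rejected or wrapped. A runnable userspace sketch of the same fix built on strtoul (match_uint here is illustrative, not the kernel helper):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>

/* Parse a decimal unsigned int, rejecting overflow and trailing junk. */
static int match_uint(const char *s, unsigned int *res)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 10);
	if (errno || end == s || *end != '\0' || v > UINT_MAX)
		return -1;
	*res = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int uid;

	if (match_uint("4294967294", &uid) == 0)	/* > INT_MAX, still a valid uid */
		printf("user_id=%u\n", uid);
	return 0;
}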
709 +diff --git a/include/net/sock.h b/include/net/sock.h
710 +index 72f710d2f75a..26b15c0780be 100644
711 +--- a/include/net/sock.h
712 ++++ b/include/net/sock.h
713 +@@ -1727,8 +1727,8 @@ sk_dst_get(struct sock *sk)
714 +
715 + rcu_read_lock();
716 + dst = rcu_dereference(sk->sk_dst_cache);
717 +- if (dst)
718 +- dst_hold(dst);
719 ++ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
720 ++ dst = NULL;
721 + rcu_read_unlock();
722 + return dst;
723 + }
724 +@@ -1767,9 +1767,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
725 + static inline void
726 + sk_dst_set(struct sock *sk, struct dst_entry *dst)
727 + {
728 +- spin_lock(&sk->sk_dst_lock);
729 +- __sk_dst_set(sk, dst);
730 +- spin_unlock(&sk->sk_dst_lock);
731 ++ struct dst_entry *old_dst;
732 ++
733 ++ sk_tx_queue_clear(sk);
734 ++ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
735 ++ dst_release(old_dst);
736 + }
737 +
738 + static inline void
739 +@@ -1781,9 +1783,7 @@ __sk_dst_reset(struct sock *sk)
740 + static inline void
741 + sk_dst_reset(struct sock *sk)
742 + {
743 +- spin_lock(&sk->sk_dst_lock);
744 +- __sk_dst_reset(sk);
745 +- spin_unlock(&sk->sk_dst_lock);
746 ++ sk_dst_set(sk, NULL);
747 + }
748 +
749 + extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
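The new sk_dst_get() only takes a reference while the count is still non-zero, because the companion dst_release() change later in this patch frees entries via RCU once the count hits zero; sk_dst_set() likewise swaps the pointer with xchg() instead of taking sk_dst_lock. The core building block is the increment-if-not-zero pattern, sketched here with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is still live (refcnt > 0). */
static bool refcount_inc_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		/* CAS either bumps old -> old + 1 or reloads old and retries. */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	}
	return false;	/* already being destroyed; caller must treat it as NULL */
}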
750 +diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
751 +index 44511d100eaa..e4d30533c562 100644
752 +--- a/kernel/Kconfig.locks
753 ++++ b/kernel/Kconfig.locks
754 +@@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
755 +
756 + endif
757 +
758 ++config ARCH_SUPPORTS_ATOMIC_RMW
759 ++ bool
760 ++
761 + config MUTEX_SPIN_ON_OWNER
762 + def_bool y
763 +- depends on SMP && !DEBUG_MUTEXES
764 ++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
765 +diff --git a/kernel/power/process.c b/kernel/power/process.c
766 +index 98088e0e71e8..1b212bee1510 100644
767 +--- a/kernel/power/process.c
768 ++++ b/kernel/power/process.c
769 +@@ -174,6 +174,7 @@ void thaw_processes(void)
770 +
771 + printk("Restarting tasks ... ");
772 +
773 ++ __usermodehelper_set_disable_depth(UMH_FREEZING);
774 + thaw_workqueues();
775 +
776 + read_lock(&tasklist_lock);
777 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
778 +index e745a1548367..701b6c8a4b12 100644
779 +--- a/kernel/sched/debug.c
780 ++++ b/kernel/sched/debug.c
781 +@@ -551,7 +551,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
782 +
783 + avg_atom = p->se.sum_exec_runtime;
784 + if (nr_switches)
785 +- do_div(avg_atom, nr_switches);
786 ++ avg_atom = div64_ul(avg_atom, nr_switches);
787 + else
788 + avg_atom = -1LL;
789 +
790 +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
791 +index a8f5084dcde7..294bf4ef1f47 100644
792 +--- a/kernel/time/alarmtimer.c
793 ++++ b/kernel/time/alarmtimer.c
794 +@@ -540,9 +540,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
795 + struct itimerspec *new_setting,
796 + struct itimerspec *old_setting)
797 + {
798 ++ ktime_t exp;
799 ++
800 + if (!rtcdev)
801 + return -ENOTSUPP;
802 +
803 ++ if (flags & ~TIMER_ABSTIME)
804 ++ return -EINVAL;
805 ++
806 + if (old_setting)
807 + alarm_timer_get(timr, old_setting);
808 +
809 +@@ -552,8 +557,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
810 +
811 + /* start the timer */
812 + timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
813 +- alarm_start(&timr->it.alarm.alarmtimer,
814 +- timespec_to_ktime(new_setting->it_value));
815 ++ exp = timespec_to_ktime(new_setting->it_value);
816 ++ /* Convert (if necessary) to absolute time */
817 ++ if (flags != TIMER_ABSTIME) {
818 ++ ktime_t now;
819 ++
820 ++ now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
821 ++ exp = ktime_add(now, exp);
822 ++ }
823 ++
824 ++ alarm_start(&timr->it.alarm.alarmtimer, exp);
825 + return 0;
826 + }
827 +
828 +@@ -685,6 +698,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
829 + if (!alarmtimer_get_rtcdev())
830 + return -ENOTSUPP;
831 +
832 ++ if (flags & ~TIMER_ABSTIME)
833 ++ return -EINVAL;
834 ++
835 + if (!capable(CAP_WAKE_ALARM))
836 + return -EPERM;
837 +
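Two behaviours change in alarm_timer_set(): unsupported flag bits are rejected with -EINVAL up front, and a relative it_value is converted to absolute by adding the alarm base's current time before arming. After the flags check the conversion reduces to one line of ktime arithmetic; a runnable sketch, with ktime_t modelled as signed nanoseconds:

#include <stdio.h>
#include <stdint.h>

#define TIMER_ABSTIME 0x01

typedef int64_t ktime_t;	/* nanoseconds */

static ktime_t arm_expiry(int flags, ktime_t value, ktime_t now)
{
	/* Relative requests expire at now + value; absolute ones as given. */
	return (flags & TIMER_ABSTIME) ? value : now + value;
}

int main(void)
{
	/* A 5s relative timer armed when the clock reads 100s. */
	printf("%lld\n", (long long)arm_expiry(0, 5000000000LL, 100000000000LL));
	return 0;
}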
838 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
839 +index 797d3b91a30b..401d9bd1fe42 100644
840 +--- a/kernel/trace/ftrace.c
841 ++++ b/kernel/trace/ftrace.c
842 +@@ -331,12 +331,12 @@ static void update_ftrace_function(void)
843 + func = ftrace_ops_list_func;
844 + }
845 +
846 ++ update_function_graph_func();
847 ++
848 + /* If there's no change, then do nothing more here */
849 + if (ftrace_trace_function == func)
850 + return;
851 +
852 +- update_function_graph_func();
853 +-
854 + /*
855 + * If we are using the list function, it doesn't care
856 + * about the function_trace_ops.
857 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
858 +index 8e94c1102636..4063d5fe5e44 100644
859 +--- a/kernel/trace/ring_buffer.c
860 ++++ b/kernel/trace/ring_buffer.c
861 +@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
862 + struct ring_buffer_per_cpu *cpu_buffer;
863 + struct rb_irq_work *work;
864 +
865 +- if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
866 +- (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
867 +- return POLLIN | POLLRDNORM;
868 +-
869 + if (cpu == RING_BUFFER_ALL_CPUS)
870 + work = &buffer->irq_work;
871 + else {
872 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
873 +index 8fe92ce43f39..98a830d079b9 100644
874 +--- a/kernel/trace/trace.c
875 ++++ b/kernel/trace/trace.c
876 +@@ -423,6 +423,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
877 + struct print_entry *entry;
878 + unsigned long irq_flags;
879 + int alloc;
880 ++ int pc;
881 ++
882 ++ pc = preempt_count();
883 +
884 + if (unlikely(tracing_selftest_running || tracing_disabled))
885 + return 0;
886 +@@ -432,7 +435,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
887 + local_save_flags(irq_flags);
888 + buffer = global_trace.trace_buffer.buffer;
889 + event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
890 +- irq_flags, preempt_count());
891 ++ irq_flags, pc);
892 + if (!event)
893 + return 0;
894 +
895 +@@ -449,6 +452,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
896 + entry->buf[size] = '\0';
897 +
898 + __buffer_unlock_commit(buffer, event);
899 ++ ftrace_trace_stack(buffer, irq_flags, 4, pc);
900 +
901 + return size;
902 + }
903 +@@ -466,6 +470,9 @@ int __trace_bputs(unsigned long ip, const char *str)
904 + struct bputs_entry *entry;
905 + unsigned long irq_flags;
906 + int size = sizeof(struct bputs_entry);
907 ++ int pc;
908 ++
909 ++ pc = preempt_count();
910 +
911 + if (unlikely(tracing_selftest_running || tracing_disabled))
912 + return 0;
913 +@@ -473,7 +480,7 @@ int __trace_bputs(unsigned long ip, const char *str)
914 + local_save_flags(irq_flags);
915 + buffer = global_trace.trace_buffer.buffer;
916 + event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
917 +- irq_flags, preempt_count());
918 ++ irq_flags, pc);
919 + if (!event)
920 + return 0;
921 +
922 +@@ -482,6 +489,7 @@ int __trace_bputs(unsigned long ip, const char *str)
923 + entry->str = str;
924 +
925 + __buffer_unlock_commit(buffer, event);
926 ++ ftrace_trace_stack(buffer, irq_flags, 4, pc);
927 +
928 + return 1;
929 + }
930 +diff --git a/mm/shmem.c b/mm/shmem.c
931 +index 509b393eceeb..16cc1d77f70a 100644
932 +--- a/mm/shmem.c
933 ++++ b/mm/shmem.c
934 +@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
935 + #define SHORT_SYMLINK_LEN 128
936 +
937 + /*
938 +- * shmem_fallocate and shmem_writepage communicate via inode->i_private
939 +- * (with i_mutex making sure that it has only one user at a time):
940 +- * we would prefer not to enlarge the shmem inode just for that.
941 ++ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
942 ++ * inode->i_private (with i_mutex making sure that it has only one user at
943 ++ * a time): we would prefer not to enlarge the shmem inode just for that.
944 + */
945 + struct shmem_falloc {
946 ++ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
947 + pgoff_t start; /* start of range currently being fallocated */
948 + pgoff_t next; /* the next page offset to be fallocated */
949 + pgoff_t nr_falloced; /* how many new pages have been fallocated */
950 +@@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
951 + return;
952 +
953 + index = start;
954 +- for ( ; ; ) {
955 ++ while (index < end) {
956 + cond_resched();
957 + pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
958 + min(end - index, (pgoff_t)PAGEVEC_SIZE),
959 + pvec.pages, indices);
960 + if (!pvec.nr) {
961 +- if (index == start || unfalloc)
962 ++ /* If all gone or hole-punch or unfalloc, we're done */
963 ++ if (index == start || end != -1)
964 + break;
965 ++ /* But if truncating, restart to make sure all gone */
966 + index = start;
967 + continue;
968 + }
969 +- if ((index == start || unfalloc) && indices[0] >= end) {
970 +- shmem_deswap_pagevec(&pvec);
971 +- pagevec_release(&pvec);
972 +- break;
973 +- }
974 + mem_cgroup_uncharge_start();
975 + for (i = 0; i < pagevec_count(&pvec); i++) {
976 + struct page *page = pvec.pages[i];
977 +@@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
978 + if (radix_tree_exceptional_entry(page)) {
979 + if (unfalloc)
980 + continue;
981 +- nr_swaps_freed += !shmem_free_swap(mapping,
982 +- index, page);
983 ++ if (shmem_free_swap(mapping, index, page)) {
984 ++ /* Swap was replaced by page: retry */
985 ++ index--;
986 ++ break;
987 ++ }
988 ++ nr_swaps_freed++;
989 + continue;
990 + }
991 +
992 +@@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
993 + if (page->mapping == mapping) {
994 + VM_BUG_ON(PageWriteback(page));
995 + truncate_inode_page(mapping, page);
996 ++ } else {
997 ++ /* Page was replaced by swap: retry */
998 ++ unlock_page(page);
999 ++ index--;
1000 ++ break;
1001 + }
1002 + }
1003 + unlock_page(page);
1004 +@@ -826,6 +833,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1005 + spin_lock(&inode->i_lock);
1006 + shmem_falloc = inode->i_private;
1007 + if (shmem_falloc &&
1008 ++ !shmem_falloc->waitq &&
1009 + index >= shmem_falloc->start &&
1010 + index < shmem_falloc->next)
1011 + shmem_falloc->nr_unswapped++;
1012 +@@ -1300,6 +1308,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1013 + int error;
1014 + int ret = VM_FAULT_LOCKED;
1015 +
1016 ++ /*
1017 ++ * Trinity finds that probing a hole which tmpfs is punching can
1018 ++ * prevent the hole-punch from ever completing: which in turn
1019 ++ * locks writers out with its hold on i_mutex. So refrain from
1020 ++ * faulting pages into the hole while it's being punched. Although
1021 ++ * shmem_undo_range() does remove the additions, it may be unable to
1022 ++ * keep up, as each new page needs its own unmap_mapping_range() call,
1023 ++ * and the i_mmap tree grows ever slower to scan if new vmas are added.
1024 ++ *
1025 ++ * It does not matter if we sometimes reach this check just before the
1026 ++ * hole-punch begins, so that one fault then races with the punch:
1027 ++ * we just need to make racing faults a rare case.
1028 ++ *
1029 ++ * The implementation below would be much simpler if we just used a
1030 ++ * standard mutex or completion: but we cannot take i_mutex in fault,
1031 ++ * and bloating every shmem inode for this unlikely case would be sad.
1032 ++ */
1033 ++ if (unlikely(inode->i_private)) {
1034 ++ struct shmem_falloc *shmem_falloc;
1035 ++
1036 ++ spin_lock(&inode->i_lock);
1037 ++ shmem_falloc = inode->i_private;
1038 ++ if (shmem_falloc &&
1039 ++ shmem_falloc->waitq &&
1040 ++ vmf->pgoff >= shmem_falloc->start &&
1041 ++ vmf->pgoff < shmem_falloc->next) {
1042 ++ wait_queue_head_t *shmem_falloc_waitq;
1043 ++ DEFINE_WAIT(shmem_fault_wait);
1044 ++
1045 ++ ret = VM_FAULT_NOPAGE;
1046 ++ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1047 ++ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1048 ++ /* It's polite to up mmap_sem if we can */
1049 ++ up_read(&vma->vm_mm->mmap_sem);
1050 ++ ret = VM_FAULT_RETRY;
1051 ++ }
1052 ++
1053 ++ shmem_falloc_waitq = shmem_falloc->waitq;
1054 ++ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1055 ++ TASK_UNINTERRUPTIBLE);
1056 ++ spin_unlock(&inode->i_lock);
1057 ++ schedule();
1058 ++
1059 ++ /*
1060 ++ * shmem_falloc_waitq points into the shmem_fallocate()
1061 ++ * stack of the hole-punching task: shmem_falloc_waitq
1062 ++ * is usually invalid by the time we reach here, but
1063 ++ * finish_wait() does not dereference it in that case;
1064 ++ * though i_lock needed lest racing with wake_up_all().
1065 ++ */
1066 ++ spin_lock(&inode->i_lock);
1067 ++ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1068 ++ spin_unlock(&inode->i_lock);
1069 ++ return ret;
1070 ++ }
1071 ++ spin_unlock(&inode->i_lock);
1072 ++ }
1073 ++
1074 + error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1075 + if (error)
1076 + return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1077 +@@ -1821,12 +1887,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1078 + struct address_space *mapping = file->f_mapping;
1079 + loff_t unmap_start = round_up(offset, PAGE_SIZE);
1080 + loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1081 ++ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1082 ++
1083 ++ shmem_falloc.waitq = &shmem_falloc_waitq;
1084 ++ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1085 ++ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1086 ++ spin_lock(&inode->i_lock);
1087 ++ inode->i_private = &shmem_falloc;
1088 ++ spin_unlock(&inode->i_lock);
1089 +
1090 + if ((u64)unmap_end > (u64)unmap_start)
1091 + unmap_mapping_range(mapping, unmap_start,
1092 + 1 + unmap_end - unmap_start, 0);
1093 + shmem_truncate_range(inode, offset, offset + len - 1);
1094 + /* No need to unmap again: hole-punching leaves COWed pages */
1095 ++
1096 ++ spin_lock(&inode->i_lock);
1097 ++ inode->i_private = NULL;
1098 ++ wake_up_all(&shmem_falloc_waitq);
1099 ++ spin_unlock(&inode->i_lock);
1100 + error = 0;
1101 + goto out;
1102 + }
1103 +@@ -1844,6 +1923,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1104 + goto out;
1105 + }
1106 +
1107 ++ shmem_falloc.waitq = NULL;
1108 + shmem_falloc.start = start;
1109 + shmem_falloc.next = start;
1110 + shmem_falloc.nr_falloced = 0;
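The shmem changes implement a drain-and-wait handshake: shmem_fallocate() publishes an on-stack wait queue through inode->i_private, faulting tasks that touch the range being punched park on it (dropping mmap_sem first when allowed), and the puncher wakes everyone once the truncate is complete. A minimal pthreads analogue of the same shape (hypothetical names; the kernel uses a waitqueue under i_lock rather than a mutex/condvar):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t punch_done = PTHREAD_COND_INITIALIZER;
static bool punching;
static long punch_start, punch_end;

/* Fault path: refuse to instantiate pages inside a hole being punched. */
static void fault_in_range(long index)
{
	pthread_mutex_lock(&lock);
	while (punching && index >= punch_start && index < punch_end)
		pthread_cond_wait(&punch_done, &lock);
	pthread_mutex_unlock(&lock);
	/* ... safe to populate the page now ... */
}

/* Hole-punch path: publish the range, truncate, then release the waiters. */
static void punch_hole(long start, long end)
{
	pthread_mutex_lock(&lock);
	punching = true;
	punch_start = start;
	punch_end = end;
	pthread_mutex_unlock(&lock);

	/* ... unmap and truncate the range here ... */

	pthread_mutex_lock(&lock);
	punching = false;
	pthread_cond_broadcast(&punch_done);
	pthread_mutex_unlock(&lock);
}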
1111 +diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
1112 +index 4a78c4de9f20..42ef36a85e69 100644
1113 +--- a/net/8021q/vlan_core.c
1114 ++++ b/net/8021q/vlan_core.c
1115 +@@ -103,8 +103,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
1116 +
1117 + static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
1118 + {
1119 +- if (skb_cow(skb, skb_headroom(skb)) < 0)
1120 ++ if (skb_cow(skb, skb_headroom(skb)) < 0) {
1121 ++ kfree_skb(skb);
1122 + return NULL;
1123 ++ }
1124 ++
1125 + memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
1126 + skb->mac_header += VLAN_HLEN;
1127 + return skb;
1128 +diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
1129 +index 0018daccdea9..8799e171addf 100644
1130 +--- a/net/appletalk/ddp.c
1131 ++++ b/net/appletalk/ddp.c
1132 +@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1133 + goto drop;
1134 +
1135 + /* Queue packet (standard) */
1136 +- skb->sk = sock;
1137 +-
1138 + if (sock_queue_rcv_skb(sock, skb) < 0)
1139 + goto drop;
1140 +
1141 +@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1142 + if (!skb)
1143 + goto out;
1144 +
1145 +- skb->sk = sk;
1146 + skb_reserve(skb, ddp_dl->header_length);
1147 + skb_reserve(skb, dev->hard_header_len);
1148 + skb->dev = dev;
1149 +diff --git a/net/core/dst.c b/net/core/dst.c
1150 +index df9cc810ec8e..c0e021871df8 100644
1151 +--- a/net/core/dst.c
1152 ++++ b/net/core/dst.c
1153 +@@ -267,6 +267,15 @@ again:
1154 + }
1155 + EXPORT_SYMBOL(dst_destroy);
1156 +
1157 ++static void dst_destroy_rcu(struct rcu_head *head)
1158 ++{
1159 ++ struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
1160 ++
1161 ++ dst = dst_destroy(dst);
1162 ++ if (dst)
1163 ++ __dst_free(dst);
1164 ++}
1165 ++
1166 + void dst_release(struct dst_entry *dst)
1167 + {
1168 + if (dst) {
1169 +@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
1170 +
1171 + newrefcnt = atomic_dec_return(&dst->__refcnt);
1172 + WARN_ON(newrefcnt < 0);
1173 +- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
1174 +- dst = dst_destroy(dst);
1175 +- if (dst)
1176 +- __dst_free(dst);
1177 +- }
1178 ++ if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
1179 ++ call_rcu(&dst->rcu_head, dst_destroy_rcu);
1180 + }
1181 + }
1182 + EXPORT_SYMBOL(dst_release);
1183 +diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
1184 +index c32be292c7e3..2022b46ab38f 100644
1185 +--- a/net/dns_resolver/dns_query.c
1186 ++++ b/net/dns_resolver/dns_query.c
1187 +@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
1188 + if (!*_result)
1189 + goto put;
1190 +
1191 +- memcpy(*_result, upayload->data, len + 1);
1192 ++ memcpy(*_result, upayload->data, len);
1193 ++ (*_result)[len] = '\0';
1194 ++
1195 + if (_expiry)
1196 + *_expiry = rkey->expiry;
1197 +
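The dns_query() change stops assuming the upcall payload carries its own terminator: copy exactly len bytes, then write the NUL explicitly, so the byte past the payload is never read. The same defensive idiom in plain C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a NUL-terminated copy of a length-delimited, possibly
 * unterminated payload. */
static char *copy_result(const char *payload, size_t len)
{
	char *result = malloc(len + 1);

	if (!result)
		return NULL;
	memcpy(result, payload, len);	/* never touch payload[len] */
	result[len] = '\0';
	return result;
}

int main(void)
{
	const char raw[7] = { 'e', 'x', 'a', 'm', 'p', 'l', 'e' };	/* no NUL */
	char *s = copy_result(raw, sizeof(raw));

	printf("%s\n", s ? s : "(alloc failed)");
	free(s);
	return 0;
}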
1198 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
1199 +index 76e10b47e053..ea78ef5ac352 100644
1200 +--- a/net/ipv4/icmp.c
1201 ++++ b/net/ipv4/icmp.c
1202 +@@ -697,8 +697,6 @@ static void icmp_unreach(struct sk_buff *skb)
1203 + &iph->daddr);
1204 + } else {
1205 + info = ntohs(icmph->un.frag.mtu);
1206 +- if (!info)
1207 +- goto out;
1208 + }
1209 + break;
1210 + case ICMP_SR_FAILED:
1211 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1212 +index 089b4af4fecc..38d63ca8a6b5 100644
1213 +--- a/net/ipv4/igmp.c
1214 ++++ b/net/ipv4/igmp.c
1215 +@@ -1874,6 +1874,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1216 +
1217 + rtnl_lock();
1218 + in_dev = ip_mc_find_dev(net, imr);
1219 ++ if (!in_dev) {
1220 ++ ret = -ENODEV;
1221 ++ goto out;
1222 ++ }
1223 + ifindex = imr->imr_ifindex;
1224 + for (imlp = &inet->mc_list;
1225 + (iml = rtnl_dereference(*imlp)) != NULL;
1226 +@@ -1891,16 +1895,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1227 +
1228 + *imlp = iml->next_rcu;
1229 +
1230 +- if (in_dev)
1231 +- ip_mc_dec_group(in_dev, group);
1232 ++ ip_mc_dec_group(in_dev, group);
1233 + rtnl_unlock();
1234 + /* decrease mem now to avoid the memleak warning */
1235 + atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1236 + kfree_rcu(iml, rcu);
1237 + return 0;
1238 + }
1239 +- if (!in_dev)
1240 +- ret = -ENODEV;
1241 ++out:
1242 + rtnl_unlock();
1243 + return ret;
1244 + }
1245 +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
1246 +index ec7264514a82..089ed81d1878 100644
1247 +--- a/net/ipv4/ip_options.c
1248 ++++ b/net/ipv4/ip_options.c
1249 +@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
1250 + optptr++;
1251 + continue;
1252 + }
1253 ++ if (unlikely(l < 2)) {
1254 ++ pp_ptr = optptr;
1255 ++ goto error;
1256 ++ }
1257 + optlen = optptr[1];
1258 + if (optlen<2 || optlen>l) {
1259 + pp_ptr = optptr;
1260 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
1261 +index fa6573264c8a..5642374cb751 100644
1262 +--- a/net/ipv4/ip_tunnel.c
1263 ++++ b/net/ipv4/ip_tunnel.c
1264 +@@ -166,6 +166,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
1265 +
1266 + hlist_for_each_entry_rcu(t, head, hash_node) {
1267 + if (remote != t->parms.iph.daddr ||
1268 ++ t->parms.iph.saddr != 0 ||
1269 + !(t->dev->flags & IFF_UP))
1270 + continue;
1271 +
1272 +@@ -182,10 +183,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
1273 + head = &itn->tunnels[hash];
1274 +
1275 + hlist_for_each_entry_rcu(t, head, hash_node) {
1276 +- if ((local != t->parms.iph.saddr &&
1277 +- (local != t->parms.iph.daddr ||
1278 +- !ipv4_is_multicast(local))) ||
1279 +- !(t->dev->flags & IFF_UP))
1280 ++ if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
1281 ++ (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
1282 ++ continue;
1283 ++
1284 ++ if (!(t->dev->flags & IFF_UP))
1285 + continue;
1286 +
1287 + if (!ip_tunnel_key_match(&t->parms, flags, key))
1288 +@@ -202,6 +204,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
1289 +
1290 + hlist_for_each_entry_rcu(t, head, hash_node) {
1291 + if (t->parms.i_key != key ||
1292 ++ t->parms.iph.saddr != 0 ||
1293 ++ t->parms.iph.daddr != 0 ||
1294 + !(t->dev->flags & IFF_UP))
1295 + continue;
1296 +
1297 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1298 +index 7256eef088b2..2b9887becb5c 100644
1299 +--- a/net/ipv4/route.c
1300 ++++ b/net/ipv4/route.c
1301 +@@ -985,20 +985,21 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1302 + const struct iphdr *iph = (const struct iphdr *) skb->data;
1303 + struct flowi4 fl4;
1304 + struct rtable *rt;
1305 +- struct dst_entry *dst;
1306 ++ struct dst_entry *odst = NULL;
1307 + bool new = false;
1308 +
1309 + bh_lock_sock(sk);
1310 +- rt = (struct rtable *) __sk_dst_get(sk);
1311 ++ odst = sk_dst_get(sk);
1312 +
1313 +- if (sock_owned_by_user(sk) || !rt) {
1314 ++ if (sock_owned_by_user(sk) || !odst) {
1315 + __ipv4_sk_update_pmtu(skb, sk, mtu);
1316 + goto out;
1317 + }
1318 +
1319 + __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1320 +
1321 +- if (!__sk_dst_check(sk, 0)) {
1322 ++ rt = (struct rtable *)odst;
1323 ++ if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
1324 + rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1325 + if (IS_ERR(rt))
1326 + goto out;
1327 +@@ -1008,8 +1009,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1328 +
1329 + __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1330 +
1331 +- dst = dst_check(&rt->dst, 0);
1332 +- if (!dst) {
1333 ++ if (!dst_check(&rt->dst, 0)) {
1334 + if (new)
1335 + dst_release(&rt->dst);
1336 +
1337 +@@ -1021,10 +1021,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1338 + }
1339 +
1340 + if (new)
1341 +- __sk_dst_set(sk, &rt->dst);
1342 ++ sk_dst_set(sk, &rt->dst);
1343 +
1344 + out:
1345 + bh_unlock_sock(sk);
1346 ++ dst_release(odst);
1347 + }
1348 + EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1349 +
1350 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1351 +index 39bdb14b3214..5d4bd6ca3ab1 100644
1352 +--- a/net/ipv4/tcp.c
1353 ++++ b/net/ipv4/tcp.c
1354 +@@ -1065,7 +1065,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1355 + if (unlikely(tp->repair)) {
1356 + if (tp->repair_queue == TCP_RECV_QUEUE) {
1357 + copied = tcp_send_rcvq(sk, msg, size);
1358 +- goto out;
1359 ++ goto out_nopush;
1360 + }
1361 +
1362 + err = -EINVAL;
1363 +@@ -1238,6 +1238,7 @@ wait_for_memory:
1364 + out:
1365 + if (copied)
1366 + tcp_push(sk, flags, mss_now, tp->nonagle);
1367 ++out_nopush:
1368 + release_sock(sk);
1369 + return copied + copied_syn;
1370 +
1371 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1372 +index ba7d2b7ad9f9..ea7f52f3062d 100644
1373 +--- a/net/ipv4/tcp_input.c
1374 ++++ b/net/ipv4/tcp_input.c
1375 +@@ -1075,7 +1075,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1376 + }
1377 +
1378 + /* D-SACK for already forgotten data... Do dumb counting. */
1379 +- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
1380 ++ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
1381 + !after(end_seq_0, prior_snd_una) &&
1382 + after(end_seq_0, tp->undo_marker))
1383 + tp->undo_retrans--;
1384 +@@ -1130,7 +1130,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1385 + unsigned int new_len = (pkt_len / mss) * mss;
1386 + if (!in_sack && new_len < pkt_len) {
1387 + new_len += mss;
1388 +- if (new_len > skb->len)
1389 ++ if (new_len >= skb->len)
1390 + return 0;
1391 + }
1392 + pkt_len = new_len;
1393 +@@ -1154,7 +1154,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
1394 +
1395 + /* Account D-SACK for retransmitted packet. */
1396 + if (dup_sack && (sacked & TCPCB_RETRANS)) {
1397 +- if (tp->undo_marker && tp->undo_retrans &&
1398 ++ if (tp->undo_marker && tp->undo_retrans > 0 &&
1399 + after(end_seq, tp->undo_marker))
1400 + tp->undo_retrans--;
1401 + if (sacked & TCPCB_SACKED_ACKED)
1402 +@@ -1850,7 +1850,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
1403 + tp->lost_out = 0;
1404 +
1405 + tp->undo_marker = 0;
1406 +- tp->undo_retrans = 0;
1407 ++ tp->undo_retrans = -1;
1408 + }
1409 +
1410 + void tcp_clear_retrans(struct tcp_sock *tp)
1411 +@@ -2700,7 +2700,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
1412 +
1413 + tp->prior_ssthresh = 0;
1414 + tp->undo_marker = tp->snd_una;
1415 +- tp->undo_retrans = tp->retrans_out;
1416 ++ tp->undo_retrans = tp->retrans_out ? : -1;
1417 +
1418 + if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
1419 + if (!ece_ack)
1420 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1421 +index 4a4e8746d1b2..56e29f0e230e 100644
1422 +--- a/net/ipv4/tcp_output.c
1423 ++++ b/net/ipv4/tcp_output.c
1424 +@@ -2428,13 +2428,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1425 + if (!tp->retrans_stamp)
1426 + tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1427 +
1428 +- tp->undo_retrans += tcp_skb_pcount(skb);
1429 +-
1430 + /* snd_nxt is stored to detect loss of retransmitted segment,
1431 + * see tcp_input.c tcp_sacktag_write_queue().
1432 + */
1433 + TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1434 + }
1435 ++
1436 ++ if (tp->undo_retrans < 0)
1437 ++ tp->undo_retrans = 0;
1438 ++ tp->undo_retrans += tcp_skb_pcount(skb);
1439 + return err;
1440 + }
1441 +
1442 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1443 +index be34adde692f..5ed562dfe743 100644
1444 +--- a/net/netlink/af_netlink.c
1445 ++++ b/net/netlink/af_netlink.c
1446 +@@ -500,7 +500,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
1447 + while (nlk->cb != NULL && netlink_dump_space(nlk)) {
1448 + err = netlink_dump(sk);
1449 + if (err < 0) {
1450 +- sk->sk_err = err;
1451 ++ sk->sk_err = -err;
1452 + sk->sk_error_report(sk);
1453 + break;
1454 + }
1455 +@@ -2272,7 +2272,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1456 + if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1457 + ret = netlink_dump(sk);
1458 + if (ret) {
1459 +- sk->sk_err = ret;
1460 ++ sk->sk_err = -ret;
1461 + sk->sk_error_report(sk);
1462 + }
1463 + }
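The netlink change fixes a sign convention: in-kernel calls such as netlink_dump() return negative errno values, while sk->sk_err (surfaced to userspace via SO_ERROR) stores the positive errno, hence the negation. A runnable sketch of the convention:

#include <stdio.h>
#include <errno.h>

static int do_dump(void)
{
	return -ENOBUFS;	/* kernel style: negative errno on failure */
}

int main(void)
{
	int err = do_dump();
	int sk_err = 0;

	if (err < 0)
		sk_err = -err;	/* sk_err holds the positive errno */
	printf("sk_err=%d (ENOBUFS=%d)\n", sk_err, ENOBUFS);
	return 0;
}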
1464 +diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
1465 +index fe0ba7488bdf..29299dcabfbb 100644
1466 +--- a/net/sctp/sysctl.c
1467 ++++ b/net/sctp/sysctl.c
1468 +@@ -368,8 +368,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
1469 + tbl.data = &net->sctp.auth_enable;
1470 +
1471 + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
1472 +-
1473 +- if (write) {
1474 ++ if (write && ret == 0) {
1475 + struct sock *sk = net->sctp.ctl_sock;
1476 +
1477 + net->sctp.auth_enable = new_value;
1478 +diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
1479 +index 10c018a5b9fe..ca907f2f5e5a 100644
1480 +--- a/net/sctp/ulpevent.c
1481 ++++ b/net/sctp/ulpevent.c
1482 +@@ -373,9 +373,10 @@ fail:
1483 + * specification [SCTP] and any extensions for a list of possible
1484 + * error formats.
1485 + */
1486 +-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
1487 +- const struct sctp_association *asoc, struct sctp_chunk *chunk,
1488 +- __u16 flags, gfp_t gfp)
1489 ++struct sctp_ulpevent *
1490 ++sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
1491 ++ struct sctp_chunk *chunk, __u16 flags,
1492 ++ gfp_t gfp)
1493 + {
1494 + struct sctp_ulpevent *event;
1495 + struct sctp_remote_error *sre;
1496 +@@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
1497 + /* Copy the skb to a new skb with room for us to prepend
1498 + * notification with.
1499 + */
1500 +- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
1501 +- 0, gfp);
1502 ++ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
1503 +
1504 + /* Pull off the rest of the cause TLV from the chunk. */
1505 + skb_pull(chunk->skb, elen);
1506 +@@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
1507 + event = sctp_skb2event(skb);
1508 + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
1509 +
1510 +- sre = (struct sctp_remote_error *)
1511 +- skb_push(skb, sizeof(struct sctp_remote_error));
1512 ++ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
1513 +
1514 + /* Trim the buffer to the right length. */
1515 +- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
1516 ++ skb_trim(skb, sizeof(*sre) + elen);
1517 +
1518 +- /* Socket Extensions for SCTP
1519 +- * 5.3.1.3 SCTP_REMOTE_ERROR
1520 +- *
1521 +- * sre_type:
1522 +- * It should be SCTP_REMOTE_ERROR.
1523 +- */
1524 ++ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
1525 ++ memset(sre, 0, sizeof(*sre));
1526 + sre->sre_type = SCTP_REMOTE_ERROR;
1527 +-
1528 +- /*
1529 +- * Socket Extensions for SCTP
1530 +- * 5.3.1.3 SCTP_REMOTE_ERROR
1531 +- *
1532 +- * sre_flags: 16 bits (unsigned integer)
1533 +- * Currently unused.
1534 +- */
1535 + sre->sre_flags = 0;
1536 +-
1537 +- /* Socket Extensions for SCTP
1538 +- * 5.3.1.3 SCTP_REMOTE_ERROR
1539 +- *
1540 +- * sre_length: sizeof (__u32)
1541 +- *
1542 +- * This field is the total length of the notification data,
1543 +- * including the notification header.
1544 +- */
1545 + sre->sre_length = skb->len;
1546 +-
1547 +- /* Socket Extensions for SCTP
1548 +- * 5.3.1.3 SCTP_REMOTE_ERROR
1549 +- *
1550 +- * sre_error: 16 bits (unsigned integer)
1551 +- * This value represents one of the Operational Error causes defined in
1552 +- * the SCTP specification, in network byte order.
1553 +- */
1554 + sre->sre_error = cause;
1555 +-
1556 +- /* Socket Extensions for SCTP
1557 +- * 5.3.1.3 SCTP_REMOTE_ERROR
1558 +- *
1559 +- * sre_assoc_id: sizeof (sctp_assoc_t)
1560 +- *
1561 +- * The association id field, holds the identifier for the association.
1562 +- * All notifications for a given association have the same association
1563 +- * identifier. For TCP style socket, this field is ignored.
1564 +- */
1565 + sctp_ulpevent_set_owner(event, asoc);
1566 + sre->sre_assoc_id = sctp_assoc2id(asoc);
1567 +
1568 + return event;
1569 +-
1570 + fail:
1571 + return NULL;
1572 + }
1573 +@@ -906,7 +865,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
1574 + return notification->sn_header.sn_type;
1575 + }
1576 +
1577 +-/* Copy out the sndrcvinfo into a msghdr. */
1578 ++/* RFC6458, Section 5.3.2. SCTP Header Information Structure
1579 ++ * (SCTP_SNDRCV, DEPRECATED)
1580 ++ */
1581 + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
1582 + struct msghdr *msghdr)
1583 + {
1584 +@@ -915,74 +876,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
1585 + if (sctp_ulpevent_is_notification(event))
1586 + return;
1587 +
1588 +- /* Sockets API Extensions for SCTP
1589 +- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
1590 +- *
1591 +- * sinfo_stream: 16 bits (unsigned integer)
1592 +- *
1593 +- * For recvmsg() the SCTP stack places the message's stream number in
1594 +- * this value.
1595 +- */
1596 ++ memset(&sinfo, 0, sizeof(sinfo));
1597 + sinfo.sinfo_stream = event->stream;
1598 +- /* sinfo_ssn: 16 bits (unsigned integer)
1599 +- *
1600 +- * For recvmsg() this value contains the stream sequence number that
1601 +- * the remote endpoint placed in the DATA chunk. For fragmented
1602 +- * messages this is the same number for all deliveries of the message
1603 +- * (if more than one recvmsg() is needed to read the message).
1604 +- */
1605 + sinfo.sinfo_ssn = event->ssn;
1606 +- /* sinfo_ppid: 32 bits (unsigned integer)
1607 +- *
1608 +- * In recvmsg() this value is
1609 +- * the same information that was passed by the upper layer in the peer
1610 +- * application. Please note that byte order issues are NOT accounted
1611 +- * for and this information is passed opaquely by the SCTP stack from
1612 +- * one end to the other.
1613 +- */
1614 + sinfo.sinfo_ppid = event->ppid;
1615 +- /* sinfo_flags: 16 bits (unsigned integer)
1616 +- *
1617 +- * This field may contain any of the following flags and is composed of
1618 +- * a bitwise OR of these values.
1619 +- *
1620 +- * recvmsg() flags:
1621 +- *
1622 +- * SCTP_UNORDERED - This flag is present when the message was sent
1623 +- * non-ordered.
1624 +- */
1625 + sinfo.sinfo_flags = event->flags;
1626 +- /* sinfo_tsn: 32 bit (unsigned integer)
1627 +- *
1628 +- * For the receiving side, this field holds a TSN that was
1629 +- * assigned to one of the SCTP Data Chunks.
1630 +- */
1631 + sinfo.sinfo_tsn = event->tsn;
1632 +- /* sinfo_cumtsn: 32 bit (unsigned integer)
1633 +- *
1634 +- * This field will hold the current cumulative TSN as
1635 +- * known by the underlying SCTP layer. Note this field is
1636 +- * ignored when sending and only valid for a receive
1637 +- * operation when sinfo_flags are set to SCTP_UNORDERED.
1638 +- */
1639 + sinfo.sinfo_cumtsn = event->cumtsn;
1640 +- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
1641 +- *
1642 +- * The association handle field, sinfo_assoc_id, holds the identifier
1643 +- * for the association announced in the COMMUNICATION_UP notification.
1644 +- * All notifications for a given association have the same identifier.
1645 +- * Ignored for one-to-one style sockets.
1646 +- */
1647 + sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
1648 +-
1649 +- /* context value that is set via SCTP_CONTEXT socket option. */
1650 ++ /* Context value that is set via SCTP_CONTEXT socket option. */
1651 + sinfo.sinfo_context = event->asoc->default_rcv_context;
1652 +-
1653 + /* These fields are not used while receiving. */
1654 + sinfo.sinfo_timetolive = 0;
1655 +
1656 + put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
1657 +- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
1658 ++ sizeof(sinfo), &sinfo);
1659 + }
1660 +
1661 + /* Do accounting for bytes received and hold a reference to the association
1662 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
1663 +index e5f3da507823..bf2755419ec6 100644
1664 +--- a/net/tipc/bcast.c
1665 ++++ b/net/tipc/bcast.c
1666 +@@ -531,6 +531,7 @@ receive:
1667 +
1668 + buf = node->bclink.deferred_head;
1669 + node->bclink.deferred_head = buf->next;
1670 ++ buf->next = NULL;
1671 + node->bclink.deferred_size--;
1672 + goto receive;
1673 + }