From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.4 commit in: /
Date: Mon, 28 Jul 2014 16:52:43
Message-Id: 1406566230.da70b949627e538e9465762c96f64ef07190e788.mpagano@gentoo
commit: da70b949627e538e9465762c96f64ef07190e788
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 28 16:50:30 2014 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jul 28 16:50:30 2014 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=da70b949

Linux patch 3.4.100

---
 0000_README              |   4 +
 1099_linux-3.4.100.patch | 929 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 933 insertions(+)

diff --git a/0000_README b/0000_README
index d5d9d81..af7f3f3 100644
--- a/0000_README
+++ b/0000_README
@@ -435,6 +435,10 @@ Patch: 1098_linux-3.4.99.patch
 From: http://www.kernel.org
 Desc: Linux 3.4.99

+Patch: 1099_linux-3.4.100.patch
+From: http://www.kernel.org
+Desc: Linux 3.4.100
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1099_linux-3.4.100.patch b/1099_linux-3.4.100.patch
new file mode 100644
index 0000000..597e1fd
--- /dev/null
+++ b/1099_linux-3.4.100.patch
@@ -0,0 +1,929 @@
+diff --git a/Makefile b/Makefile
+index ed97caf40f71..d6c64eb82525 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 99
++SUBLEVEL = 100
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 268b2455e7b0..b1cbcff69cdb 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1070,6 +1070,15 @@ again:
+ intel_pmu_lbr_read();
+
+ /*
++ * CondChgd bit 63 doesn't mean any overflow status. Ignore
++ * and clear the bit.
++ */
++ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
++ if (!status)
++ goto done;
++ }
++
++ /*
+ * PEBS overflow sets bit 62 in the global status register
+ */
+ if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+diff --git a/crypto/testmgr.h b/crypto/testmgr.h
+index 36e5a8ee0e1e..1ae2e0ea5492 100644
+--- a/crypto/testmgr.h
++++ b/crypto/testmgr.h
+@@ -14558,38 +14558,40 @@ static struct pcomp_testvec zlib_decomp_tv_template[] = {
+ static struct comp_testvec lzo_comp_tv_template[] = {
+ {
+ .inlen = 70,
+- .outlen = 46,
++ .outlen = 57,
+ .input = "Join us now and share the software "
+ "Join us now and share the software ",
+ .output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
+- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+- "\x64\x20\x73\x68\x61\x72\x65\x20"
+- "\x74\x68\x65\x20\x73\x6f\x66\x74"
+- "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
+- "\x3d\x88\x00\x11\x00\x00",
++ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
++ "\x64\x20\x73\x68\x61\x72\x65\x20"
++ "\x74\x68\x65\x20\x73\x6f\x66\x74"
++ "\x77\x70\x01\x32\x88\x00\x0c\x65"
++ "\x20\x74\x68\x65\x20\x73\x6f\x66"
++ "\x74\x77\x61\x72\x65\x20\x11\x00"
++ "\x00",
+ }, {
+ .inlen = 159,
+- .outlen = 133,
++ .outlen = 131,
+ .input = "This document describes a compression method based on the LZO "
+ "compression algorithm. This document defines the application of "
+ "the LZO algorithm used in UBIFS.",
+- .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
++ .output = "\x00\x2c\x54\x68\x69\x73\x20\x64"
+ "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
+ "\x64\x65\x73\x63\x72\x69\x62\x65"
+ "\x73\x20\x61\x20\x63\x6f\x6d\x70"
+ "\x72\x65\x73\x73\x69\x6f\x6e\x20"
+ "\x6d\x65\x74\x68\x6f\x64\x20\x62"
+ "\x61\x73\x65\x64\x20\x6f\x6e\x20"
+- "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
+- "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
+- "\x69\x74\x68\x6d\x2e\x20\x20\x54"
+- "\x68\x69\x73\x2a\x54\x01\x02\x66"
+- "\x69\x6e\x65\x73\x94\x06\x05\x61"
+- "\x70\x70\x6c\x69\x63\x61\x74\x76"
+- "\x0a\x6f\x66\x88\x02\x60\x09\x27"
+- "\xf0\x00\x0c\x20\x75\x73\x65\x64"
+- "\x20\x69\x6e\x20\x55\x42\x49\x46"
+- "\x53\x2e\x11\x00\x00",
++ "\x74\x68\x65\x20\x4c\x5a\x4f\x20"
++ "\x2a\x8c\x00\x09\x61\x6c\x67\x6f"
++ "\x72\x69\x74\x68\x6d\x2e\x20\x20"
++ "\x2e\x54\x01\x03\x66\x69\x6e\x65"
++ "\x73\x20\x74\x06\x05\x61\x70\x70"
++ "\x6c\x69\x63\x61\x74\x76\x0a\x6f"
++ "\x66\x88\x02\x60\x09\x27\xf0\x00"
++ "\x0c\x20\x75\x73\x65\x64\x20\x69"
++ "\x6e\x20\x55\x42\x49\x46\x53\x2e"
++ "\x11\x00\x00",
+ },
+ };
+
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 60404f4b2446..adc9bfd4d82f 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -709,6 +709,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ struct radeon_device *rdev = dev->dev_private;
+ int ret = 0;
+
++ /* don't leak the edid if we already fetched it in detect() */
++ if (radeon_connector->edid)
++ goto got_edid;
++
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+@@ -748,6 +752,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ if (radeon_connector->edid) {
++got_edid:
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 97b2e21ac46a..cf065df9bb18 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -582,7 +582,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
+ {
+ struct intel_iommu *iommu;
+ int map_size;
+- u32 ver;
++ u32 ver, sts;
+ static int iommu_allocated = 0;
+ int agaw = 0;
+ int msagaw = 0;
+@@ -652,6 +652,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
+ (unsigned long long)iommu->cap,
+ (unsigned long long)iommu->ecap);
+
++ /* Reflect status in gcmd */
++ sts = readl(iommu->reg + DMAR_GSTS_REG);
++ if (sts & DMA_GSTS_IRES)
++ iommu->gcmd |= DMA_GCMD_IRE;
++ if (sts & DMA_GSTS_TES)
++ iommu->gcmd |= DMA_GCMD_TE;
++ if (sts & DMA_GSTS_QIES)
++ iommu->gcmd |= DMA_GCMD_QIE;
++
+ raw_spin_lock_init(&iommu->register_lock);
+
+ drhd->iommu = iommu;
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 4e1c6bfc9c8d..dd255c578ad9 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3659,6 +3659,7 @@ static struct notifier_block device_nb = {
+ int __init intel_iommu_init(void)
+ {
+ int ret = 0;
++ struct dmar_drhd_unit *drhd;
+
+ /* VT-d is required for a TXT/tboot launch, so enforce that */
+ force_on = tboot_force_iommu();
+@@ -3669,6 +3670,20 @@ int __init intel_iommu_init(void)
+ return -ENODEV;
+ }
+
++ /*
++ * Disable translation if already enabled prior to OS handover.
++ */
++ for_each_drhd_unit(drhd) {
++ struct intel_iommu *iommu;
++
++ if (drhd->ignored)
++ continue;
++
++ iommu = drhd->iommu;
++ if (iommu->gcmd & DMA_GCMD_TE)
++ iommu_disable_translation(iommu);
++ }
++
+ if (dmar_dev_scope_init() < 0) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMAR device scope\n");
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index ef1f9400b967..b2740f12b180 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -2411,7 +2411,7 @@ static int be_open(struct net_device *netdev)
+
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_enable(&eqo->napi);
+- be_eq_notify(adapter, eqo->q.id, true, false, 0);
++ be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ }
+
+ status = be_cmd_link_status_query(adapter, NULL, NULL,
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 8e2ac643a777..ed6ec513defa 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1086,6 +1086,24 @@ static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+ return vp;
+ }
+
++static void vnet_cleanup(void)
++{
++ struct vnet *vp;
++ struct net_device *dev;
++
++ mutex_lock(&vnet_list_mutex);
++ while (!list_empty(&vnet_list)) {
++ vp = list_first_entry(&vnet_list, struct vnet, list);
++ list_del(&vp->list);
++ dev = vp->dev;
++ /* vio_unregister_driver() should have cleaned up port_list */
++ BUG_ON(!list_empty(&vp->port_list));
++ unregister_netdev(dev);
++ free_netdev(dev);
++ }
++ mutex_unlock(&vnet_list_mutex);
++}
++
+ static const char *local_mac_prop = "local-mac-address";
+
+ static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+@@ -1244,7 +1262,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
+
+ kfree(port);
+
+- unregister_netdev(vp->dev);
+ }
+ return 0;
+ }
+@@ -1272,6 +1289,7 @@ static int __init vnet_init(void)
+ static void __exit vnet_exit(void)
+ {
+ vio_unregister_driver(&vnet_port_driver);
++ vnet_cleanup();
+ }
+
+ module_init(vnet_init);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index bac88c22d990..fbe75a784edb 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -681,7 +681,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
+ dev->hard_header_len);
+
+- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
++ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
+ po->chan.private = sk;
+ po->chan.ops = &pppoe_chan_ops;
+
+diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
+index 9d1b3ca6334b..a884c322f3ea 100644
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -457,6 +457,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
++ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ mwifiex_fill_buffer(skb);
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 19db29f67558..f27d0c8cd9e8 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -185,6 +185,7 @@ void thaw_processes(void)
+
+ printk("Restarting tasks ... ");
+
++ __usermodehelper_set_disable_depth(UMH_FREEZING);
+ thaw_workqueues();
+
+ read_lock(&tasklist_lock);
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 877aa733b961..b7045793bd56 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -569,9 +569,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
+ struct itimerspec *new_setting,
+ struct itimerspec *old_setting)
+ {
++ ktime_t exp;
++
+ if (!rtcdev)
+ return -ENOTSUPP;
+
++ if (flags & ~TIMER_ABSTIME)
++ return -EINVAL;
++
+ if (old_setting)
+ alarm_timer_get(timr, old_setting);
+
+@@ -581,8 +586,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
+
+ /* start the timer */
+ timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+- alarm_start(&timr->it.alarm.alarmtimer,
+- timespec_to_ktime(new_setting->it_value));
++ exp = timespec_to_ktime(new_setting->it_value);
++ /* Convert (if necessary) to absolute time */
++ if (flags != TIMER_ABSTIME) {
++ ktime_t now;
++
++ now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
++ exp = ktime_add(now, exp);
++ }
++
++ alarm_start(&timr->it.alarm.alarmtimer, exp);
+ return 0;
+ }
+
+@@ -714,6 +727,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
++ if (flags & ~TIMER_ABSTIME)
++ return -EINVAL;
++
+ if (!capable(CAP_WAKE_ALARM))
+ return -EPERM;
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 58c4a477be67..4bb5a80dd13b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -76,6 +76,17 @@ static struct vfsmount *shm_mnt;
+ /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+ #define SHORT_SYMLINK_LEN 128
+
++/*
++ * vmtruncate_range() communicates with shmem_fault via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
++ */
++struct shmem_falloc {
++ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
++ pgoff_t start; /* start of range currently being fallocated */
++ pgoff_t next; /* the next page offset to be fallocated */
++};
++
+ struct shmem_xattr {
+ struct list_head list; /* anchored by shmem_inode_info->xattr_list */
+ char *name; /* xattr name */
+@@ -488,22 +499,19 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ }
+
+ index = start;
+- for ( ; ; ) {
++ while (index <= end) {
+ cond_resched();
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ pvec.pages, indices);
+ if (!pvec.nr) {
+- if (index == start)
++ /* If all gone or hole-punch, we're done */
++ if (index == start || end != -1)
+ break;
++ /* But if truncating, restart to make sure all gone */
+ index = start;
+ continue;
+ }
+- if (index == start && indices[0] > end) {
+- shmem_deswap_pagevec(&pvec);
+- pagevec_release(&pvec);
+- break;
+- }
+ mem_cgroup_uncharge_start();
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+@@ -513,8 +521,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ break;
+
+ if (radix_tree_exceptional_entry(page)) {
+- nr_swaps_freed += !shmem_free_swap(mapping,
+- index, page);
++ if (shmem_free_swap(mapping, index, page)) {
++ /* Swap was replaced by page: retry */
++ index--;
++ break;
++ }
++ nr_swaps_freed++;
+ continue;
+ }
+
+@@ -522,6 +534,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
++ } else {
++ /* Page was replaced by swap: retry */
++ unlock_page(page);
++ index--;
++ break;
+ }
+ unlock_page(page);
+ }
+@@ -1060,6 +1077,63 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ int error;
+ int ret = VM_FAULT_LOCKED;
+
++ /*
++ * Trinity finds that probing a hole which tmpfs is punching can
++ * prevent the hole-punch from ever completing: which in turn
++ * locks writers out with its hold on i_mutex. So refrain from
++ * faulting pages into the hole while it's being punched. Although
++ * shmem_truncate_range() does remove the additions, it may be unable to
++ * keep up, as each new page needs its own unmap_mapping_range() call,
++ * and the i_mmap tree grows ever slower to scan if new vmas are added.
++ *
++ * It does not matter if we sometimes reach this check just before the
++ * hole-punch begins, so that one fault then races with the punch:
++ * we just need to make racing faults a rare case.
++ *
++ * The implementation below would be much simpler if we just used a
++ * standard mutex or completion: but we cannot take i_mutex in fault,
++ * and bloating every shmem inode for this unlikely case would be sad.
++ */
++ if (unlikely(inode->i_private)) {
++ struct shmem_falloc *shmem_falloc;
++
++ spin_lock(&inode->i_lock);
++ shmem_falloc = inode->i_private;
++ if (shmem_falloc &&
++ vmf->pgoff >= shmem_falloc->start &&
++ vmf->pgoff < shmem_falloc->next) {
++ wait_queue_head_t *shmem_falloc_waitq;
++ DEFINE_WAIT(shmem_fault_wait);
++
++ ret = VM_FAULT_NOPAGE;
++ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++ /* It's polite to up mmap_sem if we can */
++ up_read(&vma->vm_mm->mmap_sem);
++ ret = VM_FAULT_RETRY;
++ }
++
++ shmem_falloc_waitq = shmem_falloc->waitq;
++ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++ TASK_UNINTERRUPTIBLE);
++ spin_unlock(&inode->i_lock);
++ schedule();
++
++ /*
++ * shmem_falloc_waitq points into the vmtruncate_range()
++ * stack of the hole-punching task: shmem_falloc_waitq
++ * is usually invalid by the time we reach here, but
++ * finish_wait() does not dereference it in that case;
++ * though i_lock needed lest racing with wake_up_all().
++ */
++ spin_lock(&inode->i_lock);
++ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++ spin_unlock(&inode->i_lock);
++ return ret;
++ }
++ spin_unlock(&inode->i_lock);
++ }
++
+ error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+ if (error)
+ return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1071,6 +1145,47 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ return ret;
+ }
+
++int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
++{
++ /*
++ * If the underlying filesystem is not going to provide
++ * a way to truncate a range of blocks (punch a hole) -
++ * we should return failure right now.
++ * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
++ */
++ if (inode->i_op->truncate_range != shmem_truncate_range)
++ return -ENOSYS;
++
++ mutex_lock(&inode->i_mutex);
++ {
++ struct shmem_falloc shmem_falloc;
++ struct address_space *mapping = inode->i_mapping;
++ loff_t unmap_start = round_up(lstart, PAGE_SIZE);
++ loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
++ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
++
++ shmem_falloc.waitq = &shmem_falloc_waitq;
++ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++ spin_lock(&inode->i_lock);
++ inode->i_private = &shmem_falloc;
++ spin_unlock(&inode->i_lock);
++
++ if ((u64)unmap_end > (u64)unmap_start)
++ unmap_mapping_range(mapping, unmap_start,
++ 1 + unmap_end - unmap_start, 0);
++ shmem_truncate_range(inode, lstart, lend);
++ /* No need to unmap again: hole-punching leaves COWed pages */
++
++ spin_lock(&inode->i_lock);
++ inode->i_private = NULL;
++ wake_up_all(&shmem_falloc_waitq);
++ spin_unlock(&inode->i_lock);
++ }
++ mutex_unlock(&inode->i_mutex);
++ return 0;
++}
++
+ #ifdef CONFIG_NUMA
+ static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
+ {
+@@ -2547,6 +2662,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ }
+ EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
++int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
++{
++ /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
++ return -ENOSYS;
++}
++
+ #define shmem_vm_ops generic_file_vm_ops
+ #define shmem_file_operations ramfs_file_operations
+ #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 4224627695ba..f38055cb8af6 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -603,31 +603,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
+ }
+ EXPORT_SYMBOL(vmtruncate);
+
+-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+-{
+- struct address_space *mapping = inode->i_mapping;
+- loff_t holebegin = round_up(lstart, PAGE_SIZE);
+- loff_t holelen = 1 + lend - holebegin;
+-
+- /*
+- * If the underlying filesystem is not going to provide
+- * a way to truncate a range of blocks (punch a hole) -
+- * we should return failure right now.
+- */
+- if (!inode->i_op->truncate_range)
+- return -ENOSYS;
+-
+- mutex_lock(&inode->i_mutex);
+- inode_dio_wait(inode);
+- unmap_mapping_range(mapping, holebegin, holelen, 1);
+- inode->i_op->truncate_range(inode, lstart, lend);
+- /* unmap again to remove racily COWed private pages */
+- unmap_mapping_range(mapping, holebegin, holelen, 1);
+- mutex_unlock(&inode->i_mutex);
+-
+- return 0;
+-}
+-
+ /**
+ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+ * @inode: inode
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 912613c566cb..37c486c019fe 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -96,8 +96,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
+
+ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+ {
+- if (skb_cow(skb, skb_headroom(skb)) < 0)
++ if (skb_cow(skb, skb_headroom(skb)) < 0) {
++ kfree_skb(skb);
+ return NULL;
++ }
++
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ skb->mac_header += VLAN_HLEN;
+ return skb;
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 334d4cd7612f..79aaac288afb 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1494,8 +1494,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ goto drop;
+
+ /* Queue packet (standard) */
+- skb->sk = sock;
+-
+ if (sock_queue_rcv_skb(sock, skb) < 0)
+ goto drop;
+
+@@ -1649,7 +1647,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ if (!skb)
+ goto out;
+
+- skb->sk = sk;
+ skb_reserve(skb, ddp_dl->header_length);
+ skb_reserve(skb, dev->hard_header_len);
+ skb->dev = dev;
+diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
+index c32be292c7e3..2022b46ab38f 100644
+--- a/net/dns_resolver/dns_query.c
++++ b/net/dns_resolver/dns_query.c
+@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
+ if (!*_result)
+ goto put;
+
+- memcpy(*_result, upayload->data, len + 1);
++ memcpy(*_result, upayload->data, len);
++ (*_result)[len] = '\0';
++
+ if (_expiry)
+ *_expiry = rkey->expiry;
+
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index c8e26992742f..3f0bb3b3819d 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1866,6 +1866,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+
+ rtnl_lock();
+ in_dev = ip_mc_find_dev(net, imr);
++ if (!in_dev) {
++ ret = -ENODEV;
++ goto out;
++ }
+ ifindex = imr->imr_ifindex;
+ for (imlp = &inet->mc_list;
+ (iml = rtnl_dereference(*imlp)) != NULL;
+@@ -1883,16 +1887,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+
+ *imlp = iml->next_rcu;
+
+- if (in_dev)
+- ip_mc_dec_group(in_dev, group);
++ ip_mc_dec_group(in_dev, group);
+ rtnl_unlock();
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ kfree_rcu(iml, rcu);
+ return 0;
+ }
+- if (!in_dev)
+- ret = -ENODEV;
++out:
+ rtnl_unlock();
+ return ret;
+ }
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index b69a3700642b..523541730777 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -279,6 +279,10 @@ int ip_options_compile(struct net *net,
+ optptr++;
+ continue;
+ }
++ if (unlikely(l < 2)) {
++ pp_ptr = optptr;
++ goto error;
++ }
+ optlen = optptr[1];
+ if (optlen<2 || optlen>l) {
+ pp_ptr = optptr;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 99eb909c9d5f..2d3290496a0a 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1250,7 +1250,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+ }
+
+ /* D-SACK for already forgotten data... Do dumb counting. */
+- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
++ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
+ !after(end_seq_0, prior_snd_una) &&
+ after(end_seq_0, tp->undo_marker))
+ tp->undo_retrans--;
+@@ -1304,7 +1304,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+ unsigned int new_len = (pkt_len / mss) * mss;
+ if (!in_sack && new_len < pkt_len) {
+ new_len += mss;
+- if (new_len > skb->len)
++ if (new_len >= skb->len)
+ return 0;
+ }
+ pkt_len = new_len;
+@@ -1328,7 +1328,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
+
+ /* Account D-SACK for retransmitted packet. */
+ if (dup_sack && (sacked & TCPCB_RETRANS)) {
+- if (tp->undo_marker && tp->undo_retrans &&
++ if (tp->undo_marker && tp->undo_retrans > 0 &&
+ after(end_seq, tp->undo_marker))
+ tp->undo_retrans--;
+ if (sacked & TCPCB_SACKED_ACKED)
+@@ -2226,7 +2226,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
+ tp->lost_out = 0;
+
+ tp->undo_marker = 0;
+- tp->undo_retrans = 0;
++ tp->undo_retrans = -1;
+ }
+
+ void tcp_clear_retrans(struct tcp_sock *tp)
+@@ -3165,7 +3165,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+ tp->high_seq = tp->snd_nxt;
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = tp->snd_una;
+- tp->undo_retrans = tp->retrans_out;
++ tp->undo_retrans = tp->retrans_out ? : -1;
+
+ if (icsk->icsk_ca_state < TCP_CA_CWR) {
+ if (!(flag & FLAG_ECE))
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 987f5cc706b4..fd414b61f966 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2194,13 +2194,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+
+- tp->undo_retrans += tcp_skb_pcount(skb);
+-
+ /* snd_nxt is stored to detect loss of retransmitted segment,
+ * see tcp_input.c tcp_sacktag_write_queue().
+ */
+ TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
+ }
+
++ if (tp->undo_retrans < 0)
++ tp->undo_retrans = 0;
++ tp->undo_retrans += tcp_skb_pcount(skb);
+ return err;
+ }
+
+diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
+index 8a84017834c2..57da44707eb1 100644
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -373,9 +373,10 @@ fail:
+ * specification [SCTP] and any extensions for a list of possible
+ * error formats.
+ */
+-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+- const struct sctp_association *asoc, struct sctp_chunk *chunk,
+- __u16 flags, gfp_t gfp)
++struct sctp_ulpevent *
++sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, __u16 flags,
++ gfp_t gfp)
+ {
+ struct sctp_ulpevent *event;
+ struct sctp_remote_error *sre;
+@@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ /* Copy the skb to a new skb with room for us to prepend
+ * notification with.
+ */
+- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
+- 0, gfp);
++ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
+
+ /* Pull off the rest of the cause TLV from the chunk. */
+ skb_pull(chunk->skb, elen);
+@@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+- sre = (struct sctp_remote_error *)
+- skb_push(skb, sizeof(struct sctp_remote_error));
++ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
+
+ /* Trim the buffer to the right length. */
+- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
++ skb_trim(skb, sizeof(*sre) + elen);
+
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_type:
+- * It should be SCTP_REMOTE_ERROR.
+- */
++ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
++ memset(sre, 0, sizeof(*sre));
+ sre->sre_type = SCTP_REMOTE_ERROR;
+-
+- /*
+- * Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_flags: 16 bits (unsigned integer)
+- * Currently unused.
+- */
+ sre->sre_flags = 0;
+-
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_length: sizeof (__u32)
+- *
+- * This field is the total length of the notification data,
+- * including the notification header.
+- */
+ sre->sre_length = skb->len;
+-
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_error: 16 bits (unsigned integer)
+- * This value represents one of the Operational Error causes defined in
+- * the SCTP specification, in network byte order.
+- */
+ sre->sre_error = cause;
+-
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_assoc_id: sizeof (sctp_assoc_t)
+- *
+- * The association id field, holds the identifier for the association.
+- * All notifications for a given association have the same association
+- * identifier. For TCP style socket, this field is ignored.
+- */
+ sctp_ulpevent_set_owner(event, asoc);
+ sre->sre_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+-
+ fail:
+ return NULL;
+ }
+@@ -904,7 +863,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
+ return notification->sn_header.sn_type;
+ }
+
+-/* Copy out the sndrcvinfo into a msghdr. */
++/* RFC6458, Section 5.3.2. SCTP Header Information Structure
++ * (SCTP_SNDRCV, DEPRECATED)
++ */
+ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *msghdr)
+ {
+@@ -913,74 +874,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ if (sctp_ulpevent_is_notification(event))
+ return;
+
+- /* Sockets API Extensions for SCTP
+- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+- *
+- * sinfo_stream: 16 bits (unsigned integer)
+- *
+- * For recvmsg() the SCTP stack places the message's stream number in
+- * this value.
+- */
++ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.sinfo_stream = event->stream;
+- /* sinfo_ssn: 16 bits (unsigned integer)
+- *
+- * For recvmsg() this value contains the stream sequence number that
+- * the remote endpoint placed in the DATA chunk. For fragmented
+- * messages this is the same number for all deliveries of the message
+- * (if more than one recvmsg() is needed to read the message).
+- */
+ sinfo.sinfo_ssn = event->ssn;
+- /* sinfo_ppid: 32 bits (unsigned integer)
+- *
+- * In recvmsg() this value is
+- * the same information that was passed by the upper layer in the peer
+- * application. Please note that byte order issues are NOT accounted
+- * for and this information is passed opaquely by the SCTP stack from
+- * one end to the other.
+- */
+ sinfo.sinfo_ppid = event->ppid;
+- /* sinfo_flags: 16 bits (unsigned integer)
+- *
+- * This field may contain any of the following flags and is composed of
+- * a bitwise OR of these values.
+- *
+- * recvmsg() flags:
+- *
+- * SCTP_UNORDERED - This flag is present when the message was sent
+- * non-ordered.
+- */
+ sinfo.sinfo_flags = event->flags;
+- /* sinfo_tsn: 32 bit (unsigned integer)
+- *
+- * For the receiving side, this field holds a TSN that was
+- * assigned to one of the SCTP Data Chunks.
+- */
+ sinfo.sinfo_tsn = event->tsn;
+- /* sinfo_cumtsn: 32 bit (unsigned integer)
+- *
+- * This field will hold the current cumulative TSN as
+- * known by the underlying SCTP layer. Note this field is
+- * ignored when sending and only valid for a receive
+- * operation when sinfo_flags are set to SCTP_UNORDERED.
+- */
+ sinfo.sinfo_cumtsn = event->cumtsn;
+- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
+- *
+- * The association handle field, sinfo_assoc_id, holds the identifier
+- * for the association announced in the COMMUNICATION_UP notification.
+- * All notifications for a given association have the same identifier.
+- * Ignored for one-to-one style sockets.
+- */
+ sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
+-
+- /* context value that is set via SCTP_CONTEXT socket option. */
++ /* Context value that is set via SCTP_CONTEXT socket option. */
+ sinfo.sinfo_context = event->asoc->default_rcv_context;
+-
+ /* These fields are not used while receiving. */
+ sinfo.sinfo_timetolive = 0;
+
+ put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
+- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
++ sizeof(sinfo), &sinfo);
+ }
+
+ /* Do accounting for bytes received and hold a reference to the association
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index e00441a2092f..9495be3a61e0 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -541,6 +541,7 @@ receive:
+
+ buf = node->bclink.deferred_head;
+ node->bclink.deferred_head = buf->next;
++ buf->next = NULL;
+ node->bclink.deferred_size--;
+ goto receive;
+ }