From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Thu, 27 Apr 2017 08:18:37
Message-Id: 1493280889.236cf85200aa1e029e12bd493278b317530b96e3.alicef@gentoo
commit: 236cf85200aa1e029e12bd493278b317530b96e3
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 27 08:14:49 2017 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Apr 27 08:14:49 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=236cf852

Linux patch 4.4.64

0000_README | 4 +
1063_linux-4.4.64.patch | 1016 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1020 insertions(+)

diff --git a/0000_README b/0000_README
index f5bbb30..9ca141b 100644
--- a/0000_README
+++ b/0000_README
@@ -295,6 +295,10 @@ Patch: 1062_linux-4.4.63.patch
From: http://www.kernel.org
Desc: Linux 4.4.63

+Patch: 1063_linux-4.4.64.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.64
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1063_linux-4.4.64.patch b/1063_linux-4.4.64.patch
new file mode 100644
index 0000000..9d503a3
--- /dev/null
+++ b/1063_linux-4.4.64.patch
@@ -0,0 +1,1016 @@
+diff --git a/Makefile b/Makefile
+index ec52973043f6..17708f5dc169 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 63
++SUBLEVEL = 64
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index f91ee2f27b41..01cf10556081 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -300,6 +300,14 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+ next = kvm_pgd_addr_end(addr, end);
+ if (!pgd_none(*pgd))
+ unmap_puds(kvm, pgd, addr, next);
++ /*
++ * If we are dealing with a large range in
++ * stage2 table, release the kvm->mmu_lock
++ * to prevent starvation and lockup detector
++ * warnings.
++ */
++ if (kvm && (next != end))
++ cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+
+@@ -738,6 +746,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
+ */
+ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ {
++ assert_spin_locked(&kvm->mmu_lock);
+ unmap_range(kvm, kvm->arch.pgd, start, size);
+ }
+
+@@ -824,7 +833,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
+ if (kvm->arch.pgd == NULL)
+ return;
+
++ spin_lock(&kvm->mmu_lock);
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++ spin_unlock(&kvm->mmu_lock);
++
+ kvm_free_hwpgd(kvm_get_hwpgd(kvm));
+ if (KVM_PREALLOC_LEVEL > 0)
+ kfree(kvm->arch.pgd);
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index edba294620db..f6fd0332c3a2 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -716,7 +716,7 @@ resume_kernel:
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+- lwz r3,GPR1(r1)
++ ld r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+@@ -730,8 +730,8 @@ resume_kernel:
+ addi r6,r6,8
+ bdnz 2b
+
+- /* Do real store operation to complete stwu */
+- lwz r5,GPR1(r1)
++ /* Do real store operation to complete stdu */
++ ld r5,GPR1(r1)
+ std r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 024f85f947ae..e2c0e4eab037 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -829,6 +829,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ {
+ pgste_t pgste;
+
++ if (pte_present(entry))
++ pte_val(entry) &= ~_PAGE_UNUSED;
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
+diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
+index d8ce3ec816ab..bd8ce6bcdfc9 100644
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -72,8 +72,8 @@ static inline void arch_wmb_pmem(void)
+ * @size: number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+- * instruction. This function requires explicit ordering with an
+- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
++ * instruction. Note that @size is internally rounded up to be cache
++ * line size aligned.
+ */
+ static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+ {
+@@ -87,15 +87,6 @@ static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+ clwb(p);
+ }
+
+-/*
+- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+- */
+-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+-{
+- return iter_is_iovec(i) == false;
+-}
+-
+ /**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+@@ -114,8 +105,36 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ /* TODO: skip the write-back by always using non-temporal stores */
+ len = copy_from_iter_nocache(vaddr, bytes, i);
+
+- if (__iter_needs_pmem_wb(i))
+- __arch_wb_cache_pmem(vaddr, bytes);
++ /*
++ * In the iovec case on x86_64 copy_from_iter_nocache() uses
++ * non-temporal stores for the bulk of the transfer, but we need
++ * to manually flush if the transfer is unaligned. A cached
++ * memory copy is used when destination or size is not naturally
++ * aligned. That is:
++ * - Require 8-byte alignment when size is 8 bytes or larger.
++ * - Require 4-byte alignment when size is 4 bytes.
++ *
++ * In the non-iovec case the entire destination needs to be
++ * flushed.
++ */
++ if (iter_is_iovec(i)) {
++ unsigned long flushed, dest = (unsigned long) addr;
++
++ if (bytes < 8) {
++ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
++ __arch_wb_cache_pmem(addr, 1);
++ } else {
++ if (!IS_ALIGNED(dest, 8)) {
++ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
++ __arch_wb_cache_pmem(addr, 1);
++ }
++
++ flushed = dest - (unsigned long) addr;
++ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
++ __arch_wb_cache_pmem(addr + bytes - 1, 1);
++ }
++ } else
++ __arch_wb_cache_pmem(addr, bytes);
+
+ return len;
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index e99b15077e94..62aca448726a 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -53,7 +53,7 @@ static const char * const th_names[] = {
+ "load_store",
+ "insn_fetch",
+ "combined_unit",
+- "",
++ "decode_unit",
+ "northbridge",
+ "execution_unit",
+ };
+diff --git a/block/genhd.c b/block/genhd.c
+index a5bed6bc869d..3032453a89e6 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -664,7 +664,6 @@ void del_gendisk(struct gendisk *disk)
+
+ kobject_put(disk->part0.holder_dir);
+ kobject_put(disk->slave_dir);
+- disk->driverfs_dev = NULL;
+ if (!sysfs_deprecated)
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
+diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
+index fcd4ce6f78d5..1c2b846c5776 100644
+--- a/drivers/acpi/power.c
++++ b/drivers/acpi/power.c
+@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
+ return -EINVAL;
+
+ /* The state of the list is 'on' IFF all resources are 'on'. */
++ cur_state = 0;
+ list_for_each_entry(entry, list, node) {
+ struct acpi_power_resource *resource = entry->resource;
+ acpi_handle handle = resource->device.handle;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 1ef37c727572..d037454fe7b8 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -73,7 +73,6 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ void *in, *out;
+ unsigned long flags;
+ int ret, err = 0;
+- unsigned long t;
+ struct page *page;
+
+ spin_lock_irqsave(&newchannel->lock, flags);
+@@ -183,11 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ goto error1;
+ }
+
+- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+- if (t == 0) {
+- err = -ETIMEDOUT;
+- goto error1;
+- }
++ wait_for_completion(&open_info->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+@@ -375,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ struct vmbus_channel_gpadl_header *gpadlmsg;
+ struct vmbus_channel_gpadl_body *gpadl_body;
+ struct vmbus_channel_msginfo *msginfo = NULL;
+- struct vmbus_channel_msginfo *submsginfo;
++ struct vmbus_channel_msginfo *submsginfo, *tmp;
+ u32 msgcount;
+ struct list_head *curr;
+ u32 next_gpadl_handle;
+@@ -437,6 +432,13 @@ cleanup:
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
++ if (msgcount > 1) {
++ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
++ msglistentry) {
++ kfree(submsginfo);
++ }
++ }
++
+ kfree(msginfo);
+ return ret;
+ }
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 4fc2e8836e60..2bbc53025549 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -429,7 +429,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ union hv_connection_id conn_id;
+ int ret = 0;
+ int retries = 0;
+- u32 msec = 1;
++ u32 usec = 1;
+
+ conn_id.asu32 = 0;
+ conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
+@@ -462,9 +462,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ }
+
+ retries++;
+- msleep(msec);
+- if (msec < 2048)
+- msec *= 2;
++ udelay(usec);
++ if (usec < 2048)
++ usec *= 2;
+ }
+ return ret;
+ }
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index ddbf7e7e0d98..8ce1f2e22912 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -305,9 +305,10 @@ void hv_cleanup(bool crash)
+
+ hypercall_msr.as_uint64 = 0;
+ wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
+- if (!crash)
++ if (!crash) {
+ vfree(hv_context.tsc_page);
+- hv_context.tsc_page = NULL;
++ hv_context.tsc_page = NULL;
++ }
+ }
+ #endif
+ }
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 43af91362be5..354da7f207b7 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -430,16 +430,27 @@ struct dm_info_msg {
+ * currently hot added. We hot add in multiples of 128M
+ * chunks; it is possible that we may not be able to bring
+ * online all the pages in the region. The range
+- * covered_end_pfn defines the pages that can
++ * covered_start_pfn:covered_end_pfn defines the pages that can
+ * be brough online.
+ */
+
+ struct hv_hotadd_state {
+ struct list_head list;
+ unsigned long start_pfn;
++ unsigned long covered_start_pfn;
+ unsigned long covered_end_pfn;
+ unsigned long ha_end_pfn;
+ unsigned long end_pfn;
++ /*
++ * A list of gaps.
++ */
++ struct list_head gap_list;
++};
++
++struct hv_hotadd_gap {
++ struct list_head list;
++ unsigned long start_pfn;
++ unsigned long end_pfn;
+ };
+
+ struct balloon_state {
+@@ -595,18 +606,46 @@ static struct notifier_block hv_memory_nb = {
+ .priority = 0
+ };
+
++/* Check if the particular page is backed and can be onlined and online it. */
++static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
++{
++ unsigned long cur_start_pgp;
++ unsigned long cur_end_pgp;
++ struct hv_hotadd_gap *gap;
++
++ cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
++ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
++
++ /* The page is not backed. */
++ if (((unsigned long)pg < cur_start_pgp) ||
++ ((unsigned long)pg >= cur_end_pgp))
++ return;
++
++ /* Check for gaps. */
++ list_for_each_entry(gap, &has->gap_list, list) {
++ cur_start_pgp = (unsigned long)
++ pfn_to_page(gap->start_pfn);
++ cur_end_pgp = (unsigned long)
++ pfn_to_page(gap->end_pfn);
++ if (((unsigned long)pg >= cur_start_pgp) &&
++ ((unsigned long)pg < cur_end_pgp)) {
++ return;
++ }
++ }
+
+-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
++ /* This frame is currently backed; online the page. */
++ __online_page_set_limits(pg);
++ __online_page_increment_counters(pg);
++ __online_page_free(pg);
++}
++
++static void hv_bring_pgs_online(struct hv_hotadd_state *has,
++ unsigned long start_pfn, unsigned long size)
+ {
+ int i;
+
+- for (i = 0; i < size; i++) {
+- struct page *pg;
+- pg = pfn_to_page(start_pfn + i);
+- __online_page_set_limits(pg);
+- __online_page_increment_counters(pg);
+- __online_page_free(pg);
+- }
++ for (i = 0; i < size; i++)
++ hv_page_online_one(has, pfn_to_page(start_pfn + i));
+ }
+
+ static void hv_mem_hot_add(unsigned long start, unsigned long size,
+@@ -682,26 +721,25 @@ static void hv_online_page(struct page *pg)
+
+ list_for_each(cur, &dm_device.ha_region_list) {
+ has = list_entry(cur, struct hv_hotadd_state, list);
+- cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
+- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
++ cur_start_pgp = (unsigned long)
++ pfn_to_page(has->start_pfn);
++ cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
+
+- if (((unsigned long)pg >= cur_start_pgp) &&
+- ((unsigned long)pg < cur_end_pgp)) {
+- /*
+- * This frame is currently backed; online the
+- * page.
+- */
+- __online_page_set_limits(pg);
+- __online_page_increment_counters(pg);
+- __online_page_free(pg);
+- }
++ /* The page belongs to a different HAS. */
++ if (((unsigned long)pg < cur_start_pgp) ||
++ ((unsigned long)pg >= cur_end_pgp))
++ continue;
++
++ hv_page_online_one(has, pg);
++ break;
+ }
+ }
+
+-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
++static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+ {
+ struct list_head *cur;
+ struct hv_hotadd_state *has;
++ struct hv_hotadd_gap *gap;
+ unsigned long residual, new_inc;
+
+ if (list_empty(&dm_device.ha_region_list))
+@@ -716,6 +754,24 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+ */
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ continue;
++
++ /*
++ * If the current start pfn is not where the covered_end
++ * is, create a gap and update covered_end_pfn.
++ */
++ if (has->covered_end_pfn != start_pfn) {
++ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
++ if (!gap)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&gap->list);
++ gap->start_pfn = has->covered_end_pfn;
++ gap->end_pfn = start_pfn;
++ list_add_tail(&gap->list, &has->gap_list);
++
++ has->covered_end_pfn = start_pfn;
++ }
++
+ /*
+ * If the current hot add-request extends beyond
+ * our current limit; extend it.
+@@ -732,19 +788,10 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+ has->end_pfn += new_inc;
+ }
+
+- /*
+- * If the current start pfn is not where the covered_end
+- * is, update it.
+- */
+-
+- if (has->covered_end_pfn != start_pfn)
+- has->covered_end_pfn = start_pfn;
+-
+- return true;
+-
++ return 1;
+ }
+
+- return false;
++ return 0;
+ }
+
+ static unsigned long handle_pg_range(unsigned long pg_start,
+@@ -783,6 +830,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
+ if (pgs_ol > pfn_cnt)
+ pgs_ol = pfn_cnt;
+
++ has->covered_end_pfn += pgs_ol;
++ pfn_cnt -= pgs_ol;
+ /*
+ * Check if the corresponding memory block is already
+ * online by checking its last previously backed page.
+ *
+ */
+ if (start_pfn > has->start_pfn &&
+ !PageReserved(pfn_to_page(start_pfn - 1)))
+- hv_bring_pgs_online(start_pfn, pgs_ol);
++ hv_bring_pgs_online(has, start_pfn, pgs_ol);
+
+- has->covered_end_pfn += pgs_ol;
+- pfn_cnt -= pgs_ol;
+ }
+
+ if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
+@@ -832,13 +879,19 @@ static unsigned long process_hot_add(unsigned long pg_start,
+ unsigned long rg_size)
+ {
+ struct hv_hotadd_state *ha_region = NULL;
++ int covered;
+
+ if (pfn_cnt == 0)
+ return 0;
+
+- if (!dm_device.host_specified_ha_region)
+- if (pfn_covered(pg_start, pfn_cnt))
++ if (!dm_device.host_specified_ha_region) {
++ covered = pfn_covered(pg_start, pfn_cnt);
++ if (covered < 0)
++ return 0;
++
++ if (covered)
+ goto do_pg_range;
++ }
+
+ /*
+ * If the host has specified a hot-add range; deal with it first.
+@@ -850,10 +903,12 @@ static unsigned long process_hot_add(unsigned long pg_start,
+ return 0;
+
+ INIT_LIST_HEAD(&ha_region->list);
++ INIT_LIST_HEAD(&ha_region->gap_list);
+
+ list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+ ha_region->start_pfn = rg_start;
+ ha_region->ha_end_pfn = rg_start;
++ ha_region->covered_start_pfn = pg_start;
+ ha_region->covered_end_pfn = pg_start;
+ ha_region->end_pfn = rg_start + rg_size;
+ }
+@@ -1581,6 +1636,7 @@ static int balloon_remove(struct hv_device *dev)
+ struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+ struct list_head *cur, *tmp;
+ struct hv_hotadd_state *has;
++ struct hv_hotadd_gap *gap, *tmp_gap;
+
+ if (dm->num_pages_ballooned != 0)
+ pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+@@ -1597,6 +1653,10 @@ static int balloon_remove(struct hv_device *dev)
+ #endif
+ list_for_each_safe(cur, tmp, &dm->ha_region_list) {
+ has = list_entry(cur, struct hv_hotadd_state, list);
++ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
++ list_del(&gap->list);
++ kfree(gap);
++ }
+ list_del(&has->list);
+ kfree(has);
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 43482ae1e049..1a2b2620421e 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1122,6 +1122,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
++ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
+ * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
+@@ -1528,6 +1529,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ },
+ },
+ {
++ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
++ },
++ },
++ {
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 1f1582f6cccb..8d838779fd1b 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -804,6 +804,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
+
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR50:
++ case MMC_TIMING_UHS_DDR50:
+ pinctrl = imx_data->pins_100mhz;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index 0134ba32a057..39712560b4c1 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ return err;
+ }
+
+- if (bytes == 0) {
+- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+- if (err)
+- return err;
++ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
++ if (err)
++ return err;
+
++ if (bytes == 0) {
+ err = clear_update_marker(ubi, vol, 0);
+ if (err)
+ return err;
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index b76883606e4b..94906aaa9b7c 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -906,7 +906,6 @@ struct cifs_tcon {
+ bool use_persistent:1; /* use persistent instead of durable handles */
+ #ifdef CONFIG_CIFS_SMB2
+ bool print:1; /* set if connection to printer share */
+- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
+ __le32 capabilities;
+ __u32 share_flags;
+ __u32 maximal_access;
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index fc537c29044e..87b87e091e8e 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
+ return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
+ }
+
++static bool
++cifs_can_echo(struct TCP_Server_Info *server)
++{
++ if (server->tcpStatus == CifsGood)
++ return true;
++
++ return false;
++}
++
+ struct smb_version_operations smb1_operations = {
+ .send_cancel = send_nt_cancel,
+ .compare_fids = cifs_compare_fids,
+@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
+ .get_dfs_refer = CIFSGetDFSRefer,
+ .qfs_tcon = cifs_qfs_tcon,
+ .is_path_accessible = cifs_is_path_accessible,
++ .can_echo = cifs_can_echo,
+ .query_path_info = cifs_query_path_info,
+ .query_file_info = cifs_query_file_info,
+ .get_srv_inum = cifs_get_srv_inum,
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 6cb5c4b30e78..6cb2603f8a5c 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -932,9 +932,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ else
+ return -EIO;
+
+- if (tcon && tcon->bad_network_name)
+- return -ENOENT;
+-
+ if ((tcon && tcon->seal) &&
+ ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
+ cifs_dbg(VFS, "encryption requested but no server support");
+@@ -1036,8 +1033,6 @@ tcon_exit:
+ tcon_error_exit:
+ if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+- if (tcon)
+- tcon->bad_network_name = true;
+ }
+ goto tcon_exit;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 7d7f99b0db47..1275175b0946 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3440,11 +3440,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
++ struct buffer_page *reader;
++ struct buffer_page *head_page;
++ struct buffer_page *commit_page;
++ unsigned commit;
+
+ cpu_buffer = iter->cpu_buffer;
+
+- return iter->head_page == cpu_buffer->commit_page &&
+- iter->head == rb_commit_index(cpu_buffer);
++ /* Remember, trace recording is off when iterator is in use */
++ reader = cpu_buffer->reader_page;
++ head_page = cpu_buffer->head_page;
++ commit_page = cpu_buffer->commit_page;
++ commit = rb_page_commit(commit_page);
++
++ return ((iter->head_page == commit_page && iter->head == commit) ||
++ (iter->head_page == reader && commit_page == head_page &&
++ head_page->read == commit &&
++ iter->head == rb_page_commit(cpu_buffer->reader_page)));
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 059233abcfcf..4c21c0b7dc91 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6060,11 +6060,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+ return ret;
+
+ out_reg:
+- ret = register_ftrace_function_probe(glob, ops, count);
++ ret = alloc_snapshot(&global_trace);
++ if (ret < 0)
++ goto out;
+
+- if (ret >= 0)
+- alloc_snapshot(&global_trace);
++ ret = register_ftrace_function_probe(glob, ops, count);
+
++ out:
+ return ret < 0 ? ret : 0;
+ }
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 2b528389409f..9f0915f72702 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3396,6 +3396,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ !ether_addr_equal(bssid, hdr->addr1))
+ return false;
+ }
++
++ /*
++ * 802.11-2016 Table 9-26 says that for data frames, A1 must be
++ * the BSSID - we've checked that already but may have accepted
++ * the wildcard (ff:ff:ff:ff:ff:ff).
++ *
++ * It also says:
++ * The BSSID of the Data frame is determined as follows:
++ * a) If the STA is contained within an AP or is associated
++ * with an AP, the BSSID is the address currently in use
++ * by the STA contained in the AP.
++ *
++ * So we should not accept data frames with an address that's
++ * multicast.
++ *
++ * Accepting it also opens a security problem because stations
++ * could encrypt it with the GTK and inject traffic that way.
++ */
++ if (ieee80211_is_data(hdr->frame_control) && multicast)
++ return false;
++
+ return true;
+ case NL80211_IFTYPE_WDS:
+ if (bssid || !ieee80211_is_data(hdr->frame_control))
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 3926b561f873..d468aad6163e 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -102,9 +102,10 @@ static unsigned int tipc_hashfn(u32 addr)
+
+ static void tipc_node_kref_release(struct kref *kref)
+ {
+- struct tipc_node *node = container_of(kref, struct tipc_node, kref);
++ struct tipc_node *n = container_of(kref, struct tipc_node, kref);
+
+- tipc_node_delete(node);
++ kfree(n->bc_entry.link);
++ kfree_rcu(n, rcu);
+ }
+
+ void tipc_node_put(struct tipc_node *node)
+@@ -216,21 +217,20 @@ static void tipc_node_delete(struct tipc_node *node)
+ {
+ list_del_rcu(&node->list);
+ hlist_del_rcu(&node->hash);
+- kfree(node->bc_entry.link);
+- kfree_rcu(node, rcu);
++ tipc_node_put(node);
++
++ del_timer_sync(&node->timer);
++ tipc_node_put(node);
+ }
+
+ void tipc_node_stop(struct net *net)
+ {
+- struct tipc_net *tn = net_generic(net, tipc_net_id);
++ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *node, *t_node;
+
+ spin_lock_bh(&tn->node_list_lock);
+- list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+- if (del_timer(&node->timer))
+- tipc_node_put(node);
+- tipc_node_put(node);
+- }
++ list_for_each_entry_safe(node, t_node, &tn->node_list, list)
++ tipc_node_delete(node);
+ spin_unlock_bh(&tn->node_list_lock);
+ }
+
+@@ -313,9 +313,7 @@ static void tipc_node_timeout(unsigned long data)
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_link_down(n, bearer_id, false);
+ }
+- if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+- tipc_node_get(n);
+- tipc_node_put(n);
++ mod_timer(&n->timer, jiffies + n->keepalive_intv);
+ }
+
+ /**
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 0a369bb440e7..662bdd20a748 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
+ * qp_handle.
+ */
+ if (vmci_handle_is_invalid(e_payload->handle) ||
+- vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
++ !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ return;
+
+ /* We don't ask for delayed CBs when we subscribe to this event (we
+@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit);
+
+ MODULE_AUTHOR("VMware, Inc.");
+ MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+-MODULE_VERSION("1.0.2.0-k");
++MODULE_VERSION("1.0.3.0-k");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("vmware_vsock");
+ MODULE_ALIAS_NETPROTO(PF_VSOCK);
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index addf060399e0..9cb4fe4478a1 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
+ * immediately unlinked.
+ */
+ struct key_type key_type_dead = {
+- .name = "dead",
++ .name = ".dead",
+ };
+
+ /*
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 1c3872aeed14..442e350c209d 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -271,7 +271,8 @@ error:
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary. A named session keyring must have Search
+ * permission for it to be joined. Session keyrings without this permit will
+- * be skipped over.
++ * be skipped over. It is not permitted for userspace to create or join
++ * keyrings whose name begin with a dot.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
+ */
+@@ -288,12 +289,16 @@ long keyctl_join_session_keyring(const char __user *_name)
+ ret = PTR_ERR(name);
+ goto error;
+ }
++
++ ret = -EPERM;
++ if (name[0] == '.')
++ goto error_name;
+ }
+
+ /* join the session */
+ ret = join_session_keyring(name);
++error_name:
+ kfree(name);
+-
+ error:
+ return ret;
+ }
+@@ -1223,8 +1228,8 @@ error:
+ * Read or set the default keyring in which request_key() will cache keys and
+ * return the old setting.
+ *
+- * If a process keyring is specified then this will be created if it doesn't
+- * yet exist. The old setting will be returned if successful.
++ * If a thread or process keyring is specified then it will be created if it
++ * doesn't yet exist. The old setting will be returned if successful.
+ */
+ long keyctl_set_reqkey_keyring(int reqkey_defl)
+ {
+@@ -1249,11 +1254,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
+
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ ret = install_process_keyring_to_cred(new);
+- if (ret < 0) {
+- if (ret != -EEXIST)
+- goto error;
+- ret = 0;
+- }
++ if (ret < 0)
++ goto error;
+ goto set;
+
+ case KEY_REQKEY_DEFL_DEFAULT:
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index e6d50172872f..4ed909142956 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -125,13 +125,18 @@ error:
+ }
+
+ /*
+- * Install a fresh thread keyring directly to new credentials. This keyring is
+- * allowed to overrun the quota.
++ * Install a thread keyring to the given credentials struct if it didn't have
++ * one already. This is allowed to overrun the quota.
++ *
++ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+ int install_thread_keyring_to_cred(struct cred *new)
+ {
+ struct key *keyring;
+
++ if (new->thread_keyring)
++ return 0;
++
+ keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
+@@ -143,7 +148,9 @@ int install_thread_keyring_to_cred(struct cred *new)
+ }
+
+ /*
+- * Install a fresh thread keyring, discarding the old one.
++ * Install a thread keyring to the current task if it didn't have one already.
++ *
++ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+ static int install_thread_keyring(void)
+ {
+@@ -154,8 +161,6 @@ static int install_thread_keyring(void)
+ if (!new)
+ return -ENOMEM;
+
+- BUG_ON(new->thread_keyring);
+-
+ ret = install_thread_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+@@ -166,17 +171,17 @@ static int install_thread_keyring(void)
+ }
+
+ /*
+- * Install a process keyring directly to a credentials struct.
++ * Install a process keyring to the given credentials struct if it didn't have
++ * one already. This is allowed to overrun the quota.
+ *
+- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
+- * and other value on any other error
++ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+ int install_process_keyring_to_cred(struct cred *new)
+ {
+ struct key *keyring;
+
+ if (new->process_keyring)
+- return -EEXIST;
++ return 0;
+
+ keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+@@ -189,11 +194,9 @@ int install_process_keyring_to_cred(struct cred *new)
+ }
+
+ /*
+- * Make sure a process keyring is installed for the current process. The
+- * existing process keyring is not replaced.
++ * Install a process keyring to the current task if it didn't have one already.
+ *
+- * Returns 0 if there is a process keyring by the end of this function, some
+- * error otherwise.
++ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+ static int install_process_keyring(void)
+ {
+@@ -207,14 +210,18 @@ static int install_process_keyring(void)
+ ret = install_process_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+- return ret != -EEXIST ? ret : 0;
++ return ret;
+ }
+
+ return commit_creds(new);
+ }
+
+ /*
+- * Install a session keyring directly to a credentials struct.
++ * Install the given keyring as the session keyring of the given credentials
++ * struct, replacing the existing one if any. If the given keyring is NULL,
++ * then install a new anonymous session keyring.
++ *
++ * Return: 0 on success; -errno on failure.
+ */
+ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+ {
+@@ -249,8 +256,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+ }
+
+ /*
+- * Install a session keyring, discarding the old one. If a keyring is not
+- * supplied, an empty one is invented.
++ * Install the given keyring as the session keyring of the current task,
++ * replacing the existing one if any. If the given keyring is NULL, then
++ * install a new anonymous session keyring.
++ *
++ * Return: 0 on success; -errno on failure.
+ */
+ static int install_session_keyring(struct key *keyring)
+ {
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 0d9f48ec42bb..bc7adb84e679 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -1433,7 +1433,7 @@ int main(int argc, char *argv[])
+ openlog("KVP", 0, LOG_USER);
+ syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
+
+- kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR);
++ kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC);
+
+ if (kvp_fd < 0) {
+ syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",