From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Fri, 17 Aug 2018 19:28:35
Message-Id: 1534534100.1faffd1a486263e356b11211cfa05b09ce97eae4.mpagano@gentoo
1 commit: 1faffd1a486263e356b11211cfa05b09ce97eae4
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Aug 17 19:28:20 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Aug 17 19:28:20 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1faffd1a
7
8 Linux patch 4.18.2
9
10 0000_README | 4 +
11 1001_linux-4.18.2.patch | 1679 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1683 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index ad4a3ed..c801597 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -47,6 +47,10 @@ Patch: 1000_linux-4.18.1.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.18.1
21
22 +Patch: 1001_linux-4.18.2.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.18.2
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1001_linux-4.18.2.patch b/1001_linux-4.18.2.patch
31 new file mode 100644
32 index 0000000..1853255
33 --- /dev/null
34 +++ b/1001_linux-4.18.2.patch
35 @@ -0,0 +1,1679 @@
36 +diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
37 +index ddc029734b25..005d8842a503 100644
38 +--- a/Documentation/process/changes.rst
39 ++++ b/Documentation/process/changes.rst
40 +@@ -35,7 +35,7 @@ binutils 2.20 ld -v
41 + flex 2.5.35 flex --version
42 + bison 2.0 bison --version
43 + util-linux 2.10o fdformat --version
44 +-module-init-tools 0.9.10 depmod -V
45 ++kmod 13 depmod -V
46 + e2fsprogs 1.41.4 e2fsck -V
47 + jfsutils 1.1.3 fsck.jfs -V
48 + reiserfsprogs 3.6.3 reiserfsck -V
49 +@@ -156,12 +156,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
50 + reproduce the Oops with that option, then you can still decode that Oops
51 + with ksymoops.
52 +
53 +-Module-Init-Tools
54 +------------------
55 +-
56 +-A new module loader is now in the kernel that requires ``module-init-tools``
57 +-to use. It is backward compatible with the 2.4.x series kernels.
58 +-
59 + Mkinitrd
60 + --------
61 +
62 +@@ -371,16 +365,17 @@ Util-linux
63 +
64 + - <https://www.kernel.org/pub/linux/utils/util-linux/>
65 +
66 ++Kmod
67 ++----
68 ++
69 ++- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
70 ++- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
71 ++
72 + Ksymoops
73 + --------
74 +
75 + - <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
76 +
77 +-Module-Init-Tools
78 +------------------
79 +-
80 +-- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
81 +-
82 + Mkinitrd
83 + --------
84 +
85 +diff --git a/Makefile b/Makefile
86 +index 5edf963148e8..fd409a0fd4e1 100644
87 +--- a/Makefile
88 ++++ b/Makefile
89 +@@ -1,7 +1,7 @@
90 + # SPDX-License-Identifier: GPL-2.0
91 + VERSION = 4
92 + PATCHLEVEL = 18
93 +-SUBLEVEL = 1
94 ++SUBLEVEL = 2
95 + EXTRAVERSION =
96 + NAME = Merciless Moray
97 +
98 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
99 +index 493ff75670ff..8ae5d7ae4af3 100644
100 +--- a/arch/arm64/mm/mmu.c
101 ++++ b/arch/arm64/mm/mmu.c
102 +@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
103 + return 1;
104 + }
105 +
106 +-int pud_free_pmd_page(pud_t *pud)
107 ++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
108 + {
109 + return pud_none(*pud);
110 + }
111 +
112 +-int pmd_free_pte_page(pmd_t *pmd)
113 ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
114 + {
115 + return pmd_none(*pmd);
116 + }
117 +diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
118 +index 16c4ccb1f154..d2364c55bbde 100644
119 +--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
120 ++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
121 +@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
122 + vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
123 + vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
124 + vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
125 +- vmovd _args_digest(state , idx, 4) , %xmm0
126 ++ vmovd _args_digest+4*32(state, idx, 4), %xmm1
127 + vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
128 + vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
129 + vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
130 +diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
131 +index de27615c51ea..0c662cb6a723 100644
132 +--- a/arch/x86/hyperv/mmu.c
133 ++++ b/arch/x86/hyperv/mmu.c
134 +@@ -95,6 +95,11 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
135 + } else {
136 + for_each_cpu(cpu, cpus) {
137 + vcpu = hv_cpu_number_to_vp_number(cpu);
138 ++ if (vcpu == VP_INVAL) {
139 ++ local_irq_restore(flags);
140 ++ goto do_native;
141 ++ }
142 ++
143 + if (vcpu >= 64)
144 + goto do_native;
145 +
146 +diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
147 +index 5cdcdbd4d892..89789e8c80f6 100644
148 +--- a/arch/x86/include/asm/i8259.h
149 ++++ b/arch/x86/include/asm/i8259.h
150 +@@ -3,6 +3,7 @@
151 + #define _ASM_X86_I8259_H
152 +
153 + #include <linux/delay.h>
154 ++#include <asm/io.h>
155 +
156 + extern unsigned int cached_irq_mask;
157 +
158 +diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
159 +index d492752f79e1..391f358ebb4c 100644
160 +--- a/arch/x86/kernel/apic/x2apic_uv_x.c
161 ++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
162 +@@ -394,10 +394,10 @@ extern int uv_hub_info_version(void)
163 + EXPORT_SYMBOL(uv_hub_info_version);
164 +
165 + /* Default UV memory block size is 2GB */
166 +-static unsigned long mem_block_size = (2UL << 30);
167 ++static unsigned long mem_block_size __initdata = (2UL << 30);
168 +
169 + /* Kernel parameter to specify UV mem block size */
170 +-static int parse_mem_block_size(char *ptr)
171 ++static int __init parse_mem_block_size(char *ptr)
172 + {
173 + unsigned long size = memparse(ptr, NULL);
174 +
175 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
176 +index c4f0ae49a53d..664f161f96ff 100644
177 +--- a/arch/x86/kernel/cpu/bugs.c
178 ++++ b/arch/x86/kernel/cpu/bugs.c
179 +@@ -648,10 +648,9 @@ void x86_spec_ctrl_setup_ap(void)
180 + enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
181 + #if IS_ENABLED(CONFIG_KVM_INTEL)
182 + EXPORT_SYMBOL_GPL(l1tf_mitigation);
183 +-
184 ++#endif
185 + enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
186 + EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
187 +-#endif
188 +
189 + static void __init l1tf_select_mitigation(void)
190 + {
191 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
192 +index 9eda6f730ec4..b41b72bd8bb8 100644
193 +--- a/arch/x86/kernel/cpu/common.c
194 ++++ b/arch/x86/kernel/cpu/common.c
195 +@@ -905,7 +905,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
196 + apply_forced_caps(c);
197 + }
198 +
199 +-static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
200 ++void get_cpu_address_sizes(struct cpuinfo_x86 *c)
201 + {
202 + u32 eax, ebx, ecx, edx;
203 +
204 +diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
205 +index e59c0ea82a33..7b229afa0a37 100644
206 +--- a/arch/x86/kernel/cpu/cpu.h
207 ++++ b/arch/x86/kernel/cpu/cpu.h
208 +@@ -46,6 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
209 + *const __x86_cpu_dev_end[];
210 +
211 + extern void get_cpu_cap(struct cpuinfo_x86 *c);
212 ++extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
213 + extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
214 + extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
215 + extern u32 get_scattered_cpuid_leaf(unsigned int level,
216 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
217 +index 7bb6f65c79de..29505724202a 100644
218 +--- a/arch/x86/mm/pageattr.c
219 ++++ b/arch/x86/mm/pageattr.c
220 +@@ -1784,6 +1784,12 @@ int set_memory_nonglobal(unsigned long addr, int numpages)
221 + __pgprot(_PAGE_GLOBAL), 0);
222 + }
223 +
224 ++int set_memory_global(unsigned long addr, int numpages)
225 ++{
226 ++ return change_page_attr_set(&addr, numpages,
227 ++ __pgprot(_PAGE_GLOBAL), 0);
228 ++}
229 ++
230 + static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
231 + {
232 + struct cpa_data cpa;
233 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
234 +index 47b5951e592b..e3deefb891da 100644
235 +--- a/arch/x86/mm/pgtable.c
236 ++++ b/arch/x86/mm/pgtable.c
237 +@@ -719,28 +719,50 @@ int pmd_clear_huge(pmd_t *pmd)
238 + return 0;
239 + }
240 +
241 ++#ifdef CONFIG_X86_64
242 + /**
243 + * pud_free_pmd_page - Clear pud entry and free pmd page.
244 + * @pud: Pointer to a PUD.
245 ++ * @addr: Virtual address associated with pud.
246 + *
247 +- * Context: The pud range has been unmaped and TLB purged.
248 ++ * Context: The pud range has been unmapped and TLB purged.
249 + * Return: 1 if clearing the entry succeeded. 0 otherwise.
250 ++ *
251 ++ * NOTE: Callers must allow a single page allocation.
252 + */
253 +-int pud_free_pmd_page(pud_t *pud)
254 ++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
255 + {
256 +- pmd_t *pmd;
257 ++ pmd_t *pmd, *pmd_sv;
258 ++ pte_t *pte;
259 + int i;
260 +
261 + if (pud_none(*pud))
262 + return 1;
263 +
264 + pmd = (pmd_t *)pud_page_vaddr(*pud);
265 ++ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
266 ++ if (!pmd_sv)
267 ++ return 0;
268 +
269 +- for (i = 0; i < PTRS_PER_PMD; i++)
270 +- if (!pmd_free_pte_page(&pmd[i]))
271 +- return 0;
272 ++ for (i = 0; i < PTRS_PER_PMD; i++) {
273 ++ pmd_sv[i] = pmd[i];
274 ++ if (!pmd_none(pmd[i]))
275 ++ pmd_clear(&pmd[i]);
276 ++ }
277 +
278 + pud_clear(pud);
279 ++
280 ++ /* INVLPG to clear all paging-structure caches */
281 ++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
282 ++
283 ++ for (i = 0; i < PTRS_PER_PMD; i++) {
284 ++ if (!pmd_none(pmd_sv[i])) {
285 ++ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
286 ++ free_page((unsigned long)pte);
287 ++ }
288 ++ }
289 ++
290 ++ free_page((unsigned long)pmd_sv);
291 + free_page((unsigned long)pmd);
292 +
293 + return 1;
294 +@@ -749,11 +771,12 @@ int pud_free_pmd_page(pud_t *pud)
295 + /**
296 + * pmd_free_pte_page - Clear pmd entry and free pte page.
297 + * @pmd: Pointer to a PMD.
298 ++ * @addr: Virtual address associated with pmd.
299 + *
300 +- * Context: The pmd range has been unmaped and TLB purged.
301 ++ * Context: The pmd range has been unmapped and TLB purged.
302 + * Return: 1 if clearing the entry succeeded. 0 otherwise.
303 + */
304 +-int pmd_free_pte_page(pmd_t *pmd)
305 ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
306 + {
307 + pte_t *pte;
308 +
309 +@@ -762,8 +785,30 @@ int pmd_free_pte_page(pmd_t *pmd)
310 +
311 + pte = (pte_t *)pmd_page_vaddr(*pmd);
312 + pmd_clear(pmd);
313 ++
314 ++ /* INVLPG to clear all paging-structure caches */
315 ++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
316 ++
317 + free_page((unsigned long)pte);
318 +
319 + return 1;
320 + }
321 ++
322 ++#else /* !CONFIG_X86_64 */
323 ++
324 ++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
325 ++{
326 ++ return pud_none(*pud);
327 ++}
328 ++
329 ++/*
330 ++ * Disable free page handling on x86-PAE. This assures that ioremap()
331 ++ * does not update sync'd pmd entries. See vmalloc_sync_one().
332 ++ */
333 ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
334 ++{
335 ++ return pmd_none(*pmd);
336 ++}
337 ++
338 ++#endif /* CONFIG_X86_64 */
339 + #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
340 +diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
341 +index fb752d9a3ce9..946455e9cfef 100644
342 +--- a/arch/x86/mm/pti.c
343 ++++ b/arch/x86/mm/pti.c
344 +@@ -435,6 +435,13 @@ static inline bool pti_kernel_image_global_ok(void)
345 + return true;
346 + }
347 +
348 ++/*
349 ++ * This is the only user for these and it is not arch-generic
350 ++ * like the other set_memory.h functions. Just extern them.
351 ++ */
352 ++extern int set_memory_nonglobal(unsigned long addr, int numpages);
353 ++extern int set_memory_global(unsigned long addr, int numpages);
354 ++
355 + /*
356 + * For some configurations, map all of kernel text into the user page
357 + * tables. This reduces TLB misses, especially on non-PCID systems.
358 +@@ -447,7 +454,8 @@ void pti_clone_kernel_text(void)
359 + * clone the areas past rodata, they might contain secrets.
360 + */
361 + unsigned long start = PFN_ALIGN(_text);
362 +- unsigned long end = (unsigned long)__end_rodata_hpage_align;
363 ++ unsigned long end_clone = (unsigned long)__end_rodata_hpage_align;
364 ++ unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
365 +
366 + if (!pti_kernel_image_global_ok())
367 + return;
368 +@@ -459,14 +467,18 @@ void pti_clone_kernel_text(void)
369 + * pti_set_kernel_image_nonglobal() did to clear the
370 + * global bit.
371 + */
372 +- pti_clone_pmds(start, end, _PAGE_RW);
373 ++ pti_clone_pmds(start, end_clone, _PAGE_RW);
374 ++
375 ++ /*
376 ++ * pti_clone_pmds() will set the global bit in any PMDs
377 ++ * that it clones, but we also need to get any PTEs in
378 ++ * the last level for areas that are not huge-page-aligned.
379 ++ */
380 ++
381 ++ /* Set the global bit for normal non-__init kernel text: */
382 ++ set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
383 + }
384 +
385 +-/*
386 +- * This is the only user for it and it is not arch-generic like
387 +- * the other set_memory.h functions. Just extern it.
388 +- */
389 +-extern int set_memory_nonglobal(unsigned long addr, int numpages);
390 + void pti_set_kernel_image_nonglobal(void)
391 + {
392 + /*
393 +@@ -478,9 +490,11 @@ void pti_set_kernel_image_nonglobal(void)
394 + unsigned long start = PFN_ALIGN(_text);
395 + unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
396 +
397 +- if (pti_kernel_image_global_ok())
398 +- return;
399 +-
400 ++ /*
401 ++ * This clears _PAGE_GLOBAL from the entire kernel image.
402 ++ * pti_clone_kernel_text() map put _PAGE_GLOBAL back for
403 ++ * areas that are mapped to userspace.
404 ++ */
405 + set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
406 + }
407 +
408 +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
409 +index 439a94bf89ad..c5e3f2acc7f0 100644
410 +--- a/arch/x86/xen/enlighten_pv.c
411 ++++ b/arch/x86/xen/enlighten_pv.c
412 +@@ -1259,6 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
413 + get_cpu_cap(&boot_cpu_data);
414 + x86_configure_nx();
415 +
416 ++ /* Determine virtual and physical address sizes */
417 ++ get_cpu_address_sizes(&boot_cpu_data);
418 ++
419 + /* Let's presume PV guests always boot on vCPU with id 0. */
420 + per_cpu(xen_vcpu_id, 0) = 0;
421 +
422 +diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
423 +index d880a4897159..4ee7c041bb82 100644
424 +--- a/crypto/ablkcipher.c
425 ++++ b/crypto/ablkcipher.c
426 +@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
427 + return max(start, end_page);
428 + }
429 +
430 +-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
431 +- unsigned int bsize)
432 ++static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
433 ++ unsigned int n)
434 + {
435 +- unsigned int n = bsize;
436 +-
437 + for (;;) {
438 + unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
439 +
440 +@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
441 + n -= len_this_page;
442 + scatterwalk_start(&walk->out, sg_next(walk->out.sg));
443 + }
444 +-
445 +- return bsize;
446 + }
447 +
448 +-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
449 +- unsigned int n)
450 ++static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
451 ++ unsigned int n)
452 + {
453 + scatterwalk_advance(&walk->in, n);
454 + scatterwalk_advance(&walk->out, n);
455 +-
456 +- return n;
457 + }
458 +
459 + static int ablkcipher_walk_next(struct ablkcipher_request *req,
460 +@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
461 + struct ablkcipher_walk *walk, int err)
462 + {
463 + struct crypto_tfm *tfm = req->base.tfm;
464 +- unsigned int nbytes = 0;
465 ++ unsigned int n; /* bytes processed */
466 ++ bool more;
467 +
468 +- if (likely(err >= 0)) {
469 +- unsigned int n = walk->nbytes - err;
470 ++ if (unlikely(err < 0))
471 ++ goto finish;
472 +
473 +- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
474 +- n = ablkcipher_done_fast(walk, n);
475 +- else if (WARN_ON(err)) {
476 +- err = -EINVAL;
477 +- goto err;
478 +- } else
479 +- n = ablkcipher_done_slow(walk, n);
480 ++ n = walk->nbytes - err;
481 ++ walk->total -= n;
482 ++ more = (walk->total != 0);
483 +
484 +- nbytes = walk->total - n;
485 +- err = 0;
486 ++ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
487 ++ ablkcipher_done_fast(walk, n);
488 ++ } else {
489 ++ if (WARN_ON(err)) {
490 ++ /* unexpected case; didn't process all bytes */
491 ++ err = -EINVAL;
492 ++ goto finish;
493 ++ }
494 ++ ablkcipher_done_slow(walk, n);
495 + }
496 +
497 +- scatterwalk_done(&walk->in, 0, nbytes);
498 +- scatterwalk_done(&walk->out, 1, nbytes);
499 +-
500 +-err:
501 +- walk->total = nbytes;
502 +- walk->nbytes = nbytes;
503 ++ scatterwalk_done(&walk->in, 0, more);
504 ++ scatterwalk_done(&walk->out, 1, more);
505 +
506 +- if (nbytes) {
507 ++ if (more) {
508 + crypto_yield(req->base.flags);
509 + return ablkcipher_walk_next(req, walk);
510 + }
511 +-
512 ++ err = 0;
513 ++finish:
514 ++ walk->nbytes = 0;
515 + if (walk->iv != req->info)
516 + memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
517 + kfree(walk->iv_buffer);
518 +-
519 + return err;
520 + }
521 + EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
522 +diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
523 +index 01c0d4aa2563..77b5fa293f66 100644
524 +--- a/crypto/blkcipher.c
525 ++++ b/crypto/blkcipher.c
526 +@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
527 + return max(start, end_page);
528 + }
529 +
530 +-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
531 +- unsigned int bsize)
532 ++static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
533 ++ unsigned int bsize)
534 + {
535 + u8 *addr;
536 +
537 + addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
538 + addr = blkcipher_get_spot(addr, bsize);
539 + scatterwalk_copychunks(addr, &walk->out, bsize, 1);
540 +- return bsize;
541 + }
542 +
543 +-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
544 +- unsigned int n)
545 ++static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
546 ++ unsigned int n)
547 + {
548 + if (walk->flags & BLKCIPHER_WALK_COPY) {
549 + blkcipher_map_dst(walk);
550 +@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
551 +
552 + scatterwalk_advance(&walk->in, n);
553 + scatterwalk_advance(&walk->out, n);
554 +-
555 +- return n;
556 + }
557 +
558 + int blkcipher_walk_done(struct blkcipher_desc *desc,
559 + struct blkcipher_walk *walk, int err)
560 + {
561 +- unsigned int nbytes = 0;
562 ++ unsigned int n; /* bytes processed */
563 ++ bool more;
564 +
565 +- if (likely(err >= 0)) {
566 +- unsigned int n = walk->nbytes - err;
567 ++ if (unlikely(err < 0))
568 ++ goto finish;
569 +
570 +- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
571 +- n = blkcipher_done_fast(walk, n);
572 +- else if (WARN_ON(err)) {
573 +- err = -EINVAL;
574 +- goto err;
575 +- } else
576 +- n = blkcipher_done_slow(walk, n);
577 ++ n = walk->nbytes - err;
578 ++ walk->total -= n;
579 ++ more = (walk->total != 0);
580 +
581 +- nbytes = walk->total - n;
582 +- err = 0;
583 ++ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
584 ++ blkcipher_done_fast(walk, n);
585 ++ } else {
586 ++ if (WARN_ON(err)) {
587 ++ /* unexpected case; didn't process all bytes */
588 ++ err = -EINVAL;
589 ++ goto finish;
590 ++ }
591 ++ blkcipher_done_slow(walk, n);
592 + }
593 +
594 +- scatterwalk_done(&walk->in, 0, nbytes);
595 +- scatterwalk_done(&walk->out, 1, nbytes);
596 ++ scatterwalk_done(&walk->in, 0, more);
597 ++ scatterwalk_done(&walk->out, 1, more);
598 +
599 +-err:
600 +- walk->total = nbytes;
601 +- walk->nbytes = nbytes;
602 +-
603 +- if (nbytes) {
604 ++ if (more) {
605 + crypto_yield(desc->flags);
606 + return blkcipher_walk_next(desc, walk);
607 + }
608 +-
609 ++ err = 0;
610 ++finish:
611 ++ walk->nbytes = 0;
612 + if (walk->iv != desc->info)
613 + memcpy(desc->info, walk->iv, walk->ivsize);
614 + if (walk->buffer != walk->page)
615 + kfree(walk->buffer);
616 + if (walk->page)
617 + free_page((unsigned long)walk->page);
618 +-
619 + return err;
620 + }
621 + EXPORT_SYMBOL_GPL(blkcipher_walk_done);
622 +diff --git a/crypto/skcipher.c b/crypto/skcipher.c
623 +index 0fe2a2923ad0..5dc8407bdaa9 100644
624 +--- a/crypto/skcipher.c
625 ++++ b/crypto/skcipher.c
626 +@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
627 + return max(start, end_page);
628 + }
629 +
630 +-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
631 ++static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
632 + {
633 + u8 *addr;
634 +
635 +@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
636 + addr = skcipher_get_spot(addr, bsize);
637 + scatterwalk_copychunks(addr, &walk->out, bsize,
638 + (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
639 +- return 0;
640 + }
641 +
642 + int skcipher_walk_done(struct skcipher_walk *walk, int err)
643 + {
644 +- unsigned int n = walk->nbytes - err;
645 +- unsigned int nbytes;
646 +-
647 +- nbytes = walk->total - n;
648 +-
649 +- if (unlikely(err < 0)) {
650 +- nbytes = 0;
651 +- n = 0;
652 +- } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
653 +- SKCIPHER_WALK_SLOW |
654 +- SKCIPHER_WALK_COPY |
655 +- SKCIPHER_WALK_DIFF)))) {
656 ++ unsigned int n; /* bytes processed */
657 ++ bool more;
658 ++
659 ++ if (unlikely(err < 0))
660 ++ goto finish;
661 ++
662 ++ n = walk->nbytes - err;
663 ++ walk->total -= n;
664 ++ more = (walk->total != 0);
665 ++
666 ++ if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
667 ++ SKCIPHER_WALK_SLOW |
668 ++ SKCIPHER_WALK_COPY |
669 ++ SKCIPHER_WALK_DIFF)))) {
670 + unmap_src:
671 + skcipher_unmap_src(walk);
672 + } else if (walk->flags & SKCIPHER_WALK_DIFF) {
673 +@@ -131,28 +132,28 @@ unmap_src:
674 + skcipher_unmap_dst(walk);
675 + } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
676 + if (WARN_ON(err)) {
677 ++ /* unexpected case; didn't process all bytes */
678 + err = -EINVAL;
679 +- nbytes = 0;
680 +- } else
681 +- n = skcipher_done_slow(walk, n);
682 ++ goto finish;
683 ++ }
684 ++ skcipher_done_slow(walk, n);
685 ++ goto already_advanced;
686 + }
687 +
688 +- if (err > 0)
689 +- err = 0;
690 +-
691 +- walk->total = nbytes;
692 +- walk->nbytes = nbytes;
693 +-
694 + scatterwalk_advance(&walk->in, n);
695 + scatterwalk_advance(&walk->out, n);
696 +- scatterwalk_done(&walk->in, 0, nbytes);
697 +- scatterwalk_done(&walk->out, 1, nbytes);
698 ++already_advanced:
699 ++ scatterwalk_done(&walk->in, 0, more);
700 ++ scatterwalk_done(&walk->out, 1, more);
701 +
702 +- if (nbytes) {
703 ++ if (more) {
704 + crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
705 + CRYPTO_TFM_REQ_MAY_SLEEP : 0);
706 + return skcipher_walk_next(walk);
707 + }
708 ++ err = 0;
709 ++finish:
710 ++ walk->nbytes = 0;
711 +
712 + /* Short-circuit for the common/fast path. */
713 + if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
714 +@@ -399,7 +400,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
715 + unsigned size;
716 + u8 *iv;
717 +
718 +- aligned_bs = ALIGN(bs, alignmask);
719 ++ aligned_bs = ALIGN(bs, alignmask + 1);
720 +
721 + /* Minimum size to align buffer by alignmask. */
722 + size = alignmask & ~a;
723 +diff --git a/crypto/vmac.c b/crypto/vmac.c
724 +index df76a816cfb2..bb2fc787d615 100644
725 +--- a/crypto/vmac.c
726 ++++ b/crypto/vmac.c
727 +@@ -1,6 +1,10 @@
728 + /*
729 +- * Modified to interface to the Linux kernel
730 ++ * VMAC: Message Authentication Code using Universal Hashing
731 ++ *
732 ++ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
733 ++ *
734 + * Copyright (c) 2009, Intel Corporation.
735 ++ * Copyright (c) 2018, Google Inc.
736 + *
737 + * This program is free software; you can redistribute it and/or modify it
738 + * under the terms and conditions of the GNU General Public License,
739 +@@ -16,14 +20,15 @@
740 + * Place - Suite 330, Boston, MA 02111-1307 USA.
741 + */
742 +
743 +-/* --------------------------------------------------------------------------
744 +- * VMAC and VHASH Implementation by Ted Krovetz (tdk@×××.org) and Wei Dai.
745 +- * This implementation is herby placed in the public domain.
746 +- * The authors offers no warranty. Use at your own risk.
747 +- * Please send bug reports to the authors.
748 +- * Last modified: 17 APR 08, 1700 PDT
749 +- * ----------------------------------------------------------------------- */
750 ++/*
751 ++ * Derived from:
752 ++ * VMAC and VHASH Implementation by Ted Krovetz (tdk@×××.org) and Wei Dai.
753 ++ * This implementation is herby placed in the public domain.
754 ++ * The authors offers no warranty. Use at your own risk.
755 ++ * Last modified: 17 APR 08, 1700 PDT
756 ++ */
757 +
758 ++#include <asm/unaligned.h>
759 + #include <linux/init.h>
760 + #include <linux/types.h>
761 + #include <linux/crypto.h>
762 +@@ -31,9 +36,35 @@
763 + #include <linux/scatterlist.h>
764 + #include <asm/byteorder.h>
765 + #include <crypto/scatterwalk.h>
766 +-#include <crypto/vmac.h>
767 + #include <crypto/internal/hash.h>
768 +
769 ++/*
770 ++ * User definable settings.
771 ++ */
772 ++#define VMAC_TAG_LEN 64
773 ++#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
774 ++#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
775 ++#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
776 ++
777 ++/* per-transform (per-key) context */
778 ++struct vmac_tfm_ctx {
779 ++ struct crypto_cipher *cipher;
780 ++ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
781 ++ u64 polykey[2*VMAC_TAG_LEN/64];
782 ++ u64 l3key[2*VMAC_TAG_LEN/64];
783 ++};
784 ++
785 ++/* per-request context */
786 ++struct vmac_desc_ctx {
787 ++ union {
788 ++ u8 partial[VMAC_NHBYTES]; /* partial block */
789 ++ __le64 partial_words[VMAC_NHBYTES / 8];
790 ++ };
791 ++ unsigned int partial_size; /* size of the partial block */
792 ++ bool first_block_processed;
793 ++ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
794 ++};
795 ++
796 + /*
797 + * Constants and masks
798 + */
799 +@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
800 + } while (0)
801 + #endif
802 +
803 +-static void vhash_abort(struct vmac_ctx *ctx)
804 +-{
805 +- ctx->polytmp[0] = ctx->polykey[0] ;
806 +- ctx->polytmp[1] = ctx->polykey[1] ;
807 +- ctx->first_block_processed = 0;
808 +-}
809 +-
810 + static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
811 + {
812 + u64 rh, rl, t, z = 0;
813 +@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
814 + return rl;
815 + }
816 +
817 +-static void vhash_update(const unsigned char *m,
818 +- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
819 +- struct vmac_ctx *ctx)
820 ++/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
821 ++static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
822 ++ struct vmac_desc_ctx *dctx,
823 ++ const __le64 *mptr, unsigned int blocks)
824 + {
825 +- u64 rh, rl, *mptr;
826 +- const u64 *kptr = (u64 *)ctx->nhkey;
827 +- int i;
828 +- u64 ch, cl;
829 +- u64 pkh = ctx->polykey[0];
830 +- u64 pkl = ctx->polykey[1];
831 +-
832 +- if (!mbytes)
833 +- return;
834 +-
835 +- BUG_ON(mbytes % VMAC_NHBYTES);
836 +-
837 +- mptr = (u64 *)m;
838 +- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
839 +-
840 +- ch = ctx->polytmp[0];
841 +- cl = ctx->polytmp[1];
842 +-
843 +- if (!ctx->first_block_processed) {
844 +- ctx->first_block_processed = 1;
845 ++ const u64 *kptr = tctx->nhkey;
846 ++ const u64 pkh = tctx->polykey[0];
847 ++ const u64 pkl = tctx->polykey[1];
848 ++ u64 ch = dctx->polytmp[0];
849 ++ u64 cl = dctx->polytmp[1];
850 ++ u64 rh, rl;
851 ++
852 ++ if (!dctx->first_block_processed) {
853 ++ dctx->first_block_processed = true;
854 + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
855 + rh &= m62;
856 + ADD128(ch, cl, rh, rl);
857 + mptr += (VMAC_NHBYTES/sizeof(u64));
858 +- i--;
859 ++ blocks--;
860 + }
861 +
862 +- while (i--) {
863 ++ while (blocks--) {
864 + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
865 + rh &= m62;
866 + poly_step(ch, cl, pkh, pkl, rh, rl);
867 + mptr += (VMAC_NHBYTES/sizeof(u64));
868 + }
869 +
870 +- ctx->polytmp[0] = ch;
871 +- ctx->polytmp[1] = cl;
872 ++ dctx->polytmp[0] = ch;
873 ++ dctx->polytmp[1] = cl;
874 + }
875 +
876 +-static u64 vhash(unsigned char m[], unsigned int mbytes,
877 +- u64 *tagl, struct vmac_ctx *ctx)
878 ++static int vmac_setkey(struct crypto_shash *tfm,
879 ++ const u8 *key, unsigned int keylen)
880 + {
881 +- u64 rh, rl, *mptr;
882 +- const u64 *kptr = (u64 *)ctx->nhkey;
883 +- int i, remaining;
884 +- u64 ch, cl;
885 +- u64 pkh = ctx->polykey[0];
886 +- u64 pkl = ctx->polykey[1];
887 +-
888 +- mptr = (u64 *)m;
889 +- i = mbytes / VMAC_NHBYTES;
890 +- remaining = mbytes % VMAC_NHBYTES;
891 +-
892 +- if (ctx->first_block_processed) {
893 +- ch = ctx->polytmp[0];
894 +- cl = ctx->polytmp[1];
895 +- } else if (i) {
896 +- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
897 +- ch &= m62;
898 +- ADD128(ch, cl, pkh, pkl);
899 +- mptr += (VMAC_NHBYTES/sizeof(u64));
900 +- i--;
901 +- } else if (remaining) {
902 +- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
903 +- ch &= m62;
904 +- ADD128(ch, cl, pkh, pkl);
905 +- mptr += (VMAC_NHBYTES/sizeof(u64));
906 +- goto do_l3;
907 +- } else {/* Empty String */
908 +- ch = pkh; cl = pkl;
909 +- goto do_l3;
910 +- }
911 +-
912 +- while (i--) {
913 +- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
914 +- rh &= m62;
915 +- poly_step(ch, cl, pkh, pkl, rh, rl);
916 +- mptr += (VMAC_NHBYTES/sizeof(u64));
917 +- }
918 +- if (remaining) {
919 +- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
920 +- rh &= m62;
921 +- poly_step(ch, cl, pkh, pkl, rh, rl);
922 +- }
923 +-
924 +-do_l3:
925 +- vhash_abort(ctx);
926 +- remaining *= 8;
927 +- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
928 +-}
929 ++ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
930 ++ __be64 out[2];
931 ++ u8 in[16] = { 0 };
932 ++ unsigned int i;
933 ++ int err;
934 +
935 +-static u64 vmac(unsigned char m[], unsigned int mbytes,
936 +- const unsigned char n[16], u64 *tagl,
937 +- struct vmac_ctx_t *ctx)
938 +-{
939 +- u64 *in_n, *out_p;
940 +- u64 p, h;
941 +- int i;
942 +-
943 +- in_n = ctx->__vmac_ctx.cached_nonce;
944 +- out_p = ctx->__vmac_ctx.cached_aes;
945 +-
946 +- i = n[15] & 1;
947 +- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
948 +- in_n[0] = *(u64 *)(n);
949 +- in_n[1] = *(u64 *)(n+8);
950 +- ((unsigned char *)in_n)[15] &= 0xFE;
951 +- crypto_cipher_encrypt_one(ctx->child,
952 +- (unsigned char *)out_p, (unsigned char *)in_n);
953 +-
954 +- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
955 ++ if (keylen != VMAC_KEY_LEN) {
956 ++ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
957 ++ return -EINVAL;
958 + }
959 +- p = be64_to_cpup(out_p + i);
960 +- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
961 +- return le64_to_cpu(p + h);
962 +-}
963 +
964 +-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
965 +-{
966 +- u64 in[2] = {0}, out[2];
967 +- unsigned i;
968 +- int err = 0;
969 +-
970 +- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
971 ++ err = crypto_cipher_setkey(tctx->cipher, key, keylen);
972 + if (err)
973 + return err;
974 +
975 + /* Fill nh key */
976 +- ((unsigned char *)in)[0] = 0x80;
977 +- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
978 +- crypto_cipher_encrypt_one(ctx->child,
979 +- (unsigned char *)out, (unsigned char *)in);
980 +- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
981 +- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
982 +- ((unsigned char *)in)[15] += 1;
983 ++ in[0] = 0x80;
984 ++ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
985 ++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
986 ++ tctx->nhkey[i] = be64_to_cpu(out[0]);
987 ++ tctx->nhkey[i+1] = be64_to_cpu(out[1]);
988 ++ in[15]++;
989 + }
990 +
991 + /* Fill poly key */
992 +- ((unsigned char *)in)[0] = 0xC0;
993 +- in[1] = 0;
994 +- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
995 +- crypto_cipher_encrypt_one(ctx->child,
996 +- (unsigned char *)out, (unsigned char *)in);
997 +- ctx->__vmac_ctx.polytmp[i] =
998 +- ctx->__vmac_ctx.polykey[i] =
999 +- be64_to_cpup(out) & mpoly;
1000 +- ctx->__vmac_ctx.polytmp[i+1] =
1001 +- ctx->__vmac_ctx.polykey[i+1] =
1002 +- be64_to_cpup(out+1) & mpoly;
1003 +- ((unsigned char *)in)[15] += 1;
1004 ++ in[0] = 0xC0;
1005 ++ in[15] = 0;
1006 ++ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
1007 ++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
1008 ++ tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
1009 ++ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
1010 ++ in[15]++;
1011 + }
1012 +
1013 + /* Fill ip key */
1014 +- ((unsigned char *)in)[0] = 0xE0;
1015 +- in[1] = 0;
1016 +- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
1017 ++ in[0] = 0xE0;
1018 ++ in[15] = 0;
1019 ++ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
1020 + do {
1021 +- crypto_cipher_encrypt_one(ctx->child,
1022 +- (unsigned char *)out, (unsigned char *)in);
1023 +- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
1024 +- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
1025 +- ((unsigned char *)in)[15] += 1;
1026 +- } while (ctx->__vmac_ctx.l3key[i] >= p64
1027 +- || ctx->__vmac_ctx.l3key[i+1] >= p64);
1028 ++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
1029 ++ tctx->l3key[i] = be64_to_cpu(out[0]);
1030 ++ tctx->l3key[i+1] = be64_to_cpu(out[1]);
1031 ++ in[15]++;
1032 ++ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
1033 + }
1034 +
1035 +- /* Invalidate nonce/aes cache and reset other elements */
1036 +- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
1037 +- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
1038 +- ctx->__vmac_ctx.first_block_processed = 0;
1039 +-
1040 +- return err;
1041 ++ return 0;
1042 + }
1043 +
1044 +-static int vmac_setkey(struct crypto_shash *parent,
1045 +- const u8 *key, unsigned int keylen)
1046 ++static int vmac_init(struct shash_desc *desc)
1047 + {
1048 +- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
1049 ++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
1050 ++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
1051 +
1052 +- if (keylen != VMAC_KEY_LEN) {
1053 +- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
1054 +- return -EINVAL;
1055 +- }
1056 +-
1057 +- return vmac_set_key((u8 *)key, ctx);
1058 +-}
1059 +-
1060 +-static int vmac_init(struct shash_desc *pdesc)
1061 +-{
1062 ++ dctx->partial_size = 0;
1063 ++ dctx->first_block_processed = false;
1064 ++ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
1065 + return 0;
1066 + }
1067 +
1068 +-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
1069 +- unsigned int len)
1070 ++static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
1071 + {
1072 +- struct crypto_shash *parent = pdesc->tfm;
1073 +- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
1074 +- int expand;
1075 +- int min;
1076 +-
1077 +- expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
1078 +- VMAC_NHBYTES - ctx->partial_size : 0;
1079 +-
1080 +- min = len < expand ? len : expand;
1081 +-
1082 +- memcpy(ctx->partial + ctx->partial_size, p, min);
1083 +- ctx->partial_size += min;
1084 +-
1085 +- if (len < expand)
1086 +- return 0;
1087 +-
1088 +- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
1089 +- ctx->partial_size = 0;
1090 +-
1091 +- len -= expand;
1092 +- p += expand;
1093 ++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
1094 ++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
1095 ++ unsigned int n;
1096 ++
1097 ++ if (dctx->partial_size) {
1098 ++ n = min(len, VMAC_NHBYTES - dctx->partial_size);
1099 ++ memcpy(&dctx->partial[dctx->partial_size], p, n);
1100 ++ dctx->partial_size += n;
1101 ++ p += n;
1102 ++ len -= n;
1103 ++ if (dctx->partial_size == VMAC_NHBYTES) {
1104 ++ vhash_blocks(tctx, dctx, dctx->partial_words, 1);
1105 ++ dctx->partial_size = 0;
1106 ++ }
1107 ++ }
1108 +
1109 +- if (len % VMAC_NHBYTES) {
1110 +- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
1111 +- len % VMAC_NHBYTES);
1112 +- ctx->partial_size = len % VMAC_NHBYTES;
1113 ++ if (len >= VMAC_NHBYTES) {
1114 ++ n = round_down(len, VMAC_NHBYTES);
1115 ++ /* TODO: 'p' may be misaligned here */
1116 ++ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
1117 ++ p += n;
1118 ++ len -= n;
1119 + }
1120 +
1121 +- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
1122 ++ if (len) {
1123 ++ memcpy(dctx->partial, p, len);
1124 ++ dctx->partial_size = len;
1125 ++ }
1126 +
1127 + return 0;
1128 + }
1129 +
1130 +-static int vmac_final(struct shash_desc *pdesc, u8 *out)
1131 ++static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
1132 ++ struct vmac_desc_ctx *dctx)
1133 + {
1134 +- struct crypto_shash *parent = pdesc->tfm;
1135 +- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
1136 +- vmac_t mac;
1137 +- u8 nonce[16] = {};
1138 +-
1139 +- /* vmac() ends up accessing outside the array bounds that
1140 +- * we specify. In appears to access up to the next 2-word
1141 +- * boundary. We'll just be uber cautious and zero the
1142 +- * unwritten bytes in the buffer.
1143 +- */
1144 +- if (ctx->partial_size) {
1145 +- memset(ctx->partial + ctx->partial_size, 0,
1146 +- VMAC_NHBYTES - ctx->partial_size);
1147 ++ unsigned int partial = dctx->partial_size;
1148 ++ u64 ch = dctx->polytmp[0];
1149 ++ u64 cl = dctx->polytmp[1];
1150 ++
1151 ++ /* L1 and L2-hash the final block if needed */
1152 ++ if (partial) {
1153 ++ /* Zero-pad to next 128-bit boundary */
1154 ++ unsigned int n = round_up(partial, 16);
1155 ++ u64 rh, rl;
1156 ++
1157 ++ memset(&dctx->partial[partial], 0, n - partial);
1158 ++ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
1159 ++ rh &= m62;
1160 ++ if (dctx->first_block_processed)
1161 ++ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
1162 ++ rh, rl);
1163 ++ else
1164 ++ ADD128(ch, cl, rh, rl);
1165 + }
1166 +- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
1167 +- memcpy(out, &mac, sizeof(vmac_t));
1168 +- memzero_explicit(&mac, sizeof(vmac_t));
1169 +- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
1170 +- ctx->partial_size = 0;
1171 ++
1172 ++ /* L3-hash the 128-bit output of L2-hash */
1173 ++ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
1174 ++}
1175 ++
1176 ++static int vmac_final(struct shash_desc *desc, u8 *out)
1177 ++{
1178 ++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
1179 ++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
1180 ++ static const u8 nonce[16] = {}; /* TODO: this is insecure */
1181 ++ union {
1182 ++ u8 bytes[16];
1183 ++ __be64 pads[2];
1184 ++ } block;
1185 ++ int index;
1186 ++ u64 hash, pad;
1187 ++
1188 ++ /* Finish calculating the VHASH of the message */
1189 ++ hash = vhash_final(tctx, dctx);
1190 ++
1191 ++ /* Generate pseudorandom pad by encrypting the nonce */
1192 ++ memcpy(&block, nonce, 16);
1193 ++ index = block.bytes[15] & 1;
1194 ++ block.bytes[15] &= ~1;
1195 ++ crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
1196 ++ pad = be64_to_cpu(block.pads[index]);
1197 ++
1198 ++ /* The VMAC is the sum of VHASH and the pseudorandom pad */
1199 ++ put_unaligned_le64(hash + pad, out);
1200 + return 0;
1201 + }
1202 +
1203 + static int vmac_init_tfm(struct crypto_tfm *tfm)
1204 + {
1205 +- struct crypto_cipher *cipher;
1206 +- struct crypto_instance *inst = (void *)tfm->__crt_alg;
1207 ++ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
1208 + struct crypto_spawn *spawn = crypto_instance_ctx(inst);
1209 +- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
1210 ++ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
1211 ++ struct crypto_cipher *cipher;
1212 +
1213 + cipher = crypto_spawn_cipher(spawn);
1214 + if (IS_ERR(cipher))
1215 + return PTR_ERR(cipher);
1216 +
1217 +- ctx->child = cipher;
1218 ++ tctx->cipher = cipher;
1219 + return 0;
1220 + }
1221 +
1222 + static void vmac_exit_tfm(struct crypto_tfm *tfm)
1223 + {
1224 +- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
1225 +- crypto_free_cipher(ctx->child);
1226 ++ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
1227 ++
1228 ++ crypto_free_cipher(tctx->cipher);
1229 + }
1230 +
1231 + static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
1232 +@@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
1233 + if (IS_ERR(alg))
1234 + return PTR_ERR(alg);
1235 +
1236 ++ err = -EINVAL;
1237 ++ if (alg->cra_blocksize != 16)
1238 ++ goto out_put_alg;
1239 ++
1240 + inst = shash_alloc_instance("vmac", alg);
1241 + err = PTR_ERR(inst);
1242 + if (IS_ERR(inst))
1243 +@@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
1244 + inst->alg.base.cra_blocksize = alg->cra_blocksize;
1245 + inst->alg.base.cra_alignmask = alg->cra_alignmask;
1246 +
1247 +- inst->alg.digestsize = sizeof(vmac_t);
1248 +- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
1249 ++ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
1250 + inst->alg.base.cra_init = vmac_init_tfm;
1251 + inst->alg.base.cra_exit = vmac_exit_tfm;
1252 +
1253 ++ inst->alg.descsize = sizeof(struct vmac_desc_ctx);
1254 ++ inst->alg.digestsize = VMAC_TAG_LEN / 8;
1255 + inst->alg.init = vmac_init;
1256 + inst->alg.update = vmac_update;
1257 + inst->alg.final = vmac_final;
1258 +diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
1259 +index ff478d826d7d..051b8c6bae64 100644
1260 +--- a/drivers/crypto/ccp/psp-dev.c
1261 ++++ b/drivers/crypto/ccp/psp-dev.c
1262 +@@ -84,8 +84,6 @@ done:
1263 +
1264 + static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
1265 + {
1266 +- psp->sev_int_rcvd = 0;
1267 +-
1268 + wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
1269 + *reg = ioread32(psp->io_regs + PSP_CMDRESP);
1270 + }
1271 +@@ -148,6 +146,8 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
1272 + iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
1273 + iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
1274 +
1275 ++ psp->sev_int_rcvd = 0;
1276 ++
1277 + reg = cmd;
1278 + reg <<= PSP_CMDRESP_CMD_SHIFT;
1279 + reg |= PSP_CMDRESP_IOC;
1280 +@@ -856,6 +856,9 @@ void psp_dev_destroy(struct sp_device *sp)
1281 + {
1282 + struct psp_device *psp = sp->psp_data;
1283 +
1284 ++ if (!psp)
1285 ++ return;
1286 ++
1287 + if (psp->sev_misc)
1288 + kref_put(&misc_dev->refcount, sev_exit);
1289 +
1290 +diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
1291 +index d2810c183b73..958ced3ca485 100644
1292 +--- a/drivers/crypto/ccree/cc_cipher.c
1293 ++++ b/drivers/crypto/ccree/cc_cipher.c
1294 +@@ -593,34 +593,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
1295 + }
1296 + }
1297 +
1298 ++/*
1299 ++ * Update a CTR-AES 128 bit counter
1300 ++ */
1301 ++static void cc_update_ctr(u8 *ctr, unsigned int increment)
1302 ++{
1303 ++ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
1304 ++ IS_ALIGNED((unsigned long)ctr, 8)) {
1305 ++
1306 ++ __be64 *high_be = (__be64 *)ctr;
1307 ++ __be64 *low_be = high_be + 1;
1308 ++ u64 orig_low = __be64_to_cpu(*low_be);
1309 ++ u64 new_low = orig_low + (u64)increment;
1310 ++
1311 ++ *low_be = __cpu_to_be64(new_low);
1312 ++
1313 ++ if (new_low < orig_low)
1314 ++ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
1315 ++ } else {
1316 ++ u8 *pos = (ctr + AES_BLOCK_SIZE);
1317 ++ u8 val;
1318 ++ unsigned int size;
1319 ++
1320 ++ for (; increment; increment--)
1321 ++ for (size = AES_BLOCK_SIZE; size; size--) {
1322 ++ val = *--pos + 1;
1323 ++ *pos = val;
1324 ++ if (val)
1325 ++ break;
1326 ++ }
1327 ++ }
1328 ++}
1329 ++
1330 + static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
1331 + {
1332 + struct skcipher_request *req = (struct skcipher_request *)cc_req;
1333 + struct scatterlist *dst = req->dst;
1334 + struct scatterlist *src = req->src;
1335 + struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
1336 +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1337 +- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1338 ++ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
1339 ++ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
1340 ++ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
1341 ++ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
1342 ++ unsigned int len;
1343 +
1344 +- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
1345 +- kzfree(req_ctx->iv);
1346 ++ switch (ctx_p->cipher_mode) {
1347 ++ case DRV_CIPHER_CBC:
1348 ++ /*
1349 ++ * The crypto API expects us to set the req->iv to the last
1350 ++ * ciphertext block. For encrypt, simply copy from the result.
1351 ++ * For decrypt, we must copy from a saved buffer since this
1352 ++ * could be an in-place decryption operation and the src is
1353 ++ * lost by this point.
1354 ++ */
1355 ++ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1356 ++ memcpy(req->iv, req_ctx->backup_info, ivsize);
1357 ++ kzfree(req_ctx->backup_info);
1358 ++ } else if (!err) {
1359 ++ len = req->cryptlen - ivsize;
1360 ++ scatterwalk_map_and_copy(req->iv, req->dst, len,
1361 ++ ivsize, 0);
1362 ++ }
1363 ++ break;
1364 +
1365 +- /*
1366 +- * The crypto API expects us to set the req->iv to the last
1367 +- * ciphertext block. For encrypt, simply copy from the result.
1368 +- * For decrypt, we must copy from a saved buffer since this
1369 +- * could be an in-place decryption operation and the src is
1370 +- * lost by this point.
1371 +- */
1372 +- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1373 +- memcpy(req->iv, req_ctx->backup_info, ivsize);
1374 +- kzfree(req_ctx->backup_info);
1375 +- } else if (!err) {
1376 +- scatterwalk_map_and_copy(req->iv, req->dst,
1377 +- (req->cryptlen - ivsize),
1378 +- ivsize, 0);
1379 ++ case DRV_CIPHER_CTR:
1380 ++ /* Compute the counter of the last block */
1381 ++ len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
1382 ++ cc_update_ctr((u8 *)req->iv, len);
1383 ++ break;
1384 ++
1385 ++ default:
1386 ++ break;
1387 + }
1388 +
1389 ++ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
1390 ++ kzfree(req_ctx->iv);
1391 ++
1392 + skcipher_request_complete(req, err);
1393 + }
1394 +
1395 +@@ -752,20 +800,29 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
1396 + static int cc_cipher_decrypt(struct skcipher_request *req)
1397 + {
1398 + struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
1399 ++ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
1400 ++ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
1401 + struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
1402 + unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
1403 + gfp_t flags = cc_gfp_flags(&req->base);
1404 ++ unsigned int len;
1405 +
1406 +- /*
1407 +- * Allocate and save the last IV sized bytes of the source, which will
1408 +- * be lost in case of in-place decryption and might be needed for CTS.
1409 +- */
1410 +- req_ctx->backup_info = kmalloc(ivsize, flags);
1411 +- if (!req_ctx->backup_info)
1412 +- return -ENOMEM;
1413 ++ if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
1414 ++
1415 ++ /* Allocate and save the last IV sized bytes of the source,
1416 ++ * which will be lost in case of in-place decryption.
1417 ++ */
1418 ++ req_ctx->backup_info = kzalloc(ivsize, flags);
1419 ++ if (!req_ctx->backup_info)
1420 ++ return -ENOMEM;
1421 ++
1422 ++ len = req->cryptlen - ivsize;
1423 ++ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
1424 ++ ivsize, 0);
1425 ++ } else {
1426 ++ req_ctx->backup_info = NULL;
1427 ++ }
1428 +
1429 +- scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
1430 +- (req->cryptlen - ivsize), ivsize, 0);
1431 + req_ctx->is_giv = false;
1432 +
1433 + return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
1434 +diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
1435 +index 96ff777474d7..e4ebde05a8a0 100644
1436 +--- a/drivers/crypto/ccree/cc_hash.c
1437 ++++ b/drivers/crypto/ccree/cc_hash.c
1438 +@@ -602,66 +602,7 @@ static int cc_hash_update(struct ahash_request *req)
1439 + return rc;
1440 + }
1441 +
1442 +-static int cc_hash_finup(struct ahash_request *req)
1443 +-{
1444 +- struct ahash_req_ctx *state = ahash_request_ctx(req);
1445 +- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1446 +- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1447 +- u32 digestsize = crypto_ahash_digestsize(tfm);
1448 +- struct scatterlist *src = req->src;
1449 +- unsigned int nbytes = req->nbytes;
1450 +- u8 *result = req->result;
1451 +- struct device *dev = drvdata_to_dev(ctx->drvdata);
1452 +- bool is_hmac = ctx->is_hmac;
1453 +- struct cc_crypto_req cc_req = {};
1454 +- struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1455 +- unsigned int idx = 0;
1456 +- int rc;
1457 +- gfp_t flags = cc_gfp_flags(&req->base);
1458 +-
1459 +- dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
1460 +- nbytes);
1461 +-
1462 +- if (cc_map_req(dev, state, ctx)) {
1463 +- dev_err(dev, "map_ahash_source() failed\n");
1464 +- return -EINVAL;
1465 +- }
1466 +-
1467 +- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
1468 +- flags)) {
1469 +- dev_err(dev, "map_ahash_request_final() failed\n");
1470 +- cc_unmap_req(dev, state, ctx);
1471 +- return -ENOMEM;
1472 +- }
1473 +- if (cc_map_result(dev, state, digestsize)) {
1474 +- dev_err(dev, "map_ahash_digest() failed\n");
1475 +- cc_unmap_hash_request(dev, state, src, true);
1476 +- cc_unmap_req(dev, state, ctx);
1477 +- return -ENOMEM;
1478 +- }
1479 +-
1480 +- /* Setup request structure */
1481 +- cc_req.user_cb = cc_hash_complete;
1482 +- cc_req.user_arg = req;
1483 +-
1484 +- idx = cc_restore_hash(desc, ctx, state, idx);
1485 +-
1486 +- if (is_hmac)
1487 +- idx = cc_fin_hmac(desc, req, idx);
1488 +-
1489 +- idx = cc_fin_result(desc, req, idx);
1490 +-
1491 +- rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1492 +- if (rc != -EINPROGRESS && rc != -EBUSY) {
1493 +- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1494 +- cc_unmap_hash_request(dev, state, src, true);
1495 +- cc_unmap_result(dev, state, digestsize, result);
1496 +- cc_unmap_req(dev, state, ctx);
1497 +- }
1498 +- return rc;
1499 +-}
1500 +-
1501 +-static int cc_hash_final(struct ahash_request *req)
1502 ++static int cc_do_finup(struct ahash_request *req, bool update)
1503 + {
1504 + struct ahash_req_ctx *state = ahash_request_ctx(req);
1505 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1506 +@@ -678,21 +619,20 @@ static int cc_hash_final(struct ahash_request *req)
1507 + int rc;
1508 + gfp_t flags = cc_gfp_flags(&req->base);
1509 +
1510 +- dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
1511 +- nbytes);
1512 ++ dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
1513 ++ update ? "finup" : "final", nbytes);
1514 +
1515 + if (cc_map_req(dev, state, ctx)) {
1516 + dev_err(dev, "map_ahash_source() failed\n");
1517 + return -EINVAL;
1518 + }
1519 +
1520 +- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
1521 ++ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
1522 + flags)) {
1523 + dev_err(dev, "map_ahash_request_final() failed\n");
1524 + cc_unmap_req(dev, state, ctx);
1525 + return -ENOMEM;
1526 + }
1527 +-
1528 + if (cc_map_result(dev, state, digestsize)) {
1529 + dev_err(dev, "map_ahash_digest() failed\n");
1530 + cc_unmap_hash_request(dev, state, src, true);
1531 +@@ -706,7 +646,7 @@ static int cc_hash_final(struct ahash_request *req)
1532 +
1533 + idx = cc_restore_hash(desc, ctx, state, idx);
1534 +
1535 +- /* "DO-PAD" must be enabled only when writing current length to HW */
1536 ++ /* Pad the hash */
1537 + hw_desc_init(&desc[idx]);
1538 + set_cipher_do(&desc[idx], DO_PAD);
1539 + set_cipher_mode(&desc[idx], ctx->hw_mode);
1540 +@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_request *req)
1541 + return rc;
1542 + }
1543 +
1544 ++static int cc_hash_finup(struct ahash_request *req)
1545 ++{
1546 ++ return cc_do_finup(req, true);
1547 ++}
1548 ++
1549 ++
1550 ++static int cc_hash_final(struct ahash_request *req)
1551 ++{
1552 ++ return cc_do_finup(req, false);
1553 ++}
1554 ++
1555 + static int cc_hash_init(struct ahash_request *req)
1556 + {
1557 + struct ahash_req_ctx *state = ahash_request_ctx(req);
1558 +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1559 +index 26ca0276b503..a75cb371cd19 100644
1560 +--- a/include/asm-generic/pgtable.h
1561 ++++ b/include/asm-generic/pgtable.h
1562 +@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1563 + int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1564 + int pud_clear_huge(pud_t *pud);
1565 + int pmd_clear_huge(pmd_t *pmd);
1566 +-int pud_free_pmd_page(pud_t *pud);
1567 +-int pmd_free_pte_page(pmd_t *pmd);
1568 ++int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1569 ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1570 + #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1571 + static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1572 + {
1573 +@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
1574 + {
1575 + return 0;
1576 + }
1577 +-static inline int pud_free_pmd_page(pud_t *pud)
1578 ++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1579 + {
1580 + return 0;
1581 + }
1582 +-static inline int pmd_free_pte_page(pmd_t *pmd)
1583 ++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1584 + {
1585 + return 0;
1586 + }
1587 +diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
1588 +deleted file mode 100644
1589 +index 6b700c7b2fe1..000000000000
1590 +--- a/include/crypto/vmac.h
1591 ++++ /dev/null
1592 +@@ -1,63 +0,0 @@
1593 +-/*
1594 +- * Modified to interface to the Linux kernel
1595 +- * Copyright (c) 2009, Intel Corporation.
1596 +- *
1597 +- * This program is free software; you can redistribute it and/or modify it
1598 +- * under the terms and conditions of the GNU General Public License,
1599 +- * version 2, as published by the Free Software Foundation.
1600 +- *
1601 +- * This program is distributed in the hope it will be useful, but WITHOUT
1602 +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1603 +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1604 +- * more details.
1605 +- *
1606 +- * You should have received a copy of the GNU General Public License along with
1607 +- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
1608 +- * Place - Suite 330, Boston, MA 02111-1307 USA.
1609 +- */
1610 +-
1611 +-#ifndef __CRYPTO_VMAC_H
1612 +-#define __CRYPTO_VMAC_H
1613 +-
1614 +-/* --------------------------------------------------------------------------
1615 +- * VMAC and VHASH Implementation by Ted Krovetz (tdk@×××.org) and Wei Dai.
1616 +- * This implementation is herby placed in the public domain.
1617 +- * The authors offers no warranty. Use at your own risk.
1618 +- * Please send bug reports to the authors.
1619 +- * Last modified: 17 APR 08, 1700 PDT
1620 +- * ----------------------------------------------------------------------- */
1621 +-
1622 +-/*
1623 +- * User definable settings.
1624 +- */
1625 +-#define VMAC_TAG_LEN 64
1626 +-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
1627 +-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
1628 +-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
1629 +-
1630 +-/*
1631 +- * This implementation uses u32 and u64 as names for unsigned 32-
1632 +- * and 64-bit integer types. These are defined in C99 stdint.h. The
1633 +- * following may need adaptation if you are not running a C99 or
1634 +- * Microsoft C environment.
1635 +- */
1636 +-struct vmac_ctx {
1637 +- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
1638 +- u64 polykey[2*VMAC_TAG_LEN/64];
1639 +- u64 l3key[2*VMAC_TAG_LEN/64];
1640 +- u64 polytmp[2*VMAC_TAG_LEN/64];
1641 +- u64 cached_nonce[2];
1642 +- u64 cached_aes[2];
1643 +- int first_block_processed;
1644 +-};
1645 +-
1646 +-typedef u64 vmac_t;
1647 +-
1648 +-struct vmac_ctx_t {
1649 +- struct crypto_cipher *child;
1650 +- struct vmac_ctx __vmac_ctx;
1651 +- u8 partial[VMAC_NHBYTES]; /* partial block */
1652 +- int partial_size; /* size of the partial block */
1653 +-};
1654 +-
1655 +-#endif /* __CRYPTO_VMAC_H */
1656 +diff --git a/lib/ioremap.c b/lib/ioremap.c
1657 +index 54e5bbaa3200..517f5853ffed 100644
1658 +--- a/lib/ioremap.c
1659 ++++ b/lib/ioremap.c
1660 +@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
1661 + if (ioremap_pmd_enabled() &&
1662 + ((next - addr) == PMD_SIZE) &&
1663 + IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
1664 +- pmd_free_pte_page(pmd)) {
1665 ++ pmd_free_pte_page(pmd, addr)) {
1666 + if (pmd_set_huge(pmd, phys_addr + addr, prot))
1667 + continue;
1668 + }
1669 +@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
1670 + if (ioremap_pud_enabled() &&
1671 + ((next - addr) == PUD_SIZE) &&
1672 + IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
1673 +- pud_free_pmd_page(pud)) {
1674 ++ pud_free_pmd_page(pud, addr)) {
1675 + if (pud_set_huge(pud, phys_addr + addr, prot))
1676 + continue;
1677 + }
1678 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
1679 +index 1036e4fa1ea2..3bba8f4b08a9 100644
1680 +--- a/net/bluetooth/hidp/core.c
1681 ++++ b/net/bluetooth/hidp/core.c
1682 +@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
1683 + del_timer(&session->timer);
1684 + }
1685 +
1686 +-static void hidp_process_report(struct hidp_session *session,
1687 +- int type, const u8 *data, int len, int intr)
1688 ++static void hidp_process_report(struct hidp_session *session, int type,
1689 ++ const u8 *data, unsigned int len, int intr)
1690 + {
1691 + if (len > HID_MAX_BUFFER_SIZE)
1692 + len = HID_MAX_BUFFER_SIZE;
1693 +diff --git a/scripts/depmod.sh b/scripts/depmod.sh
1694 +index 1a6f85e0e6e1..999d585eaa73 100755
1695 +--- a/scripts/depmod.sh
1696 ++++ b/scripts/depmod.sh
1697 +@@ -10,10 +10,16 @@ fi
1698 + DEPMOD=$1
1699 + KERNELRELEASE=$2
1700 +
1701 +-if ! test -r System.map -a -x "$DEPMOD"; then
1702 ++if ! test -r System.map ; then
1703 + exit 0
1704 + fi
1705 +
1706 ++if [ -z $(command -v $DEPMOD) ]; then
1707 ++ echo "'make modules_install' requires $DEPMOD. Please install it." >&2
1708 ++ echo "This is probably in the kmod package." >&2
1709 ++ exit 1
1710 ++fi
1711 ++
1712 + # older versions of depmod require the version string to start with three
1713 + # numbers, so we cheat with a symlink here
1714 + depmod_hack_needed=true