Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sat, 26 Jan 2019 14:59:44
Message-Id: 1548514744.7925b86f90f144813faafb0726a435199798af82.mpagano@gentoo
1 commit: 7925b86f90f144813faafb0726a435199798af82
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Jan 26 14:59:04 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Jan 26 14:59:04 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7925b86f
7
8 proj/linux-patches: Linux patch 4.4.172
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1171_linux-4.4.172.patch | 4333 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4337 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index fb7be63..02e6688 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -727,6 +727,10 @@ Patch: 1170_linux-4.4.171.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.171
23
24 +Patch: 1171_linux-4.4.172.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.172
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1171_linux-4.4.172.patch b/1171_linux-4.4.172.patch
33 new file mode 100644
34 index 0000000..ca36344
35 --- /dev/null
36 +++ b/1171_linux-4.4.172.patch
37 @@ -0,0 +1,4333 @@
38 +diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
39 +index 6d2689ebf824..5b87946a53a3 100644
40 +--- a/Documentation/filesystems/proc.txt
41 ++++ b/Documentation/filesystems/proc.txt
42 +@@ -466,7 +466,9 @@ manner. The codes are the following:
43 +
44 + Note that there is no guarantee that every flag and associated mnemonic will
45 + be present in all further kernel releases. Things get changed, the flags may
46 +-be vanished or the reverse -- new added.
47 ++be vanished or the reverse -- new added. Interpretation of their meaning
48 ++might change in future as well. So each consumer of these flags has to
49 ++follow each specific kernel version for the exact semantic.
50 +
51 + This file is only present if the CONFIG_MMU kernel configuration option is
52 + enabled.
53 +diff --git a/Makefile b/Makefile
54 +index c6b680faedd8..2aa8db459a74 100644
55 +--- a/Makefile
56 ++++ b/Makefile
57 +@@ -1,6 +1,6 @@
58 + VERSION = 4
59 + PATCHLEVEL = 4
60 +-SUBLEVEL = 171
61 ++SUBLEVEL = 172
62 + EXTRAVERSION =
63 + NAME = Blurry Fish Butt
64 +
65 +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
66 +index ef8e13d379cb..d7e7cf56e8d6 100644
67 +--- a/arch/arm64/include/asm/kvm_arm.h
68 ++++ b/arch/arm64/include/asm/kvm_arm.h
69 +@@ -23,6 +23,8 @@
70 + #include <asm/types.h>
71 +
72 + /* Hyp Configuration Register (HCR) bits */
73 ++#define HCR_API (UL(1) << 41)
74 ++#define HCR_APK (UL(1) << 40)
75 + #define HCR_ID (UL(1) << 33)
76 + #define HCR_CD (UL(1) << 32)
77 + #define HCR_RW_SHIFT 31
78 +@@ -81,6 +83,7 @@
79 + HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
80 + #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
81 + #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
82 ++#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
83 +
84 +
85 + /* Hyp System Control Register (SCTLR_EL2) bits */
86 +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
87 +index d019c3a58cc2..0382eba4bf7b 100644
88 +--- a/arch/arm64/kernel/head.S
89 ++++ b/arch/arm64/kernel/head.S
90 +@@ -30,6 +30,7 @@
91 + #include <asm/cache.h>
92 + #include <asm/cputype.h>
93 + #include <asm/kernel-pgtable.h>
94 ++#include <asm/kvm_arm.h>
95 + #include <asm/memory.h>
96 + #include <asm/pgtable-hwdef.h>
97 + #include <asm/pgtable.h>
98 +@@ -464,7 +465,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
99 + ret
100 +
101 + /* Hyp configuration. */
102 +-2: mov x0, #(1 << 31) // 64-bit EL1
103 ++2: mov_q x0, HCR_HOST_NVHE_FLAGS
104 + msr hcr_el2, x0
105 +
106 + /* Generic timers. */
107 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
108 +index 62d3dc60ca09..e99a0ed7e66b 100644
109 +--- a/arch/arm64/kernel/perf_event.c
110 ++++ b/arch/arm64/kernel/perf_event.c
111 +@@ -670,6 +670,7 @@ static struct platform_driver armv8_pmu_driver = {
112 + .driver = {
113 + .name = "armv8-pmu",
114 + .of_match_table = armv8_pmu_of_device_ids,
115 ++ .suppress_bind_attrs = true,
116 + },
117 + .probe = armv8_pmu_device_probe,
118 + };
119 +diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
120 +index 86c289832272..8d3da858c257 100644
121 +--- a/arch/arm64/kvm/hyp.S
122 ++++ b/arch/arm64/kvm/hyp.S
123 +@@ -494,7 +494,7 @@
124 + .endm
125 +
126 + .macro deactivate_traps
127 +- mov x2, #HCR_RW
128 ++ mov_q x2, HCR_HOST_NVHE_FLAGS
129 + msr hcr_el2, x2
130 + msr hstr_el2, xzr
131 +
132 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
133 +index 8b0424abc84c..333ea0389adb 100644
134 +--- a/arch/mips/Kconfig
135 ++++ b/arch/mips/Kconfig
136 +@@ -760,6 +760,7 @@ config SIBYTE_SWARM
137 + select SYS_SUPPORTS_HIGHMEM
138 + select SYS_SUPPORTS_LITTLE_ENDIAN
139 + select ZONE_DMA32 if 64BIT
140 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
141 +
142 + config SIBYTE_LITTLESUR
143 + bool "Sibyte BCM91250C2-LittleSur"
144 +@@ -782,6 +783,7 @@ config SIBYTE_SENTOSA
145 + select SYS_HAS_CPU_SB1
146 + select SYS_SUPPORTS_BIG_ENDIAN
147 + select SYS_SUPPORTS_LITTLE_ENDIAN
148 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
149 +
150 + config SIBYTE_BIGSUR
151 + bool "Sibyte BCM91480B-BigSur"
152 +@@ -795,6 +797,7 @@ config SIBYTE_BIGSUR
153 + select SYS_SUPPORTS_HIGHMEM
154 + select SYS_SUPPORTS_LITTLE_ENDIAN
155 + select ZONE_DMA32 if 64BIT
156 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
157 +
158 + config SNI_RM
159 + bool "SNI RM200/300/400"
160 +@@ -2972,6 +2975,7 @@ config MIPS32_O32
161 + config MIPS32_N32
162 + bool "Kernel support for n32 binaries"
163 + depends on 64BIT
164 ++ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
165 + select COMPAT
166 + select MIPS32_COMPAT
167 + select SYSVIPC_COMPAT if SYSVIPC
168 +diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
169 +index 2a5bb849b10e..288b58b00dc8 100644
170 +--- a/arch/mips/pci/msi-octeon.c
171 ++++ b/arch/mips/pci/msi-octeon.c
172 +@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
173 + int irq;
174 + struct irq_chip *msi;
175 +
176 +- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
177 ++ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
178 ++ return 0;
179 ++ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
180 + msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
181 + msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
182 + msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
183 +diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
184 +index b3d6bf23a662..3ef3fb658136 100644
185 +--- a/arch/mips/sibyte/common/Makefile
186 ++++ b/arch/mips/sibyte/common/Makefile
187 +@@ -1,4 +1,5 @@
188 + obj-y := cfe.o
189 ++obj-$(CONFIG_SWIOTLB) += dma.o
190 + obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
191 + obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
192 + obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
193 +diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
194 +new file mode 100644
195 +index 000000000000..eb47a94f3583
196 +--- /dev/null
197 ++++ b/arch/mips/sibyte/common/dma.c
198 +@@ -0,0 +1,14 @@
199 ++// SPDX-License-Identifier: GPL-2.0+
200 ++/*
201 ++ * DMA support for Broadcom SiByte platforms.
202 ++ *
203 ++ * Copyright (c) 2018 Maciej W. Rozycki
204 ++ */
205 ++
206 ++#include <linux/swiotlb.h>
207 ++#include <asm/bootinfo.h>
208 ++
209 ++void __init plat_swiotlb_setup(void)
210 ++{
211 ++ swiotlb_init(1);
212 ++}
213 +diff --git a/crypto/authenc.c b/crypto/authenc.c
214 +index b7290c5b1eaa..5c25005ff398 100644
215 +--- a/crypto/authenc.c
216 ++++ b/crypto/authenc.c
217 +@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
218 + return -EINVAL;
219 + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
220 + return -EINVAL;
221 +- if (RTA_PAYLOAD(rta) < sizeof(*param))
222 ++
223 ++ /*
224 ++ * RTA_OK() didn't align the rtattr's payload when validating that it
225 ++ * fits in the buffer. Yet, the keys should start on the next 4-byte
226 ++ * aligned boundary. To avoid confusion, require that the rtattr
227 ++ * payload be exactly the param struct, which has a 4-byte aligned size.
228 ++ */
229 ++ if (RTA_PAYLOAD(rta) != sizeof(*param))
230 + return -EINVAL;
231 ++ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
232 +
233 + param = RTA_DATA(rta);
234 + keys->enckeylen = be32_to_cpu(param->enckeylen);
235 +
236 +- key += RTA_ALIGN(rta->rta_len);
237 +- keylen -= RTA_ALIGN(rta->rta_len);
238 ++ key += rta->rta_len;
239 ++ keylen -= rta->rta_len;
240 +
241 + if (keylen < keys->enckeylen)
242 + return -EINVAL;
243 +diff --git a/crypto/authencesn.c b/crypto/authencesn.c
244 +index fa0c4567f697..5fdf3e532310 100644
245 +--- a/crypto/authencesn.c
246 ++++ b/crypto/authencesn.c
247 +@@ -276,7 +276,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
248 + struct aead_request *req = areq->data;
249 +
250 + err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
251 +- aead_request_complete(req, err);
252 ++ authenc_esn_request_complete(req, err);
253 + }
254 +
255 + static int crypto_authenc_esn_decrypt(struct aead_request *req)
256 +diff --git a/drivers/base/bus.c b/drivers/base/bus.c
257 +index 0346e46e2871..ecca4ae248e0 100644
258 +--- a/drivers/base/bus.c
259 ++++ b/drivers/base/bus.c
260 +@@ -33,6 +33,9 @@ static struct kset *system_kset;
261 +
262 + #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
263 +
264 ++#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
265 ++ struct driver_attribute driver_attr_##_name = \
266 ++ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
267 +
268 + static int __must_check bus_rescan_devices_helper(struct device *dev,
269 + void *data);
270 +@@ -198,7 +201,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
271 + bus_put(bus);
272 + return err;
273 + }
274 +-static DRIVER_ATTR_WO(unbind);
275 ++static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
276 +
277 + /*
278 + * Manually attach a device to a driver.
279 +@@ -234,7 +237,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
280 + bus_put(bus);
281 + return err;
282 + }
283 +-static DRIVER_ATTR_WO(bind);
284 ++static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
285 +
286 + static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
287 + {
288 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
289 +index da3902ac16c8..b1cf891cb3d9 100644
290 +--- a/drivers/block/loop.c
291 ++++ b/drivers/block/loop.c
292 +@@ -81,7 +81,7 @@
293 + #include <asm/uaccess.h>
294 +
295 + static DEFINE_IDR(loop_index_idr);
296 +-static DEFINE_MUTEX(loop_index_mutex);
297 ++static DEFINE_MUTEX(loop_ctl_mutex);
298 +
299 + static int max_part;
300 + static int part_shift;
301 +@@ -1044,7 +1044,7 @@ static int loop_clr_fd(struct loop_device *lo)
302 + */
303 + if (atomic_read(&lo->lo_refcnt) > 1) {
304 + lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
305 +- mutex_unlock(&lo->lo_ctl_mutex);
306 ++ mutex_unlock(&loop_ctl_mutex);
307 + return 0;
308 + }
309 +
310 +@@ -1093,12 +1093,12 @@ static int loop_clr_fd(struct loop_device *lo)
311 + if (!part_shift)
312 + lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
313 + loop_unprepare_queue(lo);
314 +- mutex_unlock(&lo->lo_ctl_mutex);
315 ++ mutex_unlock(&loop_ctl_mutex);
316 + /*
317 +- * Need not hold lo_ctl_mutex to fput backing file.
318 +- * Calling fput holding lo_ctl_mutex triggers a circular
319 ++ * Need not hold loop_ctl_mutex to fput backing file.
320 ++ * Calling fput holding loop_ctl_mutex triggers a circular
321 + * lock dependency possibility warning as fput can take
322 +- * bd_mutex which is usually taken before lo_ctl_mutex.
323 ++ * bd_mutex which is usually taken before loop_ctl_mutex.
324 + */
325 + fput(filp);
326 + return 0;
327 +@@ -1361,7 +1361,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
328 + struct loop_device *lo = bdev->bd_disk->private_data;
329 + int err;
330 +
331 +- mutex_lock_nested(&lo->lo_ctl_mutex, 1);
332 ++ mutex_lock_nested(&loop_ctl_mutex, 1);
333 + switch (cmd) {
334 + case LOOP_SET_FD:
335 + err = loop_set_fd(lo, mode, bdev, arg);
336 +@@ -1370,7 +1370,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
337 + err = loop_change_fd(lo, bdev, arg);
338 + break;
339 + case LOOP_CLR_FD:
340 +- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
341 ++ /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
342 + err = loop_clr_fd(lo);
343 + if (!err)
344 + goto out_unlocked;
345 +@@ -1406,7 +1406,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
346 + default:
347 + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
348 + }
349 +- mutex_unlock(&lo->lo_ctl_mutex);
350 ++ mutex_unlock(&loop_ctl_mutex);
351 +
352 + out_unlocked:
353 + return err;
354 +@@ -1539,16 +1539,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
355 +
356 + switch(cmd) {
357 + case LOOP_SET_STATUS:
358 +- mutex_lock(&lo->lo_ctl_mutex);
359 ++ mutex_lock(&loop_ctl_mutex);
360 + err = loop_set_status_compat(
361 + lo, (const struct compat_loop_info __user *) arg);
362 +- mutex_unlock(&lo->lo_ctl_mutex);
363 ++ mutex_unlock(&loop_ctl_mutex);
364 + break;
365 + case LOOP_GET_STATUS:
366 +- mutex_lock(&lo->lo_ctl_mutex);
367 ++ mutex_lock(&loop_ctl_mutex);
368 + err = loop_get_status_compat(
369 + lo, (struct compat_loop_info __user *) arg);
370 +- mutex_unlock(&lo->lo_ctl_mutex);
371 ++ mutex_unlock(&loop_ctl_mutex);
372 + break;
373 + case LOOP_SET_CAPACITY:
374 + case LOOP_CLR_FD:
375 +@@ -1570,9 +1570,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
376 + static int lo_open(struct block_device *bdev, fmode_t mode)
377 + {
378 + struct loop_device *lo;
379 +- int err = 0;
380 ++ int err;
381 +
382 +- mutex_lock(&loop_index_mutex);
383 ++ err = mutex_lock_killable(&loop_ctl_mutex);
384 ++ if (err)
385 ++ return err;
386 + lo = bdev->bd_disk->private_data;
387 + if (!lo) {
388 + err = -ENXIO;
389 +@@ -1581,18 +1583,20 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
390 +
391 + atomic_inc(&lo->lo_refcnt);
392 + out:
393 +- mutex_unlock(&loop_index_mutex);
394 ++ mutex_unlock(&loop_ctl_mutex);
395 + return err;
396 + }
397 +
398 +-static void __lo_release(struct loop_device *lo)
399 ++static void lo_release(struct gendisk *disk, fmode_t mode)
400 + {
401 ++ struct loop_device *lo;
402 + int err;
403 +
404 ++ mutex_lock(&loop_ctl_mutex);
405 ++ lo = disk->private_data;
406 + if (atomic_dec_return(&lo->lo_refcnt))
407 +- return;
408 ++ goto out_unlock;
409 +
410 +- mutex_lock(&lo->lo_ctl_mutex);
411 + if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
412 + /*
413 + * In autoclear mode, stop the loop thread
414 +@@ -1609,14 +1613,8 @@ static void __lo_release(struct loop_device *lo)
415 + loop_flush(lo);
416 + }
417 +
418 +- mutex_unlock(&lo->lo_ctl_mutex);
419 +-}
420 +-
421 +-static void lo_release(struct gendisk *disk, fmode_t mode)
422 +-{
423 +- mutex_lock(&loop_index_mutex);
424 +- __lo_release(disk->private_data);
425 +- mutex_unlock(&loop_index_mutex);
426 ++out_unlock:
427 ++ mutex_unlock(&loop_ctl_mutex);
428 + }
429 +
430 + static const struct block_device_operations lo_fops = {
431 +@@ -1655,10 +1653,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
432 + struct loop_device *lo = ptr;
433 + struct loop_func_table *xfer = data;
434 +
435 +- mutex_lock(&lo->lo_ctl_mutex);
436 ++ mutex_lock(&loop_ctl_mutex);
437 + if (lo->lo_encryption == xfer)
438 + loop_release_xfer(lo);
439 +- mutex_unlock(&lo->lo_ctl_mutex);
440 ++ mutex_unlock(&loop_ctl_mutex);
441 + return 0;
442 + }
443 +
444 +@@ -1820,7 +1818,6 @@ static int loop_add(struct loop_device **l, int i)
445 + if (!part_shift)
446 + disk->flags |= GENHD_FL_NO_PART_SCAN;
447 + disk->flags |= GENHD_FL_EXT_DEVT;
448 +- mutex_init(&lo->lo_ctl_mutex);
449 + atomic_set(&lo->lo_refcnt, 0);
450 + lo->lo_number = i;
451 + spin_lock_init(&lo->lo_lock);
452 +@@ -1899,7 +1896,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
453 + struct kobject *kobj;
454 + int err;
455 +
456 +- mutex_lock(&loop_index_mutex);
457 ++ mutex_lock(&loop_ctl_mutex);
458 + err = loop_lookup(&lo, MINOR(dev) >> part_shift);
459 + if (err < 0)
460 + err = loop_add(&lo, MINOR(dev) >> part_shift);
461 +@@ -1907,7 +1904,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
462 + kobj = NULL;
463 + else
464 + kobj = get_disk(lo->lo_disk);
465 +- mutex_unlock(&loop_index_mutex);
466 ++ mutex_unlock(&loop_ctl_mutex);
467 +
468 + *part = 0;
469 + return kobj;
470 +@@ -1917,9 +1914,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
471 + unsigned long parm)
472 + {
473 + struct loop_device *lo;
474 +- int ret = -ENOSYS;
475 ++ int ret;
476 ++
477 ++ ret = mutex_lock_killable(&loop_ctl_mutex);
478 ++ if (ret)
479 ++ return ret;
480 +
481 +- mutex_lock(&loop_index_mutex);
482 ++ ret = -ENOSYS;
483 + switch (cmd) {
484 + case LOOP_CTL_ADD:
485 + ret = loop_lookup(&lo, parm);
486 +@@ -1933,19 +1934,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
487 + ret = loop_lookup(&lo, parm);
488 + if (ret < 0)
489 + break;
490 +- mutex_lock(&lo->lo_ctl_mutex);
491 + if (lo->lo_state != Lo_unbound) {
492 + ret = -EBUSY;
493 +- mutex_unlock(&lo->lo_ctl_mutex);
494 + break;
495 + }
496 + if (atomic_read(&lo->lo_refcnt) > 0) {
497 + ret = -EBUSY;
498 +- mutex_unlock(&lo->lo_ctl_mutex);
499 + break;
500 + }
501 + lo->lo_disk->private_data = NULL;
502 +- mutex_unlock(&lo->lo_ctl_mutex);
503 + idr_remove(&loop_index_idr, lo->lo_number);
504 + loop_remove(lo);
505 + break;
506 +@@ -1955,7 +1952,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
507 + break;
508 + ret = loop_add(&lo, -1);
509 + }
510 +- mutex_unlock(&loop_index_mutex);
511 ++ mutex_unlock(&loop_ctl_mutex);
512 +
513 + return ret;
514 + }
515 +@@ -2038,10 +2035,10 @@ static int __init loop_init(void)
516 + THIS_MODULE, loop_probe, NULL, NULL);
517 +
518 + /* pre-create number of devices given by config or max_loop */
519 +- mutex_lock(&loop_index_mutex);
520 ++ mutex_lock(&loop_ctl_mutex);
521 + for (i = 0; i < nr; i++)
522 + loop_add(&lo, i);
523 +- mutex_unlock(&loop_index_mutex);
524 ++ mutex_unlock(&loop_ctl_mutex);
525 +
526 + printk(KERN_INFO "loop: module loaded\n");
527 + return 0;
528 +diff --git a/drivers/block/loop.h b/drivers/block/loop.h
529 +index 60f0fd2c0c65..a923e74495ce 100644
530 +--- a/drivers/block/loop.h
531 ++++ b/drivers/block/loop.h
532 +@@ -55,7 +55,6 @@ struct loop_device {
533 +
534 + spinlock_t lo_lock;
535 + int lo_state;
536 +- struct mutex lo_ctl_mutex;
537 + struct kthread_worker worker;
538 + struct task_struct *worker_task;
539 + bool use_dio;
540 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
541 +index 7a2e23d6bfdd..b2da2382d544 100644
542 +--- a/drivers/char/ipmi/ipmi_ssif.c
543 ++++ b/drivers/char/ipmi/ipmi_ssif.c
544 +@@ -637,8 +637,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
545 +
546 + /* Remove the multi-part read marker. */
547 + len -= 2;
548 ++ data += 2;
549 + for (i = 0; i < len; i++)
550 +- ssif_info->data[i] = data[i+2];
551 ++ ssif_info->data[i] = data[i];
552 + ssif_info->multi_len = len;
553 + ssif_info->multi_pos = 1;
554 +
555 +@@ -666,8 +667,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
556 + }
557 +
558 + blocknum = data[0];
559 ++ len--;
560 ++ data++;
561 ++
562 ++ if (blocknum != 0xff && len != 31) {
563 ++ /* All blocks but the last must have 31 data bytes. */
564 ++ result = -EIO;
565 ++ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
566 ++ pr_info("Received middle message <31\n");
567 +
568 +- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
569 ++ goto continue_op;
570 ++ }
571 ++
572 ++ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
573 + /* Received message too big, abort the operation. */
574 + result = -E2BIG;
575 + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
576 +@@ -676,16 +688,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
577 + goto continue_op;
578 + }
579 +
580 +- /* Remove the blocknum from the data. */
581 +- len--;
582 + for (i = 0; i < len; i++)
583 +- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
584 ++ ssif_info->data[i + ssif_info->multi_len] = data[i];
585 + ssif_info->multi_len += len;
586 + if (blocknum == 0xff) {
587 + /* End of read */
588 + len = ssif_info->multi_len;
589 + data = ssif_info->data;
590 +- } else if (blocknum + 1 != ssif_info->multi_pos) {
591 ++ } else if (blocknum != ssif_info->multi_pos) {
592 + /*
593 + * Out of sequence block, just abort. Block
594 + * numbers start at zero for the second block,
595 +@@ -713,6 +723,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
596 + }
597 + }
598 +
599 ++ continue_op:
600 + if (result < 0) {
601 + ssif_inc_stat(ssif_info, receive_errors);
602 + } else {
603 +@@ -720,8 +731,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
604 + ssif_inc_stat(ssif_info, received_message_parts);
605 + }
606 +
607 +-
608 +- continue_op:
609 + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
610 + pr_info(PFX "DONE 1: state = %d, result=%d.\n",
611 + ssif_info->ssif_state, result);
612 +diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
613 +index a0df83e6b84b..46c05c9a9354 100644
614 +--- a/drivers/clk/imx/clk-imx6q.c
615 ++++ b/drivers/clk/imx/clk-imx6q.c
616 +@@ -239,8 +239,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
617 + * lvds1_gate and lvds2_gate are pseudo-gates. Both can be
618 + * independently configured as clock inputs or outputs. We treat
619 + * the "output_enable" bit as a gate, even though it's really just
620 +- * enabling clock output.
621 ++ * enabling clock output. Initially the gate bits are cleared, as
622 ++ * otherwise the exclusive configuration gets locked in the setup done
623 ++ * by software running before the clock driver, with no way to change
624 ++ * it.
625 + */
626 ++ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
627 + clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
628 + clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
629 +
630 +diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
631 +index 07135e009d8b..601a6c3acc7f 100644
632 +--- a/drivers/cpuidle/cpuidle-pseries.c
633 ++++ b/drivers/cpuidle/cpuidle-pseries.c
634 +@@ -240,7 +240,13 @@ static int pseries_idle_probe(void)
635 + return -ENODEV;
636 +
637 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
638 +- if (lppaca_shared_proc(get_lppaca())) {
639 ++ /*
640 ++ * Use local_paca instead of get_lppaca() since
641 ++ * preemption is not disabled, and it is not required in
642 ++ * fact, since lppaca_ptr does not need to be the value
643 ++ * associated to the current CPU, it can be from any CPU.
644 ++ */
645 ++ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
646 + cpuidle_state_table = shared_states;
647 + max_idle_state = ARRAY_SIZE(shared_states);
648 + } else {
649 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
650 +index 5ad036741b99..e449f22c8f29 100644
651 +--- a/drivers/gpu/drm/drm_fb_helper.c
652 ++++ b/drivers/gpu/drm/drm_fb_helper.c
653 +@@ -1109,9 +1109,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
654 + struct drm_framebuffer *fb = fb_helper->fb;
655 + int depth;
656 +
657 +- if (var->pixclock != 0 || in_dbg_master())
658 ++ if (in_dbg_master())
659 + return -EINVAL;
660 +
661 ++ if (var->pixclock != 0) {
662 ++ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
663 ++ var->pixclock = 0;
664 ++ }
665 ++
666 + /* Need to resize the fb object !!! */
667 + if (var->bits_per_pixel > fb->bits_per_pixel ||
668 + var->xres > fb->width || var->yres > fb->height ||
669 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
670 +index 54c308e6704f..04248394843e 100644
671 +--- a/drivers/md/dm-kcopyd.c
672 ++++ b/drivers/md/dm-kcopyd.c
673 +@@ -55,15 +55,17 @@ struct dm_kcopyd_client {
674 + struct dm_kcopyd_throttle *throttle;
675 +
676 + /*
677 +- * We maintain three lists of jobs:
678 ++ * We maintain four lists of jobs:
679 + *
680 + * i) jobs waiting for pages
681 + * ii) jobs that have pages, and are waiting for the io to be issued.
682 +- * iii) jobs that have completed.
683 ++ * iii) jobs that don't need to do any IO and just run a callback
684 ++ * iv) jobs that have completed.
685 + *
686 +- * All three of these are protected by job_lock.
687 ++ * All four of these are protected by job_lock.
688 + */
689 + spinlock_t job_lock;
690 ++ struct list_head callback_jobs;
691 + struct list_head complete_jobs;
692 + struct list_head io_jobs;
693 + struct list_head pages_jobs;
694 +@@ -583,6 +585,7 @@ static void do_work(struct work_struct *work)
695 + struct dm_kcopyd_client *kc = container_of(work,
696 + struct dm_kcopyd_client, kcopyd_work);
697 + struct blk_plug plug;
698 ++ unsigned long flags;
699 +
700 + /*
701 + * The order that these are called is *very* important.
702 +@@ -591,6 +594,10 @@ static void do_work(struct work_struct *work)
703 + * list. io jobs call wake when they complete and it all
704 + * starts again.
705 + */
706 ++ spin_lock_irqsave(&kc->job_lock, flags);
707 ++ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
708 ++ spin_unlock_irqrestore(&kc->job_lock, flags);
709 ++
710 + blk_start_plug(&plug);
711 + process_jobs(&kc->complete_jobs, kc, run_complete_job);
712 + process_jobs(&kc->pages_jobs, kc, run_pages_job);
713 +@@ -608,7 +615,7 @@ static void dispatch_job(struct kcopyd_job *job)
714 + struct dm_kcopyd_client *kc = job->kc;
715 + atomic_inc(&kc->nr_jobs);
716 + if (unlikely(!job->source.count))
717 +- push(&kc->complete_jobs, job);
718 ++ push(&kc->callback_jobs, job);
719 + else if (job->pages == &zero_page_list)
720 + push(&kc->io_jobs, job);
721 + else
722 +@@ -795,7 +802,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
723 + job->read_err = read_err;
724 + job->write_err = write_err;
725 +
726 +- push(&kc->complete_jobs, job);
727 ++ push(&kc->callback_jobs, job);
728 + wake(kc);
729 + }
730 + EXPORT_SYMBOL(dm_kcopyd_do_callback);
731 +@@ -825,6 +832,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
732 + return ERR_PTR(-ENOMEM);
733 +
734 + spin_lock_init(&kc->job_lock);
735 ++ INIT_LIST_HEAD(&kc->callback_jobs);
736 + INIT_LIST_HEAD(&kc->complete_jobs);
737 + INIT_LIST_HEAD(&kc->io_jobs);
738 + INIT_LIST_HEAD(&kc->pages_jobs);
739 +@@ -874,6 +882,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
740 + /* Wait for completion of all jobs submitted by this client. */
741 + wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
742 +
743 ++ BUG_ON(!list_empty(&kc->callback_jobs));
744 + BUG_ON(!list_empty(&kc->complete_jobs));
745 + BUG_ON(!list_empty(&kc->io_jobs));
746 + BUG_ON(!list_empty(&kc->pages_jobs));
747 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
748 +index e108deebbaaa..5d3797728b9c 100644
749 +--- a/drivers/md/dm-snap.c
750 ++++ b/drivers/md/dm-snap.c
751 +@@ -19,6 +19,7 @@
752 + #include <linux/vmalloc.h>
753 + #include <linux/log2.h>
754 + #include <linux/dm-kcopyd.h>
755 ++#include <linux/semaphore.h>
756 +
757 + #include "dm.h"
758 +
759 +@@ -105,6 +106,9 @@ struct dm_snapshot {
760 + /* The on disk metadata handler */
761 + struct dm_exception_store *store;
762 +
763 ++ /* Maximum number of in-flight COW jobs. */
764 ++ struct semaphore cow_count;
765 ++
766 + struct dm_kcopyd_client *kcopyd_client;
767 +
768 + /* Wait for events based on state_bits */
769 +@@ -145,6 +149,19 @@ struct dm_snapshot {
770 + #define RUNNING_MERGE 0
771 + #define SHUTDOWN_MERGE 1
772 +
773 ++/*
774 ++ * Maximum number of chunks being copied on write.
775 ++ *
776 ++ * The value was decided experimentally as a trade-off between memory
777 ++ * consumption, stalling the kernel's workqueues and maintaining a high enough
778 ++ * throughput.
779 ++ */
780 ++#define DEFAULT_COW_THRESHOLD 2048
781 ++
782 ++static int cow_threshold = DEFAULT_COW_THRESHOLD;
783 ++module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
784 ++MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
785 ++
786 + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
787 + "A percentage of time allocated for copy on write");
788 +
789 +@@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
790 + goto bad_hash_tables;
791 + }
792 +
793 ++ sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
794 ++
795 + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
796 + if (IS_ERR(s->kcopyd_client)) {
797 + r = PTR_ERR(s->kcopyd_client);
798 +@@ -1563,6 +1582,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
799 + }
800 + list_add(&pe->out_of_order_entry, lh);
801 + }
802 ++ up(&s->cow_count);
803 + }
804 +
805 + /*
806 +@@ -1586,6 +1606,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
807 + dest.count = src.count;
808 +
809 + /* Hand over to kcopyd */
810 ++ down(&s->cow_count);
811 + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
812 + }
813 +
814 +@@ -1606,6 +1627,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
815 + pe->full_bio_end_io = bio->bi_end_io;
816 + pe->full_bio_private = bio->bi_private;
817 +
818 ++ down(&s->cow_count);
819 + callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
820 + copy_callback, pe);
821 +
822 +diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
823 +index 251a556112a9..280b5ffea592 100644
824 +--- a/drivers/media/firewire/firedtv-avc.c
825 ++++ b/drivers/media/firewire/firedtv-avc.c
826 +@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
827 + return r->operand[7];
828 + }
829 +
830 +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
831 ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
832 ++ unsigned int *len)
833 + {
834 + struct avc_command_frame *c = (void *)fdtv->avc_data;
835 + struct avc_response_frame *r = (void *)fdtv->avc_data;
836 +@@ -1009,7 +1010,8 @@ out:
837 + return ret;
838 + }
839 +
840 +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
841 ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
842 ++ unsigned int *len)
843 + {
844 + struct avc_command_frame *c = (void *)fdtv->avc_data;
845 + struct avc_response_frame *r = (void *)fdtv->avc_data;
846 +diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
847 +index 345d1eda8c05..5b18a08c6285 100644
848 +--- a/drivers/media/firewire/firedtv.h
849 ++++ b/drivers/media/firewire/firedtv.h
850 +@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
851 + struct dvb_diseqc_master_cmd *diseqcmd);
852 + void avc_remote_ctrl_work(struct work_struct *work);
853 + int avc_register_remote_control(struct firedtv *fdtv);
854 +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
855 +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
856 ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
857 ++ unsigned int *len);
858 ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
859 ++ unsigned int *len);
860 + int avc_ca_reset(struct firedtv *fdtv);
861 + int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
862 + int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
863 +diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
864 +index 83cc6d3b4784..81ba454a6d95 100644
865 +--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
866 ++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
867 +@@ -863,8 +863,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
868 + "%s-vid-cap", dev->v4l2_dev.name);
869 +
870 + if (IS_ERR(dev->kthread_vid_cap)) {
871 ++ int err = PTR_ERR(dev->kthread_vid_cap);
872 ++
873 ++ dev->kthread_vid_cap = NULL;
874 + v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
875 +- return PTR_ERR(dev->kthread_vid_cap);
876 ++ return err;
877 + }
878 + *pstreaming = true;
879 + vivid_grab_controls(dev, true);
880 +diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
881 +index c2c46dcdbe95..2c5dbdcb576a 100644
882 +--- a/drivers/media/platform/vivid/vivid-kthread-out.c
883 ++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
884 +@@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
885 + "%s-vid-out", dev->v4l2_dev.name);
886 +
887 + if (IS_ERR(dev->kthread_vid_out)) {
888 ++ int err = PTR_ERR(dev->kthread_vid_out);
889 ++
890 ++ dev->kthread_vid_out = NULL;
891 + v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
892 +- return PTR_ERR(dev->kthread_vid_out);
893 ++ return err;
894 + }
895 + *pstreaming = true;
896 + vivid_grab_controls(dev, true);
897 +diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
898 +index 1678b730dba2..2e82f520a869 100644
899 +--- a/drivers/media/platform/vivid/vivid-vid-common.c
900 ++++ b/drivers/media/platform/vivid/vivid-vid-common.c
901 +@@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
902 + .type = V4L2_DV_BT_656_1120,
903 + /* keep this initialization for compatibility with GCC < 4.4.6 */
904 + .reserved = { 0 },
905 +- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
906 ++ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
907 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
908 + V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
909 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
910 +diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
911 +index 6cfcdcea27e0..873948e429e8 100644
912 +--- a/drivers/media/usb/em28xx/em28xx-video.c
913 ++++ b/drivers/media/usb/em28xx/em28xx-video.c
914 +@@ -930,6 +930,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
915 +
916 + em28xx_videodbg("%s\n", __func__);
917 +
918 ++ dev->v4l2->field_count = 0;
919 ++
920 + /* Make sure streaming is not already in progress for this type
921 + of filehandle (e.g. video, vbi) */
922 + rc = res_get(dev, vq->type);
923 +@@ -1149,8 +1151,6 @@ static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
924 + {
925 + struct em28xx *dev = priv;
926 +
927 +- dev->v4l2->field_count = 0;
928 +-
929 + /*
930 + * In the case of non-AC97 volume controls, we still need
931 + * to do some setups at em28xx, in order to mute/unmute
932 +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
933 +index 8ce9c63dfc59..e0041fcfa783 100644
934 +--- a/drivers/media/v4l2-core/videobuf2-core.c
935 ++++ b/drivers/media/v4l2-core/videobuf2-core.c
936 +@@ -1976,9 +1976,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
937 + return -EINVAL;
938 + }
939 + }
940 ++
941 ++ mutex_lock(&q->mmap_lock);
942 ++
943 + if (vb2_fileio_is_active(q)) {
944 + dprintk(1, "mmap: file io in progress\n");
945 +- return -EBUSY;
946 ++ ret = -EBUSY;
947 ++ goto unlock;
948 + }
949 +
950 + /*
951 +@@ -1986,7 +1990,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
952 + */
953 + ret = __find_plane_by_offset(q, off, &buffer, &plane);
954 + if (ret)
955 +- return ret;
956 ++ goto unlock;
957 +
958 + vb = q->bufs[buffer];
959 +
960 +@@ -1999,11 +2003,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
961 + if (length < (vma->vm_end - vma->vm_start)) {
962 + dprintk(1,
963 + "MMAP invalid, as it would overflow buffer length\n");
964 +- return -EINVAL;
965 ++ ret = -EINVAL;
966 ++ goto unlock;
967 + }
968 +
969 +- mutex_lock(&q->mmap_lock);
970 + ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
971 ++
972 ++unlock:
973 + mutex_unlock(&q->mmap_lock);
974 + if (ret)
975 + return ret;
976 +diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
977 +index 5628a6b5b19b..c5c320efc7b4 100644
978 +--- a/drivers/mfd/tps6586x.c
979 ++++ b/drivers/mfd/tps6586x.c
980 +@@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
981 + return 0;
982 + }
983 +
984 ++static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
985 ++{
986 ++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
987 ++
988 ++ if (tps6586x->client->irq)
989 ++ disable_irq(tps6586x->client->irq);
990 ++
991 ++ return 0;
992 ++}
993 ++
994 ++static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
995 ++{
996 ++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
997 ++
998 ++ if (tps6586x->client->irq)
999 ++ enable_irq(tps6586x->client->irq);
1000 ++
1001 ++ return 0;
1002 ++}
1003 ++
1004 ++static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
1005 ++ tps6586x_i2c_resume);
1006 ++
1007 + static const struct i2c_device_id tps6586x_id_table[] = {
1008 + { "tps6586x", 0 },
1009 + { },
1010 +@@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
1011 + .driver = {
1012 + .name = "tps6586x",
1013 + .of_match_table = of_match_ptr(tps6586x_of_match),
1014 ++ .pm = &tps6586x_pm_ops,
1015 + },
1016 + .probe = tps6586x_i2c_probe,
1017 + .remove = tps6586x_i2c_remove,
1018 +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
1019 +index bf62e429f7fc..98be9eb3184b 100644
1020 +--- a/drivers/mmc/host/atmel-mci.c
1021 ++++ b/drivers/mmc/host/atmel-mci.c
1022 +@@ -1840,13 +1840,14 @@ static void atmci_tasklet_func(unsigned long priv)
1023 + }
1024 +
1025 + atmci_request_end(host, host->mrq);
1026 +- state = STATE_IDLE;
1027 ++ goto unlock; /* atmci_request_end() sets host->state */
1028 + break;
1029 + }
1030 + } while (state != prev_state);
1031 +
1032 + host->state = state;
1033 +
1034 ++unlock:
1035 + spin_unlock(&host->lock);
1036 + }
1037 +
1038 +diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
1039 +index 25a0ad5102d6..855cf8c15c8a 100644
1040 +--- a/drivers/net/ethernet/intel/e1000e/ptp.c
1041 ++++ b/drivers/net/ethernet/intel/e1000e/ptp.c
1042 +@@ -111,10 +111,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
1043 + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
1044 + ptp_clock_info);
1045 + unsigned long flags;
1046 +- u64 ns;
1047 ++ u64 cycles, ns;
1048 +
1049 + spin_lock_irqsave(&adapter->systim_lock, flags);
1050 +- ns = timecounter_read(&adapter->tc);
1051 ++
1052 ++ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
1053 ++ cycles = adapter->cc.read(&adapter->cc);
1054 ++ ns = timecounter_cyc2time(&adapter->tc, cycles);
1055 ++
1056 + spin_unlock_irqrestore(&adapter->systim_lock, flags);
1057 +
1058 + *ts = ns_to_timespec64(ns);
1059 +@@ -170,9 +174,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
1060 + systim_overflow_work.work);
1061 + struct e1000_hw *hw = &adapter->hw;
1062 + struct timespec64 ts;
1063 ++ u64 ns;
1064 +
1065 +- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
1066 ++ /* Update the timecounter */
1067 ++ ns = timecounter_read(&adapter->tc);
1068 +
1069 ++ ts = ns_to_timespec64(ns);
1070 + e_dbg("SYSTIM overflow check at %lld.%09lu\n",
1071 + (long long) ts.tv_sec, ts.tv_nsec);
1072 +
1073 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1074 +index 93543e176829..8f40e121f7d4 100644
1075 +--- a/drivers/net/ethernet/realtek/r8169.c
1076 ++++ b/drivers/net/ethernet/realtek/r8169.c
1077 +@@ -324,6 +324,8 @@ enum cfg_version {
1078 + };
1079 +
1080 + static const struct pci_device_id rtl8169_pci_tbl[] = {
1081 ++ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
1082 ++ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
1083 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
1084 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
1085 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
1086 +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
1087 +index f96f7b865267..7c1defaef3f5 100644
1088 +--- a/drivers/platform/x86/asus-wmi.c
1089 ++++ b/drivers/platform/x86/asus-wmi.c
1090 +@@ -2084,7 +2084,8 @@ static int asus_wmi_add(struct platform_device *pdev)
1091 + err = asus_wmi_backlight_init(asus);
1092 + if (err && err != -ENODEV)
1093 + goto fail_backlight;
1094 +- }
1095 ++ } else
1096 ++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
1097 +
1098 + status = wmi_install_notify_handler(asus->driver->event_guid,
1099 + asus_wmi_notify, asus);
1100 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
1101 +index 741509b35617..14f32c114c55 100644
1102 +--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
1103 ++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
1104 +@@ -1273,7 +1273,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
1105 +
1106 + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
1107 + ld = MR_TargetIdToLdGet(ldCount, drv_map);
1108 +- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
1109 ++ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
1110 + lbInfo[ldCount].loadBalanceFlag = 0;
1111 + continue;
1112 + }
1113 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1114 +index 213944ed64d9..3d3bfa814093 100644
1115 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
1116 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1117 +@@ -1758,7 +1758,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1118 + device_id < instance->fw_supported_vd_count)) {
1119 +
1120 + ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1121 +- if (ld >= instance->fw_supported_vd_count)
1122 ++ if (ld >= instance->fw_supported_vd_count - 1)
1123 + fp_possible = 0;
1124 +
1125 + raid = MR_LdRaidGet(ld, local_map_ptr);
1126 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1127 +index 6fffb73766de..ec80a0077ace 100644
1128 +--- a/drivers/scsi/sd.c
1129 ++++ b/drivers/scsi/sd.c
1130 +@@ -207,6 +207,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
1131 + sp = buffer_data[0] & 0x80 ? 1 : 0;
1132 + buffer_data[0] &= ~0x80;
1133 +
1134 ++ /*
1135 ++ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
1136 ++ * received mode parameter buffer before doing MODE SELECT.
1137 ++ */
1138 ++ data.device_specific = 0;
1139 ++
1140 + if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
1141 + SD_MAX_RETRIES, &data, &sshdr)) {
1142 + if (scsi_sense_valid(&sshdr))
1143 +diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
1144 +index 9413e1a949e5..5af4d6a03d6e 100644
1145 +--- a/drivers/target/target_core_spc.c
1146 ++++ b/drivers/target/target_core_spc.c
1147 +@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
1148 +
1149 + buf[7] = 0x2; /* CmdQue=1 */
1150 +
1151 +- memcpy(&buf[8], "LIO-ORG ", 8);
1152 +- memset(&buf[16], 0x20, 16);
1153 ++ /*
1154 ++ * ASCII data fields described as being left-aligned shall have any
1155 ++ * unused bytes at the end of the field (i.e., highest offset) and the
1156 ++ * unused bytes shall be filled with ASCII space characters (20h).
1157 ++ */
1158 ++ memset(&buf[8], 0x20, 8 + 16 + 4);
1159 ++ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
1160 + memcpy(&buf[16], dev->t10_wwn.model,
1161 +- min_t(size_t, strlen(dev->t10_wwn.model), 16));
1162 ++ strnlen(dev->t10_wwn.model, 16));
1163 + memcpy(&buf[32], dev->t10_wwn.revision,
1164 +- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
1165 ++ strnlen(dev->t10_wwn.revision, 4));
1166 + buf[4] = 31; /* Set additional length to 31 */
1167 +
1168 + return 0;
1169 +@@ -251,7 +256,9 @@ check_t10_vend_desc:
1170 + buf[off] = 0x2; /* ASCII */
1171 + buf[off+1] = 0x1; /* T10 Vendor ID */
1172 + buf[off+2] = 0x0;
1173 +- memcpy(&buf[off+4], "LIO-ORG", 8);
1174 ++ /* left align Vendor ID and pad with spaces */
1175 ++ memset(&buf[off+4], 0x20, 8);
1176 ++ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
1177 + /* Extra Byte for NULL Terminator */
1178 + id_len++;
1179 + /* Identifier Length */
1180 +diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
1181 +index ad7eba5ca380..34234c233851 100644
1182 +--- a/drivers/tty/tty_ldsem.c
1183 ++++ b/drivers/tty/tty_ldsem.c
1184 +@@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
1185 + if (!locked)
1186 + ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
1187 + list_del(&waiter.list);
1188 ++
1189 ++ /*
1190 ++ * In case of timeout, wake up every reader who gave the right of way
1191 ++ * to writer. Prevent separation readers into two groups:
1192 ++ * one that helds semaphore and another that sleeps.
1193 ++ * (in case of no contention with a writer)
1194 ++ */
1195 ++ if (!locked && list_empty(&sem->write_wait))
1196 ++ __ldsem_wake_readers(sem);
1197 ++
1198 + raw_spin_unlock_irq(&sem->wait_lock);
1199 +
1200 + __set_task_state(tsk, TASK_RUNNING);
1201 +diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1202 +index 34ab4f950f0a..0c1c34ff40a9 100644
1203 +--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1204 ++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1205 +@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
1206 +
1207 + int r = 0;
1208 +
1209 ++ memset(&p, 0, sizeof(p));
1210 ++
1211 + switch (cmd) {
1212 + case OMAPFB_SYNC_GFX:
1213 + DBG("ioctl SYNC_GFX\n");
1214 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1215 +index f80a0af68736..78722aaffecd 100644
1216 +--- a/fs/btrfs/disk-io.c
1217 ++++ b/fs/btrfs/disk-io.c
1218 +@@ -4111,6 +4111,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
1219 + spin_lock(&fs_info->ordered_root_lock);
1220 + }
1221 + spin_unlock(&fs_info->ordered_root_lock);
1222 ++
1223 ++ /*
1224 ++ * We need this here because if we've been flipped read-only we won't
1225 ++ * get sync() from the umount, so we need to make sure any ordered
1226 ++ * extents that haven't had their dirty pages IO start writeout yet
1227 ++ * actually get run and error out properly.
1228 ++ */
1229 ++ btrfs_wait_ordered_roots(fs_info, -1);
1230 + }
1231 +
1232 + static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
1233 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1234 +index f661d80474be..4b2f609f376d 100644
1235 +--- a/fs/f2fs/checkpoint.c
1236 ++++ b/fs/f2fs/checkpoint.c
1237 +@@ -58,6 +58,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
1238 + .rw = READ_SYNC | REQ_META | REQ_PRIO,
1239 + .blk_addr = index,
1240 + .encrypted_page = NULL,
1241 ++ .is_meta = is_meta,
1242 + };
1243 +
1244 + if (unlikely(!is_meta))
1245 +@@ -74,8 +75,10 @@ repeat:
1246 + fio.page = page;
1247 +
1248 + if (f2fs_submit_page_bio(&fio)) {
1249 +- f2fs_put_page(page, 1);
1250 +- goto repeat;
1251 ++ memset(page_address(page), 0, PAGE_SIZE);
1252 ++ f2fs_stop_checkpoint(sbi);
1253 ++ f2fs_bug_on(sbi, 1);
1254 ++ return page;
1255 + }
1256 +
1257 + lock_page(page);
1258 +@@ -106,7 +109,8 @@ struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
1259 + return __get_meta_page(sbi, index, false);
1260 + }
1261 +
1262 +-bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
1263 ++bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
1264 ++ block_t blkaddr, int type)
1265 + {
1266 + switch (type) {
1267 + case META_NAT:
1268 +@@ -126,8 +130,20 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
1269 + return false;
1270 + break;
1271 + case META_POR:
1272 ++ case DATA_GENERIC:
1273 + if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
1274 +- blkaddr < MAIN_BLKADDR(sbi)))
1275 ++ blkaddr < MAIN_BLKADDR(sbi))) {
1276 ++ if (type == DATA_GENERIC) {
1277 ++ f2fs_msg(sbi->sb, KERN_WARNING,
1278 ++ "access invalid blkaddr:%u", blkaddr);
1279 ++ WARN_ON(1);
1280 ++ }
1281 ++ return false;
1282 ++ }
1283 ++ break;
1284 ++ case META_GENERIC:
1285 ++ if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
1286 ++ blkaddr >= MAIN_BLKADDR(sbi)))
1287 + return false;
1288 + break;
1289 + default:
1290 +@@ -151,6 +167,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
1291 + .type = META,
1292 + .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
1293 + .encrypted_page = NULL,
1294 ++ .is_meta = (type != META_POR),
1295 + };
1296 +
1297 + if (unlikely(type == META_POR))
1298 +@@ -158,7 +175,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
1299 +
1300 + for (; nrpages-- > 0; blkno++) {
1301 +
1302 +- if (!is_valid_blkaddr(sbi, blkno, type))
1303 ++ if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
1304 + goto out;
1305 +
1306 + switch (type) {
1307 +@@ -601,54 +618,73 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
1308 + }
1309 + }
1310 +
1311 +-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1312 +- block_t cp_addr, unsigned long long *version)
1313 ++static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1314 ++ struct f2fs_checkpoint **cp_block, struct page **cp_page,
1315 ++ unsigned long long *version)
1316 + {
1317 +- struct page *cp_page_1, *cp_page_2 = NULL;
1318 + unsigned long blk_size = sbi->blocksize;
1319 +- struct f2fs_checkpoint *cp_block;
1320 +- unsigned long long cur_version = 0, pre_version = 0;
1321 +- size_t crc_offset;
1322 ++ size_t crc_offset = 0;
1323 + __u32 crc = 0;
1324 +
1325 +- /* Read the 1st cp block in this CP pack */
1326 +- cp_page_1 = get_meta_page(sbi, cp_addr);
1327 ++ *cp_page = get_meta_page(sbi, cp_addr);
1328 ++ *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
1329 +
1330 +- /* get the version number */
1331 +- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
1332 +- crc_offset = le32_to_cpu(cp_block->checksum_offset);
1333 +- if (crc_offset >= blk_size)
1334 +- goto invalid_cp1;
1335 +-
1336 +- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
1337 +- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
1338 +- goto invalid_cp1;
1339 ++ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
1340 ++ if (crc_offset >= blk_size) {
1341 ++ f2fs_put_page(*cp_page, 1);
1342 ++ f2fs_msg(sbi->sb, KERN_WARNING,
1343 ++ "invalid crc_offset: %zu", crc_offset);
1344 ++ return -EINVAL;
1345 ++ }
1346 +
1347 +- pre_version = cur_cp_version(cp_block);
1348 ++ crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
1349 ++ + crc_offset)));
1350 ++ if (!f2fs_crc_valid(crc, *cp_block, crc_offset)) {
1351 ++ f2fs_put_page(*cp_page, 1);
1352 ++ f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
1353 ++ return -EINVAL;
1354 ++ }
1355 +
1356 +- /* Read the 2nd cp block in this CP pack */
1357 +- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
1358 +- cp_page_2 = get_meta_page(sbi, cp_addr);
1359 ++ *version = cur_cp_version(*cp_block);
1360 ++ return 0;
1361 ++}
1362 +
1363 +- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
1364 +- crc_offset = le32_to_cpu(cp_block->checksum_offset);
1365 +- if (crc_offset >= blk_size)
1366 +- goto invalid_cp2;
1367 ++static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1368 ++ block_t cp_addr, unsigned long long *version)
1369 ++{
1370 ++ struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
1371 ++ struct f2fs_checkpoint *cp_block = NULL;
1372 ++ unsigned long long cur_version = 0, pre_version = 0;
1373 ++ int err;
1374 +
1375 +- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
1376 +- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
1377 +- goto invalid_cp2;
1378 ++ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1379 ++ &cp_page_1, version);
1380 ++ if (err)
1381 ++ return NULL;
1382 ++
1383 ++ if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
1384 ++ sbi->blocks_per_seg) {
1385 ++ f2fs_msg(sbi->sb, KERN_WARNING,
1386 ++ "invalid cp_pack_total_block_count:%u",
1387 ++ le32_to_cpu(cp_block->cp_pack_total_block_count));
1388 ++ goto invalid_cp;
1389 ++ }
1390 ++ pre_version = *version;
1391 +
1392 +- cur_version = cur_cp_version(cp_block);
1393 ++ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
1394 ++ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1395 ++ &cp_page_2, version);
1396 ++ if (err)
1397 ++ goto invalid_cp;
1398 ++ cur_version = *version;
1399 +
1400 + if (cur_version == pre_version) {
1401 + *version = cur_version;
1402 + f2fs_put_page(cp_page_2, 1);
1403 + return cp_page_1;
1404 + }
1405 +-invalid_cp2:
1406 + f2fs_put_page(cp_page_2, 1);
1407 +-invalid_cp1:
1408 ++invalid_cp:
1409 + f2fs_put_page(cp_page_1, 1);
1410 + return NULL;
1411 + }
1412 +@@ -696,6 +732,15 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
1413 + cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
1414 + memcpy(sbi->ckpt, cp_block, blk_size);
1415 +
1416 ++ if (cur_page == cp1)
1417 ++ sbi->cur_cp_pack = 1;
1418 ++ else
1419 ++ sbi->cur_cp_pack = 2;
1420 ++
1421 ++ /* Sanity checking of checkpoint */
1422 ++ if (sanity_check_ckpt(sbi))
1423 ++ goto free_fail_no_cp;
1424 ++
1425 + if (cp_blks <= 1)
1426 + goto done;
1427 +
1428 +@@ -717,6 +762,9 @@ done:
1429 + f2fs_put_page(cp2, 1);
1430 + return 0;
1431 +
1432 ++free_fail_no_cp:
1433 ++ f2fs_put_page(cp1, 1);
1434 ++ f2fs_put_page(cp2, 1);
1435 + fail_no_cp:
1436 + kfree(sbi->ckpt);
1437 + return -EINVAL;
1438 +@@ -767,24 +815,6 @@ out:
1439 + f2fs_trace_pid(page);
1440 + }
1441 +
1442 +-void add_dirty_dir_inode(struct inode *inode)
1443 +-{
1444 +- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1445 +- struct inode_entry *new =
1446 +- f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
1447 +- int ret = 0;
1448 +-
1449 +- new->inode = inode;
1450 +- INIT_LIST_HEAD(&new->list);
1451 +-
1452 +- spin_lock(&sbi->dir_inode_lock);
1453 +- ret = __add_dirty_inode(inode, new);
1454 +- spin_unlock(&sbi->dir_inode_lock);
1455 +-
1456 +- if (ret)
1457 +- kmem_cache_free(inode_entry_slab, new);
1458 +-}
1459 +-
1460 + void remove_dirty_dir_inode(struct inode *inode)
1461 + {
1462 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1463 +@@ -807,12 +837,6 @@ void remove_dirty_dir_inode(struct inode *inode)
1464 + stat_dec_dirty_dir(sbi);
1465 + spin_unlock(&sbi->dir_inode_lock);
1466 + kmem_cache_free(inode_entry_slab, entry);
1467 +-
1468 +- /* Only from the recovery routine */
1469 +- if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
1470 +- clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
1471 +- iput(inode);
1472 +- }
1473 + }
1474 +
1475 + void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
1476 +@@ -922,7 +946,6 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
1477 + static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1478 + {
1479 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1480 +- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
1481 + struct f2fs_nm_info *nm_i = NM_I(sbi);
1482 + unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
1483 + nid_t last_nid = nm_i->next_scan_nid;
1484 +@@ -931,15 +954,6 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1485 + __u32 crc32 = 0;
1486 + int i;
1487 + int cp_payload_blks = __cp_payload(sbi);
1488 +- block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
1489 +- bool invalidate = false;
1490 +-
1491 +- /*
1492 +- * This avoids to conduct wrong roll-forward operations and uses
1493 +- * metapages, so should be called prior to sync_meta_pages below.
1494 +- */
1495 +- if (discard_next_dnode(sbi, discard_blk))
1496 +- invalidate = true;
1497 +
1498 + /* Flush all the NAT/SIT pages */
1499 + while (get_pages(sbi, F2FS_DIRTY_META)) {
1500 +@@ -1016,6 +1030,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1501 + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1502 + set_ckpt_flags(ckpt, CP_FSCK_FLAG);
1503 +
1504 ++ /* set this flag to activate crc|cp_ver for recovery */
1505 ++ set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
1506 ++
1507 + /* update SIT/NAT bitmap */
1508 + get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
1509 + get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
1510 +@@ -1025,7 +1042,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1511 + le32_to_cpu(ckpt->checksum_offset)))
1512 + = cpu_to_le32(crc32);
1513 +
1514 +- start_blk = __start_cp_addr(sbi);
1515 ++ start_blk = __start_cp_next_addr(sbi);
1516 +
1517 + /* need to wait for end_io results */
1518 + wait_on_all_pages_writeback(sbi);
1519 +@@ -1073,14 +1090,6 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1520 + /* wait for previous submitted meta pages writeback */
1521 + wait_on_all_pages_writeback(sbi);
1522 +
1523 +- /*
1524 +- * invalidate meta page which is used temporarily for zeroing out
1525 +- * block at the end of warm node chain.
1526 +- */
1527 +- if (invalidate)
1528 +- invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
1529 +- discard_blk);
1530 +-
1531 + release_dirty_inode(sbi);
1532 +
1533 + if (unlikely(f2fs_cp_error(sbi)))
1534 +@@ -1088,6 +1097,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1535 +
1536 + clear_prefree_segments(sbi, cpc);
1537 + clear_sbi_flag(sbi, SBI_IS_DIRTY);
1538 ++ __set_cp_next_pack(sbi);
1539 + }
1540 +
1541 + /*
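[Editor's note — illustrative sketch, not part of the patch.] The checkpoint.c hunks above, together with the f2fs.h helpers further down, replace the old rule "odd checkpoint version lives in pack 0, even in pack 1" with an explicit sbi->cur_cp_pack field: get_valid_checkpoint() records which pack was loaded, do_checkpoint() writes to the other one via __start_cp_next_addr(), and __set_cp_next_pack() flips the index only once the new checkpoint is durable. A minimal standalone C model of that ping-pong (field names are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Two checkpoint packs live back to back: pack 1 at cp_blkaddr,
     * pack 2 one segment later. cur_cp_pack names the pack holding
     * the last valid checkpoint. */
    struct cp_state {
        uint32_t cp_blkaddr;
        uint32_t blocks_per_seg;
        int cur_cp_pack;              /* 1 or 2 */
    };

    /* mirrors __start_cp_addr(): where the valid checkpoint lives */
    static uint32_t start_cp_addr(const struct cp_state *s)
    {
        return s->cp_blkaddr + (s->cur_cp_pack == 2 ? s->blocks_per_seg : 0);
    }

    /* mirrors __start_cp_next_addr(): where the next one gets written */
    static uint32_t start_cp_next_addr(const struct cp_state *s)
    {
        return s->cp_blkaddr + (s->cur_cp_pack == 1 ? s->blocks_per_seg : 0);
    }

    /* mirrors __set_cp_next_pack(): flip only after the write is durable */
    static void set_cp_next_pack(struct cp_state *s)
    {
        s->cur_cp_pack = (s->cur_cp_pack == 1) ? 2 : 1;
    }

    int main(void)
    {
        struct cp_state s = { 512, 512, 1 };
        printf("read pack at %u, write next at %u\n",
               start_cp_addr(&s), start_cp_next_addr(&s));
        set_cp_next_pack(&s);
        printf("after flip: read pack at %u\n", start_cp_addr(&s));
        return 0;
    }

Decoupling pack selection from the version's parity is what lets a later recovery write reuse a pack without having to fake a version number.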
1542 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
1543 +index f6ccb21f286b..2b0b671484bd 100644
1544 +--- a/fs/f2fs/data.c
1545 ++++ b/fs/f2fs/data.c
1546 +@@ -147,6 +147,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
1547 + struct bio *bio;
1548 + struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
1549 +
1550 ++ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->blk_addr,
1551 ++ __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
1552 ++ return -EFAULT;
1553 ++
1554 + trace_f2fs_submit_page_bio(page, fio);
1555 + f2fs_trace_ios(fio, 0);
1556 +
1557 +@@ -172,7 +176,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
1558 +
1559 + io = is_read ? &sbi->read_io : &sbi->write_io[btype];
1560 +
1561 +- verify_block_addr(sbi, fio->blk_addr);
1562 ++ verify_block_addr(fio, fio->blk_addr);
1563 +
1564 + down_write(&io->io_rwsem);
1565 +
1566 +@@ -603,7 +607,13 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1567 + goto unlock_out;
1568 + }
1569 +
1570 +- if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
1571 ++ if (__is_valid_data_blkaddr(dn.data_blkaddr) &&
1572 ++ !f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC)) {
1573 ++ err = -EFAULT;
1574 ++ goto sync_out;
1575 ++ }
1576 ++
1577 ++ if (!is_valid_data_blkaddr(sbi, dn.data_blkaddr)) {
1578 + if (create) {
1579 + if (unlikely(f2fs_cp_error(sbi))) {
1580 + err = -EIO;
1581 +@@ -866,6 +876,40 @@ out:
1582 + return ret;
1583 + }
1584 +
1585 ++struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
1586 ++ unsigned nr_pages)
1587 ++{
1588 ++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1589 ++ struct f2fs_crypto_ctx *ctx = NULL;
1590 ++ struct block_device *bdev = sbi->sb->s_bdev;
1591 ++ struct bio *bio;
1592 ++
1593 ++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
1594 ++ return ERR_PTR(-EFAULT);
1595 ++
1596 ++ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1597 ++ ctx = f2fs_get_crypto_ctx(inode);
1598 ++ if (IS_ERR(ctx))
1599 ++ return ERR_CAST(ctx);
1600 ++
1601 ++ /* wait the page to be moved by cleaning */
1602 ++ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
1603 ++ }
1604 ++
1605 ++ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
1606 ++ if (!bio) {
1607 ++ if (ctx)
1608 ++ f2fs_release_crypto_ctx(ctx);
1609 ++ return ERR_PTR(-ENOMEM);
1610 ++ }
1611 ++ bio->bi_bdev = bdev;
1612 ++ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
1613 ++ bio->bi_end_io = f2fs_read_end_io;
1614 ++ bio->bi_private = ctx;
1615 ++
1616 ++ return bio;
1617 ++}
1618 ++
1619 + /*
1620 + * This function was originally taken from fs/mpage.c, and customized for f2fs.
1621 + * Major change was from block_size == page_size in f2fs by default.
1622 +@@ -884,7 +928,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
1623 + sector_t last_block;
1624 + sector_t last_block_in_file;
1625 + sector_t block_nr;
1626 +- struct block_device *bdev = inode->i_sb->s_bdev;
1627 + struct f2fs_map_blocks map;
1628 +
1629 + map.m_pblk = 0;
1630 +@@ -941,6 +984,10 @@ got_it:
1631 + SetPageUptodate(page);
1632 + goto confused;
1633 + }
1634 ++
1635 ++ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
1636 ++ DATA_GENERIC))
1637 ++ goto set_error_page;
1638 + } else {
1639 + zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1640 + SetPageUptodate(page);
1641 +@@ -958,31 +1005,9 @@ submit_and_realloc:
1642 + bio = NULL;
1643 + }
1644 + if (bio == NULL) {
1645 +- struct f2fs_crypto_ctx *ctx = NULL;
1646 +-
1647 +- if (f2fs_encrypted_inode(inode) &&
1648 +- S_ISREG(inode->i_mode)) {
1649 +-
1650 +- ctx = f2fs_get_crypto_ctx(inode);
1651 +- if (IS_ERR(ctx))
1652 +- goto set_error_page;
1653 +-
1654 +- /* wait the page to be moved by cleaning */
1655 +- f2fs_wait_on_encrypted_page_writeback(
1656 +- F2FS_I_SB(inode), block_nr);
1657 +- }
1658 +-
1659 +- bio = bio_alloc(GFP_KERNEL,
1660 +- min_t(int, nr_pages, BIO_MAX_PAGES));
1661 +- if (!bio) {
1662 +- if (ctx)
1663 +- f2fs_release_crypto_ctx(ctx);
1664 ++ bio = f2fs_grab_bio(inode, block_nr, nr_pages);
1665 ++ if (IS_ERR(bio))
1666 + goto set_error_page;
1667 +- }
1668 +- bio->bi_bdev = bdev;
1669 +- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
1670 +- bio->bi_end_io = f2fs_read_end_io;
1671 +- bio->bi_private = ctx;
1672 + }
1673 +
1674 + if (bio_add_page(bio, page, blocksize, 0) < blocksize)
1675 +@@ -1077,11 +1102,17 @@ int do_write_data_page(struct f2fs_io_info *fio)
1676 +
1677 + set_page_writeback(page);
1678 +
1679 ++ if (__is_valid_data_blkaddr(fio->blk_addr) &&
1680 ++ !f2fs_is_valid_blkaddr(fio->sbi, fio->blk_addr,
1681 ++ DATA_GENERIC)) {
1682 ++ err = -EFAULT;
1683 ++ goto out_writepage;
1684 ++ }
1685 + /*
1686 + * If current allocation needs SSR,
1687 + * it had better in-place writes for updated data.
1688 + */
1689 +- if (unlikely(fio->blk_addr != NEW_ADDR &&
1690 ++ if (unlikely(is_valid_data_blkaddr(fio->sbi, fio->blk_addr) &&
1691 + !is_cold_data(page) &&
1692 + need_inplace_update(inode))) {
1693 + rewrite_data_page(fio);
1694 +@@ -1482,17 +1513,21 @@ put_next:
1695 + if (dn.data_blkaddr == NEW_ADDR) {
1696 + zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1697 + } else {
1698 +- struct f2fs_io_info fio = {
1699 +- .sbi = sbi,
1700 +- .type = DATA,
1701 +- .rw = READ_SYNC,
1702 +- .blk_addr = dn.data_blkaddr,
1703 +- .page = page,
1704 +- .encrypted_page = NULL,
1705 +- };
1706 +- err = f2fs_submit_page_bio(&fio);
1707 +- if (err)
1708 ++ struct bio *bio;
1709 ++
1710 ++ bio = f2fs_grab_bio(inode, dn.data_blkaddr, 1);
1711 ++ if (IS_ERR(bio)) {
1712 ++ err = PTR_ERR(bio);
1713 + goto fail;
1714 ++ }
1715 ++
1716 ++ if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
1717 ++ bio_put(bio);
1718 ++ err = -EFAULT;
1719 ++ goto fail;
1720 ++ }
1721 ++
1722 ++ submit_bio(READ_SYNC, bio);
1723 +
1724 + lock_page(page);
1725 + if (unlikely(!PageUptodate(page))) {
1726 +@@ -1503,13 +1538,6 @@ put_next:
1727 + f2fs_put_page(page, 1);
1728 + goto repeat;
1729 + }
1730 +-
1731 +- /* avoid symlink page */
1732 +- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1733 +- err = f2fs_decrypt_one(inode, page);
1734 +- if (err)
1735 +- goto fail;
1736 +- }
1737 + }
1738 + out_update:
1739 + SetPageUptodate(page);
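[Editor's note — illustrative sketch, not part of the patch.] The data.c hunks share one theme: every path about to issue or map I/O (f2fs_submit_page_bio, f2fs_map_blocks, do_write_data_page, the readpages loop) now rejects a block address that fails f2fs_is_valid_blkaddr() with -EFAULT before a bio is built, and the new f2fs_grab_bio() centralizes the allocate-and-validate step. The guard-before-build shape, as plain C with the filesystem specifics stubbed:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint32_t block_t;

    /* stand-in for f2fs_is_valid_blkaddr(sbi, blk, DATA_GENERIC);
     * the real check consults the superblock's main-area bounds */
    static bool blkaddr_valid(block_t blk)
    {
        return blk >= 4096u && blk < (1u << 20);   /* illustrative bounds */
    }

    struct bio_stub { block_t blk; };

    /* mirrors the f2fs_grab_bio() idea: refuse to construct an I/O
     * request for a bad address instead of letting it reach the
     * block layer */
    static struct bio_stub *grab_bio(block_t blk, int *err)
    {
        struct bio_stub *bio;

        if (!blkaddr_valid(blk)) {
            *err = -EFAULT;              /* same errno as the patch */
            return NULL;
        }
        bio = malloc(sizeof(*bio));
        if (!bio) {
            *err = -ENOMEM;
            return NULL;
        }
        bio->blk = blk;
        return bio;
    }

Failing with -EFAULT at the source keeps a corrupted on-disk pointer from turning into a read or write of an arbitrary block.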
1740 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
1741 +index 60972a559685..92a240616f52 100644
1742 +--- a/fs/f2fs/dir.c
1743 ++++ b/fs/f2fs/dir.c
1744 +@@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
1745 + [F2FS_FT_SYMLINK] = DT_LNK,
1746 + };
1747 +
1748 +-#define S_SHIFT 12
1749 + static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
1750 + [S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
1751 + [S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
1752 +@@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
1753 + de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
1754 + }
1755 +
1756 ++unsigned char get_de_type(struct f2fs_dir_entry *de)
1757 ++{
1758 ++ if (de->file_type < F2FS_FT_MAX)
1759 ++ return f2fs_filetype_table[de->file_type];
1760 ++ return DT_UNKNOWN;
1761 ++}
1762 ++
1763 + static unsigned long dir_block_index(unsigned int level,
1764 + int dir_level, unsigned int idx)
1765 + {
1766 +@@ -519,11 +525,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
1767 + test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
1768 + }
1769 +
1770 +-/*
1771 +- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1772 +- * f2fs_unlock_op().
1773 +- */
1774 +-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
1775 ++int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
1776 + struct inode *inode, nid_t ino, umode_t mode)
1777 + {
1778 + unsigned int bit_pos;
1779 +@@ -536,28 +538,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
1780 + struct f2fs_dentry_block *dentry_blk = NULL;
1781 + struct f2fs_dentry_ptr d;
1782 + struct page *page = NULL;
1783 +- struct f2fs_filename fname;
1784 +- struct qstr new_name;
1785 +- int slots, err;
1786 +-
1787 +- err = f2fs_fname_setup_filename(dir, name, 0, &fname);
1788 +- if (err)
1789 +- return err;
1790 +-
1791 +- new_name.name = fname_name(&fname);
1792 +- new_name.len = fname_len(&fname);
1793 +-
1794 +- if (f2fs_has_inline_dentry(dir)) {
1795 +- err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
1796 +- if (!err || err != -EAGAIN)
1797 +- goto out;
1798 +- else
1799 +- err = 0;
1800 +- }
1801 ++ int slots, err = 0;
1802 +
1803 + level = 0;
1804 +- slots = GET_DENTRY_SLOTS(new_name.len);
1805 +- dentry_hash = f2fs_dentry_hash(&new_name, NULL);
1806 ++ slots = GET_DENTRY_SLOTS(new_name->len);
1807 ++ dentry_hash = f2fs_dentry_hash(new_name, NULL);
1808 +
1809 + current_depth = F2FS_I(dir)->i_current_depth;
1810 + if (F2FS_I(dir)->chash == dentry_hash) {
1811 +@@ -566,10 +551,8 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
1812 + }
1813 +
1814 + start:
1815 +- if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
1816 +- err = -ENOSPC;
1817 +- goto out;
1818 +- }
1819 ++ if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
1820 ++ return -ENOSPC;
1821 +
1822 + /* Increase the depth, if required */
1823 + if (level == current_depth)
1824 +@@ -583,10 +566,8 @@ start:
1825 +
1826 + for (block = bidx; block <= (bidx + nblock - 1); block++) {
1827 + dentry_page = get_new_data_page(dir, NULL, block, true);
1828 +- if (IS_ERR(dentry_page)) {
1829 +- err = PTR_ERR(dentry_page);
1830 +- goto out;
1831 +- }
1832 ++ if (IS_ERR(dentry_page))
1833 ++ return PTR_ERR(dentry_page);
1834 +
1835 + dentry_blk = kmap(dentry_page);
1836 + bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
1837 +@@ -606,7 +587,7 @@ add_dentry:
1838 +
1839 + if (inode) {
1840 + down_write(&F2FS_I(inode)->i_sem);
1841 +- page = init_inode_metadata(inode, dir, &new_name, NULL);
1842 ++ page = init_inode_metadata(inode, dir, new_name, NULL);
1843 + if (IS_ERR(page)) {
1844 + err = PTR_ERR(page);
1845 + goto fail;
1846 +@@ -616,7 +597,7 @@ add_dentry:
1847 + }
1848 +
1849 + make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
1850 +- f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
1851 ++ f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
1852 +
1853 + set_page_dirty(dentry_page);
1854 +
1855 +@@ -638,7 +619,34 @@ fail:
1856 + }
1857 + kunmap(dentry_page);
1858 + f2fs_put_page(dentry_page, 1);
1859 +-out:
1860 ++
1861 ++ return err;
1862 ++}
1863 ++
1864 ++/*
1865 ++ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1866 ++ * f2fs_unlock_op().
1867 ++ */
1868 ++int __f2fs_add_link(struct inode *dir, const struct qstr *name,
1869 ++ struct inode *inode, nid_t ino, umode_t mode)
1870 ++{
1871 ++ struct f2fs_filename fname;
1872 ++ struct qstr new_name;
1873 ++ int err;
1874 ++
1875 ++ err = f2fs_fname_setup_filename(dir, name, 0, &fname);
1876 ++ if (err)
1877 ++ return err;
1878 ++
1879 ++ new_name.name = fname_name(&fname);
1880 ++ new_name.len = fname_len(&fname);
1881 ++
1882 ++ err = -EAGAIN;
1883 ++ if (f2fs_has_inline_dentry(dir))
1884 ++ err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
1885 ++ if (err == -EAGAIN)
1886 ++ err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
1887 ++
1888 + f2fs_fname_free_filename(&fname);
1889 + return err;
1890 + }
1891 +@@ -792,10 +800,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
1892 + break;
1893 +
1894 + de = &d->dentry[bit_pos];
1895 +- if (de->file_type < F2FS_FT_MAX)
1896 +- d_type = f2fs_filetype_table[de->file_type];
1897 +- else
1898 +- d_type = DT_UNKNOWN;
1899 ++ d_type = get_de_type(de);
1900 +
1901 + de_name.name = d->filename[bit_pos];
1902 + de_name.len = le16_to_cpu(de->name_len);
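[Editor's note — illustrative sketch, not part of the patch.] The dir.c refactor splits __f2fs_add_link() in two: f2fs_add_regular_entry() does the dentry-block work, and the wrapper first tries the inline path, treating -EAGAIN as "no inline room, fall through". In miniature, with stubs standing in for the two entry paths:

    #include <errno.h>

    static int add_inline_entry(const char *name)  { return -EAGAIN; }
    static int add_regular_entry(const char *name) { return 0; }

    /* mirrors the new __f2fs_add_link() control flow: err starts as
     * -EAGAIN so a directory with no inline dentries goes straight to
     * the regular path, and an inline -EAGAIN falls through to it too */
    static int add_link(const char *name, int has_inline_dentries)
    {
        int err = -EAGAIN;

        if (has_inline_dentries)
            err = add_inline_entry(name);
        if (err == -EAGAIN)
            err = add_regular_entry(name);
        return err;
    }

Exporting f2fs_add_regular_entry() is also what lets inline.c's new f2fs_add_inline_entries() replay dentries without re-entering the filename-setup wrapper.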
1903 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
1904 +index 2871576fbca4..2bfce887dce2 100644
1905 +--- a/fs/f2fs/f2fs.h
1906 ++++ b/fs/f2fs/f2fs.h
1907 +@@ -135,7 +135,7 @@ struct cp_control {
1908 + };
1909 +
1910 + /*
1911 +- * For CP/NAT/SIT/SSA readahead
1912 ++ * indicate meta/data type
1913 + */
1914 + enum {
1915 + META_CP,
1916 +@@ -143,6 +143,8 @@ enum {
1917 + META_SIT,
1918 + META_SSA,
1919 + META_POR,
1920 ++ DATA_GENERIC,
1921 ++ META_GENERIC,
1922 + };
1923 +
1924 + /* for the list of ino */
1925 +@@ -684,6 +686,7 @@ struct f2fs_io_info {
1926 + block_t blk_addr; /* block address to be written */
1927 + struct page *page; /* page to be written */
1928 + struct page *encrypted_page; /* encrypted page */
1929 ++ bool is_meta; /* indicate borrow meta inode mapping or not */
1930 + };
1931 +
1932 + #define is_read_io(rw) (((rw) & 1) == READ)
1933 +@@ -731,6 +734,7 @@ struct f2fs_sb_info {
1934 +
1935 + /* for checkpoint */
1936 + struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
1937 ++ int cur_cp_pack; /* remain current cp pack */
1938 + struct inode *meta_inode; /* cache meta blocks */
1939 + struct mutex cp_mutex; /* checkpoint procedure lock */
1940 + struct rw_semaphore cp_rwsem; /* blocking FS operations */
1941 +@@ -1140,22 +1144,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
1942 +
1943 + static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
1944 + {
1945 +- block_t start_addr;
1946 +- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1947 +- unsigned long long ckpt_version = cur_cp_version(ckpt);
1948 +-
1949 +- start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
1950 ++ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
1951 +
1952 +- /*
1953 +- * odd numbered checkpoint should at cp segment 0
1954 +- * and even segment must be at cp segment 1
1955 +- */
1956 +- if (!(ckpt_version & 1))
1957 ++ if (sbi->cur_cp_pack == 2)
1958 + start_addr += sbi->blocks_per_seg;
1959 ++ return start_addr;
1960 ++}
1961 ++
1962 ++static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
1963 ++{
1964 ++ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
1965 +
1966 ++ if (sbi->cur_cp_pack == 1)
1967 ++ start_addr += sbi->blocks_per_seg;
1968 + return start_addr;
1969 + }
1970 +
1971 ++static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
1972 ++{
1973 ++ sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
1974 ++}
1975 ++
1976 + static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
1977 + {
1978 + return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
1979 +@@ -1402,7 +1411,6 @@ enum {
1980 + FI_NO_ALLOC, /* should not allocate any blocks */
1981 + FI_FREE_NID, /* free allocated nide */
1982 + FI_UPDATE_DIR, /* should update inode block for consistency */
1983 +- FI_DELAY_IPUT, /* used for the recovery */
1984 + FI_NO_EXTENT, /* not to use the extent cache */
1985 + FI_INLINE_XATTR, /* used for inline xattr */
1986 + FI_INLINE_DATA, /* used for inline data*/
1987 +@@ -1641,6 +1649,39 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
1988 + (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) / \
1989 + ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
1990 +
1991 ++#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
1992 ++ (!is_read_io(fio->rw) || fio->is_meta))
1993 ++
1994 ++bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
1995 ++ block_t blkaddr, int type);
1996 ++void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
1997 ++static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
1998 ++ block_t blkaddr, int type)
1999 ++{
2000 ++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
2001 ++ f2fs_msg(sbi->sb, KERN_ERR,
2002 ++ "invalid blkaddr: %u, type: %d, run fsck to fix.",
2003 ++ blkaddr, type);
2004 ++ f2fs_bug_on(sbi, 1);
2005 ++ }
2006 ++}
2007 ++
2008 ++static inline bool __is_valid_data_blkaddr(block_t blkaddr)
2009 ++{
2010 ++ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
2011 ++ return false;
2012 ++ return true;
2013 ++}
2014 ++
2015 ++static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
2016 ++ block_t blkaddr)
2017 ++{
2018 ++ if (!__is_valid_data_blkaddr(blkaddr))
2019 ++ return false;
2020 ++ verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
2021 ++ return true;
2022 ++}
2023 ++
2024 + /*
2025 + * file.c
2026 + */
2027 +@@ -1677,7 +1718,7 @@ struct dentry *f2fs_get_parent(struct dentry *child);
2028 + */
2029 + extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
2030 + void set_de_type(struct f2fs_dir_entry *, umode_t);
2031 +-
2032 ++unsigned char get_de_type(struct f2fs_dir_entry *);
2033 + struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *,
2034 + f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
2035 + bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
2036 +@@ -1698,6 +1739,8 @@ void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
2037 + int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
2038 + void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
2039 + const struct qstr *, f2fs_hash_t , unsigned int);
2040 ++int f2fs_add_regular_entry(struct inode *, const struct qstr *,
2041 ++ struct inode *, nid_t, umode_t);
2042 + int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
2043 + umode_t);
2044 + void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
2045 +@@ -1718,6 +1761,7 @@ int f2fs_commit_super(struct f2fs_sb_info *, bool);
2046 + int f2fs_sync_fs(struct super_block *, int);
2047 + extern __printf(3, 4)
2048 + void f2fs_msg(struct super_block *, const char *, const char *, ...);
2049 ++int sanity_check_ckpt(struct f2fs_sb_info *sbi);
2050 +
2051 + /*
2052 + * hash.c
2053 +@@ -1778,7 +1822,6 @@ bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
2054 + void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
2055 + void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
2056 + void release_discard_addrs(struct f2fs_sb_info *);
2057 +-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
2058 + int npages_for_summary_flush(struct f2fs_sb_info *, bool);
2059 + void allocate_new_segments(struct f2fs_sb_info *);
2060 + int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
2061 +@@ -1810,7 +1853,8 @@ void destroy_segment_manager_caches(void);
2062 + struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
2063 + struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
2064 + struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
2065 +-bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
2066 ++bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
2067 ++ block_t blkaddr, int type);
2068 + int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
2069 + void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
2070 + long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
2071 +@@ -1825,7 +1869,6 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
2072 + int recover_orphan_inodes(struct f2fs_sb_info *);
2073 + int get_valid_checkpoint(struct f2fs_sb_info *);
2074 + void update_dirty_page(struct inode *, struct page *);
2075 +-void add_dirty_dir_inode(struct inode *);
2076 + void remove_dirty_dir_inode(struct inode *);
2077 + void sync_dirty_dir_inodes(struct f2fs_sb_info *);
2078 + void write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
2079 +@@ -1864,7 +1907,7 @@ void build_gc_manager(struct f2fs_sb_info *);
2080 + /*
2081 + * recovery.c
2082 + */
2083 +-int recover_fsync_data(struct f2fs_sb_info *);
2084 ++int recover_fsync_data(struct f2fs_sb_info *, bool);
2085 + bool space_for_roll_forward(struct f2fs_sb_info *);
2086 +
2087 + /*
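[Editor's note — illustrative sketch, not part of the patch.] The f2fs.h hunk introduces a two-tier address check: __is_valid_data_blkaddr() is the cheap, pure test that a slot is neither NULL_ADDR nor NEW_ADDR, while is_valid_data_blkaddr() additionally runs verify_blkaddr(), which range-checks against the layout and flags fsck on failure. Callers pick the tier they need — truncate_data_blocks_range(), for instance, uses the cheap test to skip holes and the full one before invalidating. Tiny model:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t block_t;
    #define NULL_ADDR 0u
    #define NEW_ADDR  0xFFFFFFFFu

    /* tier 1: "does this slot point at a real block at all?" */
    static bool addr_is_real(block_t a)
    {
        return a != NULL_ADDR && a != NEW_ADDR;
    }

    /* stand-in for the layout range check in f2fs_is_valid_blkaddr */
    static bool addr_in_layout(block_t a) { return a >= 4096u; }

    /* tier 2: real *and* inside the filesystem; the kernel version
     * also raises SBI_NEED_FSCK and a bug-on when the range fails */
    static bool addr_is_valid(block_t a)
    {
        return addr_is_real(a) && addr_in_layout(a);
    }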
2088 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2089 +index 01eed94b01ea..96bfd9f0ea02 100644
2090 +--- a/fs/f2fs/file.c
2091 ++++ b/fs/f2fs/file.c
2092 +@@ -305,13 +305,13 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
2093 + return pgofs;
2094 + }
2095 +
2096 +-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
2097 +- int whence)
2098 ++static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
2099 ++ pgoff_t dirty, pgoff_t pgofs, int whence)
2100 + {
2101 + switch (whence) {
2102 + case SEEK_DATA:
2103 + if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
2104 +- (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
2105 ++ is_valid_data_blkaddr(sbi, blkaddr))
2106 + return true;
2107 + break;
2108 + case SEEK_HOLE:
2109 +@@ -374,7 +374,15 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
2110 + block_t blkaddr;
2111 + blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
2112 +
2113 +- if (__found_offset(blkaddr, dirty, pgofs, whence)) {
2114 ++ if (__is_valid_data_blkaddr(blkaddr) &&
2115 ++ !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
2116 ++ blkaddr, DATA_GENERIC)) {
2117 ++ f2fs_put_dnode(&dn);
2118 ++ goto fail;
2119 ++ }
2120 ++
2121 ++ if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
2122 ++ pgofs, whence)) {
2123 + f2fs_put_dnode(&dn);
2124 + goto found;
2125 + }
2126 +@@ -466,6 +474,11 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
2127 +
2128 + dn->data_blkaddr = NULL_ADDR;
2129 + set_data_blkaddr(dn);
2130 ++
2131 ++ if (__is_valid_data_blkaddr(blkaddr) &&
2132 ++ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
2133 ++ continue;
2134 ++
2135 + invalidate_blocks(sbi, blkaddr);
2136 + if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
2137 + clear_inode_flag(F2FS_I(dn->inode),
2138 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
2139 +index ad80f916b64d..00685a8b1418 100644
2140 +--- a/fs/f2fs/inline.c
2141 ++++ b/fs/f2fs/inline.c
2142 +@@ -127,6 +127,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
2143 + if (err)
2144 + return err;
2145 +
2146 ++ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
2147 ++ f2fs_put_dnode(dn);
2148 ++ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
2149 ++ f2fs_msg(fio.sbi->sb, KERN_WARNING,
2150 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2151 ++ "run fsck to fix.",
2152 ++ __func__, dn->inode->i_ino, dn->data_blkaddr);
2153 ++ return -EINVAL;
2154 ++ }
2155 ++
2156 + f2fs_wait_on_page_writeback(page, DATA);
2157 +
2158 + if (PageUptodate(page))
2159 +@@ -367,7 +377,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
2160 + * NOTE: ipage is grabbed by caller, but if any error occurs, we should
2161 + * release ipage in this function.
2162 + */
2163 +-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
2164 ++static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
2165 + struct f2fs_inline_dentry *inline_dentry)
2166 + {
2167 + struct page *page;
2168 +@@ -386,6 +396,17 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
2169 + if (err)
2170 + goto out;
2171 +
2172 ++ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
2173 ++ f2fs_put_dnode(&dn);
2174 ++ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
2175 ++ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
2176 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2177 ++ "run fsck to fix.",
2178 ++ __func__, dir->i_ino, dn.data_blkaddr);
2179 ++ err = -EINVAL;
2180 ++ goto out;
2181 ++ }
2182 ++
2183 + f2fs_wait_on_page_writeback(page, DATA);
2184 + zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
2185 +
2186 +@@ -428,6 +449,98 @@ out:
2187 + return err;
2188 + }
2189 +
2190 ++static int f2fs_add_inline_entries(struct inode *dir,
2191 ++ struct f2fs_inline_dentry *inline_dentry)
2192 ++{
2193 ++ struct f2fs_dentry_ptr d;
2194 ++ unsigned long bit_pos = 0;
2195 ++ int err = 0;
2196 ++
2197 ++ make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
2198 ++
2199 ++ while (bit_pos < d.max) {
2200 ++ struct f2fs_dir_entry *de;
2201 ++ struct qstr new_name;
2202 ++ nid_t ino;
2203 ++ umode_t fake_mode;
2204 ++
2205 ++ if (!test_bit_le(bit_pos, d.bitmap)) {
2206 ++ bit_pos++;
2207 ++ continue;
2208 ++ }
2209 ++
2210 ++ de = &d.dentry[bit_pos];
2211 ++ new_name.name = d.filename[bit_pos];
2212 ++ new_name.len = de->name_len;
2213 ++
2214 ++ ino = le32_to_cpu(de->ino);
2215 ++ fake_mode = get_de_type(de) << S_SHIFT;
2216 ++
2217 ++ err = f2fs_add_regular_entry(dir, &new_name, NULL,
2218 ++ ino, fake_mode);
2219 ++ if (err)
2220 ++ goto punch_dentry_pages;
2221 ++
2222 ++ if (unlikely(!de->name_len))
2223 ++ d.max = -1;
2224 ++
2225 ++ bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
2226 ++ }
2227 ++ return 0;
2228 ++punch_dentry_pages:
2229 ++ truncate_inode_pages(&dir->i_data, 0);
2230 ++ truncate_blocks(dir, 0, false);
2231 ++ remove_dirty_dir_inode(dir);
2232 ++ return err;
2233 ++}
2234 ++
2235 ++static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
2236 ++ struct f2fs_inline_dentry *inline_dentry)
2237 ++{
2238 ++ struct f2fs_inline_dentry *backup_dentry;
2239 ++ int err;
2240 ++
2241 ++ backup_dentry = kmalloc(sizeof(struct f2fs_inline_dentry),
2242 ++ GFP_F2FS_ZERO);
2243 ++ if (!backup_dentry)
2244 ++ return -ENOMEM;
2245 ++
2246 ++ memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
2247 ++ truncate_inline_inode(ipage, 0);
2248 ++
2249 ++ unlock_page(ipage);
2250 ++
2251 ++ err = f2fs_add_inline_entries(dir, backup_dentry);
2252 ++ if (err)
2253 ++ goto recover;
2254 ++
2255 ++ lock_page(ipage);
2256 ++
2257 ++ stat_dec_inline_dir(dir);
2258 ++ clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
2259 ++ update_inode(dir, ipage);
2260 ++ kfree(backup_dentry);
2261 ++ return 0;
2262 ++recover:
2263 ++ lock_page(ipage);
2264 ++ memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
2265 ++ i_size_write(dir, MAX_INLINE_DATA);
2266 ++ update_inode(dir, ipage);
2267 ++ f2fs_put_page(ipage, 1);
2268 ++
2269 ++ kfree(backup_dentry);
2270 ++ return err;
2271 ++}
2272 ++
2273 ++static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
2274 ++ struct f2fs_inline_dentry *inline_dentry)
2275 ++{
2276 ++ if (!F2FS_I(dir)->i_dir_level)
2277 ++ return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
2278 ++ else
2279 ++ return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
2280 ++}
2281 ++
2282 + int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
2283 + struct inode *inode, nid_t ino, umode_t mode)
2284 + {
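[Editor's note — illustrative sketch, not part of the patch.] For directories that already use multi-level hashing (i_dir_level != 0), the new f2fs_move_rehashed_dirents() converts an inline directory with a snapshot-and-rollback pattern: copy the inline dentry area aside, truncate it, replay each entry through f2fs_add_regular_entry(), and on failure copy the snapshot back. Shape only, with the f2fs calls stubbed and an illustrative size constant:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define INLINE_AREA_SIZE 3488   /* illustrative, not the real
                                     * MAX_INLINE_DATA derivation */

    static int replay_entries(const char *snapshot) { return 0; /* or -ENOSPC */ }

    static int move_rehashed_dirents(char *inline_area)
    {
        char *backup = malloc(INLINE_AREA_SIZE);
        int err;

        if (!backup)
            return -ENOMEM;
        memcpy(backup, inline_area, INLINE_AREA_SIZE);   /* snapshot */
        memset(inline_area, 0, INLINE_AREA_SIZE);        /* truncate */

        err = replay_entries(backup);
        if (err)   /* rollback: restore the inline area verbatim */
            memcpy(inline_area, backup, INLINE_AREA_SIZE);

        free(backup);
        return err;
    }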
2285 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
2286 +index 5528801a5baf..89bf8dd7758c 100644
2287 +--- a/fs/f2fs/inode.c
2288 ++++ b/fs/f2fs/inode.c
2289 +@@ -50,13 +50,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
2290 + }
2291 + }
2292 +
2293 +-static bool __written_first_block(struct f2fs_inode *ri)
2294 ++static int __written_first_block(struct f2fs_sb_info *sbi,
2295 ++ struct f2fs_inode *ri)
2296 + {
2297 + block_t addr = le32_to_cpu(ri->i_addr[0]);
2298 +
2299 +- if (addr != NEW_ADDR && addr != NULL_ADDR)
2300 +- return true;
2301 +- return false;
2302 ++ if (!__is_valid_data_blkaddr(addr))
2303 ++ return 1;
2304 ++ if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
2305 ++ return -EFAULT;
2306 ++ return 0;
2307 + }
2308 +
2309 + static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
2310 +@@ -94,12 +97,57 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
2311 + return;
2312 + }
2313 +
2314 ++static bool sanity_check_inode(struct inode *inode, struct page *node_page)
2315 ++{
2316 ++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2317 ++ unsigned long long iblocks;
2318 ++
2319 ++ iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
2320 ++ if (!iblocks) {
2321 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
2322 ++ f2fs_msg(sbi->sb, KERN_WARNING,
2323 ++ "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
2324 ++ "run fsck to fix.",
2325 ++ __func__, inode->i_ino, iblocks);
2326 ++ return false;
2327 ++ }
2328 ++
2329 ++ if (ino_of_node(node_page) != nid_of_node(node_page)) {
2330 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
2331 ++ f2fs_msg(sbi->sb, KERN_WARNING,
2332 ++ "%s: corrupted inode footer i_ino=%lx, ino,nid: "
2333 ++ "[%u, %u] run fsck to fix.",
2334 ++ __func__, inode->i_ino,
2335 ++ ino_of_node(node_page), nid_of_node(node_page));
2336 ++ return false;
2337 ++ }
2338 ++
2339 ++ if (F2FS_I(inode)->extent_tree) {
2340 ++ struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
2341 ++
2342 ++ if (ei->len &&
2343 ++ (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
2344 ++ !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
2345 ++ DATA_GENERIC))) {
2346 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
2347 ++ f2fs_msg(sbi->sb, KERN_WARNING,
2348 ++ "%s: inode (ino=%lx) extent info [%u, %u, %u] "
2349 ++ "is incorrect, run fsck to fix",
2350 ++ __func__, inode->i_ino,
2351 ++ ei->blk, ei->fofs, ei->len);
2352 ++ return false;
2353 ++ }
2354 ++ }
2355 ++ return true;
2356 ++}
2357 ++
2358 + static int do_read_inode(struct inode *inode)
2359 + {
2360 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2361 + struct f2fs_inode_info *fi = F2FS_I(inode);
2362 + struct page *node_page;
2363 + struct f2fs_inode *ri;
2364 ++ int err;
2365 +
2366 + /* Check if ino is within scope */
2367 + if (check_nid_range(sbi, inode->i_ino)) {
2368 +@@ -142,6 +190,11 @@ static int do_read_inode(struct inode *inode)
2369 +
2370 + get_inline_info(fi, ri);
2371 +
2372 ++ if (!sanity_check_inode(inode, node_page)) {
2373 ++ f2fs_put_page(node_page, 1);
2374 ++ return -EINVAL;
2375 ++ }
2376 ++
2377 + /* check data exist */
2378 + if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
2379 + __recover_inline_status(inode, node_page);
2380 +@@ -149,7 +202,12 @@ static int do_read_inode(struct inode *inode)
2381 + /* get rdev by using inline_info */
2382 + __get_inode_rdev(inode, ri);
2383 +
2384 +- if (__written_first_block(ri))
2385 ++ err = __written_first_block(sbi, ri);
2386 ++ if (err < 0) {
2387 ++ f2fs_put_page(node_page, 1);
2388 ++ return err;
2389 ++ }
2390 ++ if (!err)
2391 + set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
2392 +
2393 + f2fs_put_page(node_page, 1);
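[Editor's note — illustrative sketch, not part of the patch.] __written_first_block() changes from bool to a tri-state int so do_read_inode() can tell "first block never written" apart from "address is allocated but out of range", i.e. corruption. Standalone model of the new contract:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t block_t;
    #define NULL_ADDR 0u
    #define NEW_ADDR  0xFFFFFFFFu

    static bool in_fs_range(block_t a) { return a >= 4096u; } /* stand-in */

    /* return values, matching the caller's handling in do_read_inode():
     *   0  -> first block written and address sane (set the inode flag)
     *   1  -> first block never written (nothing to do)
     *  <0  -> address looks allocated but is out of range (abort iget)
     */
    static int written_first_block(block_t addr)
    {
        if (addr == NEW_ADDR || addr == NULL_ADDR)
            return 1;
        if (!in_fs_range(addr))
            return -EFAULT;
        return 0;
    }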
2394 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2395 +index 7bcbc6e9c40d..3685fea62333 100644
2396 +--- a/fs/f2fs/node.c
2397 ++++ b/fs/f2fs/node.c
2398 +@@ -261,13 +261,11 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
2399 + {
2400 + struct nat_entry *e;
2401 +
2402 +- down_write(&nm_i->nat_tree_lock);
2403 + e = __lookup_nat_cache(nm_i, nid);
2404 + if (!e) {
2405 + e = grab_nat_entry(nm_i, nid);
2406 + node_info_from_raw_nat(&e->ni, ne);
2407 + }
2408 +- up_write(&nm_i->nat_tree_lock);
2409 + }
2410 +
2411 + static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
2412 +@@ -298,8 +296,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
2413 + new_blkaddr == NULL_ADDR);
2414 + f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
2415 + new_blkaddr == NEW_ADDR);
2416 +- f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
2417 +- nat_get_blkaddr(e) != NULL_ADDR &&
2418 ++ f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
2419 + new_blkaddr == NEW_ADDR);
2420 +
2421 + /* increment version no as node is removed */
2422 +@@ -314,7 +311,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
2423 +
2424 + /* change address */
2425 + nat_set_blkaddr(e, new_blkaddr);
2426 +- if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
2427 ++ if (!is_valid_data_blkaddr(sbi, new_blkaddr))
2428 + set_nat_flag(e, IS_CHECKPOINTED, false);
2429 + __set_nat_cache_dirty(nm_i, e);
2430 +
2431 +@@ -379,6 +376,8 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
2432 +
2433 + memset(&ne, 0, sizeof(struct f2fs_nat_entry));
2434 +
2435 ++ down_write(&nm_i->nat_tree_lock);
2436 ++
2437 + /* Check current segment summary */
2438 + mutex_lock(&curseg->curseg_mutex);
2439 + i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
2440 +@@ -399,6 +398,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
2441 + cache:
2442 + /* cache nat entry */
2443 + cache_nat_entry(NM_I(sbi), nid, &ne);
2444 ++ up_write(&nm_i->nat_tree_lock);
2445 + }
2446 +
2447 + /*
2448 +@@ -1341,6 +1341,12 @@ static int f2fs_write_node_page(struct page *page,
2449 + return 0;
2450 + }
2451 +
2452 ++ if (__is_valid_data_blkaddr(ni.blk_addr) &&
2453 ++ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
2454 ++ up_read(&sbi->node_write);
2455 ++ goto redirty_out;
2456 ++ }
2457 ++
2458 + set_page_writeback(page);
2459 + fio.blk_addr = ni.blk_addr;
2460 + write_node_page(nid, &fio);
2461 +@@ -1427,9 +1433,9 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
2462 + static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
2463 + {
2464 + struct f2fs_nm_info *nm_i = NM_I(sbi);
2465 +- struct free_nid *i;
2466 ++ struct free_nid *i, *e;
2467 + struct nat_entry *ne;
2468 +- bool allocated = false;
2469 ++ int err = -EINVAL;
2470 +
2471 + if (!available_free_memory(sbi, FREE_NIDS))
2472 + return -1;
2473 +@@ -1438,40 +1444,58 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
2474 + if (unlikely(nid == 0))
2475 + return 0;
2476 +
2477 +- if (build) {
2478 +- /* do not add allocated nids */
2479 +- down_read(&nm_i->nat_tree_lock);
2480 +- ne = __lookup_nat_cache(nm_i, nid);
2481 +- if (ne &&
2482 +- (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2483 +- nat_get_blkaddr(ne) != NULL_ADDR))
2484 +- allocated = true;
2485 +- up_read(&nm_i->nat_tree_lock);
2486 +- if (allocated)
2487 +- return 0;
2488 +- }
2489 +-
2490 + i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2491 + i->nid = nid;
2492 + i->state = NID_NEW;
2493 +
2494 +- if (radix_tree_preload(GFP_NOFS)) {
2495 +- kmem_cache_free(free_nid_slab, i);
2496 +- return 0;
2497 +- }
2498 ++ if (radix_tree_preload(GFP_NOFS))
2499 ++ goto err;
2500 +
2501 + spin_lock(&nm_i->free_nid_list_lock);
2502 +- if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
2503 +- spin_unlock(&nm_i->free_nid_list_lock);
2504 +- radix_tree_preload_end();
2505 +- kmem_cache_free(free_nid_slab, i);
2506 +- return 0;
2507 ++
2508 ++ if (build) {
2509 ++ /*
2510 ++ * Thread A Thread B
2511 ++ * - f2fs_create
2512 ++ * - f2fs_new_inode
2513 ++ * - alloc_nid
2514 ++ * - __insert_nid_to_list(ALLOC_NID_LIST)
2515 ++ * - f2fs_balance_fs_bg
2516 ++ * - build_free_nids
2517 ++ * - __build_free_nids
2518 ++ * - scan_nat_page
2519 ++ * - add_free_nid
2520 ++ * - __lookup_nat_cache
2521 ++ * - f2fs_add_link
2522 ++ * - init_inode_metadata
2523 ++ * - new_inode_page
2524 ++ * - new_node_page
2525 ++ * - set_node_addr
2526 ++ * - alloc_nid_done
2527 ++ * - __remove_nid_from_list(ALLOC_NID_LIST)
2528 ++ * - __insert_nid_to_list(FREE_NID_LIST)
2529 ++ */
2530 ++ ne = __lookup_nat_cache(nm_i, nid);
2531 ++ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2532 ++ nat_get_blkaddr(ne) != NULL_ADDR))
2533 ++ goto err_out;
2534 ++
2535 ++ e = __lookup_free_nid_list(nm_i, nid);
2536 ++ if (e)
2537 ++ goto err_out;
2538 + }
2539 ++ if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i))
2540 ++ goto err_out;
2541 ++ err = 0;
2542 + list_add_tail(&i->list, &nm_i->free_nid_list);
2543 + nm_i->fcnt++;
2544 ++err_out:
2545 + spin_unlock(&nm_i->free_nid_list_lock);
2546 + radix_tree_preload_end();
2547 +- return 1;
2548 ++err:
2549 ++ if (err)
2550 ++ kmem_cache_free(free_nid_slab, i);
2551 ++ return !err;
2552 + }
2553 +
2554 + static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
2555 +@@ -1532,6 +1556,8 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
2556 + ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2557 + META_NAT, true);
2558 +
2559 ++ down_read(&nm_i->nat_tree_lock);
2560 ++
2561 + while (1) {
2562 + struct page *page = get_current_nat_page(sbi, nid);
2563 +
2564 +@@ -1560,6 +1586,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
2565 + remove_free_nid(nm_i, nid);
2566 + }
2567 + mutex_unlock(&curseg->curseg_mutex);
2568 ++ up_read(&nm_i->nat_tree_lock);
2569 +
2570 + ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2571 + nm_i->ra_nid_pages, META_NAT, false);
2572 +@@ -1842,14 +1869,12 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2573 +
2574 + raw_ne = nat_in_journal(sum, i);
2575 +
2576 +- down_write(&nm_i->nat_tree_lock);
2577 + ne = __lookup_nat_cache(nm_i, nid);
2578 + if (!ne) {
2579 + ne = grab_nat_entry(nm_i, nid);
2580 + node_info_from_raw_nat(&ne->ni, &raw_ne);
2581 + }
2582 + __set_nat_cache_dirty(nm_i, ne);
2583 +- up_write(&nm_i->nat_tree_lock);
2584 + }
2585 + update_nats_in_cursum(sum, -i);
2586 + mutex_unlock(&curseg->curseg_mutex);
2587 +@@ -1883,7 +1908,6 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2588 + struct f2fs_nat_block *nat_blk;
2589 + struct nat_entry *ne, *cur;
2590 + struct page *page = NULL;
2591 +- struct f2fs_nm_info *nm_i = NM_I(sbi);
2592 +
2593 + /*
2594 + * there are two steps to flush nat entries:
2595 +@@ -1920,12 +1944,8 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2596 + raw_ne = &nat_blk->entries[nid - start_nid];
2597 + }
2598 + raw_nat_from_node_info(raw_ne, &ne->ni);
2599 +-
2600 +- down_write(&NM_I(sbi)->nat_tree_lock);
2601 + nat_reset_flag(ne);
2602 + __clear_nat_cache_dirty(NM_I(sbi), ne);
2603 +- up_write(&NM_I(sbi)->nat_tree_lock);
2604 +-
2605 + if (nat_get_blkaddr(ne) == NULL_ADDR)
2606 + add_free_nid(sbi, nid, false);
2607 + }
2608 +@@ -1937,9 +1957,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2609 +
2610 + f2fs_bug_on(sbi, set->entry_cnt);
2611 +
2612 +- down_write(&nm_i->nat_tree_lock);
2613 + radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2614 +- up_write(&nm_i->nat_tree_lock);
2615 + kmem_cache_free(nat_entry_set_slab, set);
2616 + }
2617 +
2618 +@@ -1959,6 +1977,9 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
2619 +
2620 + if (!nm_i->dirty_nat_cnt)
2621 + return;
2622 ++
2623 ++ down_write(&nm_i->nat_tree_lock);
2624 ++
2625 + /*
2626 + * if there are no enough space in journal to store dirty nat
2627 + * entries, remove all entries from journal and merge them
2628 +@@ -1967,7 +1988,6 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
2629 + if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2630 + remove_nats_in_journal(sbi);
2631 +
2632 +- down_write(&nm_i->nat_tree_lock);
2633 + while ((found = __gang_lookup_nat_set(nm_i,
2634 + set_idx, SETVEC_SIZE, setvec))) {
2635 + unsigned idx;
2636 +@@ -1976,12 +1996,13 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
2637 + __adjust_nat_entry_set(setvec[idx], &sets,
2638 + MAX_NAT_JENTRIES(sum));
2639 + }
2640 +- up_write(&nm_i->nat_tree_lock);
2641 +
2642 + /* flush dirty nats in nat entry set */
2643 + list_for_each_entry_safe(set, tmp, &sets, set_list)
2644 + __flush_nat_entry_set(sbi, set);
2645 +
2646 ++ up_write(&nm_i->nat_tree_lock);
2647 ++
2648 + f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
2649 + }
2650 +
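[Editor's note — illustrative sketch, not part of the patch.] The node.c hunks widen nat_tree_lock's scope: instead of taking the rwsem around each individual cache touch, get_node_info() and flush_nat_entries() now hold it across the whole journal-lookup-plus-cache-update sequence (and build_free_nids() takes it for reading), so a concurrent scanner can no longer observe the journal and the cache disagreeing mid-update. The before/after shape with a pthread rwlock:

    #include <pthread.h>

    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void lookup_journal(void) { /* read journalled NAT state */ }
    static void update_cache(void)   { /* install it in the cache tree */ }

    /* before: two short critical sections -- a reader between them
     * could see a half-applied state.
     * after: one section covering both steps, as below. */
    static void get_info(void)
    {
        pthread_rwlock_wrlock(&tree_lock);
        lookup_journal();
        update_cache();
        pthread_rwlock_unlock(&tree_lock);
    }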
2651 +diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
2652 +index e4fffd2d98c4..0d6f0e3dc655 100644
2653 +--- a/fs/f2fs/node.h
2654 ++++ b/fs/f2fs/node.h
2655 +@@ -212,6 +212,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
2656 + f2fs_change_bit(block_off, nm_i->nat_bitmap);
2657 + }
2658 +
2659 ++static inline nid_t ino_of_node(struct page *node_page)
2660 ++{
2661 ++ struct f2fs_node *rn = F2FS_NODE(node_page);
2662 ++ return le32_to_cpu(rn->footer.ino);
2663 ++}
2664 ++
2665 ++static inline nid_t nid_of_node(struct page *node_page)
2666 ++{
2667 ++ struct f2fs_node *rn = F2FS_NODE(node_page);
2668 ++ return le32_to_cpu(rn->footer.nid);
2669 ++}
2670 ++
2671 ++static inline unsigned int ofs_of_node(struct page *node_page)
2672 ++{
2673 ++ struct f2fs_node *rn = F2FS_NODE(node_page);
2674 ++ unsigned flag = le32_to_cpu(rn->footer.flag);
2675 ++ return flag >> OFFSET_BIT_SHIFT;
2676 ++}
2677 ++
2678 ++static inline __u64 cpver_of_node(struct page *node_page)
2679 ++{
2680 ++ struct f2fs_node *rn = F2FS_NODE(node_page);
2681 ++ return le64_to_cpu(rn->footer.cp_ver);
2682 ++}
2683 ++
2684 ++static inline block_t next_blkaddr_of_node(struct page *node_page)
2685 ++{
2686 ++ struct f2fs_node *rn = F2FS_NODE(node_page);
2687 ++ return le32_to_cpu(rn->footer.next_blkaddr);
2688 ++}
2689 ++
2690 + static inline void fill_node_footer(struct page *page, nid_t nid,
2691 + nid_t ino, unsigned int ofs, bool reset)
2692 + {
2693 +@@ -242,40 +273,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
2694 + {
2695 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
2696 + struct f2fs_node *rn = F2FS_NODE(page);
2697 ++ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
2698 ++ __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
2699 +
2700 +- rn->footer.cp_ver = ckpt->checkpoint_ver;
2701 ++ if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
2702 ++ __u64 crc = le32_to_cpu(*((__le32 *)
2703 ++ ((unsigned char *)ckpt + crc_offset)));
2704 ++ cp_ver |= (crc << 32);
2705 ++ }
2706 ++ rn->footer.cp_ver = cpu_to_le64(cp_ver);
2707 + rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
2708 + }
2709 +
2710 +-static inline nid_t ino_of_node(struct page *node_page)
2711 +-{
2712 +- struct f2fs_node *rn = F2FS_NODE(node_page);
2713 +- return le32_to_cpu(rn->footer.ino);
2714 +-}
2715 +-
2716 +-static inline nid_t nid_of_node(struct page *node_page)
2717 ++static inline bool is_recoverable_dnode(struct page *page)
2718 + {
2719 +- struct f2fs_node *rn = F2FS_NODE(node_page);
2720 +- return le32_to_cpu(rn->footer.nid);
2721 +-}
2722 +-
2723 +-static inline unsigned int ofs_of_node(struct page *node_page)
2724 +-{
2725 +- struct f2fs_node *rn = F2FS_NODE(node_page);
2726 +- unsigned flag = le32_to_cpu(rn->footer.flag);
2727 +- return flag >> OFFSET_BIT_SHIFT;
2728 +-}
2729 +-
2730 +-static inline unsigned long long cpver_of_node(struct page *node_page)
2731 +-{
2732 +- struct f2fs_node *rn = F2FS_NODE(node_page);
2733 +- return le64_to_cpu(rn->footer.cp_ver);
2734 +-}
2735 ++ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
2736 ++ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
2737 ++ __u64 cp_ver = cur_cp_version(ckpt);
2738 +
2739 +-static inline block_t next_blkaddr_of_node(struct page *node_page)
2740 +-{
2741 +- struct f2fs_node *rn = F2FS_NODE(node_page);
2742 +- return le32_to_cpu(rn->footer.next_blkaddr);
2743 ++ if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
2744 ++ __u64 crc = le32_to_cpu(*((__le32 *)
2745 ++ ((unsigned char *)ckpt + crc_offset)));
2746 ++ cp_ver |= (crc << 32);
2747 ++ }
2748 ++ return cpu_to_le64(cp_ver) == cpver_of_node(page);
2749 + }
2750 +
2751 + /*
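[Editor's note — illustrative sketch, not part of the patch.] With CP_CRC_RECOVERY_FLAG set, fill_node_footer_blkaddr() no longer stamps nodes with the bare checkpoint version: the checkpoint CRC is folded into the upper 32 bits, and is_recoverable_dnode() compares the combined value. A node fsynced under an older checkpoint whose version number happens to recur will then fail the match and be skipped by roll-forward recovery. The packing:

    #include <stdint.h>
    #include <stdio.h>

    /* the version behaves as a 32-bit counter in practice, so the
     * CRC can occupy the upper half without colliding */
    static uint64_t recovery_cp_ver(uint64_t cp_ver, uint32_t crc)
    {
        return cp_ver | ((uint64_t)crc << 32);
    }

    int main(void)
    {
        uint64_t stamped = recovery_cp_ver(7, 0xDEADBEEF);
        printf("node footer cp_ver = 0x%016llx\n",
               (unsigned long long)stamped);
        return 0;
    }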
2752 +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
2753 +index e32f349f341b..2878be3e448f 100644
2754 +--- a/fs/f2fs/recovery.c
2755 ++++ b/fs/f2fs/recovery.c
2756 +@@ -67,7 +67,30 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
2757 + return NULL;
2758 + }
2759 +
2760 +-static int recover_dentry(struct inode *inode, struct page *ipage)
2761 ++static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
2762 ++ struct inode *inode)
2763 ++{
2764 ++ struct fsync_inode_entry *entry;
2765 ++
2766 ++ entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
2767 ++ if (!entry)
2768 ++ return NULL;
2769 ++
2770 ++ entry->inode = inode;
2771 ++ list_add_tail(&entry->list, head);
2772 ++
2773 ++ return entry;
2774 ++}
2775 ++
2776 ++static void del_fsync_inode(struct fsync_inode_entry *entry)
2777 ++{
2778 ++ iput(entry->inode);
2779 ++ list_del(&entry->list);
2780 ++ kmem_cache_free(fsync_entry_slab, entry);
2781 ++}
2782 ++
2783 ++static int recover_dentry(struct inode *inode, struct page *ipage,
2784 ++ struct list_head *dir_list)
2785 + {
2786 + struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
2787 + nid_t pino = le32_to_cpu(raw_inode->i_pino);
2788 +@@ -75,18 +98,29 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
2789 + struct qstr name;
2790 + struct page *page;
2791 + struct inode *dir, *einode;
2792 ++ struct fsync_inode_entry *entry;
2793 + int err = 0;
2794 +
2795 +- dir = f2fs_iget(inode->i_sb, pino);
2796 +- if (IS_ERR(dir)) {
2797 +- err = PTR_ERR(dir);
2798 +- goto out;
2799 ++ entry = get_fsync_inode(dir_list, pino);
2800 ++ if (!entry) {
2801 ++ dir = f2fs_iget(inode->i_sb, pino);
2802 ++ if (IS_ERR(dir)) {
2803 ++ err = PTR_ERR(dir);
2804 ++ goto out;
2805 ++ }
2806 ++
2807 ++ entry = add_fsync_inode(dir_list, dir);
2808 ++ if (!entry) {
2809 ++ err = -ENOMEM;
2810 ++ iput(dir);
2811 ++ goto out;
2812 ++ }
2813 + }
2814 +
2815 +- if (file_enc_name(inode)) {
2816 +- iput(dir);
2817 ++ dir = entry->inode;
2818 ++
2819 ++ if (file_enc_name(inode))
2820 + return 0;
2821 +- }
2822 +
2823 + name.len = le32_to_cpu(raw_inode->i_namelen);
2824 + name.name = raw_inode->i_name;
2825 +@@ -94,7 +128,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
2826 + if (unlikely(name.len > F2FS_NAME_LEN)) {
2827 + WARN_ON(1);
2828 + err = -ENAMETOOLONG;
2829 +- goto out_err;
2830 ++ goto out;
2831 + }
2832 + retry:
2833 + de = f2fs_find_entry(dir, &name, &page);
2834 +@@ -120,23 +154,12 @@ retry:
2835 + goto retry;
2836 + }
2837 + err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
2838 +- if (err)
2839 +- goto out_err;
2840 +-
2841 +- if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
2842 +- iput(dir);
2843 +- } else {
2844 +- add_dirty_dir_inode(dir);
2845 +- set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
2846 +- }
2847 +
2848 + goto out;
2849 +
2850 + out_unmap_put:
2851 + f2fs_dentry_kunmap(dir, page);
2852 + f2fs_put_page(page, 0);
2853 +-out_err:
2854 +- iput(dir);
2855 + out:
2856 + f2fs_msg(inode->i_sb, KERN_NOTICE,
2857 + "%s: ino = %x, name = %s, dir = %lx, err = %d",
2858 +@@ -170,8 +193,8 @@ static void recover_inode(struct inode *inode, struct page *page)
2859 +
2860 + static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
2861 + {
2862 +- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
2863 + struct curseg_info *curseg;
2864 ++ struct inode *inode;
2865 + struct page *page = NULL;
2866 + block_t blkaddr;
2867 + int err = 0;
2868 +@@ -185,12 +208,12 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
2869 + while (1) {
2870 + struct fsync_inode_entry *entry;
2871 +
2872 +- if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
2873 ++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
2874 + return 0;
2875 +
2876 + page = get_tmp_page(sbi, blkaddr);
2877 +
2878 +- if (cp_ver != cpver_of_node(page))
2879 ++ if (!is_recoverable_dnode(page))
2880 + break;
2881 +
2882 + if (!is_fsync_dnode(page))
2883 +@@ -204,27 +227,27 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
2884 + break;
2885 + }
2886 +
2887 +- /* add this fsync inode to the list */
2888 +- entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
2889 +- if (!entry) {
2890 +- err = -ENOMEM;
2891 +- break;
2892 +- }
2893 + /*
2894 + * CP | dnode(F) | inode(DF)
2895 + * For this case, we should not give up now.
2896 + */
2897 +- entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
2898 +- if (IS_ERR(entry->inode)) {
2899 +- err = PTR_ERR(entry->inode);
2900 +- kmem_cache_free(fsync_entry_slab, entry);
2901 ++ inode = f2fs_iget(sbi->sb, ino_of_node(page));
2902 ++ if (IS_ERR(inode)) {
2903 ++ err = PTR_ERR(inode);
2904 + if (err == -ENOENT) {
2905 + err = 0;
2906 + goto next;
2907 + }
2908 + break;
2909 + }
2910 +- list_add_tail(&entry->list, head);
2911 ++
2912 ++ /* add this fsync inode to the list */
2913 ++ entry = add_fsync_inode(head, inode);
2914 ++ if (!entry) {
2915 ++ err = -ENOMEM;
2916 ++ iput(inode);
2917 ++ break;
2918 ++ }
2919 + }
2920 + entry->blkaddr = blkaddr;
2921 +
2922 +@@ -248,11 +271,8 @@ static void destroy_fsync_dnodes(struct list_head *head)
2923 + {
2924 + struct fsync_inode_entry *entry, *tmp;
2925 +
2926 +- list_for_each_entry_safe(entry, tmp, head, list) {
2927 +- iput(entry->inode);
2928 +- list_del(&entry->list);
2929 +- kmem_cache_free(fsync_entry_slab, entry);
2930 +- }
2931 ++ list_for_each_entry_safe(entry, tmp, head, list)
2932 ++ del_fsync_inode(entry);
2933 + }
2934 +
2935 + static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
2936 +@@ -423,7 +443,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
2937 + }
2938 +
2939 + /* dest is valid block, try to recover from src to dest */
2940 +- if (is_valid_blkaddr(sbi, dest, META_POR)) {
2941 ++ if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
2942 +
2943 + if (src == NULL_ADDR) {
2944 + err = reserve_new_block(&dn);
2945 +@@ -459,35 +479,34 @@ out:
2946 + return err;
2947 + }
2948 +
2949 +-static int recover_data(struct f2fs_sb_info *sbi,
2950 +- struct list_head *head, int type)
2951 ++static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
2952 ++ struct list_head *dir_list)
2953 + {
2954 +- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
2955 + struct curseg_info *curseg;
2956 + struct page *page = NULL;
2957 + int err = 0;
2958 + block_t blkaddr;
2959 +
2960 + /* get node pages in the current segment */
2961 +- curseg = CURSEG_I(sbi, type);
2962 ++ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
2963 + blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
2964 +
2965 + while (1) {
2966 + struct fsync_inode_entry *entry;
2967 +
2968 +- if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
2969 ++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
2970 + break;
2971 +
2972 + ra_meta_pages_cond(sbi, blkaddr);
2973 +
2974 + page = get_tmp_page(sbi, blkaddr);
2975 +
2976 +- if (cp_ver != cpver_of_node(page)) {
2977 ++ if (!is_recoverable_dnode(page)) {
2978 + f2fs_put_page(page, 1);
2979 + break;
2980 + }
2981 +
2982 +- entry = get_fsync_inode(head, ino_of_node(page));
2983 ++ entry = get_fsync_inode(inode_list, ino_of_node(page));
2984 + if (!entry)
2985 + goto next;
2986 + /*
2987 +@@ -498,7 +517,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
2988 + if (entry->last_inode == blkaddr)
2989 + recover_inode(entry->inode, page);
2990 + if (entry->last_dentry == blkaddr) {
2991 +- err = recover_dentry(entry->inode, page);
2992 ++ err = recover_dentry(entry->inode, page, dir_list);
2993 + if (err) {
2994 + f2fs_put_page(page, 1);
2995 + break;
2996 +@@ -510,11 +529,8 @@ static int recover_data(struct f2fs_sb_info *sbi,
2997 + break;
2998 + }
2999 +
3000 +- if (entry->blkaddr == blkaddr) {
3001 +- iput(entry->inode);
3002 +- list_del(&entry->list);
3003 +- kmem_cache_free(fsync_entry_slab, entry);
3004 +- }
3005 ++ if (entry->blkaddr == blkaddr)
3006 ++ del_fsync_inode(entry);
3007 + next:
3008 + /* check next segment */
3009 + blkaddr = next_blkaddr_of_node(page);
3010 +@@ -525,12 +541,14 @@ next:
3011 + return err;
3012 + }
3013 +
3014 +-int recover_fsync_data(struct f2fs_sb_info *sbi)
3015 ++int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
3016 + {
3017 + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3018 + struct list_head inode_list;
3019 ++ struct list_head dir_list;
3020 + block_t blkaddr;
3021 + int err;
3022 ++ int ret = 0;
3023 + bool need_writecp = false;
3024 +
3025 + fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
3026 +@@ -539,6 +557,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
3027 + return -ENOMEM;
3028 +
3029 + INIT_LIST_HEAD(&inode_list);
3030 ++ INIT_LIST_HEAD(&dir_list);
3031 +
3032 + /* prevent checkpoint */
3033 + mutex_lock(&sbi->cp_mutex);
3034 +@@ -547,21 +566,22 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
3035 +
3036 + /* step #1: find fsynced inode numbers */
3037 + err = find_fsync_dnodes(sbi, &inode_list);
3038 +- if (err)
3039 ++ if (err || list_empty(&inode_list))
3040 + goto out;
3041 +
3042 +- if (list_empty(&inode_list))
3043 ++ if (check_only) {
3044 ++ ret = 1;
3045 + goto out;
3046 ++ }
3047 +
3048 + need_writecp = true;
3049 +
3050 + /* step #2: recover data */
3051 +- err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
3052 ++ err = recover_data(sbi, &inode_list, &dir_list);
3053 + if (!err)
3054 + f2fs_bug_on(sbi, !list_empty(&inode_list));
3055 + out:
3056 + destroy_fsync_dnodes(&inode_list);
3057 +- kmem_cache_destroy(fsync_entry_slab);
3058 +
3059 + /* truncate meta pages to be used by the recovery */
3060 + truncate_inode_pages_range(META_MAPPING(sbi),
3061 +@@ -573,31 +593,20 @@ out:
3062 + }
3063 +
3064 + clear_sbi_flag(sbi, SBI_POR_DOING);
3065 +- if (err) {
3066 +- bool invalidate = false;
3067 +-
3068 +- if (discard_next_dnode(sbi, blkaddr))
3069 +- invalidate = true;
3070 +-
3071 +- /* Flush all the NAT/SIT pages */
3072 +- while (get_pages(sbi, F2FS_DIRTY_META))
3073 +- sync_meta_pages(sbi, META, LONG_MAX);
3074 ++ if (err)
3075 ++ set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
3076 ++ mutex_unlock(&sbi->cp_mutex);
3077 +
3078 +- /* invalidate temporary meta page */
3079 +- if (invalidate)
3080 +- invalidate_mapping_pages(META_MAPPING(sbi),
3081 +- blkaddr, blkaddr);
3082 ++ /* let's drop all the directory inodes for clean checkpoint */
3083 ++ destroy_fsync_dnodes(&dir_list);
3084 +
3085 +- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
3086 +- mutex_unlock(&sbi->cp_mutex);
3087 +- } else if (need_writecp) {
3088 ++ if (!err && need_writecp) {
3089 + struct cp_control cpc = {
3090 + .reason = CP_RECOVERY,
3091 + };
3092 +- mutex_unlock(&sbi->cp_mutex);
3093 + write_checkpoint(sbi, &cpc);
3094 +- } else {
3095 +- mutex_unlock(&sbi->cp_mutex);
3096 + }
3097 +- return err;
3098 ++
3099 ++ kmem_cache_destroy(fsync_entry_slab);
3100 ++ return ret ? ret: err;
3101 + }
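[Editor's note — illustrative sketch, not part of the patch.] recover_fsync_data() grows a check_only mode and a second list: parent directories opened by recover_dentry() are kept on dir_list and iput via destroy_fsync_dnodes() only after cp_mutex is released, which is what allows the FI_DELAY_IPUT flag and add_dirty_dir_inode() to be deleted. The new return contract in miniature:

    /* mirrors "return ret ? ret: err;" at the end of the function:
     *   >0 (1)  check_only found fsync'd data needing recovery
     *    0      nothing to do, or recovery completed
     *   <0      recovery failed with an errno */
    static int recover_result(int check_only, int found, int err)
    {
        int ret = 0;

        if (!err && found && check_only)
            ret = 1;
        return ret ? ret : err;
    }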
3102 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
3103 +index 39ec9da08bb5..6802cd754eda 100644
3104 +--- a/fs/f2fs/segment.c
3105 ++++ b/fs/f2fs/segment.c
3106 +@@ -519,28 +519,6 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
3107 + return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
3108 + }
3109 +
3110 +-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
3111 +-{
3112 +- int err = -ENOTSUPP;
3113 +-
3114 +- if (test_opt(sbi, DISCARD)) {
3115 +- struct seg_entry *se = get_seg_entry(sbi,
3116 +- GET_SEGNO(sbi, blkaddr));
3117 +- unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
3118 +-
3119 +- if (f2fs_test_bit(offset, se->discard_map))
3120 +- return false;
3121 +-
3122 +- err = f2fs_issue_discard(sbi, blkaddr, 1);
3123 +- }
3124 +-
3125 +- if (err) {
3126 +- update_meta_page(sbi, NULL, blkaddr);
3127 +- return true;
3128 +- }
3129 +- return false;
3130 +-}
3131 +-
3132 + static void __add_discard_entry(struct f2fs_sb_info *sbi,
3133 + struct cp_control *cpc, struct seg_entry *se,
3134 + unsigned int start, unsigned int end)
3135 +@@ -774,7 +752,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
3136 + struct seg_entry *se;
3137 + bool is_cp = false;
3138 +
3139 +- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
3140 ++ if (!is_valid_data_blkaddr(sbi, blkaddr))
3141 + return true;
3142 +
3143 + mutex_lock(&sit_i->sentry_lock);
3144 +@@ -1488,7 +1466,7 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
3145 + {
3146 + struct page *cpage;
3147 +
3148 +- if (blkaddr == NEW_ADDR)
3149 ++ if (!is_valid_data_blkaddr(sbi, blkaddr))
3150 + return;
3151 +
3152 + f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
3153 +@@ -2123,7 +2101,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
3154 + return restore_curseg_summaries(sbi);
3155 + }
3156 +
3157 +-static void build_sit_entries(struct f2fs_sb_info *sbi)
3158 ++static int build_sit_entries(struct f2fs_sb_info *sbi)
3159 + {
3160 + struct sit_info *sit_i = SIT_I(sbi);
3161 + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3162 +@@ -2132,6 +2110,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
3163 + unsigned int i, start, end;
3164 + unsigned int readed, start_blk = 0;
3165 + int nrpages = MAX_BIO_BLOCKS(sbi);
3166 ++ int err = 0;
3167 +
3168 + do {
3169 + readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
3170 +@@ -2145,36 +2124,62 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
3171 + struct f2fs_sit_entry sit;
3172 + struct page *page;
3173 +
3174 +- mutex_lock(&curseg->curseg_mutex);
3175 +- for (i = 0; i < sits_in_cursum(sum); i++) {
3176 +- if (le32_to_cpu(segno_in_journal(sum, i))
3177 +- == start) {
3178 +- sit = sit_in_journal(sum, i);
3179 +- mutex_unlock(&curseg->curseg_mutex);
3180 +- goto got_it;
3181 +- }
3182 +- }
3183 +- mutex_unlock(&curseg->curseg_mutex);
3184 +-
3185 + page = get_current_sit_page(sbi, start);
3186 + sit_blk = (struct f2fs_sit_block *)page_address(page);
3187 + sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3188 + f2fs_put_page(page, 1);
3189 +-got_it:
3190 +- check_block_count(sbi, start, &sit);
3191 ++
3192 ++ err = check_block_count(sbi, start, &sit);
3193 ++ if (err)
3194 ++ return err;
3195 + seg_info_from_raw_sit(se, &sit);
3196 +
3197 + /* build discard map only one time */
3198 + memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
3199 + sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
3200 +
3201 +- if (sbi->segs_per_sec > 1) {
3202 +- struct sec_entry *e = get_sec_entry(sbi, start);
3203 +- e->valid_blocks += se->valid_blocks;
3204 +- }
3205 ++ if (sbi->segs_per_sec > 1)
3206 ++ get_sec_entry(sbi, start)->valid_blocks +=
3207 ++ se->valid_blocks;
3208 + }
3209 + start_blk += readed;
3210 + } while (start_blk < sit_blk_cnt);
3211 ++
3212 ++ mutex_lock(&curseg->curseg_mutex);
3213 ++ for (i = 0; i < sits_in_cursum(sum); i++) {
3214 ++ struct f2fs_sit_entry sit;
3215 ++ struct seg_entry *se;
3216 ++ unsigned int old_valid_blocks;
3217 ++
3218 ++ start = le32_to_cpu(segno_in_journal(sum, i));
3219 ++ if (start >= MAIN_SEGS(sbi)) {
3220 ++ f2fs_msg(sbi->sb, KERN_ERR,
3221 ++ "Wrong journal entry on segno %u",
3222 ++ start);
3223 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
3224 ++ err = -EINVAL;
3225 ++ break;
3226 ++ }
3227 ++
3228 ++ se = &sit_i->sentries[start];
3229 ++ sit = sit_in_journal(sum, i);
3230 ++
3231 ++ old_valid_blocks = se->valid_blocks;
3232 ++
3233 ++ err = check_block_count(sbi, start, &sit);
3234 ++ if (err)
3235 ++ break;
3236 ++ seg_info_from_raw_sit(se, &sit);
3237 ++
3238 ++ memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
3239 ++ sbi->discard_blks += old_valid_blocks - se->valid_blocks;
3240 ++
3241 ++ if (sbi->segs_per_sec > 1)
3242 ++ get_sec_entry(sbi, start)->valid_blocks +=
3243 ++ se->valid_blocks - old_valid_blocks;
3244 ++ }
3245 ++ mutex_unlock(&curseg->curseg_mutex);
3246 ++ return err;
3247 + }
3248 +
3249 + static void init_free_segmap(struct f2fs_sb_info *sbi)
3250 +@@ -2336,7 +2341,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
3251 + return err;
3252 +
3253 + /* reinit free segmap based on SIT */
3254 +- build_sit_entries(sbi);
3255 ++ err = build_sit_entries(sbi);
3256 ++ if (err)
3257 ++ return err;
3258 +
3259 + init_free_segmap(sbi);
3260 + err = build_dirty_segmap(sbi);
3261 +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
3262 +index bfa1d31f79aa..08b08ae6ba9d 100644
3263 +--- a/fs/f2fs/segment.h
3264 ++++ b/fs/f2fs/segment.h
3265 +@@ -17,6 +17,8 @@
3266 +
3267 + #define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
3268 +
3269 ++#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
3270 ++
3271 + /* L: Logical segment # in volume, R: Relative segment # in main area */
3272 + #define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
3273 + #define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
3274 +@@ -46,13 +48,19 @@
3275 + (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
3276 + sbi->segs_per_sec)) \
3277 +
3278 +-#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
3279 +-#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
3280 ++#define MAIN_BLKADDR(sbi) \
3281 ++ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
3282 ++ le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
3283 ++#define SEG0_BLKADDR(sbi) \
3284 ++ (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
3285 ++ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
3286 +
3287 + #define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
3288 + #define MAIN_SECS(sbi) (sbi->total_sections)
3289 +
3290 +-#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
3291 ++#define TOTAL_SEGS(sbi) \
3292 ++ (SM_I(sbi) ? SM_I(sbi)->segment_count : \
3293 ++ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
3294 + #define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
3295 +
3296 + #define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
3297 +@@ -72,7 +80,7 @@
3298 + (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
3299 +
3300 + #define GET_SEGNO(sbi, blk_addr) \
3301 +- (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
3302 ++ ((!is_valid_data_blkaddr(sbi, blk_addr)) ? \
3303 + NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
3304 + GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
3305 + #define GET_SECNO(sbi, segno) \
3306 +@@ -574,16 +582,20 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
3307 + f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
3308 + }
3309 +
3310 +-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
3311 ++static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
3312 + {
3313 +- f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
3314 +- || blk_addr >= MAX_BLKADDR(sbi));
3315 ++ struct f2fs_sb_info *sbi = fio->sbi;
3316 ++
3317 ++ if (__is_meta_io(fio))
3318 ++ verify_blkaddr(sbi, blk_addr, META_GENERIC);
3319 ++ else
3320 ++ verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
3321 + }
3322 +
3323 + /*
3324 + * Summary block is always treated as an invalid block
3325 + */
3326 +-static inline void check_block_count(struct f2fs_sb_info *sbi,
3327 ++static inline int check_block_count(struct f2fs_sb_info *sbi,
3328 + int segno, struct f2fs_sit_entry *raw_sit)
3329 + {
3330 + #ifdef CONFIG_F2FS_CHECK_FS
3331 +@@ -605,11 +617,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
3332 + cur_pos = next_pos;
3333 + is_valid = !is_valid;
3334 + } while (cur_pos < sbi->blocks_per_seg);
3335 +- BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
3336 ++
3337 ++ if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
3338 ++ f2fs_msg(sbi->sb, KERN_ERR,
3339 ++ "Mismatch valid blocks %d vs. %d",
3340 ++ GET_SIT_VBLOCKS(raw_sit), valid_blocks);
3341 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
3342 ++ return -EINVAL;
3343 ++ }
3344 + #endif
3345 + /* check segment usage, and check boundary of a given segment number */
3346 +- f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
3347 +- || segno > TOTAL_SEGS(sbi) - 1);
3348 ++ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
3349 ++ || segno > TOTAL_SEGS(sbi) - 1)) {
3350 ++ f2fs_msg(sbi->sb, KERN_ERR,
3351 ++ "Wrong valid blocks %d or segno %u",
3352 ++ GET_SIT_VBLOCKS(raw_sit), segno);
3353 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
3354 ++ return -EINVAL;
3355 ++ }
3356 ++ return 0;
3357 + }
3358 +
3359 + static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
3360 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
3361 +index 2ffc53d0c9c7..dbd7adff8b5a 100644
3362 +--- a/fs/f2fs/super.c
3363 ++++ b/fs/f2fs/super.c
3364 +@@ -994,6 +994,8 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
3365 + static int sanity_check_raw_super(struct super_block *sb,
3366 + struct f2fs_super_block *raw_super)
3367 + {
3368 ++ block_t segment_count, segs_per_sec, secs_per_zone;
3369 ++ block_t total_sections, blocks_per_seg;
3370 + unsigned int blocksize;
3371 +
3372 + if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
3373 +@@ -1047,6 +1049,68 @@ static int sanity_check_raw_super(struct super_block *sb,
3374 + return 1;
3375 + }
3376 +
3377 ++ segment_count = le32_to_cpu(raw_super->segment_count);
3378 ++ segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3379 ++ secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3380 ++ total_sections = le32_to_cpu(raw_super->section_count);
3381 ++
3382 ++ /* blocks_per_seg should be 512, given the above check */
3383 ++ blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
3384 ++
3385 ++ if (segment_count > F2FS_MAX_SEGMENT ||
3386 ++ segment_count < F2FS_MIN_SEGMENTS) {
3387 ++ f2fs_msg(sb, KERN_INFO,
3388 ++ "Invalid segment count (%u)",
3389 ++ segment_count);
3390 ++ return 1;
3391 ++ }
3392 ++
3393 ++ if (total_sections > segment_count ||
3394 ++ total_sections < F2FS_MIN_SEGMENTS ||
3395 ++ segs_per_sec > segment_count || !segs_per_sec) {
3396 ++ f2fs_msg(sb, KERN_INFO,
3397 ++ "Invalid segment/section count (%u, %u x %u)",
3398 ++ segment_count, total_sections, segs_per_sec);
3399 ++ return 1;
3400 ++ }
3401 ++
3402 ++ if ((segment_count / segs_per_sec) < total_sections) {
3403 ++ f2fs_msg(sb, KERN_INFO,
3404 ++ "Small segment_count (%u < %u * %u)",
3405 ++ segment_count, segs_per_sec, total_sections);
3406 ++ return 1;
3407 ++ }
3408 ++
3409 ++ if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3410 ++ f2fs_msg(sb, KERN_INFO,
3411 ++ "Wrong segment_count / block_count (%u > %llu)",
3412 ++ segment_count, le64_to_cpu(raw_super->block_count));
3413 ++ return 1;
3414 ++ }
3415 ++
3416 ++ if (secs_per_zone > total_sections || !secs_per_zone) {
3417 ++ f2fs_msg(sb, KERN_INFO,
3418 ++ "Wrong secs_per_zone / total_sections (%u, %u)",
3419 ++ secs_per_zone, total_sections);
3420 ++ return 1;
3421 ++ }
3422 ++ if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION) {
3423 ++ f2fs_msg(sb, KERN_INFO,
3424 ++ "Corrupted extension count (%u > %u)",
3425 ++ le32_to_cpu(raw_super->extension_count),
3426 ++ F2FS_MAX_EXTENSION);
3427 ++ return 1;
3428 ++ }
3429 ++
3430 ++ if (le32_to_cpu(raw_super->cp_payload) >
3431 ++ (blocks_per_seg - F2FS_CP_PACKS)) {
3432 ++ f2fs_msg(sb, KERN_INFO,
3433 ++ "Insane cp_payload (%u > %u)",
3434 ++ le32_to_cpu(raw_super->cp_payload),
3435 ++ blocks_per_seg - F2FS_CP_PACKS);
3436 ++ return 1;
3437 ++ }
3438 ++
3439 + /* check reserved ino info */
3440 + if (le32_to_cpu(raw_super->node_ino) != 1 ||
3441 + le32_to_cpu(raw_super->meta_ino) != 2 ||
3442 +@@ -1059,13 +1123,6 @@ static int sanity_check_raw_super(struct super_block *sb,
3443 + return 1;
3444 + }
3445 +
3446 +- if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
3447 +- f2fs_msg(sb, KERN_INFO,
3448 +- "Invalid segment count (%u)",
3449 +- le32_to_cpu(raw_super->segment_count));
3450 +- return 1;
3451 +- }
3452 +-
3453 + /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
3454 + if (sanity_check_area_boundary(sb, raw_super))
3455 + return 1;
3456 +@@ -1073,15 +1130,19 @@ static int sanity_check_raw_super(struct super_block *sb,
3457 + return 0;
3458 + }
3459 +
3460 +-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
3461 ++int sanity_check_ckpt(struct f2fs_sb_info *sbi)
3462 + {
3463 + unsigned int total, fsmeta;
3464 + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3465 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3466 ++ unsigned int ovp_segments, reserved_segments;
3467 + unsigned int main_segs, blocks_per_seg;
3468 + unsigned int sit_segs, nat_segs;
3469 + unsigned int sit_bitmap_size, nat_bitmap_size;
3470 + unsigned int log_blocks_per_seg;
3471 ++ unsigned int segment_count_main;
3472 ++ unsigned int cp_pack_start_sum, cp_payload;
3473 ++ block_t user_block_count;
3474 + int i;
3475 +
3476 + total = le32_to_cpu(raw_super->segment_count);
3477 +@@ -1096,6 +1157,26 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
3478 + if (unlikely(fsmeta >= total))
3479 + return 1;
3480 +
3481 ++ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3482 ++ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3483 ++
3484 ++ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
3485 ++ ovp_segments == 0 || reserved_segments == 0)) {
3486 ++ f2fs_msg(sbi->sb, KERN_ERR,
3487 ++ "Wrong layout: check mkfs.f2fs version");
3488 ++ return 1;
3489 ++ }
3490 ++
3491 ++ user_block_count = le64_to_cpu(ckpt->user_block_count);
3492 ++ segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3493 ++ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3494 ++ if (!user_block_count || user_block_count >=
3495 ++ segment_count_main << log_blocks_per_seg) {
3496 ++ f2fs_msg(sbi->sb, KERN_ERR,
3497 ++ "Wrong user_block_count: %u", user_block_count);
3498 ++ return 1;
3499 ++ }
3500 ++
3501 + main_segs = le32_to_cpu(raw_super->segment_count_main);
3502 + blocks_per_seg = sbi->blocks_per_seg;
3503 +
3504 +@@ -1112,7 +1193,6 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
3505 +
3506 + sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3507 + nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3508 +- log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3509 +
3510 + if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3511 + nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3512 +@@ -1122,6 +1202,17 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
3513 + return 1;
3514 + }
3515 +
3516 ++ cp_pack_start_sum = __start_sum_addr(sbi);
3517 ++ cp_payload = __cp_payload(sbi);
3518 ++ if (cp_pack_start_sum < cp_payload + 1 ||
3519 ++ cp_pack_start_sum > blocks_per_seg - 1 -
3520 ++ NR_CURSEG_TYPE) {
3521 ++ f2fs_msg(sbi->sb, KERN_ERR,
3522 ++ "Wrong cp_pack_start_sum: %u",
3523 ++ cp_pack_start_sum);
3524 ++ return 1;
3525 ++ }
3526 ++
3527 + if (unlikely(f2fs_cp_error(sbi))) {
3528 + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
3529 + return 1;
3530 +@@ -1358,13 +1449,6 @@ try_onemore:
3531 + goto free_meta_inode;
3532 + }
3533 +
3534 +- /* sanity checking of checkpoint */
3535 +- err = -EINVAL;
3536 +- if (sanity_check_ckpt(sbi)) {
3537 +- f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
3538 +- goto free_cp;
3539 +- }
3540 +-
3541 + sbi->total_valid_node_count =
3542 + le32_to_cpu(sbi->ckpt->valid_node_count);
3543 + sbi->total_valid_inode_count =
3544 +@@ -1464,14 +1548,27 @@ try_onemore:
3545 + if (need_fsck)
3546 + set_sbi_flag(sbi, SBI_NEED_FSCK);
3547 +
3548 +- err = recover_fsync_data(sbi);
3549 +- if (err) {
3550 ++ if (!retry)
3551 ++ goto skip_recovery;
3552 ++
3553 ++ err = recover_fsync_data(sbi, false);
3554 ++ if (err < 0) {
3555 + need_fsck = true;
3556 + f2fs_msg(sb, KERN_ERR,
3557 + "Cannot recover all fsync data errno=%ld", err);
3558 + goto free_kobj;
3559 + }
3560 ++ } else {
3561 ++ err = recover_fsync_data(sbi, true);
3562 ++
3563 ++ if (!f2fs_readonly(sb) && err > 0) {
3564 ++ err = -EINVAL;
3565 ++ f2fs_msg(sb, KERN_ERR,
3566 ++ "Need to recover fsync data");
3567 ++ goto free_kobj;
3568 ++ }
3569 + }
3570 ++skip_recovery:
3571 + /* recover_fsync_data() cleared this already */
3572 + clear_sbi_flag(sbi, SBI_POR_DOING);
3573 +
3574 +@@ -1517,7 +1614,6 @@ free_nm:
3575 + destroy_node_manager(sbi);
3576 + free_sm:
3577 + destroy_segment_manager(sbi);
3578 +-free_cp:
3579 + kfree(sbi->ckpt);
3580 + free_meta_inode:
3581 + make_bad_inode(sbi->meta_inode);
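
[The super.c hunk above validates on-disk geometry before any of it is trusted. A condensed, standalone restatement of the core inequalities, with names and the demo values chosen for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_SEGMENTS 9	/* SB + 2 (CP + SIT + NAT) + SSA + MAIN, per the patch */

/* Returns true when the section/segment geometry is self-consistent. */
static bool geometry_ok(uint32_t segment_count, uint32_t segs_per_sec,
			uint32_t secs_per_zone, uint32_t total_sections)
{
	if (segment_count < MIN_SEGMENTS)
		return false;
	if (!segs_per_sec || segs_per_sec > segment_count)
		return false;
	if (total_sections < MIN_SEGMENTS || total_sections > segment_count)
		return false;
	if (segment_count / segs_per_sec < total_sections)
		return false;
	if (!secs_per_zone || secs_per_zone > total_sections)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", geometry_ok(512, 1, 1, 512)); /* 1: plausible image */
	printf("%d\n", geometry_ok(512, 0, 1, 512)); /* 0: zero segs_per_sec */
	return 0;
}

The point of doing this in sanity_check_raw_super() is that every value is checked before it is used as a divisor, shift count, or array bound elsewhere in mount.]
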
3582 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
3583 +index 1544f530ccd0..023e7f32ee1b 100644
3584 +--- a/fs/jffs2/super.c
3585 ++++ b/fs/jffs2/super.c
3586 +@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
3587 + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
3588 +
3589 + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
3590 +- cancel_delayed_work_sync(&c->wbuf_dwork);
3591 ++ if (jffs2_is_writebuffered(c))
3592 ++ cancel_delayed_work_sync(&c->wbuf_dwork);
3593 + #endif
3594 +
3595 + mutex_lock(&c->alloc_sem);
3596 +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
3597 +index 0a4457fb0711..85111d740c9d 100644
3598 +--- a/fs/ocfs2/localalloc.c
3599 ++++ b/fs/ocfs2/localalloc.c
3600 +@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
3601 + if (num_used
3602 + || alloc->id1.bitmap1.i_used
3603 + || alloc->id1.bitmap1.i_total
3604 +- || la->la_bm_off)
3605 +- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
3606 ++ || la->la_bm_off) {
3607 ++ mlog(ML_ERROR, "inconsistent detected, clean journal with"
3608 ++ " unrecovered local alloc, please run fsck.ocfs2!\n"
3609 + "found = %u, set = %u, taken = %u, off = %u\n",
3610 + num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
3611 + le32_to_cpu(alloc->id1.bitmap1.i_total),
3612 + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
3613 +
3614 ++ status = -EINVAL;
3615 ++ goto bail;
3616 ++ }
3617 ++
3618 + osb->local_alloc_bh = alloc_bh;
3619 + osb->local_alloc_state = OCFS2_LA_ENABLED;
3620 +
3621 +diff --git a/fs/proc/array.c b/fs/proc/array.c
3622 +index cb71cbae606d..60cbaa821164 100644
3623 +--- a/fs/proc/array.c
3624 ++++ b/fs/proc/array.c
3625 +@@ -333,7 +333,7 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
3626 + #ifdef CONFIG_SECCOMP
3627 + seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
3628 + #endif
3629 +- seq_printf(m, "\nSpeculation_Store_Bypass:\t");
3630 ++ seq_printf(m, "Speculation_Store_Bypass:\t");
3631 + switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
3632 + case -EINVAL:
3633 + seq_printf(m, "unknown");
3634 +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
3635 +index bd21795ce657..679d75a864d0 100644
3636 +--- a/fs/pstore/ram_core.c
3637 ++++ b/fs/pstore/ram_core.c
3638 +@@ -445,6 +445,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
3639 + sig ^= PERSISTENT_RAM_SIG;
3640 +
3641 + if (prz->buffer->sig == sig) {
3642 ++ if (buffer_size(prz) == 0) {
3643 ++ pr_debug("found existing empty buffer\n");
3644 ++ return 0;
3645 ++ }
3646 ++
3647 + if (buffer_size(prz) > prz->buffer_size ||
3648 + buffer_start(prz) > buffer_size(prz))
3649 + pr_info("found existing invalid buffer, size %zu, start %zu\n",
3650 +diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
3651 +index fb9636cc927c..5d8d12746e6e 100644
3652 +--- a/fs/xfs/libxfs/xfs_attr.c
3653 ++++ b/fs/xfs/libxfs/xfs_attr.c
3654 +@@ -528,7 +528,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
3655 + if (args->flags & ATTR_CREATE)
3656 + return retval;
3657 + retval = xfs_attr_shortform_remove(args);
3658 +- ASSERT(retval == 0);
3659 ++ if (retval)
3660 ++ return retval;
3661 ++ /*
3662 ++ * Since we have removed the old attr, clear ATTR_REPLACE so
3663 ++ * that the leaf format add routine won't trip over the attr
3664 ++ * not being around.
3665 ++ */
3666 ++ args->flags &= ~ATTR_REPLACE;
3667 + }
3668 +
3669 + if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
3670 +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
3671 +index a307c37c2e6c..072501a0ac86 100644
3672 +--- a/include/linux/backing-dev-defs.h
3673 ++++ b/include/linux/backing-dev-defs.h
3674 +@@ -225,6 +225,14 @@ static inline void wb_get(struct bdi_writeback *wb)
3675 + */
3676 + static inline void wb_put(struct bdi_writeback *wb)
3677 + {
3678 ++ if (WARN_ON_ONCE(!wb->bdi)) {
3679 ++ /*
3680 ++ * A driver bug might cause a file to be removed before bdi was
3681 ++ * initialized.
3682 ++ */
3683 ++ return;
3684 ++ }
3685 ++
3686 + if (wb != &wb->bdi->wb)
3687 + percpu_ref_put(&wb->refcnt);
3688 + }
3689 +diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
3690 +index 3d6e6ce44c5c..520fd854e7b3 100644
3691 +--- a/include/linux/f2fs_fs.h
3692 ++++ b/include/linux/f2fs_fs.h
3693 +@@ -99,6 +99,7 @@ struct f2fs_super_block {
3694 + /*
3695 + * For checkpoint
3696 + */
3697 ++#define CP_CRC_RECOVERY_FLAG 0x00000040
3698 + #define CP_FASTBOOT_FLAG 0x00000020
3699 + #define CP_FSCK_FLAG 0x00000010
3700 + #define CP_ERROR_FLAG 0x00000008
3701 +@@ -497,4 +498,6 @@ enum {
3702 + F2FS_FT_MAX
3703 + };
3704 +
3705 ++#define S_SHIFT 12
3706 ++
3707 + #endif /* _LINUX_F2FS_FS_H */
3708 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
3709 +index 3309dbda7ffa..0bc7fa21db85 100644
3710 +--- a/mm/page-writeback.c
3711 ++++ b/mm/page-writeback.c
3712 +@@ -2151,6 +2151,7 @@ int write_cache_pages(struct address_space *mapping,
3713 + {
3714 + int ret = 0;
3715 + int done = 0;
3716 ++ int error;
3717 + struct pagevec pvec;
3718 + int nr_pages;
3719 + pgoff_t uninitialized_var(writeback_index);
3720 +@@ -2247,25 +2248,31 @@ continue_unlock:
3721 + goto continue_unlock;
3722 +
3723 + trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
3724 +- ret = (*writepage)(page, wbc, data);
3725 +- if (unlikely(ret)) {
3726 +- if (ret == AOP_WRITEPAGE_ACTIVATE) {
3727 ++ error = (*writepage)(page, wbc, data);
3728 ++ if (unlikely(error)) {
3729 ++ /*
3730 ++ * Handle errors according to the type of
3731 ++ * writeback. There's no need to continue for
3732 ++ * background writeback. Just push done_index
3733 ++ * past this page so media errors won't choke
3734 ++ * writeout for the entire file. For integrity
3735 ++ * writeback, we must process the entire dirty
3736 ++ * set regardless of errors because the fs may
3737 ++ * still have state to clear for each page. In
3738 ++ * that case we continue processing and return
3739 ++ * the first error.
3740 ++ */
3741 ++ if (error == AOP_WRITEPAGE_ACTIVATE) {
3742 + unlock_page(page);
3743 +- ret = 0;
3744 +- } else {
3745 +- /*
3746 +- * done_index is set past this page,
3747 +- * so media errors will not choke
3748 +- * background writeout for the entire
3749 +- * file. This has consequences for
3750 +- * range_cyclic semantics (ie. it may
3751 +- * not be suitable for data integrity
3752 +- * writeout).
3753 +- */
3754 ++ error = 0;
3755 ++ } else if (wbc->sync_mode != WB_SYNC_ALL) {
3756 ++ ret = error;
3757 + done_index = page->index + 1;
3758 + done = 1;
3759 + break;
3760 + }
3761 ++ if (!ret)
3762 ++ ret = error;
3763 + }
3764 +
3765 + /*
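
[The rewritten error path above distinguishes integrity writeback (WB_SYNC_ALL), which must visit every dirty page and report the first error, from background writeback, which may stop early. A userspace sketch of that "record the first error, keep going" shape; write_one() is a made-up stand-in:

#include <stdio.h>

/* Made-up page writer: fails on items 2 and 4. */
static int write_one(int i)
{
	return (i == 2 || i == 4) ? -5 /* -EIO */ : 0;
}

/* Integrity-style pass: process everything, return the first error seen. */
static int writeback_all(int nitems)
{
	int ret = 0;

	for (int i = 0; i < nitems; i++) {
		int error = write_one(i);

		if (error && !ret)
			ret = error; /* remember first failure, keep going */
	}
	return ret;
}

int main(void)
{
	printf("first error: %d\n", writeback_all(6)); /* prints -5 */
	return 0;
}

]
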
3766 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
3767 +index 55dcb2b20b59..6def85d75b1d 100644
3768 +--- a/net/bridge/br_netfilter_hooks.c
3769 ++++ b/net/bridge/br_netfilter_hooks.c
3770 +@@ -267,7 +267,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
3771 + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
3772 + int ret;
3773 +
3774 +- if (neigh->hh.hh_len) {
3775 ++ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
3776 + neigh_hh_bridge(&neigh->hh, skb);
3777 + skb->dev = nf_bridge->physindev;
3778 + ret = br_handle_frame_finish(net, sk, skb);
3779 +diff --git a/net/can/gw.c b/net/can/gw.c
3780 +index 77c8af4047ef..81650affa3fa 100644
3781 +--- a/net/can/gw.c
3782 ++++ b/net/can/gw.c
3783 +@@ -418,13 +418,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
3784 + while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
3785 + (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
3786 +
3787 +- /* check for checksum updates when the CAN frame has been modified */
3788 ++ /* Has the CAN frame been modified? */
3789 + if (modidx) {
3790 +- if (gwj->mod.csumfunc.crc8)
3791 ++ /* get available space for the processed CAN frame type */
3792 ++ int max_len = nskb->len - offsetof(struct can_frame, data);
3793 ++
3794 ++ /* dlc may have changed, make sure it fits to the CAN frame */
3795 ++ if (cf->can_dlc > max_len)
3796 ++ goto out_delete;
3797 ++
3798 ++ /* check for checksum updates in classic CAN length only */
3799 ++ if (gwj->mod.csumfunc.crc8) {
3800 ++ if (cf->can_dlc > 8)
3801 ++ goto out_delete;
3802 ++
3803 + (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
3804 ++ }
3805 ++
3806 ++ if (gwj->mod.csumfunc.xor) {
3807 ++ if (cf->can_dlc > 8)
3808 ++ goto out_delete;
3809 +
3810 +- if (gwj->mod.csumfunc.xor)
3811 + (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
3812 ++ }
3813 + }
3814 +
3815 + /* clear the skb timestamp if not configured the other way */
3816 +@@ -436,6 +452,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
3817 + gwj->dropped_frames++;
3818 + else
3819 + gwj->handled_frames++;
3820 ++
3821 ++ return;
3822 ++
3823 ++ out_delete:
3824 ++ /* delete frame due to misconfiguration */
3825 ++ gwj->deleted_frames++;
3826 ++ kfree_skb(nskb);
3827 ++ return;
3828 + }
3829 +
3830 + static inline int cgw_register_filter(struct cgw_job *gwj)
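
[The gateway fix above refuses to run the classic-CAN checksum helpers when a modification pushed can_dlc past the 8 data bytes a classic frame can hold. A toy version of that guard, with the frame layout and checksum simplified for illustration:

#include <stdint.h>
#include <stdio.h>

struct toy_frame {
	uint8_t dlc;
	uint8_t data[8];
};

/* XOR the payload into the last byte; reject out-of-range lengths
 * before touching the buffer, as the patch does for crc8/xor updates. */
static int xor_csum(struct toy_frame *f)
{
	if (f->dlc == 0 || f->dlc > sizeof(f->data))
		return -22; /* -EINVAL: would read/write past the frame */

	uint8_t x = 0;
	for (int i = 0; i < f->dlc - 1; i++)
		x ^= f->data[i];
	f->data[f->dlc - 1] = x;
	return 0;
}

int main(void)
{
	struct toy_frame f = { .dlc = 9 };
	printf("%d\n", xor_csum(&f)); /* -22: rejected, no overflow */
	return 0;
}

]
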
3831 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3832 +index 9703924ed071..8a57bbaf7452 100644
3833 +--- a/net/core/skbuff.c
3834 ++++ b/net/core/skbuff.c
3835 +@@ -2388,12 +2388,15 @@ EXPORT_SYMBOL(skb_queue_purge);
3836 + */
3837 + void skb_rbtree_purge(struct rb_root *root)
3838 + {
3839 +- struct sk_buff *skb, *next;
3840 ++ struct rb_node *p = rb_first(root);
3841 +
3842 +- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
3843 +- kfree_skb(skb);
3844 ++ while (p) {
3845 ++ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3846 +
3847 +- *root = RB_ROOT;
3848 ++ p = rb_next(p);
3849 ++ rb_erase(&skb->rbnode, root);
3850 ++ kfree_skb(skb);
3851 ++ }
3852 + }
3853 +
3854 + /**
3855 +diff --git a/net/core/sock.c b/net/core/sock.c
3856 +index 9fb1c073d0c4..8aa4a5f89572 100644
3857 +--- a/net/core/sock.c
3858 ++++ b/net/core/sock.c
3859 +@@ -732,6 +732,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
3860 + break;
3861 + case SO_DONTROUTE:
3862 + sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
3863 ++ sk_dst_reset(sk);
3864 + break;
3865 + case SO_BROADCAST:
3866 + sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
3867 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
3868 +index 3f8caf7d19b8..1ea36bf778e6 100644
3869 +--- a/net/ipv4/ip_sockglue.c
3870 ++++ b/net/ipv4/ip_sockglue.c
3871 +@@ -133,19 +133,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
3872 +
3873 + static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
3874 + {
3875 ++ __be16 _ports[2], *ports;
3876 + struct sockaddr_in sin;
3877 +- __be16 *ports;
3878 +- int end;
3879 +-
3880 +- end = skb_transport_offset(skb) + 4;
3881 +- if (end > 0 && !pskb_may_pull(skb, end))
3882 +- return;
3883 +
3884 + /* All current transport protocols have the port numbers in the
3885 + * first four bytes of the transport header and this function is
3886 + * written with this assumption in mind.
3887 + */
3888 +- ports = (__be16 *)skb_transport_header(skb);
3889 ++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
3890 ++ sizeof(_ports), &_ports);
3891 ++ if (!ports)
3892 ++ return;
3893 +
3894 + sin.sin_family = AF_INET;
3895 + sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
3896 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
3897 +index 637a0e41b0aa..d6f2dab28d14 100644
3898 +--- a/net/ipv6/af_inet6.c
3899 ++++ b/net/ipv6/af_inet6.c
3900 +@@ -292,6 +292,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3901 +
3902 + /* Check if the address belongs to the host. */
3903 + if (addr_type == IPV6_ADDR_MAPPED) {
3904 ++ struct net_device *dev = NULL;
3905 + int chk_addr_ret;
3906 +
3907 + /* Binding to v4-mapped address on a v6-only socket
3908 +@@ -302,9 +303,20 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3909 + goto out;
3910 + }
3911 +
3912 ++ rcu_read_lock();
3913 ++ if (sk->sk_bound_dev_if) {
3914 ++ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
3915 ++ if (!dev) {
3916 ++ err = -ENODEV;
3917 ++ goto out_unlock;
3918 ++ }
3919 ++ }
3920 ++
3921 + /* Reproduce AF_INET checks to make the bindings consistent */
3922 + v4addr = addr->sin6_addr.s6_addr32[3];
3923 +- chk_addr_ret = inet_addr_type(net, v4addr);
3924 ++ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
3925 ++ rcu_read_unlock();
3926 ++
3927 + if (!net->ipv4.sysctl_ip_nonlocal_bind &&
3928 + !(inet->freebind || inet->transparent) &&
3929 + v4addr != htonl(INADDR_ANY) &&
3930 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
3931 +index 9f6e57ded338..27cdf543c539 100644
3932 +--- a/net/ipv6/datagram.c
3933 ++++ b/net/ipv6/datagram.c
3934 +@@ -290,6 +290,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
3935 + skb_reset_network_header(skb);
3936 + iph = ipv6_hdr(skb);
3937 + iph->daddr = fl6->daddr;
3938 ++ ip6_flow_hdr(iph, 0, 0);
3939 +
3940 + serr = SKB_EXT_ERR(skb);
3941 + serr->ee.ee_errno = err;
3942 +@@ -657,17 +658,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
3943 + }
3944 + if (np->rxopt.bits.rxorigdstaddr) {
3945 + struct sockaddr_in6 sin6;
3946 +- __be16 *ports;
3947 +- int end;
3948 ++ __be16 _ports[2], *ports;
3949 +
3950 +- end = skb_transport_offset(skb) + 4;
3951 +- if (end <= 0 || pskb_may_pull(skb, end)) {
3952 ++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
3953 ++ sizeof(_ports), &_ports);
3954 ++ if (ports) {
3955 + /* All current transport protocols have the port numbers in the
3956 + * first four bytes of the transport header and this function is
3957 + * written with this assumption in mind.
3958 + */
3959 +- ports = (__be16 *)skb_transport_header(skb);
3960 +-
3961 + sin6.sin6_family = AF_INET6;
3962 + sin6.sin6_addr = ipv6_hdr(skb)->daddr;
3963 + sin6.sin6_port = ports[1];
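
[Both the IPv4 (ip_sockglue.c) and IPv6 (datagram.c) hunks replace direct pointer arithmetic on the transport header with skb_header_pointer(), which bounds-checks the request and copies into a caller-supplied buffer when the data is not contiguous. A flat-buffer userspace analogue of that contract (in a plain buffer "not contiguous" never happens, so only the bounds check remains visible):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Analogue of skb_header_pointer(): return a pointer to len bytes at
 * offset off, or NULL if the packet is too short. copy is the caller's
 * scratch buffer; a real skb may need it for non-linear data. */
static const void *header_pointer(const uint8_t *pkt, size_t pktlen,
				  size_t off, size_t len, void *copy)
{
	if (off + len > pktlen)
		return NULL; /* truncated packet: refuse, don't over-read */
	memcpy(copy, pkt + off, len);
	return copy;
}

int main(void)
{
	/* 8-byte "transport header": src port 0x1234, dst port 0x0050 */
	uint8_t pkt[8] = { 0x12, 0x34, 0x00, 0x50 };
	uint16_t _ports[2];
	const uint16_t *ports = header_pointer(pkt, sizeof(pkt), 0,
					       sizeof(_ports), _ports);

	if (ports)
		printf("dst port bytes: %04x\n", (unsigned)ports[1]);

	if (!header_pointer(pkt, 2, 0, sizeof(_ports), _ports))
		printf("runt packet rejected\n");
	return 0;
}

The old code's pskb_may_pull() check could be skipped when the computed end was not positive, so a crafted short packet let the ports read run past the buffer; the accessor-style API makes the failure mode an explicit NULL.]
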
3964 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3965 +index 0f50977ed53b..753b2837318d 100644
3966 +--- a/net/packet/af_packet.c
3967 ++++ b/net/packet/af_packet.c
3968 +@@ -2514,7 +2514,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3969 + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
3970 + dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
3971 + if (addr && dev && saddr->sll_halen < dev->addr_len)
3972 +- goto out;
3973 ++ goto out_put;
3974 + }
3975 +
3976 + err = -ENXIO;
3977 +@@ -2683,7 +2683,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
3978 + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
3979 + dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3980 + if (addr && dev && saddr->sll_halen < dev->addr_len)
3981 +- goto out;
3982 ++ goto out_unlock;
3983 + }
3984 +
3985 + err = -ENXIO;
3986 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3987 +index 7dffc97a953c..9fa0b0dc3868 100644
3988 +--- a/net/sctp/ipv6.c
3989 ++++ b/net/sctp/ipv6.c
3990 +@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
3991 +
3992 + switch (ev) {
3993 + case NETDEV_UP:
3994 +- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
3995 ++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3996 + if (addr) {
3997 + addr->a.v6.sin6_family = AF_INET6;
3998 +- addr->a.v6.sin6_port = 0;
3999 +- addr->a.v6.sin6_flowinfo = 0;
4000 + addr->a.v6.sin6_addr = ifa->addr;
4001 + addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
4002 + addr->valid = 1;
4003 +@@ -412,7 +410,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
4004 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4005 + if (addr) {
4006 + addr->a.v6.sin6_family = AF_INET6;
4007 +- addr->a.v6.sin6_port = 0;
4008 + addr->a.v6.sin6_addr = ifp->addr;
4009 + addr->a.v6.sin6_scope_id = dev->ifindex;
4010 + addr->valid = 1;
4011 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
4012 +index dc030efa4447..9f2f3c48b7b6 100644
4013 +--- a/net/sctp/protocol.c
4014 ++++ b/net/sctp/protocol.c
4015 +@@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
4016 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4017 + if (addr) {
4018 + addr->a.v4.sin_family = AF_INET;
4019 +- addr->a.v4.sin_port = 0;
4020 + addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
4021 + addr->valid = 1;
4022 + INIT_LIST_HEAD(&addr->list);
4023 +@@ -775,10 +774,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
4024 +
4025 + switch (ev) {
4026 + case NETDEV_UP:
4027 +- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
4028 ++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
4029 + if (addr) {
4030 + addr->a.v4.sin_family = AF_INET;
4031 +- addr->a.v4.sin_port = 0;
4032 + addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
4033 + addr->valid = 1;
4034 + spin_lock_bh(&net->sctp.local_addr_lock);
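
[The SCTP hunks above swap kmalloc() plus per-field zeroing for kzalloc(), so every field, including ones the old code never cleared (and any padding), starts at zero. The userspace equivalent is calloc(); the struct below is illustrative:

#include <stdio.h>
#include <stdlib.h>

struct toy_addr {
	int family;
	unsigned short port;
	unsigned char addr[16];
	int valid;
};

int main(void)
{
	/* calloc ~ kzalloc: no need to clear port/flowinfo by hand,
	 * and no uninitialized bytes can leak later. */
	struct toy_addr *a = calloc(1, sizeof(*a));

	if (!a)
		return 1;
	a->family = 10; /* AF_INET6 */
	a->valid = 1;
	printf("port defaults to %u\n", a->port); /* 0, guaranteed */
	free(a);
	return 0;
}

]
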
4035 +diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
4036 +index cf5770d8f49a..c89626b2afff 100644
4037 +--- a/net/sunrpc/rpcb_clnt.c
4038 ++++ b/net/sunrpc/rpcb_clnt.c
4039 +@@ -772,6 +772,12 @@ void rpcb_getport_async(struct rpc_task *task)
4040 + case RPCBVERS_3:
4041 + map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
4042 + map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
4043 ++ if (!map->r_addr) {
4044 ++ status = -ENOMEM;
4045 ++ dprintk("RPC: %5u %s: no memory available\n",
4046 ++ task->tk_pid, __func__);
4047 ++ goto bailout_free_args;
4048 ++ }
4049 + map->r_owner = "";
4050 + break;
4051 + case RPCBVERS_2:
4052 +@@ -794,6 +800,8 @@ void rpcb_getport_async(struct rpc_task *task)
4053 + rpc_put_task(child);
4054 + return;
4055 +
4056 ++bailout_free_args:
4057 ++ kfree(map);
4058 + bailout_release_client:
4059 + rpc_release_client(rpcb_clnt);
4060 + bailout_nofree:
4061 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
4062 +index f86c6555a539..e9653c42cdd1 100644
4063 +--- a/net/tipc/netlink_compat.c
4064 ++++ b/net/tipc/netlink_compat.c
4065 +@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
4066 + return limit;
4067 + }
4068 +
4069 ++static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
4070 ++{
4071 ++ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
4072 ++}
4073 ++
4074 + static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
4075 + {
4076 + struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
4077 +@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
4078 + return buf;
4079 + }
4080 +
4081 ++static inline bool string_is_valid(char *s, int len)
4082 ++{
4083 ++ return memchr(s, '\0', len) ? true : false;
4084 ++}
4085 ++
4086 + static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
4087 + struct tipc_nl_compat_msg *msg,
4088 + struct sk_buff *arg)
4089 +@@ -364,6 +374,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
4090 + struct nlattr *prop;
4091 + struct nlattr *bearer;
4092 + struct tipc_bearer_config *b;
4093 ++ int len;
4094 +
4095 + b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
4096 +
4097 +@@ -371,6 +382,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
4098 + if (!bearer)
4099 + return -EMSGSIZE;
4100 +
4101 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
4102 ++ if (!string_is_valid(b->name, len))
4103 ++ return -EINVAL;
4104 ++
4105 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
4106 + return -EMSGSIZE;
4107 +
4108 +@@ -396,6 +411,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
4109 + {
4110 + char *name;
4111 + struct nlattr *bearer;
4112 ++ int len;
4113 +
4114 + name = (char *)TLV_DATA(msg->req);
4115 +
4116 +@@ -403,6 +419,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
4117 + if (!bearer)
4118 + return -EMSGSIZE;
4119 +
4120 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
4121 ++ if (!string_is_valid(name, len))
4122 ++ return -EINVAL;
4123 ++
4124 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
4125 + return -EMSGSIZE;
4126 +
4127 +@@ -462,6 +482,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
4128 + struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
4129 + struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
4130 + struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
4131 ++ int len;
4132 +
4133 + nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
4134 +
4135 +@@ -472,6 +493,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
4136 + NULL);
4137 +
4138 + name = (char *)TLV_DATA(msg->req);
4139 ++
4140 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
4141 ++ if (!string_is_valid(name, len))
4142 ++ return -EINVAL;
4143 ++
4144 + if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
4145 + return 0;
4146 +
4147 +@@ -605,6 +631,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
4148 + struct nlattr *prop;
4149 + struct nlattr *media;
4150 + struct tipc_link_config *lc;
4151 ++ int len;
4152 +
4153 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
4154 +
4155 +@@ -612,6 +639,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
4156 + if (!media)
4157 + return -EMSGSIZE;
4158 +
4159 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
4160 ++ if (!string_is_valid(lc->name, len))
4161 ++ return -EINVAL;
4162 ++
4163 + if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
4164 + return -EMSGSIZE;
4165 +
4166 +@@ -632,6 +663,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
4167 + struct nlattr *prop;
4168 + struct nlattr *bearer;
4169 + struct tipc_link_config *lc;
4170 ++ int len;
4171 +
4172 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
4173 +
4174 +@@ -639,6 +671,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
4175 + if (!bearer)
4176 + return -EMSGSIZE;
4177 +
4178 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
4179 ++ if (!string_is_valid(lc->name, len))
4180 ++ return -EINVAL;
4181 ++
4182 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
4183 + return -EMSGSIZE;
4184 +
4185 +@@ -687,9 +723,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
4186 + struct tipc_link_config *lc;
4187 + struct tipc_bearer *bearer;
4188 + struct tipc_media *media;
4189 ++ int len;
4190 +
4191 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
4192 +
4193 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
4194 ++ if (!string_is_valid(lc->name, len))
4195 ++ return -EINVAL;
4196 ++
4197 + media = tipc_media_find(lc->name);
4198 + if (media) {
4199 + cmd->doit = &tipc_nl_media_set;
4200 +@@ -711,6 +752,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
4201 + {
4202 + char *name;
4203 + struct nlattr *link;
4204 ++ int len;
4205 +
4206 + name = (char *)TLV_DATA(msg->req);
4207 +
4208 +@@ -718,6 +760,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
4209 + if (!link)
4210 + return -EMSGSIZE;
4211 +
4212 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
4213 ++ if (!string_is_valid(name, len))
4214 ++ return -EINVAL;
4215 ++
4216 + if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
4217 + return -EMSGSIZE;
4218 +
4219 +@@ -739,6 +785,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
4220 + };
4221 +
4222 + ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
4223 ++ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
4224 ++ return -EINVAL;
4225 +
4226 + depth = ntohl(ntq->depth);
4227 +
4228 +@@ -1117,7 +1165,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
4229 + }
4230 +
4231 + len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
4232 +- if (len && !TLV_OK(msg.req, len)) {
4233 ++ if (!len || !TLV_OK(msg.req, len)) {
4234 + msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
4235 + err = -EOPNOTSUPP;
4236 + goto send;
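
[Every TIPC compat handler above now checks, via memchr(), that a name copied out of a TLV actually contains a terminating NUL within the TLV's payload before it is handed to string APIs such as strcmp() or nla_put_string(). A standalone sketch of that check:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same idea as the patch's string_is_valid(): the buffer is only safe
 * to treat as a C string if a NUL occurs within its stated length. */
static bool string_is_valid(const char *s, size_t len)
{
	return memchr(s, '\0', len) != NULL;
}

int main(void)
{
	char good[8] = "eth0";	/* NUL-terminated within 8 bytes */
	char bad[8] = { 'e', 't', 'h', '0', 'x', 'y', 'z', 'w' }; /* no NUL */

	printf("good: %d\n", string_is_valid(good, sizeof(good))); /* 1 */
	printf("bad:  %d\n", string_is_valid(bad, sizeof(bad)));   /* 0 */
	return 0;
}

Clamping len with min_t() against the expected maximum name length, as the hunks do, keeps the scan inside the TLV even when the message lies about its size.]
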
4237 +diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
4238 +index c410d257da06..0c7800112ff5 100644
4239 +--- a/scripts/kconfig/zconf.l
4240 ++++ b/scripts/kconfig/zconf.l
4241 +@@ -71,7 +71,7 @@ static void warn_ignored_character(char chr)
4242 + {
4243 + fprintf(stderr,
4244 + "%s:%d:warning: ignoring unsupported character '%c'\n",
4245 +- zconf_curname(), zconf_lineno(), chr);
4246 ++ current_file->name, yylineno, chr);
4247 + }
4248 + %}
4249 +
4250 +@@ -191,6 +191,8 @@ n [A-Za-z0-9_-]
4251 + }
4252 + <<EOF>> {
4253 + BEGIN(INITIAL);
4254 ++ yylval.string = text;
4255 ++ return T_WORD_QUOTE;
4256 + }
4257 + }
4258 +
4259 +diff --git a/security/security.c b/security/security.c
4260 +index 46f405ce6b0f..0dde287db5c5 100644
4261 +--- a/security/security.c
4262 ++++ b/security/security.c
4263 +@@ -861,6 +861,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
4264 +
4265 + void security_cred_free(struct cred *cred)
4266 + {
4267 ++ /*
4268 ++ * There is a failure case in prepare_creds() that
4269 ++ * may result in a call here with ->security being NULL.
4270 ++ */
4271 ++ if (unlikely(cred->security == NULL))
4272 ++ return;
4273 ++
4274 + call_void_hook(cred_free, cred);
4275 + }
4276 +
4277 +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
4278 +index 992a31530825..965a55eacaba 100644
4279 +--- a/security/selinux/ss/policydb.c
4280 ++++ b/security/selinux/ss/policydb.c
4281 +@@ -726,7 +726,8 @@ static int sens_destroy(void *key, void *datum, void *p)
4282 + kfree(key);
4283 + if (datum) {
4284 + levdatum = datum;
4285 +- ebitmap_destroy(&levdatum->level->cat);
4286 ++ if (levdatum->level)
4287 ++ ebitmap_destroy(&levdatum->level->cat);
4288 + kfree(levdatum->level);
4289 + }
4290 + kfree(datum);
4291 +diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
4292 +index cb6ed10816d4..0a8808954bd8 100644
4293 +--- a/security/yama/yama_lsm.c
4294 ++++ b/security/yama/yama_lsm.c
4295 +@@ -288,7 +288,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
4296 + break;
4297 + case YAMA_SCOPE_RELATIONAL:
4298 + rcu_read_lock();
4299 +- if (!task_is_descendant(current, child) &&
4300 ++ if (!pid_alive(child))
4301 ++ rc = -EPERM;
4302 ++ if (!rc && !task_is_descendant(current, child) &&
4303 + !ptracer_exception_found(current, child) &&
4304 + !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
4305 + rc = -EPERM;
4306 +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
4307 +index 091290d1f3ea..1898fa4228ad 100644
4308 +--- a/sound/firewire/bebob/bebob.c
4309 ++++ b/sound/firewire/bebob/bebob.c
4310 +@@ -382,7 +382,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
4311 + /* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
4312 + SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
4313 + /* Apogee Electronics, Ensemble */
4314 +- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
4315 ++ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
4316 + /* ESI, Quatafire610 */
4317 + SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
4318 + /* AcousticReality, eARMasterOne */
4319 +diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
4320 +index c53f78767568..df21da796fa7 100644
4321 +--- a/tools/perf/arch/x86/util/intel-pt.c
4322 ++++ b/tools/perf/arch/x86/util/intel-pt.c
4323 +@@ -471,10 +471,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
4324 + struct perf_evsel *evsel)
4325 + {
4326 + int err;
4327 ++ char c;
4328 +
4329 + if (!evsel)
4330 + return 0;
4331 +
4332 ++ /*
4333 ++ * If supported, force pass-through config term (pt=1) even if user
4334 ++ * sets pt=0, which avoids senseless kernel errors.
4335 ++ */
4336 ++ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
4337 ++ !(evsel->attr.config & 1)) {
4338 ++ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
4339 ++ evsel->attr.config |= 1;
4340 ++ }
4341 ++
4342 + err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
4343 + "cyc_thresh", "caps/psb_cyc",
4344 + evsel->attr.config);
4345 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
4346 +index e81dfb2e239c..9351738df703 100644
4347 +--- a/tools/perf/util/parse-events.c
4348 ++++ b/tools/perf/util/parse-events.c
4349 +@@ -1903,7 +1903,7 @@ restart:
4350 + if (!name_only && strlen(syms->alias))
4351 + snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
4352 + else
4353 +- strncpy(name, syms->symbol, MAX_NAME_LEN);
4354 ++ strlcpy(name, syms->symbol, MAX_NAME_LEN);
4355 +
4356 + evt_list[evt_i] = strdup(name);
4357 + if (evt_list[evt_i] == NULL)
4358 +diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
4359 +index eec6c1149f44..132878d4847a 100644
4360 +--- a/tools/perf/util/svghelper.c
4361 ++++ b/tools/perf/util/svghelper.c
4362 +@@ -333,7 +333,7 @@ static char *cpu_model(void)
4363 + if (file) {
4364 + while (fgets(buf, 255, file)) {
4365 + if (strstr(buf, "model name")) {
4366 +- strncpy(cpu_m, &buf[13], 255);
4367 ++ strlcpy(cpu_m, &buf[13], 255);
4368 + break;
4369 + }
4370 + }
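
[The last two perf hunks replace strncpy() with strlcpy() because strncpy() does not NUL-terminate the destination when the source fills or exceeds the buffer. glibc has no strlcpy(), so the sketch below carries a minimal BSD-style implementation to show the difference:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy(): always NUL-terminates (when size > 0) and returns
 * the length it tried to create, so truncation is detectable. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t srclen = strlen(src);

	if (size) {
		size_t n = srclen < size - 1 ? srclen : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return srclen;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "overflow", sizeof(a));    /* a is NOT NUL-terminated */
	my_strlcpy(b, "overflow", sizeof(b)); /* b == "ove", terminated  */

	printf("strlcpy result: %s, truncated: %d\n",
	       b, my_strlcpy(b, "overflow", sizeof(b)) >= sizeof(b));
	return 0;
}

In the perf code the sources are model names and event symbols of unbounded length, so the unterminated-copy case was reachable in practice.]
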