From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Thu, 17 Dec 2015 17:12:34
Message-Id: 1450372334.7302e8593ad28569d4ce2175a02536b1cdd4b909.mpagano@gentoo
1 commit: 7302e8593ad28569d4ce2175a02536b1cdd4b909
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Dec 17 17:12:14 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Dec 17 17:12:14 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7302e859
7
8 Linux patch 3.18.25
9
10 0000_README | 4 +
11 1024_linux-3.18.25.patch | 4339 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4343 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index d829823..369ed3d 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -139,6 +139,10 @@ Patch: 1023_linux-3.18.24.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.18.24
21
22 +Patch: 1024_linux-3.18.25.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.18.25
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1024_linux-3.18.25.patch b/1024_linux-3.18.25.patch
31 new file mode 100644
32 index 0000000..dba2da3
33 --- /dev/null
34 +++ b/1024_linux-3.18.25.patch
35 @@ -0,0 +1,4339 @@
36 +diff --git a/Makefile b/Makefile
37 +index 9769e3bce6a2..6df25277ea44 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 3
42 + PATCHLEVEL = 18
43 +-SUBLEVEL = 24
44 ++SUBLEVEL = 25
45 + EXTRAVERSION =
46 + NAME = Diseased Newt
47 +
48 +diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
49 +index f5b00f41c4f6..b8b6e22f9987 100644
50 +--- a/arch/arm/plat-orion/common.c
51 ++++ b/arch/arm/plat-orion/common.c
52 +@@ -499,7 +499,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
53 +
54 + d->netdev = &orion_ge00.dev;
55 + for (i = 0; i < d->nr_chips; i++)
56 +- d->chip[i].host_dev = &orion_ge00_shared.dev;
57 ++ d->chip[i].host_dev = &orion_ge_mvmdio.dev;
58 + orion_switch_device.dev.platform_data = d;
59 +
60 + platform_device_register(&orion_switch_device);
61 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
62 +index e7391aef5433..2d54c55400ed 100644
63 +--- a/arch/arm64/Makefile
64 ++++ b/arch/arm64/Makefile
65 +@@ -33,7 +33,7 @@ endif
66 + CHECKFLAGS += -D__aarch64__
67 +
68 + ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
69 +-CFLAGS_MODULE += -mcmodel=large
70 ++KBUILD_CFLAGS_MODULE += -mcmodel=large
71 + endif
72 +
73 + # Default value
74 +diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
75 +index 407991bf79f5..ccb6078ed9f2 100644
76 +--- a/arch/arm64/kernel/stacktrace.c
77 ++++ b/arch/arm64/kernel/stacktrace.c
78 +@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
79 +
80 + frame->sp = fp + 0x10;
81 + frame->fp = *(unsigned long *)(fp);
82 +- /*
83 +- * -4 here because we care about the PC at time of bl,
84 +- * not where the return will go.
85 +- */
86 +- frame->pc = *(unsigned long *)(fp + 8) - 4;
87 ++ frame->pc = *(unsigned long *)(fp + 8);
88 +
89 + return 0;
90 + }
91 +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
92 +index af0dafab5807..79c459a2b684 100644
93 +--- a/arch/powerpc/kernel/rtas.c
94 ++++ b/arch/powerpc/kernel/rtas.c
95 +@@ -1045,6 +1045,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
96 + if (!capable(CAP_SYS_ADMIN))
97 + return -EPERM;
98 +
99 ++ if (!rtas.entry)
100 ++ return -EINVAL;
101 ++
102 + if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
103 + return -EFAULT;
104 +
105 +diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
106 +index 705408766ab0..ef0870500240 100644
107 +--- a/arch/sparc/crypto/aes_glue.c
108 ++++ b/arch/sparc/crypto/aes_glue.c
109 +@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
110 + .blkcipher = {
111 + .min_keysize = AES_MIN_KEY_SIZE,
112 + .max_keysize = AES_MAX_KEY_SIZE,
113 ++ .ivsize = AES_BLOCK_SIZE,
114 + .setkey = aes_set_key,
115 + .encrypt = cbc_encrypt,
116 + .decrypt = cbc_decrypt,
117 +@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
118 + .blkcipher = {
119 + .min_keysize = AES_MIN_KEY_SIZE,
120 + .max_keysize = AES_MAX_KEY_SIZE,
121 ++ .ivsize = AES_BLOCK_SIZE,
122 + .setkey = aes_set_key,
123 + .encrypt = ctr_crypt,
124 + .decrypt = ctr_crypt,
125 +diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
126 +index 641f55cb61c3..eb87d6dd86b1 100644
127 +--- a/arch/sparc/crypto/camellia_glue.c
128 ++++ b/arch/sparc/crypto/camellia_glue.c
129 +@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
130 + .blkcipher = {
131 + .min_keysize = CAMELLIA_MIN_KEY_SIZE,
132 + .max_keysize = CAMELLIA_MAX_KEY_SIZE,
133 ++ .ivsize = CAMELLIA_BLOCK_SIZE,
134 + .setkey = camellia_set_key,
135 + .encrypt = cbc_encrypt,
136 + .decrypt = cbc_decrypt,
137 +diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
138 +index d11500972994..1359bfc544e4 100644
139 +--- a/arch/sparc/crypto/des_glue.c
140 ++++ b/arch/sparc/crypto/des_glue.c
141 +@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
142 + .blkcipher = {
143 + .min_keysize = DES_KEY_SIZE,
144 + .max_keysize = DES_KEY_SIZE,
145 ++ .ivsize = DES_BLOCK_SIZE,
146 + .setkey = des_set_key,
147 + .encrypt = cbc_encrypt,
148 + .decrypt = cbc_decrypt,
149 +@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
150 + .blkcipher = {
151 + .min_keysize = DES3_EDE_KEY_SIZE,
152 + .max_keysize = DES3_EDE_KEY_SIZE,
153 ++ .ivsize = DES3_EDE_BLOCK_SIZE,
154 + .setkey = des3_ede_set_key,
155 + .encrypt = cbc3_encrypt,
156 + .decrypt = cbc3_decrypt,
157 +diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
158 +index b5d7640abc5d..8a4add8e4639 100644
159 +--- a/arch/x86/include/uapi/asm/svm.h
160 ++++ b/arch/x86/include/uapi/asm/svm.h
161 +@@ -100,6 +100,7 @@
162 + { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
163 + { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
164 + { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
165 ++ { SVM_EXIT_EXCP_BASE + AC_VECTOR, "AC excp" }, \
166 + { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
167 + { SVM_EXIT_INTR, "interrupt" }, \
168 + { SVM_EXIT_NMI, "nmi" }, \
169 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
170 +index 974e4d98ed29..852572c971c4 100644
171 +--- a/arch/x86/kvm/emulate.c
172 ++++ b/arch/x86/kvm/emulate.c
173 +@@ -658,7 +658,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
174 + *max_size = 0;
175 + switch (ctxt->mode) {
176 + case X86EMUL_MODE_PROT64:
177 +- if (((signed long)la << 16) >> 16 != la)
178 ++ if (is_noncanonical_address(la))
179 + return emulate_gp(ctxt, 0);
180 +
181 + *max_size = min_t(u64, ~0u, (1ull << 48) - la);
182 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
183 +index f98baebfa9a7..9dc0aa0dae96 100644
184 +--- a/arch/x86/kvm/svm.c
185 ++++ b/arch/x86/kvm/svm.c
186 +@@ -1103,6 +1103,7 @@ static void init_vmcb(struct vcpu_svm *svm)
187 + set_exception_intercept(svm, PF_VECTOR);
188 + set_exception_intercept(svm, UD_VECTOR);
189 + set_exception_intercept(svm, MC_VECTOR);
190 ++ set_exception_intercept(svm, AC_VECTOR);
191 +
192 + set_intercept(svm, INTERCEPT_INTR);
193 + set_intercept(svm, INTERCEPT_NMI);
194 +@@ -1789,6 +1790,12 @@ static int ud_interception(struct vcpu_svm *svm)
195 + return 1;
196 + }
197 +
198 ++static int ac_interception(struct vcpu_svm *svm)
199 ++{
200 ++ kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
201 ++ return 1;
202 ++}
203 ++
204 + static void svm_fpu_activate(struct kvm_vcpu *vcpu)
205 + {
206 + struct vcpu_svm *svm = to_svm(vcpu);
207 +@@ -3350,6 +3357,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
208 + [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
209 + [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
210 + [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
211 ++ [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
212 + [SVM_EXIT_INTR] = intr_interception,
213 + [SVM_EXIT_NMI] = nmi_interception,
214 + [SVM_EXIT_SMI] = nop_on_interception,
215 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
216 +index 0d7f1dcfcdac..84de207a8848 100644
217 +--- a/arch/x86/kvm/vmx.c
218 ++++ b/arch/x86/kvm/vmx.c
219 +@@ -1493,7 +1493,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
220 + u32 eb;
221 +
222 + eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
223 +- (1u << NM_VECTOR) | (1u << DB_VECTOR);
224 ++ (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
225 + if ((vcpu->guest_debug &
226 + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
227 + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
228 +@@ -4933,6 +4933,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)
229 + return handle_rmode_exception(vcpu, ex_no, error_code);
230 +
231 + switch (ex_no) {
232 ++ case AC_VECTOR:
233 ++ kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
234 ++ return 1;
235 + case DB_VECTOR:
236 + dr6 = vmcs_readl(EXIT_QUALIFICATION);
237 + if (!(vcpu->guest_debug &
238 +diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
239 +index 40886c489903..520729d898fe 100644
240 +--- a/crypto/ablkcipher.c
241 ++++ b/crypto/ablkcipher.c
242 +@@ -695,7 +695,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
243 + err:
244 + if (err != -EAGAIN)
245 + break;
246 +- if (signal_pending(current)) {
247 ++ if (fatal_signal_pending(current)) {
248 + err = -EINTR;
249 + break;
250 + }
251 +diff --git a/crypto/ahash.c b/crypto/ahash.c
252 +index f6a36a52d738..c1d8591913e9 100644
253 +--- a/crypto/ahash.c
254 ++++ b/crypto/ahash.c
255 +@@ -543,7 +543,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
256 + struct crypto_alg *base = &alg->halg.base;
257 +
258 + if (alg->halg.digestsize > PAGE_SIZE / 8 ||
259 +- alg->halg.statesize > PAGE_SIZE / 8)
260 ++ alg->halg.statesize > PAGE_SIZE / 8 ||
261 ++ alg->halg.statesize == 0)
262 + return -EINVAL;
263 +
264 + base->cra_type = &crypto_ahash_type;
265 +diff --git a/crypto/algapi.c b/crypto/algapi.c
266 +index 71a8143e23b1..314cc745f2f8 100644
267 +--- a/crypto/algapi.c
268 ++++ b/crypto/algapi.c
269 +@@ -337,7 +337,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
270 + crypto_alg_tested(larval->alg.cra_driver_name, 0);
271 + }
272 +
273 +- err = wait_for_completion_interruptible(&larval->completion);
274 ++ err = wait_for_completion_killable(&larval->completion);
275 + WARN_ON(err);
276 +
277 + out:
278 +diff --git a/crypto/api.c b/crypto/api.c
279 +index 2a81e98a0021..7db2e89a3114 100644
280 +--- a/crypto/api.c
281 ++++ b/crypto/api.c
282 +@@ -172,7 +172,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
283 + struct crypto_larval *larval = (void *)alg;
284 + long timeout;
285 +
286 +- timeout = wait_for_completion_interruptible_timeout(
287 ++ timeout = wait_for_completion_killable_timeout(
288 + &larval->completion, 60 * HZ);
289 +
290 + alg = larval->adult;
291 +@@ -435,7 +435,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
292 + err:
293 + if (err != -EAGAIN)
294 + break;
295 +- if (signal_pending(current)) {
296 ++ if (fatal_signal_pending(current)) {
297 + err = -EINTR;
298 + break;
299 + }
300 +@@ -552,7 +552,7 @@ void *crypto_alloc_tfm(const char *alg_name,
301 + err:
302 + if (err != -EAGAIN)
303 + break;
304 +- if (signal_pending(current)) {
305 ++ if (fatal_signal_pending(current)) {
306 + err = -EINTR;
307 + break;
308 + }
309 +diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
310 +index e2a34feec7a4..c90af2537d24 100644
311 +--- a/crypto/crypto_user.c
312 ++++ b/crypto/crypto_user.c
313 +@@ -367,7 +367,7 @@ static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
314 + err = PTR_ERR(alg);
315 + if (err != -EAGAIN)
316 + break;
317 +- if (signal_pending(current)) {
318 ++ if (fatal_signal_pending(current)) {
319 + err = -EINTR;
320 + break;
321 + }
322 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
323 +index dc1ca84e90f1..8f51d6e3883e 100644
324 +--- a/drivers/block/rbd.c
325 ++++ b/drivers/block/rbd.c
326 +@@ -3780,6 +3780,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
327 + q->limits.discard_zeroes_data = 1;
328 +
329 + blk_queue_merge_bvec(q, rbd_merge_bvec);
330 ++ if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
331 ++ q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
332 ++
333 + disk->queue = q;
334 +
335 + q->queuedata = rbd_dev;
336 +@@ -5198,7 +5201,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
337 + out_err:
338 + if (parent) {
339 + rbd_dev_unparent(rbd_dev);
340 +- kfree(rbd_dev->header_name);
341 + rbd_dev_destroy(parent);
342 + } else {
343 + rbd_put_client(rbdc);
344 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
345 +index 218c4858f494..34a01f191b33 100644
346 +--- a/drivers/block/xen-blkfront.c
347 ++++ b/drivers/block/xen-blkfront.c
348 +@@ -1911,7 +1911,8 @@ static void blkback_changed(struct xenbus_device *dev,
349 + break;
350 + /* Missed the backend's Closing state -- fallthrough */
351 + case XenbusStateClosing:
352 +- blkfront_closing(info);
353 ++ if (info)
354 ++ blkfront_closing(info);
355 + break;
356 + }
357 + }
358 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
359 +index 448327fe4d85..60c68e3cedeb 100644
360 +--- a/drivers/gpu/drm/i915/intel_display.c
361 ++++ b/drivers/gpu/drm/i915/intel_display.c
362 +@@ -1650,6 +1650,8 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
363 + I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
364 + }
365 +
366 ++ I915_WRITE(reg, dpll);
367 ++
368 + /* Wait for the clocks to stabilize. */
369 + POSTING_READ(reg);
370 + udelay(150);
371 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
372 +index 593ef8a2a069..ddcfc1d2544b 100644
373 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
374 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
375 +@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
376 + return 0;
377 + }
378 +
379 ++static int
380 ++nouveau_fbcon_open(struct fb_info *info, int user)
381 ++{
382 ++ struct nouveau_fbdev *fbcon = info->par;
383 ++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
384 ++ int ret = pm_runtime_get_sync(drm->dev->dev);
385 ++ if (ret < 0 && ret != -EACCES)
386 ++ return ret;
387 ++ return 0;
388 ++}
389 ++
390 ++static int
391 ++nouveau_fbcon_release(struct fb_info *info, int user)
392 ++{
393 ++ struct nouveau_fbdev *fbcon = info->par;
394 ++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
395 ++ pm_runtime_put(drm->dev->dev);
396 ++ return 0;
397 ++}
398 ++
399 + static struct fb_ops nouveau_fbcon_ops = {
400 + .owner = THIS_MODULE,
401 ++ .fb_open = nouveau_fbcon_open,
402 ++ .fb_release = nouveau_fbcon_release,
403 + .fb_check_var = drm_fb_helper_check_var,
404 + .fb_set_par = drm_fb_helper_set_par,
405 + .fb_fillrect = nouveau_fbcon_fillrect,
406 +@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
407 +
408 + static struct fb_ops nouveau_fbcon_sw_ops = {
409 + .owner = THIS_MODULE,
410 ++ .fb_open = nouveau_fbcon_open,
411 ++ .fb_release = nouveau_fbcon_release,
412 + .fb_check_var = drm_fb_helper_check_var,
413 + .fb_set_par = drm_fb_helper_set_par,
414 + .fb_fillrect = cfb_fillrect,
415 +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
416 +index 36951ee4b157..9fcc77d5923e 100644
417 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
418 ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
419 +@@ -199,11 +199,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
420 + struct nouveau_bo *nvbo = nouveau_gem_object(gem);
421 + struct nouveau_vma *vma;
422 +
423 +- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
424 ++ if (is_power_of_2(nvbo->valid_domains))
425 ++ rep->domain = nvbo->valid_domains;
426 ++ else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
427 + rep->domain = NOUVEAU_GEM_DOMAIN_GART;
428 + else
429 + rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
430 +-
431 + rep->offset = nvbo->bo.offset;
432 + if (cli->vm) {
433 + vma = nouveau_bo_vma_find(nvbo, cli->vm);
434 +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
435 +index d6f0e40db81d..ee38f9aba391 100644
436 +--- a/drivers/gpu/drm/radeon/radeon.h
437 ++++ b/drivers/gpu/drm/radeon/radeon.h
438 +@@ -1628,6 +1628,7 @@ struct radeon_pm {
439 + struct device *int_hwmon_dev;
440 + /* dpm */
441 + bool dpm_enabled;
442 ++ bool sysfs_initialized;
443 + struct radeon_dpm dpm;
444 + };
445 +
446 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
447 +index 2e1e9aa79cea..21e6e9745d00 100644
448 +--- a/drivers/gpu/drm/radeon/radeon_display.c
449 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
450 +@@ -1619,18 +1619,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
451 + radeon_fbdev_init(rdev);
452 + drm_kms_helper_poll_init(rdev->ddev);
453 +
454 +- if (rdev->pm.dpm_enabled) {
455 +- /* do dpm late init */
456 +- ret = radeon_pm_late_init(rdev);
457 +- if (ret) {
458 +- rdev->pm.dpm_enabled = false;
459 +- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
460 +- }
461 +- /* set the dpm state for PX since there won't be
462 +- * a modeset to call this.
463 +- */
464 +- radeon_pm_compute_clocks(rdev);
465 +- }
466 ++ /* do pm late init */
467 ++ ret = radeon_pm_late_init(rdev);
468 +
469 + return 0;
470 + }
471 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
472 +index 1d94b542cd82..fa537c0602e8 100644
473 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
474 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
475 +@@ -1192,14 +1192,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
476 + INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
477 +
478 + if (rdev->pm.num_power_states > 1) {
479 +- /* where's the best place to put these? */
480 +- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
481 +- if (ret)
482 +- DRM_ERROR("failed to create device file for power profile\n");
483 +- ret = device_create_file(rdev->dev, &dev_attr_power_method);
484 +- if (ret)
485 +- DRM_ERROR("failed to create device file for power method\n");
486 +-
487 + if (radeon_debugfs_pm_init(rdev)) {
488 + DRM_ERROR("Failed to register debugfs file for PM!\n");
489 + }
490 +@@ -1257,20 +1249,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
491 + goto dpm_failed;
492 + rdev->pm.dpm_enabled = true;
493 +
494 +- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
495 +- if (ret)
496 +- DRM_ERROR("failed to create device file for dpm state\n");
497 +- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
498 +- if (ret)
499 +- DRM_ERROR("failed to create device file for dpm state\n");
500 +- /* XXX: these are noops for dpm but are here for backwards compat */
501 +- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
502 +- if (ret)
503 +- DRM_ERROR("failed to create device file for power profile\n");
504 +- ret = device_create_file(rdev->dev, &dev_attr_power_method);
505 +- if (ret)
506 +- DRM_ERROR("failed to create device file for power method\n");
507 +-
508 + if (radeon_debugfs_pm_init(rdev)) {
509 + DRM_ERROR("Failed to register debugfs file for dpm!\n");
510 + }
511 +@@ -1411,9 +1389,51 @@ int radeon_pm_late_init(struct radeon_device *rdev)
512 + int ret = 0;
513 +
514 + if (rdev->pm.pm_method == PM_METHOD_DPM) {
515 +- mutex_lock(&rdev->pm.mutex);
516 +- ret = radeon_dpm_late_enable(rdev);
517 +- mutex_unlock(&rdev->pm.mutex);
518 ++ if (rdev->pm.dpm_enabled) {
519 ++ if (!rdev->pm.sysfs_initialized) {
520 ++ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
521 ++ if (ret)
522 ++ DRM_ERROR("failed to create device file for dpm state\n");
523 ++ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
524 ++ if (ret)
525 ++ DRM_ERROR("failed to create device file for dpm state\n");
526 ++ /* XXX: these are noops for dpm but are here for backwards compat */
527 ++ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
528 ++ if (ret)
529 ++ DRM_ERROR("failed to create device file for power profile\n");
530 ++ ret = device_create_file(rdev->dev, &dev_attr_power_method);
531 ++ if (ret)
532 ++ DRM_ERROR("failed to create device file for power method\n");
533 ++ if (!ret)
534 ++ rdev->pm.sysfs_initialized = true;
535 ++ }
536 ++
537 ++ mutex_lock(&rdev->pm.mutex);
538 ++ ret = radeon_dpm_late_enable(rdev);
539 ++ mutex_unlock(&rdev->pm.mutex);
540 ++ if (ret) {
541 ++ rdev->pm.dpm_enabled = false;
542 ++ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
543 ++ } else {
544 ++ /* set the dpm state for PX since there won't be
545 ++ * a modeset to call this.
546 ++ */
547 ++ radeon_pm_compute_clocks(rdev);
548 ++ }
549 ++ }
550 ++ } else {
551 ++ if ((rdev->pm.num_power_states > 1) &&
552 ++ (!rdev->pm.sysfs_initialized)) {
553 ++ /* where's the best place to put these? */
554 ++ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
555 ++ if (ret)
556 ++ DRM_ERROR("failed to create device file for power profile\n");
557 ++ ret = device_create_file(rdev->dev, &dev_attr_power_method);
558 ++ if (ret)
559 ++ DRM_ERROR("failed to create device file for power method\n");
560 ++ if (!ret)
561 ++ rdev->pm.sysfs_initialized = true;
562 ++ }
563 + }
564 + return ret;
565 + }
566 +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
567 +index f822fd2a1ada..884d82f9190e 100644
568 +--- a/drivers/hid/hid-apple.c
569 ++++ b/drivers/hid/hid-apple.c
570 +@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
571 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
572 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
573 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
574 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
575 ++ .driver_data = APPLE_HAS_FN },
576 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
577 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
578 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
579 ++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
580 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
581 + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
582 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
583 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
584 +index 737d18c924bc..ab52d1b30161 100644
585 +--- a/drivers/hid/hid-core.c
586 ++++ b/drivers/hid/hid-core.c
587 +@@ -1751,6 +1751,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
588 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
589 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
590 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
591 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
592 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
593 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
594 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
595 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
596 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
597 +@@ -2434,6 +2437,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
598 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
599 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
600 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
601 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
602 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
603 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
604 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
605 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
606 + { }
607 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
608 +index ef984eba8396..984e43cb83f0 100644
609 +--- a/drivers/hid/hid-ids.h
610 ++++ b/drivers/hid/hid-ids.h
611 +@@ -142,6 +142,9 @@
612 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
613 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
614 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
615 ++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
616 ++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
617 ++#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
618 + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
619 + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
620 + #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
621 +@@ -918,7 +921,8 @@
622 + #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
623 +
624 + #define USB_VENDOR_ID_TPV 0x25aa
625 +-#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
626 ++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
627 ++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
628 +
629 + #define USB_VENDOR_ID_TURBOX 0x062a
630 + #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
631 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
632 +index a4d1fe64c925..f7dd36e4fcb3 100644
633 +--- a/drivers/hid/usbhid/hid-quirks.c
634 ++++ b/drivers/hid/usbhid/hid-quirks.c
635 +@@ -111,7 +111,8 @@ static const struct hid_blacklist {
636 + { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
637 + { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
638 + { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
639 +- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
640 ++ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
641 ++ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
642 + { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
643 + { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
644 + { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
645 +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
646 +index 373dd4d47765..76babdb800f9 100644
647 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c
648 ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
649 +@@ -24,6 +24,7 @@
650 + #include <linux/kernel.h>
651 + #include <linux/module.h>
652 + #include <linux/delay.h>
653 ++#include <linux/dmi.h>
654 + #include <linux/i2c.h>
655 + #include <linux/clk.h>
656 + #include <linux/clk-provider.h>
657 +@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
658 + }
659 +
660 + #ifdef CONFIG_ACPI
661 ++/*
662 ++ * The HCNT/LCNT information coming from ACPI should be the most accurate
663 ++ * for given platform. However, some systems get it wrong. On such systems
664 ++ * we get better results by calculating those based on the input clock.
665 ++ */
666 ++static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
667 ++ {
668 ++ .ident = "Dell Inspiron 7348",
669 ++ .matches = {
670 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
671 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
672 ++ },
673 ++ },
674 ++ { }
675 ++};
676 ++
677 + static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
678 + u16 *hcnt, u16 *lcnt, u32 *sda_hold)
679 + {
680 +@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
681 + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
682 + union acpi_object *obj;
683 +
684 ++ if (dmi_check_system(dw_i2c_no_acpi_params))
685 ++ return;
686 ++
687 + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
688 + return;
689 +
690 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
691 +index d826e82dd997..47bd421f2750 100644
692 +--- a/drivers/i2c/busses/i2c-rcar.c
693 ++++ b/drivers/i2c/busses/i2c-rcar.c
694 +@@ -584,15 +584,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
695 + return ret;
696 + }
697 +
698 ++ pm_runtime_enable(dev);
699 ++ platform_set_drvdata(pdev, priv);
700 ++
701 + ret = i2c_add_numbered_adapter(adap);
702 + if (ret < 0) {
703 + dev_err(dev, "reg adap failed: %d\n", ret);
704 ++ pm_runtime_disable(dev);
705 + return ret;
706 + }
707 +
708 +- pm_runtime_enable(dev);
709 +- platform_set_drvdata(pdev, priv);
710 +-
711 + dev_info(dev, "probed\n");
712 +
713 + return 0;
714 +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
715 +index c127af99a0e0..b928e7a62f59 100644
716 +--- a/drivers/i2c/busses/i2c-s3c2410.c
717 ++++ b/drivers/i2c/busses/i2c-s3c2410.c
718 +@@ -1219,17 +1219,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
719 + i2c->adap.nr = i2c->pdata->bus_num;
720 + i2c->adap.dev.of_node = pdev->dev.of_node;
721 +
722 ++ platform_set_drvdata(pdev, i2c);
723 ++
724 ++ pm_runtime_enable(&pdev->dev);
725 ++
726 + ret = i2c_add_numbered_adapter(&i2c->adap);
727 + if (ret < 0) {
728 + dev_err(&pdev->dev, "failed to add bus to i2c core\n");
729 ++ pm_runtime_disable(&pdev->dev);
730 + s3c24xx_i2c_deregister_cpufreq(i2c);
731 + clk_unprepare(i2c->clk);
732 + return ret;
733 + }
734 +
735 +- platform_set_drvdata(pdev, i2c);
736 +-
737 +- pm_runtime_enable(&pdev->dev);
738 + pm_runtime_enable(&i2c->adap.dev);
739 +
740 + dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
741 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
742 +index e28a494e2a3a..c3a83f7aa096 100644
743 +--- a/drivers/infiniband/core/cm.c
744 ++++ b/drivers/infiniband/core/cm.c
745 +@@ -860,6 +860,11 @@ retest:
746 + case IB_CM_SIDR_REQ_RCVD:
747 + spin_unlock_irq(&cm_id_priv->lock);
748 + cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
749 ++ spin_lock_irq(&cm.lock);
750 ++ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
751 ++ rb_erase(&cm_id_priv->sidr_id_node,
752 ++ &cm.remote_sidr_table);
753 ++ spin_unlock_irq(&cm.lock);
754 + break;
755 + case IB_CM_REQ_SENT:
756 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
757 +@@ -3099,7 +3104,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
758 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
759 +
760 + spin_lock_irqsave(&cm.lock, flags);
761 +- rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
762 ++ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
763 ++ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
764 ++ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
765 ++ }
766 + spin_unlock_irqrestore(&cm.lock, flags);
767 + return 0;
768 +
769 +diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
770 +index 56eb471b5576..4215b5382092 100644
771 +--- a/drivers/input/joystick/Kconfig
772 ++++ b/drivers/input/joystick/Kconfig
773 +@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
774 + config JOYSTICK_ZHENHUA
775 + tristate "5-byte Zhenhua RC transmitter"
776 + select SERIO
777 ++ select BITREVERSE
778 + help
779 + Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
780 + supplied with a ready to fly micro electric indoor helicopters
781 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
782 +index 024b7bdffe5b..3ab045369c0c 100644
783 +--- a/drivers/input/keyboard/omap4-keypad.c
784 ++++ b/drivers/input/keyboard/omap4-keypad.c
785 +@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
786 +
787 + error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
788 + if (error)
789 +- return error;
790 ++ goto err_free_keypad;
791 +
792 + res = request_mem_region(res->start, resource_size(res), pdev->name);
793 + if (!res) {
794 +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
795 +index 95a3a6e2faf6..51a61e28b89e 100644
796 +--- a/drivers/input/mouse/psmouse-base.c
797 ++++ b/drivers/input/mouse/psmouse-base.c
798 +@@ -1473,6 +1473,10 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
799 + if (error)
800 + goto err_clear_drvdata;
801 +
802 ++ /* give PT device some time to settle down before probing */
803 ++ if (serio->id.type == SERIO_PS_PSTHRU)
804 ++ usleep_range(10000, 15000);
805 ++
806 + if (psmouse_probe(psmouse) < 0) {
807 + error = -ENODEV;
808 + goto err_close_serio;
809 +diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
810 +index 26b45936f9fd..1e8cd6f1fe9e 100644
811 +--- a/drivers/input/serio/parkbd.c
812 ++++ b/drivers/input/serio/parkbd.c
813 +@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
814 + parkbd_port = parkbd_allocate_serio();
815 + if (!parkbd_port) {
816 + parport_release(parkbd_dev);
817 ++ parport_unregister_device(parkbd_dev);
818 + return -ENOMEM;
819 + }
820 +
821 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
822 +index fab0ea1a46d1..af3daf89c77d 100644
823 +--- a/drivers/iommu/amd_iommu.c
824 ++++ b/drivers/iommu/amd_iommu.c
825 +@@ -1705,14 +1705,16 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
826 + unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
827 + int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
828 + int i = start >> APERTURE_RANGE_SHIFT;
829 +- unsigned long boundary_size;
830 ++ unsigned long boundary_size, mask;
831 + unsigned long address = -1;
832 + unsigned long limit;
833 +
834 + next_bit >>= PAGE_SHIFT;
835 +
836 +- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
837 +- PAGE_SIZE) >> PAGE_SHIFT;
838 ++ mask = dma_get_seg_boundary(dev);
839 ++
840 ++ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
841 ++ 1UL << (BITS_PER_LONG - PAGE_SHIFT);
842 +
843 + for (;i < max_index; ++i) {
844 + unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
845 +@@ -2100,8 +2102,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
846 + static void clear_dte_entry(u16 devid)
847 + {
848 + /* remove entry from the device table seen by the hardware */
849 +- amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
850 +- amd_iommu_dev_table[devid].data[1] = 0;
851 ++ amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
852 ++ amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
853 +
854 + amd_iommu_apply_erratum_63(devid);
855 + }
856 +diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
857 +index cec51a8ba844..791442af1fba 100644
858 +--- a/drivers/iommu/amd_iommu_types.h
859 ++++ b/drivers/iommu/amd_iommu_types.h
860 +@@ -289,6 +289,7 @@
861 + #define IOMMU_PTE_IR (1ULL << 61)
862 + #define IOMMU_PTE_IW (1ULL << 62)
863 +
864 ++#define DTE_FLAG_MASK (0x3ffULL << 32)
865 + #define DTE_FLAG_IOTLB (0x01UL << 32)
866 + #define DTE_FLAG_GV (0x01ULL << 55)
867 + #define DTE_GLX_SHIFT (56)
868 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
869 +index 0801e35b9940..a7c9685c52f6 100644
870 +--- a/drivers/md/dm-thin.c
871 ++++ b/drivers/md/dm-thin.c
872 +@@ -2596,7 +2596,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
873 + metadata_low_callback,
874 + pool);
875 + if (r)
876 +- goto out_free_pt;
877 ++ goto out_flags_changed;
878 +
879 + pt->callbacks.congested_fn = pool_is_congested;
880 + dm_table_add_target_callbacks(ti->table, &pt->callbacks);
881 +diff --git a/drivers/md/md.c b/drivers/md/md.c
882 +index 9c5d53f3e4c6..6c169f18aab8 100644
883 +--- a/drivers/md/md.c
884 ++++ b/drivers/md/md.c
885 +@@ -7560,8 +7560,7 @@ static int remove_and_add_spares(struct mddev *mddev,
886 + !test_bit(Bitmap_sync, &rdev->flags)))
887 + continue;
888 +
889 +- if (rdev->saved_raid_disk < 0)
890 +- rdev->recovery_offset = 0;
891 ++ rdev->recovery_offset = 0;
892 + if (mddev->pers->
893 + hot_add_disk(mddev, rdev) == 0) {
894 + if (sysfs_link_rdev(mddev, rdev))
895 +diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
896 +index 7c0d75547ccf..92cd09f3c69b 100644
897 +--- a/drivers/md/persistent-data/dm-btree-remove.c
898 ++++ b/drivers/md/persistent-data/dm-btree-remove.c
899 +@@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
900 + {
901 + int s;
902 + uint32_t max_entries = le32_to_cpu(left->header.max_entries);
903 +- unsigned target = (nr_left + nr_center + nr_right) / 3;
904 +- BUG_ON(target > max_entries);
905 ++ unsigned total = nr_left + nr_center + nr_right;
906 ++ unsigned target_right = total / 3;
907 ++ unsigned remainder = (target_right * 3) != total;
908 ++ unsigned target_left = target_right + remainder;
909 ++
910 ++ BUG_ON(target_left > max_entries);
911 ++ BUG_ON(target_right > max_entries);
912 +
913 + if (nr_left < nr_right) {
914 +- s = nr_left - target;
915 ++ s = nr_left - target_left;
916 +
917 + if (s < 0 && nr_center < -s) {
918 + /* not enough in central node */
919 +@@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
920 + } else
921 + shift(left, center, s);
922 +
923 +- shift(center, right, target - nr_right);
924 ++ shift(center, right, target_right - nr_right);
925 +
926 + } else {
927 +- s = target - nr_right;
928 ++ s = target_right - nr_right;
929 + if (s > 0 && nr_center < s) {
930 + /* not enough in central node */
931 + shift(center, right, nr_center);
932 +@@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
933 + } else
934 + shift(center, right, s);
935 +
936 +- shift(left, center, nr_left - target);
937 ++ shift(left, center, nr_left - target_left);
938 + }
939 +
940 + *key_ptr(parent, c->index) = center->keys[0];
941 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
942 +index c7726cebc495..d6e47033b5e0 100644
943 +--- a/drivers/md/persistent-data/dm-btree.c
944 ++++ b/drivers/md/persistent-data/dm-btree.c
945 +@@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
946 +
947 + r = new_block(s->info, &right);
948 + if (r < 0) {
949 +- /* FIXME: put left */
950 ++ unlock_block(s->info, left);
951 + return r;
952 + }
953 +
954 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
955 +index d24245c7b94a..3c8ada48ca76 100644
956 +--- a/drivers/md/raid1.c
957 ++++ b/drivers/md/raid1.c
958 +@@ -2245,7 +2245,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
959 + bio_trim(wbio, sector - r1_bio->sector, sectors);
960 + wbio->bi_iter.bi_sector += rdev->data_offset;
961 + wbio->bi_bdev = rdev->bdev;
962 +- if (submit_bio_wait(WRITE, wbio) == 0)
963 ++ if (submit_bio_wait(WRITE, wbio) < 0)
964 + /* failure! */
965 + ok = rdev_set_badblocks(rdev, sector,
966 + sectors, 0)
967 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
968 +index 17eb76760bf5..644f9e576736 100644
969 +--- a/drivers/md/raid10.c
970 ++++ b/drivers/md/raid10.c
971 +@@ -2599,7 +2599,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
972 + choose_data_offset(r10_bio, rdev) +
973 + (sector - r10_bio->sector));
974 + wbio->bi_bdev = rdev->bdev;
975 +- if (submit_bio_wait(WRITE, wbio) == 0)
976 ++ if (submit_bio_wait(WRITE, wbio) < 0)
977 + /* Failure! */
978 + ok = rdev_set_badblocks(rdev, sector,
979 + sectors, 0)
980 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
981 +index e421016bab77..5fa7549ba409 100644
982 +--- a/drivers/md/raid5.c
983 ++++ b/drivers/md/raid5.c
984 +@@ -3060,6 +3060,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
985 + }
986 + if (!discard_pending &&
987 + test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
988 ++ int hash = sh->hash_lock_index;
989 ++
990 + clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
991 + clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
992 + if (sh->qd_idx >= 0) {
993 +@@ -3073,9 +3075,9 @@ static void handle_stripe_clean_event(struct r5conf *conf,
994 + * no updated data, so remove it from hash list and the stripe
995 + * will be reinitialized
996 + */
997 +- spin_lock_irq(&conf->device_lock);
998 ++ spin_lock_irq(conf->hash_locks + hash);
999 + remove_hash(sh);
1000 +- spin_unlock_irq(&conf->device_lock);
1001 ++ spin_unlock_irq(conf->hash_locks + hash);
1002 + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
1003 + set_bit(STRIPE_HANDLE, &sh->state);
1004 +
1005 +diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
1006 +index 55f163d32d15..8bf802ca16ac 100644
1007 +--- a/drivers/media/platform/vsp1/vsp1_regs.h
1008 ++++ b/drivers/media/platform/vsp1/vsp1_regs.h
1009 +@@ -238,7 +238,7 @@
1010 + #define VI6_WPF_SZCLIP_EN (1 << 28)
1011 + #define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
1012 + #define VI6_WPF_SZCLIP_OFST_SHIFT 16
1013 +-#define VI6_WPF_SZCLIP_SIZE_MASK (0x1fff << 0)
1014 ++#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
1015 + #define VI6_WPF_SZCLIP_SIZE_SHIFT 0
1016 +
1017 + #define VI6_WPF_OUTFMT 0x100c
1018 +@@ -304,9 +304,9 @@
1019 + #define VI6_DPR_HST_ROUTE 0x2044
1020 + #define VI6_DPR_HSI_ROUTE 0x2048
1021 + #define VI6_DPR_BRU_ROUTE 0x204c
1022 +-#define VI6_DPR_ROUTE_FXA_MASK (0xff << 8)
1023 ++#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
1024 + #define VI6_DPR_ROUTE_FXA_SHIFT 16
1025 +-#define VI6_DPR_ROUTE_FP_MASK (0xff << 8)
1026 ++#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
1027 + #define VI6_DPR_ROUTE_FP_SHIFT 8
1028 + #define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
1029 + #define VI6_DPR_ROUTE_RT_SHIFT 0
1030 +diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
1031 +index 7cbc3a00bda8..bf6b215438e3 100644
1032 +--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
1033 ++++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
1034 +@@ -177,7 +177,7 @@ static int rotation_thread_function(void *data)
1035 + __s32 vflip, hflip;
1036 +
1037 + set_current_state(TASK_INTERRUPTIBLE);
1038 +- while (!schedule_timeout(100)) {
1039 ++ while (!schedule_timeout(msecs_to_jiffies(100))) {
1040 + if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
1041 + break;
1042 +
1043 +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
1044 +index 9bfa041e3316..d78689831aea 100644
1045 +--- a/drivers/media/usb/usbvision/usbvision-video.c
1046 ++++ b/drivers/media/usb/usbvision/usbvision-video.c
1047 +@@ -435,6 +435,7 @@ static int usbvision_v4l2_close(struct file *file)
1048 + usbvision_scratch_free(usbvision);
1049 +
1050 + usbvision->user--;
1051 ++ mutex_unlock(&usbvision->v4l2_lock);
1052 +
1053 + if (power_on_at_open) {
1054 + /* power off in a little while
1055 +@@ -448,7 +449,6 @@ static int usbvision_v4l2_close(struct file *file)
1056 + usbvision_release(usbvision);
1057 + return 0;
1058 + }
1059 +- mutex_unlock(&usbvision->v4l2_lock);
1060 +
1061 + PDEBUG(DBG_IO, "success");
1062 + return 0;
1063 +diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
1064 +index 70bb7530b22c..fc7393729081 100644
1065 +--- a/drivers/message/fusion/mptctl.c
1066 ++++ b/drivers/message/fusion/mptctl.c
1067 +@@ -1859,6 +1859,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1068 + }
1069 + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1070 +
1071 ++ /* Basic sanity checks to prevent underflows or integer overflows */
1072 ++ if (karg.maxReplyBytes < 0 ||
1073 ++ karg.dataInSize < 0 ||
1074 ++ karg.dataOutSize < 0 ||
1075 ++ karg.dataSgeOffset < 0 ||
1076 ++ karg.maxSenseBytes < 0 ||
1077 ++ karg.dataSgeOffset > ioc->req_sz / 4)
1078 ++ return -EINVAL;
1079 ++
1080 + /* Verify that the final request frame will not be too large.
1081 + */
1082 + sz = karg.dataSgeOffset * 4;
1083 +diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
1084 +index 4642b5b816a0..6ccc0fdab767 100644
1085 +--- a/drivers/mfd/wm5110-tables.c
1086 ++++ b/drivers/mfd/wm5110-tables.c
1087 +@@ -249,6 +249,16 @@ static const struct reg_default wm5110_revd_patch[] = {
1088 + { 0x80, 0x0 },
1089 + };
1090 +
1091 ++/* Add extra headphone write sequence locations */
1092 ++static const struct reg_default wm5110_reve_patch[] = {
1093 ++ { 0x80, 0x3 },
1094 ++ { 0x80, 0x3 },
1095 ++ { 0x4b, 0x138 },
1096 ++ { 0x4c, 0x13d },
1097 ++ { 0x80, 0x0 },
1098 ++ { 0x80, 0x0 },
1099 ++};
1100 ++
1101 + /* We use a function so we can use ARRAY_SIZE() */
1102 + int wm5110_patch(struct arizona *arizona)
1103 + {
1104 +@@ -266,7 +276,9 @@ int wm5110_patch(struct arizona *arizona)
1105 + wm5110_revd_patch,
1106 + ARRAY_SIZE(wm5110_revd_patch));
1107 + default:
1108 +- return 0;
1109 ++ return regmap_register_patch(arizona->regmap,
1110 ++ wm5110_reve_patch,
1111 ++ ARRAY_SIZE(wm5110_reve_patch));
1112 + }
1113 + }
1114 + EXPORT_SYMBOL_GPL(wm5110_patch);
1115 +diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
1116 +index 61ebb038fb75..2a6b149d7da3 100644
1117 +--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
1118 ++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
1119 +@@ -2047,7 +2047,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1120 + spin_lock_init(&s_state->lock);
1121 + }
1122 +
1123 +- memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
1124 ++ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
1125 + priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
1126 + INIT_WORK(&priv->mfunc.master.comm_work,
1127 + mlx4_master_comm_channel);
1128 +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
1129 +index af67e7d410eb..11ef2c2af9bf 100644
1130 +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
1131 ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
1132 +@@ -185,7 +185,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
1133 + return;
1134 + }
1135 +
1136 +- memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
1137 ++ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
1138 + s_eqe->slave_id = slave;
1139 + /* ensure all information is written before setting the ownersip bit */
1140 + wmb();
1141 +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
1142 +index a77f05ce8325..63ec209cdfd3 100644
1143 +--- a/drivers/net/ethernet/sfc/ef10.c
1144 ++++ b/drivers/net/ethernet/sfc/ef10.c
1145 +@@ -1344,7 +1344,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
1146 + unsigned int write_ptr;
1147 + efx_qword_t *txd;
1148 +
1149 +- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
1150 ++ tx_queue->xmit_more_available = false;
1151 ++ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
1152 ++ return;
1153 +
1154 + do {
1155 + write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1156 +diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
1157 +index 6859437b59fb..b70b865fd19b 100644
1158 +--- a/drivers/net/ethernet/sfc/farch.c
1159 ++++ b/drivers/net/ethernet/sfc/farch.c
1160 +@@ -316,7 +316,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
1161 + unsigned write_ptr;
1162 + unsigned old_write_count = tx_queue->write_count;
1163 +
1164 +- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
1165 ++ tx_queue->xmit_more_available = false;
1166 ++ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
1167 ++ return;
1168 +
1169 + do {
1170 + write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1171 +diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
1172 +index 9ede32064685..eda39c82f7e7 100644
1173 +--- a/drivers/net/ethernet/sfc/net_driver.h
1174 ++++ b/drivers/net/ethernet/sfc/net_driver.h
1175 +@@ -218,6 +218,7 @@ struct efx_tx_buffer {
1176 + * @tso_packets: Number of packets via the TSO xmit path
1177 + * @pushes: Number of times the TX push feature has been used
1178 + * @pio_packets: Number of times the TX PIO feature has been used
1179 ++ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
1180 + * @empty_read_count: If the completion path has seen the queue as empty
1181 + * and the transmission path has not yet checked this, the value of
1182 + * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
1183 +@@ -250,6 +251,7 @@ struct efx_tx_queue {
1184 + unsigned int tso_packets;
1185 + unsigned int pushes;
1186 + unsigned int pio_packets;
1187 ++ bool xmit_more_available;
1188 + /* Statistics to supplement MAC stats */
1189 + unsigned long tx_packets;
1190 +
1191 +diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
1192 +index 10b6173d557d..b605dfd5c7bc 100644
1193 +--- a/drivers/net/ethernet/sfc/selftest.c
1194 ++++ b/drivers/net/ethernet/sfc/selftest.c
1195 +@@ -46,7 +46,7 @@ struct efx_loopback_payload {
1196 + struct iphdr ip;
1197 + struct udphdr udp;
1198 + __be16 iteration;
1199 +- const char msg[64];
1200 ++ char msg[64];
1201 + } __packed;
1202 +
1203 + /* Loopback test source MAC address */
1204 +diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
1205 +index aaf2987512b5..e70edc3dea7e 100644
1206 +--- a/drivers/net/ethernet/sfc/tx.c
1207 ++++ b/drivers/net/ethernet/sfc/tx.c
1208 +@@ -431,8 +431,20 @@ finish_packet:
1209 + efx_tx_maybe_stop_queue(tx_queue);
1210 +
1211 + /* Pass off to hardware */
1212 +- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
1213 ++ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
1214 ++ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
1215 ++
1216 ++ /* There could be packets left on the partner queue if those
1217 ++ * SKBs had skb->xmit_more set. If we do not push those they
1218 ++ * could be left for a long time and cause a netdev watchdog.
1219 ++ */
1220 ++ if (txq2->xmit_more_available)
1221 ++ efx_nic_push_buffers(txq2);
1222 ++
1223 + efx_nic_push_buffers(tx_queue);
1224 ++ } else {
1225 ++ tx_queue->xmit_more_available = skb->xmit_more;
1226 ++ }
1227 +
1228 + tx_queue->tx_packets++;
1229 +
1230 +@@ -721,6 +733,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
1231 + tx_queue->read_count = 0;
1232 + tx_queue->old_read_count = 0;
1233 + tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
1234 ++ tx_queue->xmit_more_available = false;
1235 +
1236 + /* Set up TX descriptor ring */
1237 + efx_nic_init_tx(tx_queue);
1238 +@@ -746,6 +759,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
1239 +
1240 + ++tx_queue->read_count;
1241 + }
1242 ++ tx_queue->xmit_more_available = false;
1243 + netdev_tx_reset_queue(tx_queue->core_txq);
1244 + }
1245 +
1246 +@@ -1301,8 +1315,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1247 + efx_tx_maybe_stop_queue(tx_queue);
1248 +
1249 + /* Pass off to hardware */
1250 +- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
1251 ++ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
1252 ++ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
1253 ++
1254 ++ /* There could be packets left on the partner queue if those
1255 ++ * SKBs had skb->xmit_more set. If we do not push those they
1256 ++ * could be left for a long time and cause a netdev watchdog.
1257 ++ */
1258 ++ if (txq2->xmit_more_available)
1259 ++ efx_nic_push_buffers(txq2);
1260 ++
1261 + efx_nic_push_buffers(tx_queue);
1262 ++ } else {
1263 ++ tx_queue->xmit_more_available = skb->xmit_more;
1264 ++ }
1265 +
1266 + tx_queue->tso_bursts++;
1267 + return NETDEV_TX_OK;
1268 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
1269 +index 3a08a1f78c73..11edb2c9e572 100644
1270 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
1271 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
1272 +@@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
1273 + {
1274 + struct stmmac_priv *priv = netdev_priv(dev);
1275 +
1276 +- if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
1277 ++ if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
1278 +
1279 +- info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1280 ++ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1281 ++ SOF_TIMESTAMPING_TX_HARDWARE |
1282 ++ SOF_TIMESTAMPING_RX_SOFTWARE |
1283 + SOF_TIMESTAMPING_RX_HARDWARE |
1284 ++ SOF_TIMESTAMPING_SOFTWARE |
1285 + SOF_TIMESTAMPING_RAW_HARDWARE;
1286 +
1287 + if (priv->ptp_clock)
1288 +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
1289 +index 91d0c6a86e37..91120f0ed98c 100644
1290 +--- a/drivers/net/macvtap.c
1291 ++++ b/drivers/net/macvtap.c
1292 +@@ -68,7 +68,7 @@ static const struct proto_ops macvtap_socket_ops;
1293 + #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
1294 + NETIF_F_TSO6)
1295 + #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
1296 +-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
1297 ++#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
1298 +
1299 + static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
1300 + {
1301 +diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
1302 +index 854f2c9a7b2b..64860c041745 100644
1303 +--- a/drivers/net/phy/broadcom.c
1304 ++++ b/drivers/net/phy/broadcom.c
1305 +@@ -674,7 +674,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
1306 + { PHY_ID_BCM5421, 0xfffffff0 },
1307 + { PHY_ID_BCM5461, 0xfffffff0 },
1308 + { PHY_ID_BCM5464, 0xfffffff0 },
1309 +- { PHY_ID_BCM5482, 0xfffffff0 },
1310 ++ { PHY_ID_BCM5481, 0xfffffff0 },
1311 + { PHY_ID_BCM5482, 0xfffffff0 },
1312 + { PHY_ID_BCM50610, 0xfffffff0 },
1313 + { PHY_ID_BCM50610M, 0xfffffff0 },
1314 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1315 +index 6c9c16d76935..5aa563136373 100644
1316 +--- a/drivers/net/ppp/pppoe.c
1317 ++++ b/drivers/net/ppp/pppoe.c
1318 +@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
1319 + if (po->pppoe_dev == dev &&
1320 + sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
1321 + pppox_unbind_sock(sk);
1322 +- sk->sk_state = PPPOX_ZOMBIE;
1323 + sk->sk_state_change(sk);
1324 + po->pppoe_dev = NULL;
1325 + dev_put(dev);
1326 +@@ -570,7 +569,7 @@ static int pppoe_release(struct socket *sock)
1327 +
1328 + po = pppox_sk(sk);
1329 +
1330 +- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
1331 ++ if (po->pppoe_dev) {
1332 + dev_put(po->pppoe_dev);
1333 + po->pppoe_dev = NULL;
1334 + }
1335 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1336 +index b8a82b86f909..a5771515d9ab 100644
1337 +--- a/drivers/net/usb/qmi_wwan.c
1338 ++++ b/drivers/net/usb/qmi_wwan.c
1339 +@@ -535,6 +535,10 @@ static const struct usb_device_id products[] = {
1340 + USB_CDC_PROTO_NONE),
1341 + .driver_info = (unsigned long)&qmi_wwan_info,
1342 + },
1343 ++ { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
1344 ++ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
1345 ++ .driver_info = (unsigned long)&qmi_wwan_info,
1346 ++ },
1347 +
1348 + /* 3. Combined interface devices matching on interface number */
1349 + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
1350 +@@ -760,12 +764,17 @@ static const struct usb_device_id products[] = {
1351 + {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
1352 + {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1353 + {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1354 ++ {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
1355 ++ {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
1356 ++ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
1357 ++ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
1358 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1359 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1360 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1361 + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1362 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1363 + {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
1364 ++ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
1365 + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
1366 + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
1367 + {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
1368 +@@ -780,7 +789,6 @@ static const struct usb_device_id products[] = {
1369 + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
1370 + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
1371 + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
1372 +- {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
1373 +
1374 + /* 4. Gobi 1000 devices */
1375 + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
1376 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1377 +index 484ecce78025..ce2a29971230 100644
1378 +--- a/drivers/net/virtio_net.c
1379 ++++ b/drivers/net/virtio_net.c
1380 +@@ -1746,9 +1746,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1381 + /* Do we support "hardware" checksums? */
1382 + if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1383 + /* This opens up the world of extra features. */
1384 +- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1385 ++ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1386 + if (csum)
1387 +- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1388 ++ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1389 +
1390 + if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1391 + dev->hw_features |= NETIF_F_TSO
1392 +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
1393 +index 3bd030494986..ee15a579ec70 100644
1394 +--- a/drivers/net/wireless/ath/ath9k/init.c
1395 ++++ b/drivers/net/wireless/ath/ath9k/init.c
1396 +@@ -821,6 +821,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1397 + hw->max_rate_tries = 10;
1398 + hw->sta_data_size = sizeof(struct ath_node);
1399 + hw->vif_data_size = sizeof(struct ath_vif);
1400 ++ hw->extra_tx_headroom = 4;
1401 +
1402 + hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
1403 + hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
1404 +diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
1405 +index cfe1293692fc..b51fb8977104 100644
1406 +--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
1407 ++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
1408 +@@ -1022,7 +1022,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
1409 + u8 *pn = seq.ccmp.pn;
1410 +
1411 + ieee80211_get_key_rx_seq(key, i, &seq);
1412 +- aes_sc->pn = cpu_to_le64(
1413 ++ aes_sc[i].pn = cpu_to_le64(
1414 + (u64)pn[5] |
1415 + ((u64)pn[4] << 8) |
1416 + ((u64)pn[3] << 16) |
1417 +diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
1418 +index b04b8858c690..9dfd1d1106d7 100644
1419 +--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
1420 ++++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
1421 +@@ -102,6 +102,9 @@
1422 + #define IWL7265_FW_PRE "iwlwifi-7265-"
1423 + #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
1424 +
1425 ++#define IWL7265D_FW_PRE "iwlwifi-7265D-"
1426 ++#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
1427 ++
1428 + #define NVM_HW_SECTION_NUM_FAMILY_7000 0
1429 +
1430 + static const struct iwl_base_params iwl7000_base_params = {
1431 +@@ -267,7 +270,37 @@ const struct iwl_cfg iwl7265_n_cfg = {
1432 + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
1433 + };
1434 +
1435 ++const struct iwl_cfg iwl7265d_2ac_cfg = {
1436 ++ .name = "Intel(R) Dual Band Wireless AC 7265",
1437 ++ .fw_name_pre = IWL7265D_FW_PRE,
1438 ++ IWL_DEVICE_7000,
1439 ++ .ht_params = &iwl7265_ht_params,
1440 ++ .nvm_ver = IWL7265_NVM_VERSION,
1441 ++ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
1442 ++ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
1443 ++};
1444 ++
1445 ++const struct iwl_cfg iwl7265d_2n_cfg = {
1446 ++ .name = "Intel(R) Dual Band Wireless N 7265",
1447 ++ .fw_name_pre = IWL7265D_FW_PRE,
1448 ++ IWL_DEVICE_7000,
1449 ++ .ht_params = &iwl7265_ht_params,
1450 ++ .nvm_ver = IWL7265_NVM_VERSION,
1451 ++ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
1452 ++ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
1453 ++};
1454 ++
1455 ++const struct iwl_cfg iwl7265d_n_cfg = {
1456 ++ .name = "Intel(R) Wireless N 7265",
1457 ++ .fw_name_pre = IWL7265D_FW_PRE,
1458 ++ IWL_DEVICE_7000,
1459 ++ .ht_params = &iwl7265_ht_params,
1460 ++ .nvm_ver = IWL7265_NVM_VERSION,
1461 ++ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
1462 ++ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
1463 ++};
1464 ++
1465 + MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
1466 +-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
1467 +-MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
1468 ++MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
1469 + MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
1470 ++MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
1471 +diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
1472 +index 2ef83a39ff10..6cc051cace5d 100644
1473 +--- a/drivers/net/wireless/iwlwifi/iwl-config.h
1474 ++++ b/drivers/net/wireless/iwlwifi/iwl-config.h
1475 +@@ -346,6 +346,9 @@ extern const struct iwl_cfg iwl3165_2ac_cfg;
1476 + extern const struct iwl_cfg iwl7265_2ac_cfg;
1477 + extern const struct iwl_cfg iwl7265_2n_cfg;
1478 + extern const struct iwl_cfg iwl7265_n_cfg;
1479 ++extern const struct iwl_cfg iwl7265d_2ac_cfg;
1480 ++extern const struct iwl_cfg iwl7265d_2n_cfg;
1481 ++extern const struct iwl_cfg iwl7265d_n_cfg;
1482 + extern const struct iwl_cfg iwl8260_2n_cfg;
1483 + extern const struct iwl_cfg iwl8260_2ac_cfg;
1484 + extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
1485 +diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
1486 +index 3f6f015285e5..671bdbca33ee 100644
1487 +--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
1488 ++++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
1489 +@@ -305,23 +305,24 @@ enum {
1490 + };
1491 +
1492 +
1493 +-#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
1494 +-#define CSR_HW_REV_TYPE_5300 (0x0000020)
1495 +-#define CSR_HW_REV_TYPE_5350 (0x0000030)
1496 +-#define CSR_HW_REV_TYPE_5100 (0x0000050)
1497 +-#define CSR_HW_REV_TYPE_5150 (0x0000040)
1498 +-#define CSR_HW_REV_TYPE_1000 (0x0000060)
1499 +-#define CSR_HW_REV_TYPE_6x00 (0x0000070)
1500 +-#define CSR_HW_REV_TYPE_6x50 (0x0000080)
1501 +-#define CSR_HW_REV_TYPE_6150 (0x0000084)
1502 +-#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
1503 +-#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
1504 +-#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
1505 +-#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
1506 +-#define CSR_HW_REV_TYPE_2x00 (0x0000100)
1507 +-#define CSR_HW_REV_TYPE_105 (0x0000110)
1508 +-#define CSR_HW_REV_TYPE_135 (0x0000120)
1509 +-#define CSR_HW_REV_TYPE_NONE (0x00001F0)
1510 ++#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
1511 ++#define CSR_HW_REV_TYPE_5300 (0x0000020)
1512 ++#define CSR_HW_REV_TYPE_5350 (0x0000030)
1513 ++#define CSR_HW_REV_TYPE_5100 (0x0000050)
1514 ++#define CSR_HW_REV_TYPE_5150 (0x0000040)
1515 ++#define CSR_HW_REV_TYPE_1000 (0x0000060)
1516 ++#define CSR_HW_REV_TYPE_6x00 (0x0000070)
1517 ++#define CSR_HW_REV_TYPE_6x50 (0x0000080)
1518 ++#define CSR_HW_REV_TYPE_6150 (0x0000084)
1519 ++#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
1520 ++#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
1521 ++#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
1522 ++#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
1523 ++#define CSR_HW_REV_TYPE_2x00 (0x0000100)
1524 ++#define CSR_HW_REV_TYPE_105 (0x0000110)
1525 ++#define CSR_HW_REV_TYPE_135 (0x0000120)
1526 ++#define CSR_HW_REV_TYPE_7265D (0x0000210)
1527 ++#define CSR_HW_REV_TYPE_NONE (0x00001F0)
1528 +
1529 + /* EEPROM REG */
1530 + #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
1531 +diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
1532 +index c17be0fb7283..8da34a5d024f 100644
1533 +--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
1534 ++++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
1535 +@@ -298,12 +298,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
1536 + u8 *pn = seq.ccmp.pn;
1537 +
1538 + ieee80211_get_key_rx_seq(key, i, &seq);
1539 +- aes_sc->pn = cpu_to_le64((u64)pn[5] |
1540 +- ((u64)pn[4] << 8) |
1541 +- ((u64)pn[3] << 16) |
1542 +- ((u64)pn[2] << 24) |
1543 +- ((u64)pn[1] << 32) |
1544 +- ((u64)pn[0] << 40));
1545 ++ aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
1546 ++ ((u64)pn[4] << 8) |
1547 ++ ((u64)pn[3] << 16) |
1548 ++ ((u64)pn[2] << 24) |
1549 ++ ((u64)pn[1] << 32) |
1550 ++ ((u64)pn[0] << 40));
1551 + }
1552 + data->use_rsc_tsc = true;
1553 + break;
1554 +diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
1555 +index 05cba8c05d3f..a2f624d5771d 100644
1556 +--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
1557 ++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
1558 +@@ -410,6 +410,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
1559 + {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
1560 + {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
1561 + {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
1562 ++ {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
1563 ++ {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
1564 ++ {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
1565 ++ {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
1566 ++ {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
1567 +
1568 + /* 8000 Series */
1569 + {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
1570 +@@ -503,6 +508,7 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
1571 + static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1572 + {
1573 + const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
1574 ++ const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
1575 + struct iwl_trans *iwl_trans;
1576 + struct iwl_trans_pcie *trans_pcie;
1577 + int ret;
1578 +@@ -511,6 +517,25 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1579 + if (IS_ERR(iwl_trans))
1580 + return PTR_ERR(iwl_trans);
1581 +
1582 ++#if IS_ENABLED(CONFIG_IWLMVM)
1583 ++ /*
1584 ++ * special-case 7265D, it has the same PCI IDs.
1585 ++ *
1586 ++ * Note that because we already pass the cfg to the transport above,
1587 ++ * all the parameters that the transport uses must, until that is
1588 ++ * changed, be identical to the ones in the 7265D configuration.
1589 ++ */
1590 ++ if (cfg == &iwl7265_2ac_cfg)
1591 ++ cfg_7265d = &iwl7265d_2ac_cfg;
1592 ++ else if (cfg == &iwl7265_2n_cfg)
1593 ++ cfg_7265d = &iwl7265d_2n_cfg;
1594 ++ else if (cfg == &iwl7265_n_cfg)
1595 ++ cfg_7265d = &iwl7265d_n_cfg;
1596 ++ if (cfg_7265d &&
1597 ++ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
1598 ++ cfg = cfg_7265d;
1599 ++#endif
1600 ++
1601 + pci_set_drvdata(pdev, iwl_trans);
1602 +
1603 + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
1604 +diff --git a/drivers/pci/access.c b/drivers/pci/access.c
1605 +index 49dd766852ba..7f249b9ab2ce 100644
1606 +--- a/drivers/pci/access.c
1607 ++++ b/drivers/pci/access.c
1608 +@@ -352,6 +352,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
1609 + .release = pci_vpd_pci22_release,
1610 + };
1611 +
1612 ++static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
1613 ++ void *arg)
1614 ++{
1615 ++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
1616 ++ ssize_t ret;
1617 ++
1618 ++ if (!tdev)
1619 ++ return -ENODEV;
1620 ++
1621 ++ ret = pci_read_vpd(tdev, pos, count, arg);
1622 ++ pci_dev_put(tdev);
1623 ++ return ret;
1624 ++}
1625 ++
1626 ++static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
1627 ++ const void *arg)
1628 ++{
1629 ++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
1630 ++ ssize_t ret;
1631 ++
1632 ++ if (!tdev)
1633 ++ return -ENODEV;
1634 ++
1635 ++ ret = pci_write_vpd(tdev, pos, count, arg);
1636 ++ pci_dev_put(tdev);
1637 ++ return ret;
1638 ++}
1639 ++
1640 ++static const struct pci_vpd_ops pci_vpd_f0_ops = {
1641 ++ .read = pci_vpd_f0_read,
1642 ++ .write = pci_vpd_f0_write,
1643 ++ .release = pci_vpd_pci22_release,
1644 ++};
1645 ++
1646 ++static int pci_vpd_f0_dev_check(struct pci_dev *dev)
1647 ++{
1648 ++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
1649 ++ int ret = 0;
1650 ++
1651 ++ if (!tdev)
1652 ++ return -ENODEV;
1653 ++ if (!tdev->vpd || !tdev->multifunction ||
1654 ++ dev->class != tdev->class || dev->vendor != tdev->vendor ||
1655 ++ dev->device != tdev->device)
1656 ++ ret = -ENODEV;
1657 ++
1658 ++ pci_dev_put(tdev);
1659 ++ return ret;
1660 ++}
1661 ++
1662 + int pci_vpd_pci22_init(struct pci_dev *dev)
1663 + {
1664 + struct pci_vpd_pci22 *vpd;
1665 +@@ -360,12 +410,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
1666 + cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
1667 + if (!cap)
1668 + return -ENODEV;
1669 ++ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
1670 ++ int ret = pci_vpd_f0_dev_check(dev);
1671 ++
1672 ++ if (ret)
1673 ++ return ret;
1674 ++ }
1675 + vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
1676 + if (!vpd)
1677 + return -ENOMEM;
1678 +
1679 + vpd->base.len = PCI_VPD_PCI22_SIZE;
1680 +- vpd->base.ops = &pci_vpd_pci22_ops;
1681 ++ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
1682 ++ vpd->base.ops = &pci_vpd_f0_ops;
1683 ++ else
1684 ++ vpd->base.ops = &pci_vpd_pci22_ops;
1685 + mutex_init(&vpd->lock);
1686 + vpd->cap = cap;
1687 + vpd->busy = false;
1688 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1689 +index ff7af33f2353..ce0aa47222f6 100644
1690 +--- a/drivers/pci/pci.c
1691 ++++ b/drivers/pci/pci.c
1692 +@@ -3206,7 +3206,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
1693 + {
1694 + u16 csr;
1695 +
1696 +- if (!dev->pm_cap)
1697 ++ if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
1698 + return -ENOTTY;
1699 +
1700 + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
1701 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1702 +index 00b1cd32a4b1..b6d646a97494 100644
1703 +--- a/drivers/pci/quirks.c
1704 ++++ b/drivers/pci/quirks.c
1705 +@@ -1883,6 +1883,15 @@ static void quirk_netmos(struct pci_dev *dev)
1706 + DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
1707 + PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
1708 +
1709 ++static void quirk_f0_vpd_link(struct pci_dev *dev)
1710 ++{
1711 ++ if (!dev->multifunction || !PCI_FUNC(dev->devfn))
1712 ++ return;
1713 ++ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
1714 ++}
1715 ++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1716 ++ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
1717 ++
1718 + static void quirk_e100_interrupt(struct pci_dev *dev)
1719 + {
1720 + u16 command, pmcsr;
1721 +diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
1722 +index e4c95e1a6733..d0e8236a6404 100644
1723 +--- a/drivers/power/bq24190_charger.c
1724 ++++ b/drivers/power/bq24190_charger.c
1725 +@@ -1208,7 +1208,7 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
1726 + {
1727 + struct bq24190_dev_info *bdi = data;
1728 + bool alert_userspace = false;
1729 +- u8 ss_reg, f_reg;
1730 ++ u8 ss_reg = 0, f_reg = 0;
1731 + int ret;
1732 +
1733 + pm_runtime_get_sync(bdi->dev);
1734 +diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
1735 +index 75ffe9980c3e..7c511add5aa7 100644
1736 +--- a/drivers/s390/char/con3270.c
1737 ++++ b/drivers/s390/char/con3270.c
1738 +@@ -413,6 +413,10 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
1739 + else
1740 + /* Normal end. Copy residual count. */
1741 + rq->rescnt = irb->scsw.cmd.count;
1742 ++ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
1743 ++ /* Interrupt without an outstanding request -> update all */
1744 ++ cp->update_flags = CON_UPDATE_ALL;
1745 ++ con3270_set_timer(cp, 1);
1746 + }
1747 + return RAW3270_IO_DONE;
1748 + }
1749 +diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
1750 +index e91b89dc6d1f..e96fc7fd9498 100644
1751 +--- a/drivers/s390/char/tty3270.c
1752 ++++ b/drivers/s390/char/tty3270.c
1753 +@@ -659,6 +659,10 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
1754 + else
1755 + /* Normal end. Copy residual count. */
1756 + rq->rescnt = irb->scsw.cmd.count;
1757 ++ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
1758 ++ /* Interrupt without an outstanding request -> update all */
1759 ++ tp->update_flags = TTY_UPDATE_ALL;
1760 ++ tty3270_set_timer(tp, 1);
1761 + }
1762 + return RAW3270_IO_DONE;
1763 + }
1764 +diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
1765 +index 0eb2da8a696f..89215d44d83f 100644
1766 +--- a/drivers/scsi/mvsas/mv_sas.c
1767 ++++ b/drivers/scsi/mvsas/mv_sas.c
1768 +@@ -988,6 +988,8 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1769 + static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1770 + struct mvs_slot_info *slot, u32 slot_idx)
1771 + {
1772 ++ if (!slot)
1773 ++ return;
1774 + if (!slot->task)
1775 + return;
1776 + if (!sas_protocol_ata(task->task_proto))
1777 +diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
1778 +index ffd42071a12e..6050c58db682 100644
1779 +--- a/drivers/staging/iio/adc/mxs-lradc.c
1780 ++++ b/drivers/staging/iio/adc/mxs-lradc.c
1781 +@@ -910,11 +910,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
1782 + case IIO_CHAN_INFO_OFFSET:
1783 + if (chan->type == IIO_TEMP) {
1784 + /* The calculated value from the ADC is in Kelvin, we
1785 +- * want Celsius for hwmon so the offset is
1786 +- * -272.15 * scale
1787 ++ * want Celsius for hwmon so the offset is -273.15
1788 ++ * The offset is applied before scaling so it is
1789 ++ * actually -213.15 * 4 / 1.012 = -1079.644268
1790 + */
1791 +- *val = -1075;
1792 +- *val2 = 691699;
1793 ++ *val = -1079;
1794 ++ *val2 = 644268;
1795 +
1796 + return IIO_VAL_INT_PLUS_MICRO;
1797 + }
1798 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1799 +index 439bd1a5d00c..422773242917 100644
1800 +--- a/drivers/tty/serial/8250/8250_pci.c
1801 ++++ b/drivers/tty/serial/8250/8250_pci.c
1802 +@@ -1815,6 +1815,9 @@ pci_wch_ch353_setup(struct serial_private *priv,
1803 + #define PCI_DEVICE_ID_SUNIX_1999 0x1999
1804 +
1805 +
1806 ++#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
1807 ++#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
1808 ++
1809 + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
1810 + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
1811 + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
1812 +@@ -2323,6 +2326,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1813 + .subdevice = PCI_ANY_ID,
1814 + .setup = pci_xr17v35x_setup,
1815 + },
1816 ++ {
1817 ++ .vendor = PCI_VENDOR_ID_EXAR,
1818 ++ .device = PCI_DEVICE_ID_EXAR_XR17V4358,
1819 ++ .subvendor = PCI_ANY_ID,
1820 ++ .subdevice = PCI_ANY_ID,
1821 ++ .setup = pci_xr17v35x_setup,
1822 ++ },
1823 ++ {
1824 ++ .vendor = PCI_VENDOR_ID_EXAR,
1825 ++ .device = PCI_DEVICE_ID_EXAR_XR17V8358,
1826 ++ .subvendor = PCI_ANY_ID,
1827 ++ .subdevice = PCI_ANY_ID,
1828 ++ .setup = pci_xr17v35x_setup,
1829 ++ },
1830 + /*
1831 + * Xircom cards
1832 + */
1833 +@@ -2771,6 +2788,8 @@ enum pci_board_num_t {
1834 + pbn_exar_XR17V352,
1835 + pbn_exar_XR17V354,
1836 + pbn_exar_XR17V358,
1837 ++ pbn_exar_XR17V4358,
1838 ++ pbn_exar_XR17V8358,
1839 + pbn_exar_ibm_saturn,
1840 + pbn_pasemi_1682M,
1841 + pbn_ni8430_2,
1842 +@@ -3440,6 +3459,22 @@ static struct pciserial_board pci_boards[] = {
1843 + .reg_shift = 0,
1844 + .first_offset = 0,
1845 + },
1846 ++ [pbn_exar_XR17V4358] = {
1847 ++ .flags = FL_BASE0,
1848 ++ .num_ports = 12,
1849 ++ .base_baud = 7812500,
1850 ++ .uart_offset = 0x400,
1851 ++ .reg_shift = 0,
1852 ++ .first_offset = 0,
1853 ++ },
1854 ++ [pbn_exar_XR17V8358] = {
1855 ++ .flags = FL_BASE0,
1856 ++ .num_ports = 16,
1857 ++ .base_baud = 7812500,
1858 ++ .uart_offset = 0x400,
1859 ++ .reg_shift = 0,
1860 ++ .first_offset = 0,
1861 ++ },
1862 + [pbn_exar_ibm_saturn] = {
1863 + .flags = FL_BASE0,
1864 + .num_ports = 1,
1865 +@@ -4808,7 +4843,7 @@ static struct pci_device_id serial_pci_tbl[] = {
1866 + 0,
1867 + 0, pbn_exar_XR17C158 },
1868 + /*
1869 +- * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs
1870 ++ * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs
1871 + */
1872 + { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352,
1873 + PCI_ANY_ID, PCI_ANY_ID,
1874 +@@ -4822,7 +4857,14 @@ static struct pci_device_id serial_pci_tbl[] = {
1875 + PCI_ANY_ID, PCI_ANY_ID,
1876 + 0,
1877 + 0, pbn_exar_XR17V358 },
1878 +-
1879 ++ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V4358,
1880 ++ PCI_ANY_ID, PCI_ANY_ID,
1881 ++ 0,
1882 ++ 0, pbn_exar_XR17V4358 },
1883 ++ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
1884 ++ PCI_ANY_ID, PCI_ANY_ID,
1885 ++ 0,
1886 ++ 0, pbn_exar_XR17V8358 },
1887 + /*
1888 + * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
1889 + */
1890 +diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
1891 +index 795d6538d630..4188cacaf100 100644
1892 +--- a/drivers/usb/chipidea/debug.c
1893 ++++ b/drivers/usb/chipidea/debug.c
1894 +@@ -66,9 +66,11 @@ static int ci_port_test_show(struct seq_file *s, void *data)
1895 + unsigned long flags;
1896 + unsigned mode;
1897 +
1898 ++ pm_runtime_get_sync(ci->dev);
1899 + spin_lock_irqsave(&ci->lock, flags);
1900 + mode = hw_port_test_get(ci);
1901 + spin_unlock_irqrestore(&ci->lock, flags);
1902 ++ pm_runtime_put_sync(ci->dev);
1903 +
1904 + seq_printf(s, "mode = %u\n", mode);
1905 +
1906 +@@ -94,9 +96,11 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
1907 + if (sscanf(buf, "%u", &mode) != 1)
1908 + return -EINVAL;
1909 +
1910 ++ pm_runtime_get_sync(ci->dev);
1911 + spin_lock_irqsave(&ci->lock, flags);
1912 + ret = hw_port_test_set(ci, mode);
1913 + spin_unlock_irqrestore(&ci->lock, flags);
1914 ++ pm_runtime_put_sync(ci->dev);
1915 +
1916 + return ret ? ret : count;
1917 + }
1918 +@@ -312,8 +316,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
1919 + if (role == CI_ROLE_END || role == ci->role)
1920 + return -EINVAL;
1921 +
1922 ++ pm_runtime_get_sync(ci->dev);
1923 + ci_role_stop(ci);
1924 + ret = ci_role_start(ci, role);
1925 ++ pm_runtime_put_sync(ci->dev);
1926 +
1927 + return ret ? ret : count;
1928 + }
1929 +diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
1930 +index ec978408a2ee..49978cc495e9 100644
1931 +--- a/drivers/usb/class/usbtmc.c
1932 ++++ b/drivers/usb/class/usbtmc.c
1933 +@@ -109,6 +109,7 @@ struct usbtmc_ID_rigol_quirk {
1934 +
1935 + static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
1936 + { 0x1ab1, 0x0588 },
1937 ++ { 0x1ab1, 0x04b0 },
1938 + { 0, 0 }
1939 + };
1940 +
1941 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1942 +index 2af32e26fafc..7e5c90eebb9c 100644
1943 +--- a/drivers/usb/host/xhci-pci.c
1944 ++++ b/drivers/usb/host/xhci-pci.c
1945 +@@ -135,6 +135,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1946 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1947 + pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
1948 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
1949 ++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
1950 + }
1951 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1952 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
1953 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
1954 +index 136259bc93b8..1e5fb8cfc9e3 100644
1955 +--- a/drivers/usb/host/xhci-ring.c
1956 ++++ b/drivers/usb/host/xhci-ring.c
1957 +@@ -2238,6 +2238,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1958 + u32 trb_comp_code;
1959 + int ret = 0;
1960 + int td_num = 0;
1961 ++ bool handling_skipped_tds = false;
1962 +
1963 + slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1964 + xdev = xhci->devs[slot_id];
1965 +@@ -2371,6 +2372,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1966 + ep->skip = true;
1967 + xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
1968 + goto cleanup;
1969 ++ case COMP_PING_ERR:
1970 ++ ep->skip = true;
1971 ++ xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
1972 ++ goto cleanup;
1973 + default:
1974 + if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
1975 + status = 0;
1976 +@@ -2507,13 +2512,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1977 + ep, &status);
1978 +
1979 + cleanup:
1980 ++
1981 ++
1982 ++ handling_skipped_tds = ep->skip &&
1983 ++ trb_comp_code != COMP_MISSED_INT &&
1984 ++ trb_comp_code != COMP_PING_ERR;
1985 ++
1986 + /*
1987 +- * Do not update event ring dequeue pointer if ep->skip is set.
1988 +- * Will roll back to continue process missed tds.
1989 ++ * Do not update event ring dequeue pointer if we're in a loop
1990 ++ * processing missed tds.
1991 + */
1992 +- if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
1993 ++ if (!handling_skipped_tds)
1994 + inc_deq(xhci, xhci->event_ring);
1995 +- }
1996 +
1997 + if (ret) {
1998 + urb = td->urb;
1999 +@@ -2548,7 +2558,7 @@ cleanup:
2000 + * Process them as short transfer until reach the td pointed by
2001 + * the event.
2002 + */
2003 +- } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2004 ++ } while (handling_skipped_tds);
2005 +
2006 + return 0;
2007 + }
2008 +diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
2009 +index d59b232614d5..8dfd86771fac 100644
2010 +--- a/drivers/usb/musb/musb_cppi41.c
2011 ++++ b/drivers/usb/musb/musb_cppi41.c
2012 +@@ -541,10 +541,18 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
2013 + csr &= ~MUSB_TXCSR_DMAENAB;
2014 + musb_writew(epio, MUSB_TXCSR, csr);
2015 + } else {
2016 ++ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
2017 ++
2018 ++ /* delay to drain to cppi dma pipeline for isoch */
2019 ++ udelay(250);
2020 ++
2021 + csr = musb_readw(epio, MUSB_RXCSR);
2022 + csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
2023 + musb_writew(epio, MUSB_RXCSR, csr);
2024 +
2025 ++ /* wait to drain cppi dma pipe line */
2026 ++ udelay(50);
2027 ++
2028 + csr = musb_readw(epio, MUSB_RXCSR);
2029 + if (csr & MUSB_RXCSR_RXPKTRDY) {
2030 + csr |= MUSB_RXCSR_FLUSHFIFO;
2031 +@@ -558,13 +566,14 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
2032 + tdbit <<= 16;
2033 +
2034 + do {
2035 +- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
2036 ++ if (is_tx)
2037 ++ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
2038 + ret = dmaengine_terminate_all(cppi41_channel->dc);
2039 + } while (ret == -EAGAIN);
2040 +
2041 +- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
2042 +-
2043 + if (is_tx) {
2044 ++ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
2045 ++
2046 + csr = musb_readw(epio, MUSB_TXCSR);
2047 + if (csr & MUSB_TXCSR_TXPKTRDY) {
2048 + csr |= MUSB_TXCSR_FLUSHFIFO;
2049 +diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
2050 +index 6ed804450a5a..37f3ad15ed06 100644
2051 +--- a/drivers/usb/serial/symbolserial.c
2052 ++++ b/drivers/usb/serial/symbolserial.c
2053 +@@ -60,17 +60,15 @@ static void symbol_int_callback(struct urb *urb)
2054 +
2055 + usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
2056 +
2057 ++ /*
2058 ++ * Data from the device comes with a 1 byte header:
2059 ++ *
2060 ++ * <size of data> <data>...
2061 ++ */
2062 + if (urb->actual_length > 1) {
2063 +- data_length = urb->actual_length - 1;
2064 +-
2065 +- /*
2066 +- * Data from the device comes with a 1 byte header:
2067 +- *
2068 +- * <size of data>data...
2069 +- * This is real data to be sent to the tty layer
2070 +- * we pretty much just ignore the size and send everything
2071 +- * else to the tty layer.
2072 +- */
2073 ++ data_length = data[0];
2074 ++ if (data_length > (urb->actual_length - 1))
2075 ++ data_length = urb->actual_length - 1;
2076 + tty_insert_flip_string(&port->port, &data[1], data_length);
2077 + tty_flip_buffer_push(&port->port);
2078 + } else {
2079 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
2080 +index cb84f69f76ad..313f09a73624 100644
2081 +--- a/drivers/vhost/scsi.c
2082 ++++ b/drivers/vhost/scsi.c
2083 +@@ -1251,7 +1251,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
2084 + * lun[4-7] need to be zero according to virtio-scsi spec.
2085 + */
2086 + evt->event.lun[0] = 0x01;
2087 +- evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
2088 ++ evt->event.lun[1] = tpg->tport_tpgt;
2089 + if (lun->unpacked_lun >= 256)
2090 + evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
2091 + evt->event.lun[3] = lun->unpacked_lun & 0xFF;
2092 +@@ -2122,12 +2122,12 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
2093 + struct tcm_vhost_tport, tport_wwn);
2094 +
2095 + struct tcm_vhost_tpg *tpg;
2096 +- unsigned long tpgt;
2097 ++ u16 tpgt;
2098 + int ret;
2099 +
2100 + if (strstr(name, "tpgt_") != name)
2101 + return ERR_PTR(-EINVAL);
2102 +- if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2103 ++ if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2104 + return ERR_PTR(-EINVAL);
2105 +
2106 + tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2107 +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
2108 +index 2d3e32ebfd15..cfd1e6f6ac64 100644
2109 +--- a/fs/btrfs/backref.c
2110 ++++ b/fs/btrfs/backref.c
2111 +@@ -1778,7 +1778,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2112 + int found = 0;
2113 + struct extent_buffer *eb;
2114 + struct btrfs_inode_extref *extref;
2115 +- struct extent_buffer *leaf;
2116 + u32 item_size;
2117 + u32 cur_offset;
2118 + unsigned long ptr;
2119 +@@ -1806,9 +1805,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2120 + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
2121 + btrfs_release_path(path);
2122 +
2123 +- leaf = path->nodes[0];
2124 +- item_size = btrfs_item_size_nr(leaf, slot);
2125 +- ptr = btrfs_item_ptr_offset(leaf, slot);
2126 ++ item_size = btrfs_item_size_nr(eb, slot);
2127 ++ ptr = btrfs_item_ptr_offset(eb, slot);
2128 + cur_offset = 0;
2129 +
2130 + while (cur_offset < item_size) {
2131 +@@ -1822,7 +1820,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2132 + if (ret)
2133 + break;
2134 +
2135 +- cur_offset += btrfs_inode_extref_name_len(leaf, extref);
2136 ++ cur_offset += btrfs_inode_extref_name_len(eb, extref);
2137 + cur_offset += sizeof(*extref);
2138 + }
2139 + btrfs_tree_read_unlock_blocking(eb);
2140 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2141 +index 61d00a6e398f..4ae04561be88 100644
2142 +--- a/fs/cifs/cifssmb.c
2143 ++++ b/fs/cifs/cifssmb.c
2144 +@@ -625,9 +625,8 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
2145 + server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
2146 + memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
2147 + CIFS_CRYPTO_KEY_SIZE);
2148 +- } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
2149 +- server->capabilities & CAP_EXTENDED_SECURITY) &&
2150 +- (pSMBr->EncryptionKeyLength == 0)) {
2151 ++ } else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
2152 ++ server->capabilities & CAP_EXTENDED_SECURITY) {
2153 + server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
2154 + rc = decode_ext_sec_blob(ses, pSMBr);
2155 + } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
2156 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2157 +index b5fcb1ac0dd7..b95c32096a68 100644
2158 +--- a/fs/ext4/extents.c
2159 ++++ b/fs/ext4/extents.c
2160 +@@ -4792,12 +4792,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
2161 + else
2162 + max_blocks -= lblk;
2163 +
2164 +- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
2165 +- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
2166 +- EXT4_EX_NOCACHE;
2167 +- if (mode & FALLOC_FL_KEEP_SIZE)
2168 +- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
2169 +-
2170 + mutex_lock(&inode->i_mutex);
2171 +
2172 + /*
2173 +@@ -4814,15 +4808,28 @@ static long ext4_zero_range(struct file *file, loff_t offset,
2174 + ret = inode_newsize_ok(inode, new_size);
2175 + if (ret)
2176 + goto out_mutex;
2177 +- /*
2178 +- * If we have a partial block after EOF we have to allocate
2179 +- * the entire block.
2180 +- */
2181 +- if (partial_end)
2182 +- max_blocks += 1;
2183 + }
2184 +
2185 ++ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
2186 ++ if (mode & FALLOC_FL_KEEP_SIZE)
2187 ++ flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
2188 ++
2189 ++ /* Preallocate the range including the unaligned edges */
2190 ++ if (partial_begin || partial_end) {
2191 ++ ret = ext4_alloc_file_blocks(file,
2192 ++ round_down(offset, 1 << blkbits) >> blkbits,
2193 ++ (round_up((offset + len), 1 << blkbits) -
2194 ++ round_down(offset, 1 << blkbits)) >> blkbits,
2195 ++ new_size, flags, mode);
2196 ++ if (ret)
2197 ++ goto out_mutex;
2198 ++
2199 ++ }
2200 ++
2201 ++ /* Zero range excluding the unaligned edges */
2202 + if (max_blocks > 0) {
2203 ++ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
2204 ++ EXT4_EX_NOCACHE);
2205 +
2206 + /* Now release the pages and zero block aligned part of pages*/
2207 + truncate_pagecache_range(inode, start, end - 1);
2208 +@@ -4836,19 +4843,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
2209 + flags, mode);
2210 + if (ret)
2211 + goto out_dio;
2212 +- /*
2213 +- * Remove entire range from the extent status tree.
2214 +- *
2215 +- * ext4_es_remove_extent(inode, lblk, max_blocks) is
2216 +- * NOT sufficient. I'm not sure why this is the case,
2217 +- * but let's be conservative and remove the extent
2218 +- * status tree for the entire inode. There should be
2219 +- * no outstanding delalloc extents thanks to the
2220 +- * filemap_write_and_wait_range() call above.
2221 +- */
2222 +- ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
2223 +- if (ret)
2224 +- goto out_dio;
2225 + }
2226 + if (!partial_begin && !partial_end)
2227 + goto out_dio;
2228 +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
2229 +index 4227dc4f7437..8c44654ce274 100644
2230 +--- a/fs/jbd2/checkpoint.c
2231 ++++ b/fs/jbd2/checkpoint.c
2232 +@@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
2233 + * journal_clean_one_cp_list
2234 + *
2235 + * Find all the written-back checkpoint buffers in the given list and
2236 +- * release them.
2237 ++ * release them. If 'destroy' is set, clean all buffers unconditionally.
2238 + *
2239 + * Called with j_list_lock held.
2240 + * Returns 1 if we freed the transaction, 0 otherwise.
2241 + */
2242 +-static int journal_clean_one_cp_list(struct journal_head *jh)
2243 ++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
2244 + {
2245 + struct journal_head *last_jh;
2246 + struct journal_head *next_jh = jh;
2247 +@@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
2248 + do {
2249 + jh = next_jh;
2250 + next_jh = jh->b_cpnext;
2251 +- ret = __try_to_free_cp_buf(jh);
2252 ++ if (!destroy)
2253 ++ ret = __try_to_free_cp_buf(jh);
2254 ++ else
2255 ++ ret = __jbd2_journal_remove_checkpoint(jh) + 1;
2256 + if (!ret)
2257 + return freed;
2258 + if (ret == 2)
2259 +@@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
2260 + * journal_clean_checkpoint_list
2261 + *
2262 + * Find all the written-back checkpoint buffers in the journal and release them.
2263 ++ * If 'destroy' is set, release all buffers unconditionally.
2264 + *
2265 + * Called with j_list_lock held.
2266 + */
2267 +-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
2268 ++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
2269 + {
2270 + transaction_t *transaction, *last_transaction, *next_transaction;
2271 + int ret;
2272 +@@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
2273 + do {
2274 + transaction = next_transaction;
2275 + next_transaction = transaction->t_cpnext;
2276 +- ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
2277 ++ ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
2278 ++ destroy);
2279 + /*
2280 + * This function only frees up some memory if possible so we
2281 + * dont have an obligation to finish processing. Bail out if
2282 +@@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
2283 + * we can possibly see not yet submitted buffers on io_list
2284 + */
2285 + ret = journal_clean_one_cp_list(transaction->
2286 +- t_checkpoint_io_list);
2287 ++ t_checkpoint_io_list, destroy);
2288 + if (need_resched())
2289 + return;
2290 + /*
2291 +@@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
2292 + }
2293 +
2294 + /*
2295 ++ * Remove buffers from all checkpoint lists as journal is aborted and we just
2296 ++ * need to free memory
2297 ++ */
2298 ++void jbd2_journal_destroy_checkpoint(journal_t *journal)
2299 ++{
2300 ++ /*
2301 ++ * We loop because __jbd2_journal_clean_checkpoint_list() may abort
2302 ++ * early due to a need of rescheduling.
2303 ++ */
2304 ++ while (1) {
2305 ++ spin_lock(&journal->j_list_lock);
2306 ++ if (!journal->j_checkpoint_transactions) {
2307 ++ spin_unlock(&journal->j_list_lock);
2308 ++ break;
2309 ++ }
2310 ++ __jbd2_journal_clean_checkpoint_list(journal, true);
2311 ++ spin_unlock(&journal->j_list_lock);
2312 ++ cond_resched();
2313 ++ }
2314 ++}
2315 ++
2316 ++/*
2317 + * journal_remove_checkpoint: called after a buffer has been committed
2318 + * to disk (either by being write-back flushed to disk, or being
2319 + * committed to the log).
2320 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
2321 +index b73e0215baa7..362e5f614450 100644
2322 +--- a/fs/jbd2/commit.c
2323 ++++ b/fs/jbd2/commit.c
2324 +@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
2325 + * frees some memory
2326 + */
2327 + spin_lock(&journal->j_list_lock);
2328 +- __jbd2_journal_clean_checkpoint_list(journal);
2329 ++ __jbd2_journal_clean_checkpoint_list(journal, false);
2330 + spin_unlock(&journal->j_list_lock);
2331 +
2332 + jbd_debug(3, "JBD2: commit phase 1\n");
2333 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2334 +index be6f7178d23a..2540324f084b 100644
2335 +--- a/fs/jbd2/journal.c
2336 ++++ b/fs/jbd2/journal.c
2337 +@@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal)
2338 + while (journal->j_checkpoint_transactions != NULL) {
2339 + spin_unlock(&journal->j_list_lock);
2340 + mutex_lock(&journal->j_checkpoint_mutex);
2341 +- jbd2_log_do_checkpoint(journal);
2342 ++ err = jbd2_log_do_checkpoint(journal);
2343 + mutex_unlock(&journal->j_checkpoint_mutex);
2344 ++ /*
2345 ++ * If checkpointing failed, just free the buffers to avoid
2346 ++ * looping forever
2347 ++ */
2348 ++ if (err) {
2349 ++ jbd2_journal_destroy_checkpoint(journal);
2350 ++ spin_lock(&journal->j_list_lock);
2351 ++ break;
2352 ++ }
2353 + spin_lock(&journal->j_list_lock);
2354 + }
2355 +
2356 +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
2357 +index ff560537dd61..2725b03b4ae2 100644
2358 +--- a/include/linux/ipv6.h
2359 ++++ b/include/linux/ipv6.h
2360 +@@ -212,7 +212,7 @@ struct ipv6_pinfo {
2361 + struct ipv6_ac_socklist *ipv6_ac_list;
2362 + struct ipv6_fl_socklist __rcu *ipv6_fl_list;
2363 +
2364 +- struct ipv6_txoptions *opt;
2365 ++ struct ipv6_txoptions __rcu *opt;
2366 + struct sk_buff *pktoptions;
2367 + struct sk_buff *rxpmtu;
2368 + struct {
2369 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
2370 +index dadb42109dec..4caf8acfef11 100644
2371 +--- a/include/linux/jbd2.h
2372 ++++ b/include/linux/jbd2.h
2373 +@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
2374 + extern void jbd2_journal_commit_transaction(journal_t *);
2375 +
2376 + /* Checkpoint list management */
2377 +-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
2378 ++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
2379 + int __jbd2_journal_remove_checkpoint(struct journal_head *);
2380 ++void jbd2_journal_destroy_checkpoint(journal_t *journal);
2381 + void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
2382 +
2383 +
2384 +diff --git a/include/linux/pci.h b/include/linux/pci.h
2385 +index e92cdad3240d..7a3484490867 100644
2386 +--- a/include/linux/pci.h
2387 ++++ b/include/linux/pci.h
2388 +@@ -177,6 +177,10 @@ enum pci_dev_flags {
2389 + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
2390 + /* Do not use bus resets for device */
2391 + PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
2392 ++ /* Do not use PM reset even if device advertises NoSoftRst- */
2393 ++ PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
2394 ++ /* Get VPD from function 0 VPD */
2395 ++ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
2396 + };
2397 +
2398 + enum pci_irq_reroute_variant {
2399 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2400 +index 522d83731709..3b57c6712495 100644
2401 +--- a/include/linux/skbuff.h
2402 ++++ b/include/linux/skbuff.h
2403 +@@ -2549,6 +2549,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2404 + {
2405 + if (skb->ip_summed == CHECKSUM_COMPLETE)
2406 + skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2407 ++ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2408 ++ skb_checksum_start_offset(skb) < 0)
2409 ++ skb->ip_summed = CHECKSUM_NONE;
2410 + }
2411 +
2412 + unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2413 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
2414 +index a175ba4a7adb..dfe4ddfbb43c 100644
2415 +--- a/include/net/af_unix.h
2416 ++++ b/include/net/af_unix.h
2417 +@@ -64,7 +64,11 @@ struct unix_sock {
2418 + #define UNIX_GC_MAYBE_CYCLE 1
2419 + struct socket_wq peer_wq;
2420 + };
2421 +-#define unix_sk(__sk) ((struct unix_sock *)__sk)
2422 ++
2423 ++static inline struct unix_sock *unix_sk(struct sock *sk)
2424 ++{
2425 ++ return (struct unix_sock *)sk;
2426 ++}
2427 +
2428 + #define peer_wait peer_wq.wait
2429 +
2430 +diff --git a/include/net/inet_common.h b/include/net/inet_common.h
2431 +index b2828a06a5a6..a7d812d05fba 100644
2432 +--- a/include/net/inet_common.h
2433 ++++ b/include/net/inet_common.h
2434 +@@ -42,7 +42,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
2435 +
2436 + static inline void inet_ctl_sock_destroy(struct sock *sk)
2437 + {
2438 +- sk_release_kernel(sk);
2439 ++ if (sk)
2440 ++ sk_release_kernel(sk);
2441 + }
2442 +
2443 + #endif
2444 +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
2445 +index a5593dab6af7..ef9557683fec 100644
2446 +--- a/include/net/ip6_tunnel.h
2447 ++++ b/include/net/ip6_tunnel.h
2448 +@@ -79,11 +79,12 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
2449 + err = ip6_local_out(skb);
2450 +
2451 + if (net_xmit_eval(err) == 0) {
2452 +- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2453 ++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
2454 + u64_stats_update_begin(&tstats->syncp);
2455 + tstats->tx_bytes += pkt_len;
2456 + tstats->tx_packets++;
2457 + u64_stats_update_end(&tstats->syncp);
2458 ++ put_cpu_ptr(tstats);
2459 + } else {
2460 + stats->tx_errors++;
2461 + stats->tx_aborted_errors++;
2462 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
2463 +index 5bc6edeb7143..a3f2f8005126 100644
2464 +--- a/include/net/ip_tunnels.h
2465 ++++ b/include/net/ip_tunnels.h
2466 +@@ -186,12 +186,13 @@ static inline void iptunnel_xmit_stats(int err,
2467 + struct pcpu_sw_netstats __percpu *stats)
2468 + {
2469 + if (err > 0) {
2470 +- struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
2471 ++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
2472 +
2473 + u64_stats_update_begin(&tstats->syncp);
2474 + tstats->tx_bytes += err;
2475 + tstats->tx_packets++;
2476 + u64_stats_update_end(&tstats->syncp);
2477 ++ put_cpu_ptr(tstats);
2478 + } else if (err < 0) {
2479 + err_stats->tx_errors++;
2480 + err_stats->tx_aborted_errors++;
2481 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
2482 +index 4292929392b0..a5169a4e9ef7 100644
2483 +--- a/include/net/ipv6.h
2484 ++++ b/include/net/ipv6.h
2485 +@@ -207,6 +207,7 @@ extern rwlock_t ip6_ra_lock;
2486 + */
2487 +
2488 + struct ipv6_txoptions {
2489 ++ atomic_t refcnt;
2490 + /* Length of this structure */
2491 + int tot_len;
2492 +
2493 +@@ -219,7 +220,7 @@ struct ipv6_txoptions {
2494 + struct ipv6_opt_hdr *dst0opt;
2495 + struct ipv6_rt_hdr *srcrt; /* Routing Header */
2496 + struct ipv6_opt_hdr *dst1opt;
2497 +-
2498 ++ struct rcu_head rcu;
2499 + /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
2500 + };
2501 +
2502 +@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
2503 + struct rcu_head rcu;
2504 + };
2505 +
2506 ++static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
2507 ++{
2508 ++ struct ipv6_txoptions *opt;
2509 ++
2510 ++ rcu_read_lock();
2511 ++ opt = rcu_dereference(np->opt);
2512 ++ if (opt && !atomic_inc_not_zero(&opt->refcnt))
2513 ++ opt = NULL;
2514 ++ rcu_read_unlock();
2515 ++ return opt;
2516 ++}
2517 ++
2518 ++static inline void txopt_put(struct ipv6_txoptions *opt)
2519 ++{
2520 ++ if (opt && atomic_dec_and_test(&opt->refcnt))
2521 ++ kfree_rcu(opt, rcu);
2522 ++}
2523 ++
2524 + struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
2525 + struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
2526 + struct ip6_flowlabel *fl,
2527 +@@ -490,6 +509,7 @@ struct ip6_create_arg {
2528 + u32 user;
2529 + const struct in6_addr *src;
2530 + const struct in6_addr *dst;
2531 ++ int iif;
2532 + u8 ecn;
2533 + };
2534 +
2535 +diff --git a/include/net/sock.h b/include/net/sock.h
2536 +index 4406dbe491f0..a098ce3cd242 100644
2537 +--- a/include/net/sock.h
2538 ++++ b/include/net/sock.h
2539 +@@ -819,6 +819,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
2540 + if (sk_rcvqueues_full(sk, limit))
2541 + return -ENOBUFS;
2542 +
2543 ++ /*
2544 ++ * If the skb was allocated from pfmemalloc reserves, only
2545 ++ * allow SOCK_MEMALLOC sockets to use it as this socket is
2546 ++ * helping free memory
2547 ++ */
2548 ++ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
2549 ++ return -ENOMEM;
2550 ++
2551 + __sk_add_backlog(sk, skb);
2552 + sk->sk_backlog.len += skb->truesize;
2553 + return 0;
2554 +diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
2555 +index 898be3a8db9a..6d8f8fba3341 100644
2556 +--- a/include/sound/wm8904.h
2557 ++++ b/include/sound/wm8904.h
2558 +@@ -119,7 +119,7 @@
2559 + #define WM8904_MIC_REGS 2
2560 + #define WM8904_GPIO_REGS 4
2561 + #define WM8904_DRC_REGS 4
2562 +-#define WM8904_EQ_REGS 25
2563 ++#define WM8904_EQ_REGS 24
2564 +
2565 + /**
2566 + * DRC configurations are specified with a label and a set of register
2567 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
2568 +index e420a0c41b5f..cc3416f0deda 100644
2569 +--- a/kernel/auditsc.c
2570 ++++ b/kernel/auditsc.c
2571 +@@ -72,6 +72,8 @@
2572 + #include <linux/fs_struct.h>
2573 + #include <linux/compat.h>
2574 + #include <linux/ctype.h>
2575 ++#include <linux/string.h>
2576 ++#include <uapi/linux/limits.h>
2577 +
2578 + #include "audit.h"
2579 +
2580 +@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
2581 + }
2582 +
2583 + list_for_each_entry_reverse(n, &context->names_list, list) {
2584 +- /* does the name pointer match? */
2585 +- if (!n->name || n->name->name != name->name)
2586 ++ if (!n->name || strcmp(n->name->name, name->name))
2587 + continue;
2588 +
2589 + /* match the correct record type */
2590 +@@ -1877,12 +1878,48 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
2591 + }
2592 +
2593 + out_alloc:
2594 +- /* unable to find the name from a previous getname(). Allocate a new
2595 +- * anonymous entry.
2596 +- */
2597 +- n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
2598 ++ /* unable to find an entry with both a matching name and type */
2599 ++ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
2600 + if (!n)
2601 + return;
2602 ++ /* unfortunately, while we may have a path name to record with the
2603 ++ * inode, we can't always rely on the string lasting until the end of
2604 ++ * the syscall so we need to create our own copy, it may fail due to
2605 ++ * memory allocation issues, but we do our best */
2606 ++ if (name) {
2607 ++ /* we can't use getname_kernel() due to size limits */
2608 ++ size_t len = strlen(name->name) + 1;
2609 ++ struct filename *new = __getname();
2610 ++
2611 ++ if (unlikely(!new))
2612 ++ goto out;
2613 ++
2614 ++ if (len <= (PATH_MAX - sizeof(*new))) {
2615 ++ new->name = (char *)(new) + sizeof(*new);
2616 ++ new->separate = false;
2617 ++ } else if (len <= PATH_MAX) {
2618 ++ /* this looks odd, but is due to final_putname() */
2619 ++ struct filename *new2;
2620 ++
2621 ++ new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
2622 ++ if (unlikely(!new2)) {
2623 ++ __putname(new);
2624 ++ goto out;
2625 ++ }
2626 ++ new2->name = (char *)new;
2627 ++ new2->separate = true;
2628 ++ new = new2;
2629 ++ } else {
2630 ++ /* we should never get here, but let's be safe */
2631 ++ __putname(new);
2632 ++ goto out;
2633 ++ }
2634 ++ strlcpy((char *)new->name, name->name, len);
2635 ++ new->uptr = NULL;
2636 ++ new->aname = n;
2637 ++ n->name = new;
2638 ++ n->name_put = true;
2639 ++ }
2640 + out:
2641 + if (parent) {
2642 + n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
2643 +diff --git a/kernel/module.c b/kernel/module.c
2644 +index c353707bbbd5..3da0c001d985 100644
2645 +--- a/kernel/module.c
2646 ++++ b/kernel/module.c
2647 +@@ -914,11 +914,15 @@ void symbol_put_addr(void *addr)
2648 + if (core_kernel_text(a))
2649 + return;
2650 +
2651 +- /* module_text_address is safe here: we're supposed to have reference
2652 +- * to module from symbol_get, so it can't go away. */
2653 ++ /*
2654 ++ * Even though we hold a reference on the module; we still need to
2655 ++ * disable preemption in order to safely traverse the data structure.
2656 ++ */
2657 ++ preempt_disable();
2658 + modaddr = __module_text_address(a);
2659 + BUG_ON(!modaddr);
2660 + module_put(modaddr);
2661 ++ preempt_enable();
2662 + }
2663 + EXPORT_SYMBOL_GPL(symbol_put_addr);
2664 +
2665 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2666 +index a882dd91722d..d650e1e593b8 100644
2667 +--- a/kernel/sched/core.c
2668 ++++ b/kernel/sched/core.c
2669 +@@ -5284,6 +5284,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
2670 + case CPU_STARTING:
2671 + set_cpu_rq_start_time();
2672 + return NOTIFY_OK;
2673 ++ case CPU_ONLINE:
2674 ++ /*
2675 ++ * At this point a starting CPU has marked itself as online via
2676 ++ * set_cpu_online(). But it might not yet have marked itself
2677 ++ * as active, which is essential from here on.
2678 ++ *
2679 ++ * Thus, fall-through and help the starting CPU along.
2680 ++ */
2681 + case CPU_DOWN_FAILED:
2682 + set_cpu_active((long)hcpu, true);
2683 + return NOTIFY_OK;
2684 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2685 +index 2273f534b01a..bd3c41d4ec07 100644
2686 +--- a/kernel/workqueue.c
2687 ++++ b/kernel/workqueue.c
2688 +@@ -1442,13 +1442,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2689 + timer_stats_timer_set_start_info(&dwork->timer);
2690 +
2691 + dwork->wq = wq;
2692 ++ /* timer isn't guaranteed to run in this cpu, record earlier */
2693 ++ if (cpu == WORK_CPU_UNBOUND)
2694 ++ cpu = raw_smp_processor_id();
2695 + dwork->cpu = cpu;
2696 + timer->expires = jiffies + delay;
2697 +
2698 +- if (unlikely(cpu != WORK_CPU_UNBOUND))
2699 +- add_timer_on(timer, cpu);
2700 +- else
2701 +- add_timer(timer);
2702 ++ add_timer_on(timer, cpu);
2703 + }
2704 +
2705 + /**
2706 +diff --git a/lib/radix-tree.c b/lib/radix-tree.c
2707 +index 3291a8e37490..3d2aa27b845b 100644
2708 +--- a/lib/radix-tree.c
2709 ++++ b/lib/radix-tree.c
2710 +@@ -33,7 +33,7 @@
2711 + #include <linux/string.h>
2712 + #include <linux/bitops.h>
2713 + #include <linux/rcupdate.h>
2714 +-#include <linux/hardirq.h> /* in_interrupt() */
2715 ++#include <linux/preempt_mask.h> /* in_interrupt() */
2716 +
2717 +
2718 + /*
2719 +diff --git a/mm/filemap.c b/mm/filemap.c
2720 +index 37beab98b416..7e6ab98d4d3c 100644
2721 +--- a/mm/filemap.c
2722 ++++ b/mm/filemap.c
2723 +@@ -2489,6 +2489,11 @@ again:
2724 + break;
2725 + }
2726 +
2727 ++ if (fatal_signal_pending(current)) {
2728 ++ status = -EINTR;
2729 ++ break;
2730 ++ }
2731 ++
2732 + status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2733 + &page, &fsdata);
2734 + if (unlikely(status < 0))
2735 +@@ -2526,10 +2531,6 @@ again:
2736 + written += copied;
2737 +
2738 + balance_dirty_pages_ratelimited(mapping);
2739 +- if (fatal_signal_pending(current)) {
2740 +- status = -EINTR;
2741 +- break;
2742 +- }
2743 + } while (iov_iter_count(i));
2744 +
2745 + return written ? written : status;
2746 +diff --git a/net/core/datagram.c b/net/core/datagram.c
2747 +index 61e99f315ed9..3a402a7b20e9 100644
2748 +--- a/net/core/datagram.c
2749 ++++ b/net/core/datagram.c
2750 +@@ -130,35 +130,6 @@ out_noerr:
2751 + goto out;
2752 + }
2753 +
2754 +-static int skb_set_peeked(struct sk_buff *skb)
2755 +-{
2756 +- struct sk_buff *nskb;
2757 +-
2758 +- if (skb->peeked)
2759 +- return 0;
2760 +-
2761 +- /* We have to unshare an skb before modifying it. */
2762 +- if (!skb_shared(skb))
2763 +- goto done;
2764 +-
2765 +- nskb = skb_clone(skb, GFP_ATOMIC);
2766 +- if (!nskb)
2767 +- return -ENOMEM;
2768 +-
2769 +- skb->prev->next = nskb;
2770 +- skb->next->prev = nskb;
2771 +- nskb->prev = skb->prev;
2772 +- nskb->next = skb->next;
2773 +-
2774 +- consume_skb(skb);
2775 +- skb = nskb;
2776 +-
2777 +-done:
2778 +- skb->peeked = 1;
2779 +-
2780 +- return 0;
2781 +-}
2782 +-
2783 + /**
2784 + * __skb_recv_datagram - Receive a datagram skbuff
2785 + * @sk: socket
2786 +@@ -193,9 +164,7 @@ done:
2787 + struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
2788 + int *peeked, int *off, int *err)
2789 + {
2790 +- struct sk_buff_head *queue = &sk->sk_receive_queue;
2791 + struct sk_buff *skb, *last;
2792 +- unsigned long cpu_flags;
2793 + long timeo;
2794 + /*
2795 + * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
2796 +@@ -214,6 +183,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
2797 + * Look at current nfs client by the way...
2798 + * However, this function was correct in any case. 8)
2799 + */
2800 ++ unsigned long cpu_flags;
2801 ++ struct sk_buff_head *queue = &sk->sk_receive_queue;
2802 + int _off = *off;
2803 +
2804 + last = (struct sk_buff *)queue;
2805 +@@ -227,11 +198,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
2806 + _off -= skb->len;
2807 + continue;
2808 + }
2809 +-
2810 +- error = skb_set_peeked(skb);
2811 +- if (error)
2812 +- goto unlock_err;
2813 +-
2814 ++ skb->peeked = 1;
2815 + atomic_inc(&skb->users);
2816 + } else
2817 + __skb_unlink(skb, queue);
2818 +@@ -255,8 +222,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
2819 +
2820 + return NULL;
2821 +
2822 +-unlock_err:
2823 +- spin_unlock_irqrestore(&queue->lock, cpu_flags);
2824 + no_packet:
2825 + *err = error;
2826 + return NULL;
2827 +diff --git a/net/core/dst.c b/net/core/dst.c
2828 +index a028409ee438..a80e92346b9b 100644
2829 +--- a/net/core/dst.c
2830 ++++ b/net/core/dst.c
2831 +@@ -285,7 +285,7 @@ void dst_release(struct dst_entry *dst)
2832 +
2833 + newrefcnt = atomic_dec_return(&dst->__refcnt);
2834 + WARN_ON(newrefcnt < 0);
2835 +- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
2836 ++ if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
2837 + call_rcu(&dst->rcu_head, dst_destroy_rcu);
2838 + }
2839 + }
2840 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2841 +index 06dfb293e5aa..14bb1583947e 100644
2842 +--- a/net/core/ethtool.c
2843 ++++ b/net/core/ethtool.c
2844 +@@ -1257,7 +1257,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
2845 +
2846 + gstrings.len = ret;
2847 +
2848 +- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
2849 ++ data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
2850 + if (!data)
2851 + return -ENOMEM;
2852 +
2853 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2854 +index 2b0d99dad8be..0478423afd29 100644
2855 +--- a/net/core/neighbour.c
2856 ++++ b/net/core/neighbour.c
2857 +@@ -2263,7 +2263,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2858 + ndm->ndm_pad2 = 0;
2859 + ndm->ndm_flags = pn->flags | NTF_PROXY;
2860 + ndm->ndm_type = RTN_UNICAST;
2861 +- ndm->ndm_ifindex = pn->dev->ifindex;
2862 ++ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2863 + ndm->ndm_state = NUD_NONE;
2864 +
2865 + if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2866 +@@ -2337,7 +2337,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2867 + if (h > s_h)
2868 + s_idx = 0;
2869 + for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2870 +- if (dev_net(n->dev) != net)
2871 ++ if (pneigh_net(n) != net)
2872 + continue;
2873 + if (idx < s_idx)
2874 + goto next;
2875 +diff --git a/net/core/scm.c b/net/core/scm.c
2876 +index b442e7e25e60..d30eb057fa7b 100644
2877 +--- a/net/core/scm.c
2878 ++++ b/net/core/scm.c
2879 +@@ -306,6 +306,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
2880 + err = put_user(cmlen, &cm->cmsg_len);
2881 + if (!err) {
2882 + cmlen = CMSG_SPACE(i*sizeof(int));
2883 ++ if (msg->msg_controllen < cmlen)
2884 ++ cmlen = msg->msg_controllen;
2885 + msg->msg_control += cmlen;
2886 + msg->msg_controllen -= cmlen;
2887 + }
2888 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2889 +index 72400a1bb439..ea0bcc4a9657 100644
2890 +--- a/net/core/skbuff.c
2891 ++++ b/net/core/skbuff.c
2892 +@@ -2881,11 +2881,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
2893 + */
2894 + unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2895 + {
2896 ++ unsigned char *data = skb->data;
2897 ++
2898 + BUG_ON(len > skb->len);
2899 +- skb->len -= len;
2900 +- BUG_ON(skb->len < skb->data_len);
2901 +- skb_postpull_rcsum(skb, skb->data, len);
2902 +- return skb->data += len;
2903 ++ __skb_pull(skb, len);
2904 ++ skb_postpull_rcsum(skb, data, len);
2905 ++ return skb->data;
2906 + }
2907 + EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2908 +
2909 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
2910 +index 6bcaa33cd804..7bcb22317841 100644
2911 +--- a/net/dccp/ipv6.c
2912 ++++ b/net/dccp/ipv6.c
2913 +@@ -238,7 +238,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
2914 + security_req_classify_flow(req, flowi6_to_flowi(&fl6));
2915 +
2916 +
2917 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
2918 ++ rcu_read_lock();
2919 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
2920 ++ rcu_read_unlock();
2921 +
2922 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
2923 + if (IS_ERR(dst)) {
2924 +@@ -255,7 +257,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
2925 + &ireq->ir_v6_loc_addr,
2926 + &ireq->ir_v6_rmt_addr);
2927 + fl6.daddr = ireq->ir_v6_rmt_addr;
2928 +- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
2929 ++ rcu_read_lock();
2930 ++ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
2931 ++ np->tclass);
2932 ++ rcu_read_unlock();
2933 + err = net_xmit_eval(err);
2934 + }
2935 +
2936 +@@ -450,6 +455,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
2937 + {
2938 + struct inet_request_sock *ireq = inet_rsk(req);
2939 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
2940 ++ struct ipv6_txoptions *opt;
2941 + struct inet_sock *newinet;
2942 + struct dccp6_sock *newdp6;
2943 + struct sock *newsk;
2944 +@@ -573,13 +579,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
2945 + * Yes, keeping reference count would be much more clever, but we make
2946 + * one more one thing there: reattach optmem to newsk.
2947 + */
2948 +- if (np->opt != NULL)
2949 +- newnp->opt = ipv6_dup_options(newsk, np->opt);
2950 +-
2951 ++ opt = rcu_dereference(np->opt);
2952 ++ if (opt) {
2953 ++ opt = ipv6_dup_options(newsk, opt);
2954 ++ RCU_INIT_POINTER(newnp->opt, opt);
2955 ++ }
2956 + inet_csk(newsk)->icsk_ext_hdr_len = 0;
2957 +- if (newnp->opt != NULL)
2958 +- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
2959 +- newnp->opt->opt_flen);
2960 ++ if (opt)
2961 ++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
2962 ++ opt->opt_flen;
2963 +
2964 + dccp_sync_mss(newsk, dst_mtu(dst));
2965 +
2966 +@@ -832,6 +840,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
2967 + struct ipv6_pinfo *np = inet6_sk(sk);
2968 + struct dccp_sock *dp = dccp_sk(sk);
2969 + struct in6_addr *saddr = NULL, *final_p, final;
2970 ++ struct ipv6_txoptions *opt;
2971 + struct flowi6 fl6;
2972 + struct dst_entry *dst;
2973 + int addr_type;
2974 +@@ -933,7 +942,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
2975 + fl6.fl6_sport = inet->inet_sport;
2976 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
2977 +
2978 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
2979 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
2980 ++ final_p = fl6_update_dst(&fl6, opt, &final);
2981 +
2982 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
2983 + if (IS_ERR(dst)) {
2984 +@@ -953,9 +963,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
2985 + __ip6_dst_store(sk, dst, NULL, NULL);
2986 +
2987 + icsk->icsk_ext_hdr_len = 0;
2988 +- if (np->opt != NULL)
2989 +- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
2990 +- np->opt->opt_nflen);
2991 ++ if (opt)
2992 ++ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
2993 +
2994 + inet->inet_dport = usin->sin6_port;
2995 +
2996 +diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
2997 +index 51973ddc05a6..abc50b41bc39 100644
2998 +--- a/net/ipv4/gre_offload.c
2999 ++++ b/net/ipv4/gre_offload.c
3000 +@@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
3001 + SKB_GSO_TCP_ECN |
3002 + SKB_GSO_GRE |
3003 + SKB_GSO_GRE_CSUM |
3004 +- SKB_GSO_IPIP)))
3005 ++ SKB_GSO_IPIP |
3006 ++ SKB_GSO_SIT)))
3007 + goto out;
3008 +
3009 + if (!skb->encapsulation)
3010 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
3011 +index c8034587859d..1b7f6da99ef4 100644
3012 +--- a/net/ipv4/ipmr.c
3013 ++++ b/net/ipv4/ipmr.c
3014 +@@ -136,7 +136,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
3015 + struct mfc_cache *c, struct rtmsg *rtm);
3016 + static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
3017 + int cmd);
3018 +-static void mroute_clean_tables(struct mr_table *mrt);
3019 ++static void mroute_clean_tables(struct mr_table *mrt, bool all);
3020 + static void ipmr_expire_process(unsigned long arg);
3021 +
3022 + #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
3023 +@@ -348,7 +348,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
3024 + static void ipmr_free_table(struct mr_table *mrt)
3025 + {
3026 + del_timer_sync(&mrt->ipmr_expire_timer);
3027 +- mroute_clean_tables(mrt);
3028 ++ mroute_clean_tables(mrt, true);
3029 + kfree(mrt);
3030 + }
3031 +
3032 +@@ -1201,7 +1201,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
3033 + * Close the multicast socket, and clear the vif tables etc
3034 + */
3035 +
3036 +-static void mroute_clean_tables(struct mr_table *mrt)
3037 ++static void mroute_clean_tables(struct mr_table *mrt, bool all)
3038 + {
3039 + int i;
3040 + LIST_HEAD(list);
3041 +@@ -1210,8 +1210,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
3042 + /* Shut down all active vif entries */
3043 +
3044 + for (i = 0; i < mrt->maxvif; i++) {
3045 +- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
3046 +- vif_delete(mrt, i, 0, &list);
3047 ++ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
3048 ++ continue;
3049 ++ vif_delete(mrt, i, 0, &list);
3050 + }
3051 + unregister_netdevice_many(&list);
3052 +
3053 +@@ -1219,7 +1220,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
3054 +
3055 + for (i = 0; i < MFC_LINES; i++) {
3056 + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
3057 +- if (c->mfc_flags & MFC_STATIC)
3058 ++ if (!all && (c->mfc_flags & MFC_STATIC))
3059 + continue;
3060 + list_del_rcu(&c->list);
3061 + mroute_netlink_event(mrt, c, RTM_DELROUTE);
3062 +@@ -1254,7 +1255,7 @@ static void mrtsock_destruct(struct sock *sk)
3063 + NETCONFA_IFINDEX_ALL,
3064 + net->ipv4.devconf_all);
3065 + RCU_INIT_POINTER(mrt->mroute_sk, NULL);
3066 +- mroute_clean_tables(mrt);
3067 ++ mroute_clean_tables(mrt, false);
3068 + }
3069 + }
3070 + rtnl_unlock();
3071 +@@ -1674,8 +1675,8 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
3072 + {
3073 + struct ip_options *opt = &(IPCB(skb)->opt);
3074 +
3075 +- IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
3076 +- IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
3077 ++ IP_INC_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
3078 ++ IP_ADD_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
3079 +
3080 + if (unlikely(opt->optlen))
3081 + ip_forward_options(skb);
3082 +@@ -1737,7 +1738,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
3083 + * to blackhole.
3084 + */
3085 +
3086 +- IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
3087 ++ IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
3088 + ip_rt_put(rt);
3089 + goto out_free;
3090 + }
3091 +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
3092 +index b3c53c8b331e..633cd34f57ec 100644
3093 +--- a/net/ipv4/sysctl_net_ipv4.c
3094 ++++ b/net/ipv4/sysctl_net_ipv4.c
3095 +@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
3096 + /* Update system visible IP port range */
3097 + static void set_local_port_range(struct net *net, int range[2])
3098 + {
3099 +- write_seqlock(&net->ipv4.ip_local_ports.lock);
3100 ++ write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
3101 + net->ipv4.ip_local_ports.range[0] = range[0];
3102 + net->ipv4.ip_local_ports.range[1] = range[1];
3103 +- write_sequnlock(&net->ipv4.ip_local_ports.lock);
3104 ++ write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
3105 + }
3106 +
3107 + /* Validate changes from /proc interface. */
3108 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3109 +index 6f46cde58e54..0c96055b2382 100644
3110 +--- a/net/ipv4/tcp_input.c
3111 ++++ b/net/ipv4/tcp_input.c
3112 +@@ -4356,19 +4356,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
3113 + int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
3114 + {
3115 + struct sk_buff *skb;
3116 ++ int err = -ENOMEM;
3117 ++ int data_len = 0;
3118 + bool fragstolen;
3119 +
3120 + if (size == 0)
3121 + return 0;
3122 +
3123 +- skb = alloc_skb(size, sk->sk_allocation);
3124 ++ if (size > PAGE_SIZE) {
3125 ++ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
3126 ++
3127 ++ data_len = npages << PAGE_SHIFT;
3128 ++ size = data_len + (size & ~PAGE_MASK);
3129 ++ }
3130 ++ skb = alloc_skb_with_frags(size - data_len, data_len,
3131 ++ PAGE_ALLOC_COSTLY_ORDER,
3132 ++ &err, sk->sk_allocation);
3133 + if (!skb)
3134 + goto err;
3135 +
3136 ++ skb_put(skb, size - data_len);
3137 ++ skb->data_len = data_len;
3138 ++ skb->len = size;
3139 ++
3140 + if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
3141 + goto err_free;
3142 +
3143 +- if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
3144 ++ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
3145 ++ if (err)
3146 + goto err_free;
3147 +
3148 + TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
3149 +@@ -4384,7 +4399,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
3150 + err_free:
3151 + kfree_skb(skb);
3152 + err:
3153 +- return -ENOMEM;
3154 ++ return err;
3155 ++
3156 + }
3157 +
3158 + static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
3159 +@@ -5524,6 +5540,7 @@ discard:
3160 + }
3161 +
3162 + tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
3163 ++ tp->copied_seq = tp->rcv_nxt;
3164 + tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
3165 +
3166 + /* RFC1323: The window in SYN & SYN/ACK segments is
3167 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3168 +index a5fdfe9fa542..6cd9f696d9c6 100644
3169 +--- a/net/ipv4/tcp_ipv4.c
3170 ++++ b/net/ipv4/tcp_ipv4.c
3171 +@@ -959,7 +959,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
3172 + }
3173 +
3174 + md5sig = rcu_dereference_protected(tp->md5sig_info,
3175 +- sock_owned_by_user(sk));
3176 ++ sock_owned_by_user(sk) ||
3177 ++ lockdep_is_held(&sk->sk_lock.slock));
3178 + if (!md5sig) {
3179 + md5sig = kmalloc(sizeof(*md5sig), gfp);
3180 + if (!md5sig)
3181 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3182 +index 17a025847999..e34efa766031 100644
3183 +--- a/net/ipv6/addrconf.c
3184 ++++ b/net/ipv6/addrconf.c
3185 +@@ -396,6 +396,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
3186 + if (err) {
3187 + ipv6_mc_destroy_dev(ndev);
3188 + del_timer(&ndev->regen_timer);
3189 ++ snmp6_unregister_dev(ndev);
3190 + goto err_release;
3191 + }
3192 + /* protected by rtnl_lock */
3193 +@@ -4843,6 +4844,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
3194 + return ret;
3195 + }
3196 +
3197 ++static
3198 ++int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
3199 ++ void __user *buffer, size_t *lenp, loff_t *ppos)
3200 ++{
3201 ++ struct inet6_dev *idev = ctl->extra1;
3202 ++ int min_mtu = IPV6_MIN_MTU;
3203 ++ struct ctl_table lctl;
3204 ++
3205 ++ lctl = *ctl;
3206 ++ lctl.extra1 = &min_mtu;
3207 ++ lctl.extra2 = idev ? &idev->dev->mtu : NULL;
3208 ++
3209 ++ return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
3210 ++}
3211 ++
3212 + static void dev_disable_change(struct inet6_dev *idev)
3213 + {
3214 + struct netdev_notifier_info info;
3215 +@@ -4994,7 +5010,7 @@ static struct addrconf_sysctl_table
3216 + .data = &ipv6_devconf.mtu6,
3217 + .maxlen = sizeof(int),
3218 + .mode = 0644,
3219 +- .proc_handler = proc_dointvec,
3220 ++ .proc_handler = addrconf_sysctl_mtu,
3221 + },
3222 + {
3223 + .procname = "accept_ra",
3224 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
3225 +index e8c4400f23e9..05417c330f4e 100644
3226 +--- a/net/ipv6/af_inet6.c
3227 ++++ b/net/ipv6/af_inet6.c
3228 +@@ -425,9 +425,11 @@ void inet6_destroy_sock(struct sock *sk)
3229 +
3230 + /* Free tx options */
3231 +
3232 +- opt = xchg(&np->opt, NULL);
3233 +- if (opt != NULL)
3234 +- sock_kfree_s(sk, opt, opt->tot_len);
3235 ++ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
3236 ++ if (opt) {
3237 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
3238 ++ txopt_put(opt);
3239 ++ }
3240 + }
3241 + EXPORT_SYMBOL_GPL(inet6_destroy_sock);
3242 +
3243 +@@ -656,7 +658,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
3244 + fl6.fl6_sport = inet->inet_sport;
3245 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
3246 +
3247 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
3248 ++ rcu_read_lock();
3249 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
3250 ++ &final);
3251 ++ rcu_read_unlock();
3252 +
3253 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
3254 + if (IS_ERR(dst)) {
3255 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
3256 +index e069aeb2cf72..9e3b0b66a4f3 100644
3257 +--- a/net/ipv6/datagram.c
3258 ++++ b/net/ipv6/datagram.c
3259 +@@ -167,8 +167,10 @@ ipv4_connected:
3260 +
3261 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
3262 +
3263 +- opt = flowlabel ? flowlabel->opt : np->opt;
3264 ++ rcu_read_lock();
3265 ++ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
3266 + final_p = fl6_update_dst(&fl6, opt, &final);
3267 ++ rcu_read_unlock();
3268 +
3269 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
3270 + err = 0;
3271 +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
3272 +index bfde361b6134..4f08a0f452eb 100644
3273 +--- a/net/ipv6/exthdrs.c
3274 ++++ b/net/ipv6/exthdrs.c
3275 +@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
3276 + *((char **)&opt2->dst1opt) += dif;
3277 + if (opt2->srcrt)
3278 + *((char **)&opt2->srcrt) += dif;
3279 ++ atomic_set(&opt2->refcnt, 1);
3280 + }
3281 + return opt2;
3282 + }
3283 +@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
3284 + return ERR_PTR(-ENOBUFS);
3285 +
3286 + memset(opt2, 0, tot_len);
3287 +-
3288 ++ atomic_set(&opt2->refcnt, 1);
3289 + opt2->tot_len = tot_len;
3290 + p = (char *)(opt2 + 1);
3291 +
3292 +diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
3293 +index 29b32206e494..6cc516c825b6 100644
3294 +--- a/net/ipv6/inet6_connection_sock.c
3295 ++++ b/net/ipv6/inet6_connection_sock.c
3296 +@@ -77,7 +77,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
3297 + memset(fl6, 0, sizeof(*fl6));
3298 + fl6->flowi6_proto = IPPROTO_TCP;
3299 + fl6->daddr = ireq->ir_v6_rmt_addr;
3300 +- final_p = fl6_update_dst(fl6, np->opt, &final);
3301 ++ rcu_read_lock();
3302 ++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
3303 ++ rcu_read_unlock();
3304 + fl6->saddr = ireq->ir_v6_loc_addr;
3305 + fl6->flowi6_oif = ireq->ir_iif;
3306 + fl6->flowi6_mark = ireq->ir_mark;
3307 +@@ -208,7 +210,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
3308 + fl6->fl6_dport = inet->inet_dport;
3309 + security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
3310 +
3311 +- final_p = fl6_update_dst(fl6, np->opt, &final);
3312 ++ rcu_read_lock();
3313 ++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
3314 ++ rcu_read_unlock();
3315 +
3316 + dst = __inet6_csk_dst_check(sk, np->dst_cookie);
3317 + if (!dst) {
3318 +@@ -241,7 +245,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
3319 + /* Restore final destination back after routing done */
3320 + fl6.daddr = sk->sk_v6_daddr;
3321 +
3322 +- res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
3323 ++ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
3324 ++ np->tclass);
3325 + rcu_read_unlock();
3326 + return res;
3327 + }
3328 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
3329 +index 0d58542f9db0..4ca7cdd15aad 100644
3330 +--- a/net/ipv6/ip6mr.c
3331 ++++ b/net/ipv6/ip6mr.c
3332 +@@ -120,7 +120,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
3333 + int cmd);
3334 + static int ip6mr_rtm_dumproute(struct sk_buff *skb,
3335 + struct netlink_callback *cb);
3336 +-static void mroute_clean_tables(struct mr6_table *mrt);
3337 ++static void mroute_clean_tables(struct mr6_table *mrt, bool all);
3338 + static void ipmr_expire_process(unsigned long arg);
3339 +
3340 + #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
3341 +@@ -337,7 +337,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
3342 + static void ip6mr_free_table(struct mr6_table *mrt)
3343 + {
3344 + del_timer(&mrt->ipmr_expire_timer);
3345 +- mroute_clean_tables(mrt);
3346 ++ mroute_clean_tables(mrt, true);
3347 + kfree(mrt);
3348 + }
3349 +
3350 +@@ -1540,7 +1540,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
3351 + * Close the multicast socket, and clear the vif tables etc
3352 + */
3353 +
3354 +-static void mroute_clean_tables(struct mr6_table *mrt)
3355 ++static void mroute_clean_tables(struct mr6_table *mrt, bool all)
3356 + {
3357 + int i;
3358 + LIST_HEAD(list);
3359 +@@ -1550,8 +1550,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
3360 + * Shut down all active vif entries
3361 + */
3362 + for (i = 0; i < mrt->maxvif; i++) {
3363 +- if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
3364 +- mif6_delete(mrt, i, &list);
3365 ++ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
3366 ++ continue;
3367 ++ mif6_delete(mrt, i, &list);
3368 + }
3369 + unregister_netdevice_many(&list);
3370 +
3371 +@@ -1560,7 +1561,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
3372 + */
3373 + for (i = 0; i < MFC6_LINES; i++) {
3374 + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
3375 +- if (c->mfc_flags & MFC_STATIC)
3376 ++ if (!all && (c->mfc_flags & MFC_STATIC))
3377 + continue;
3378 + write_lock_bh(&mrt_lock);
3379 + list_del(&c->list);
3380 +@@ -1623,7 +1624,7 @@ int ip6mr_sk_done(struct sock *sk)
3381 + net->ipv6.devconf_all);
3382 + write_unlock_bh(&mrt_lock);
3383 +
3384 +- mroute_clean_tables(mrt);
3385 ++ mroute_clean_tables(mrt, false);
3386 + err = 0;
3387 + break;
3388 + }
3389 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
3390 +index e1a9583bb419..f81fcc09ea6c 100644
3391 +--- a/net/ipv6/ipv6_sockglue.c
3392 ++++ b/net/ipv6/ipv6_sockglue.c
3393 +@@ -110,10 +110,12 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
3394 + icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
3395 + icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
3396 + }
3397 +- opt = xchg(&inet6_sk(sk)->opt, opt);
3398 ++ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
3399 ++ opt);
3400 + } else {
3401 + spin_lock(&sk->sk_dst_lock);
3402 +- opt = xchg(&inet6_sk(sk)->opt, opt);
3403 ++ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
3404 ++ opt);
3405 + spin_unlock(&sk->sk_dst_lock);
3406 + }
3407 + sk_dst_reset(sk);
3408 +@@ -213,9 +215,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
3409 + sk->sk_socket->ops = &inet_dgram_ops;
3410 + sk->sk_family = PF_INET;
3411 + }
3412 +- opt = xchg(&np->opt, NULL);
3413 +- if (opt)
3414 +- sock_kfree_s(sk, opt, opt->tot_len);
3415 ++ opt = xchg((__force struct ipv6_txoptions **)&np->opt,
3416 ++ NULL);
3417 ++ if (opt) {
3418 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
3419 ++ txopt_put(opt);
3420 ++ }
3421 + pktopt = xchg(&np->pktoptions, NULL);
3422 + kfree_skb(pktopt);
3423 +
3424 +@@ -385,7 +390,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
3425 + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
3426 + break;
3427 +
3428 +- opt = ipv6_renew_options(sk, np->opt, optname,
3429 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
3430 ++ opt = ipv6_renew_options(sk, opt, optname,
3431 + (struct ipv6_opt_hdr __user *)optval,
3432 + optlen);
3433 + if (IS_ERR(opt)) {
3434 +@@ -414,8 +420,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
3435 + retv = 0;
3436 + opt = ipv6_update_options(sk, opt);
3437 + sticky_done:
3438 +- if (opt)
3439 +- sock_kfree_s(sk, opt, opt->tot_len);
3440 ++ if (opt) {
3441 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
3442 ++ txopt_put(opt);
3443 ++ }
3444 + break;
3445 + }
3446 +
3447 +@@ -468,6 +476,7 @@ sticky_done:
3448 + break;
3449 +
3450 + memset(opt, 0, sizeof(*opt));
3451 ++ atomic_set(&opt->refcnt, 1);
3452 + opt->tot_len = sizeof(*opt) + optlen;
3453 + retv = -EFAULT;
3454 + if (copy_from_user(opt+1, optval, optlen))
3455 +@@ -484,8 +493,10 @@ update:
3456 + retv = 0;
3457 + opt = ipv6_update_options(sk, opt);
3458 + done:
3459 +- if (opt)
3460 +- sock_kfree_s(sk, opt, opt->tot_len);
3461 ++ if (opt) {
3462 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
3463 ++ txopt_put(opt);
3464 ++ }
3465 + break;
3466 + }
3467 + case IPV6_UNICAST_HOPS:
3468 +@@ -1092,10 +1103,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
3469 + case IPV6_RTHDR:
3470 + case IPV6_DSTOPTS:
3471 + {
3472 ++ struct ipv6_txoptions *opt;
3473 +
3474 + lock_sock(sk);
3475 +- len = ipv6_getsockopt_sticky(sk, np->opt,
3476 +- optname, optval, len);
3477 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
3478 ++ len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
3479 + release_sock(sk);
3480 + /* check if ipv6_getsockopt_sticky() returns err code */
3481 + if (len < 0)
3482 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
3483 +index ed2c4e400b46..5aedf76fe287 100644
3484 +--- a/net/ipv6/mcast.c
3485 ++++ b/net/ipv6/mcast.c
3486 +@@ -1652,7 +1652,6 @@ out:
3487 + if (!err) {
3488 + ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
3489 + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
3490 +- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
3491 + } else {
3492 + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
3493 + }
3494 +@@ -2015,7 +2014,6 @@ out:
3495 + if (!err) {
3496 + ICMP6MSGOUT_INC_STATS(net, idev, type);
3497 + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
3498 +- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
3499 + } else
3500 + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
3501 +
3502 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3503 +index 6f187c8d8a1b..d235ed7f47ab 100644
3504 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3505 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3506 +@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
3507 + /* Creation primitives. */
3508 + static inline struct frag_queue *fq_find(struct net *net, __be32 id,
3509 + u32 user, struct in6_addr *src,
3510 +- struct in6_addr *dst, u8 ecn)
3511 ++ struct in6_addr *dst, int iif, u8 ecn)
3512 + {
3513 + struct inet_frag_queue *q;
3514 + struct ip6_create_arg arg;
3515 +@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
3516 + arg.user = user;
3517 + arg.src = src;
3518 + arg.dst = dst;
3519 ++ arg.iif = iif;
3520 + arg.ecn = ecn;
3521 +
3522 + local_bh_disable();
3523 +@@ -603,7 +604,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
3524 + fhdr = (struct frag_hdr *)skb_transport_header(clone);
3525 +
3526 + fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
3527 +- ip6_frag_ecn(hdr));
3528 ++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
3529 + if (fq == NULL) {
3530 + pr_debug("Can't find and can't create new queue\n");
3531 + goto ret_orig;
3532 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
3533 +index 896af8807979..a66a67d17ed6 100644
3534 +--- a/net/ipv6/raw.c
3535 ++++ b/net/ipv6/raw.c
3536 +@@ -735,6 +735,7 @@ static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
3537 + static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
3538 + struct msghdr *msg, size_t len)
3539 + {
3540 ++ struct ipv6_txoptions *opt_to_free = NULL;
3541 + struct ipv6_txoptions opt_space;
3542 + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
3543 + struct in6_addr *daddr, *final_p, final;
3544 +@@ -840,8 +841,10 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
3545 + if (!(opt->opt_nflen|opt->opt_flen))
3546 + opt = NULL;
3547 + }
3548 +- if (opt == NULL)
3549 +- opt = np->opt;
3550 ++ if (!opt) {
3551 ++ opt = txopt_get(np);
3552 ++ opt_to_free = opt;
3553 ++ }
3554 + if (flowlabel)
3555 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
3556 + opt = ipv6_fixup_options(&opt_space, opt);
3557 +@@ -902,6 +905,7 @@ done:
3558 + dst_release(dst);
3559 + out:
3560 + fl6_sock_release(flowlabel);
3561 ++ txopt_put(opt_to_free);
3562 + return err < 0 ? err : len;
3563 + do_confirm:
3564 + dst_confirm(dst);
3565 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
3566 +index 1a157ca2ebc1..28e72f396fde 100644
3567 +--- a/net/ipv6/reassembly.c
3568 ++++ b/net/ipv6/reassembly.c
3569 +@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
3570 + return fq->id == arg->id &&
3571 + fq->user == arg->user &&
3572 + ipv6_addr_equal(&fq->saddr, arg->src) &&
3573 +- ipv6_addr_equal(&fq->daddr, arg->dst);
3574 ++ ipv6_addr_equal(&fq->daddr, arg->dst) &&
3575 ++ (arg->iif == fq->iif ||
3576 ++ !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
3577 ++ IPV6_ADDR_LINKLOCAL)));
3578 + }
3579 + EXPORT_SYMBOL(ip6_frag_match);
3580 +
3581 +@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
3582 +
3583 + static __inline__ struct frag_queue *
3584 + fq_find(struct net *net, __be32 id, const struct in6_addr *src,
3585 +- const struct in6_addr *dst, u8 ecn)
3586 ++ const struct in6_addr *dst, int iif, u8 ecn)
3587 + {
3588 + struct inet_frag_queue *q;
3589 + struct ip6_create_arg arg;
3590 +@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
3591 + arg.user = IP6_DEFRAG_LOCAL_DELIVER;
3592 + arg.src = src;
3593 + arg.dst = dst;
3594 ++ arg.iif = iif;
3595 + arg.ecn = ecn;
3596 +
3597 + hash = inet6_hash_frag(id, src, dst);
3598 +@@ -550,7 +554,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
3599 + }
3600 +
3601 + fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
3602 +- ip6_frag_ecn(hdr));
3603 ++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
3604 + if (fq != NULL) {
3605 + int ret;
3606 +
3607 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3608 +index a24557a1c1d8..45eae1e609d6 100644
3609 +--- a/net/ipv6/sit.c
3610 ++++ b/net/ipv6/sit.c
3611 +@@ -1394,34 +1394,20 @@ static int ipip6_tunnel_init(struct net_device *dev)
3612 + return 0;
3613 + }
3614 +
3615 +-static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
3616 ++static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
3617 + {
3618 + struct ip_tunnel *tunnel = netdev_priv(dev);
3619 + struct iphdr *iph = &tunnel->parms.iph;
3620 + struct net *net = dev_net(dev);
3621 + struct sit_net *sitn = net_generic(net, sit_net_id);
3622 +
3623 +- tunnel->dev = dev;
3624 +- tunnel->net = dev_net(dev);
3625 +-
3626 + iph->version = 4;
3627 + iph->protocol = IPPROTO_IPV6;
3628 + iph->ihl = 5;
3629 + iph->ttl = 64;
3630 +
3631 +- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3632 +- if (!dev->tstats)
3633 +- return -ENOMEM;
3634 +-
3635 +- tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
3636 +- if (!tunnel->dst_cache) {
3637 +- free_percpu(dev->tstats);
3638 +- return -ENOMEM;
3639 +- }
3640 +-
3641 + dev_hold(dev);
3642 + rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
3643 +- return 0;
3644 + }
3645 +
3646 + static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
3647 +@@ -1831,23 +1817,18 @@ static int __net_init sit_init_net(struct net *net)
3648 + */
3649 + sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
3650 +
3651 +- err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
3652 +- if (err)
3653 +- goto err_dev_free;
3654 +-
3655 +- ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
3656 +-
3657 + if ((err = register_netdev(sitn->fb_tunnel_dev)))
3658 + goto err_reg_dev;
3659 +
3660 ++ ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
3661 ++ ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
3662 ++
3663 + t = netdev_priv(sitn->fb_tunnel_dev);
3664 +
3665 + strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
3666 + return 0;
3667 +
3668 + err_reg_dev:
3669 +- dev_put(sitn->fb_tunnel_dev);
3670 +-err_dev_free:
3671 + ipip6_dev_free(sitn->fb_tunnel_dev);
3672 + err_alloc_dev:
3673 + return err;
3674 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
3675 +index 2f25cb6347ca..aa9699301ea8 100644
3676 +--- a/net/ipv6/syncookies.c
3677 ++++ b/net/ipv6/syncookies.c
3678 +@@ -241,7 +241,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
3679 + memset(&fl6, 0, sizeof(fl6));
3680 + fl6.flowi6_proto = IPPROTO_TCP;
3681 + fl6.daddr = ireq->ir_v6_rmt_addr;
3682 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
3683 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
3684 + fl6.saddr = ireq->ir_v6_loc_addr;
3685 + fl6.flowi6_oif = sk->sk_bound_dev_if;
3686 + fl6.flowi6_mark = ireq->ir_mark;
3687 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3688 +index a3f9f11abf4c..26feadd0b763 100644
3689 +--- a/net/ipv6/tcp_ipv6.c
3690 ++++ b/net/ipv6/tcp_ipv6.c
3691 +@@ -134,6 +134,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
3692 + struct ipv6_pinfo *np = inet6_sk(sk);
3693 + struct tcp_sock *tp = tcp_sk(sk);
3694 + struct in6_addr *saddr = NULL, *final_p, final;
3695 ++ struct ipv6_txoptions *opt;
3696 + struct rt6_info *rt;
3697 + struct flowi6 fl6;
3698 + struct dst_entry *dst;
3699 +@@ -253,7 +254,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
3700 + fl6.fl6_dport = usin->sin6_port;
3701 + fl6.fl6_sport = inet->inet_sport;
3702 +
3703 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
3704 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
3705 ++ final_p = fl6_update_dst(&fl6, opt, &final);
3706 +
3707 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
3708 +
3709 +@@ -282,9 +284,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
3710 + tcp_fetch_timewait_stamp(sk, dst);
3711 +
3712 + icsk->icsk_ext_hdr_len = 0;
3713 +- if (np->opt)
3714 +- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
3715 +- np->opt->opt_nflen);
3716 ++ if (opt)
3717 ++ icsk->icsk_ext_hdr_len = opt->opt_flen +
3718 ++ opt->opt_nflen;
3719 +
3720 + tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
3721 +
3722 +@@ -501,7 +503,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
3723 + fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
3724 +
3725 + skb_set_queue_mapping(skb, queue_mapping);
3726 +- err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
3727 ++ err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
3728 ++ np->tclass);
3729 + err = net_xmit_eval(err);
3730 + }
3731 +
3732 +@@ -1052,6 +1055,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
3733 + struct inet_request_sock *ireq;
3734 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
3735 + struct tcp6_sock *newtcp6sk;
3736 ++ struct ipv6_txoptions *opt;
3737 + struct inet_sock *newinet;
3738 + struct tcp_sock *newtp;
3739 + struct sock *newsk;
3740 +@@ -1191,13 +1195,15 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
3741 + but we make one more one thing there: reattach optmem
3742 + to newsk.
3743 + */
3744 +- if (np->opt)
3745 +- newnp->opt = ipv6_dup_options(newsk, np->opt);
3746 +-
3747 ++ opt = rcu_dereference(np->opt);
3748 ++ if (opt) {
3749 ++ opt = ipv6_dup_options(newsk, opt);
3750 ++ RCU_INIT_POINTER(newnp->opt, opt);
3751 ++ }
3752 + inet_csk(newsk)->icsk_ext_hdr_len = 0;
3753 +- if (newnp->opt)
3754 +- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
3755 +- newnp->opt->opt_flen);
3756 ++ if (opt)
3757 ++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
3758 ++ opt->opt_flen;
3759 +
3760 + tcp_sync_mss(newsk, dst_mtu(dst));
3761 + newtp->advmss = dst_metric_advmss(dst);
3762 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3763 +index dd530f0e5a8a..a5ce70502699 100644
3764 +--- a/net/ipv6/udp.c
3765 ++++ b/net/ipv6/udp.c
3766 +@@ -1082,6 +1082,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
3767 + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
3768 + struct in6_addr *daddr, *final_p, final;
3769 + struct ipv6_txoptions *opt = NULL;
3770 ++ struct ipv6_txoptions *opt_to_free = NULL;
3771 + struct ip6_flowlabel *flowlabel = NULL;
3772 + struct flowi6 fl6;
3773 + struct dst_entry *dst;
3774 +@@ -1234,8 +1235,10 @@ do_udp_sendmsg:
3775 + opt = NULL;
3776 + connected = 0;
3777 + }
3778 +- if (opt == NULL)
3779 +- opt = np->opt;
3780 ++ if (!opt) {
3781 ++ opt = txopt_get(np);
3782 ++ opt_to_free = opt;
3783 ++ }
3784 + if (flowlabel)
3785 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
3786 + opt = ipv6_fixup_options(&opt_space, opt);
3787 +@@ -1329,6 +1332,7 @@ do_append_data:
3788 + out:
3789 + dst_release(dst);
3790 + fl6_sock_release(flowlabel);
3791 ++ txopt_put(opt_to_free);
3792 + if (!err)
3793 + return len;
3794 + /*
3795 +diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
3796 +index a5f28d421ea8..c3a939d7848f 100644
3797 +--- a/net/irda/irlmp.c
3798 ++++ b/net/irda/irlmp.c
3799 +@@ -1877,7 +1877,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
3800 + for (element = hashbin_get_first(iter->hashbin);
3801 + element != NULL;
3802 + element = hashbin_get_next(iter->hashbin)) {
3803 +- if (!off || *off-- == 0) {
3804 ++ if (!off || (*off)-- == 0) {
3805 + /* NB: hashbin left locked */
3806 + return element;
3807 + }
3808 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
3809 +index 895348e44c7d..508154a04558 100644
3810 +--- a/net/l2tp/l2tp_core.c
3811 ++++ b/net/l2tp/l2tp_core.c
3812 +@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
3813 + tunnel = container_of(work, struct l2tp_tunnel, del_work);
3814 + sk = l2tp_tunnel_sock_lookup(tunnel);
3815 + if (!sk)
3816 +- return;
3817 ++ goto out;
3818 +
3819 + sock = sk->sk_socket;
3820 +
3821 +@@ -1340,6 +1340,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
3822 + }
3823 +
3824 + l2tp_tunnel_sock_put(sk);
3825 ++out:
3826 ++ l2tp_tunnel_dec_refcount(tunnel);
3827 + }
3828 +
3829 + /* Create a socket for the tunnel, if one isn't set up by
3830 +@@ -1639,8 +1641,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
3831 + */
3832 + int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
3833 + {
3834 ++ l2tp_tunnel_inc_refcount(tunnel);
3835 + l2tp_tunnel_closeall(tunnel);
3836 +- return (false == queue_work(l2tp_wq, &tunnel->del_work));
3837 ++ if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
3838 ++ l2tp_tunnel_dec_refcount(tunnel);
3839 ++ return 1;
3840 ++ }
3841 ++ return 0;
3842 + }
3843 + EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
3844 +
3845 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
3846 +index 0edb263cc002..38658826175c 100644
3847 +--- a/net/l2tp/l2tp_ip6.c
3848 ++++ b/net/l2tp/l2tp_ip6.c
3849 +@@ -487,6 +487,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
3850 + DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
3851 + struct in6_addr *daddr, *final_p, final;
3852 + struct ipv6_pinfo *np = inet6_sk(sk);
3853 ++ struct ipv6_txoptions *opt_to_free = NULL;
3854 + struct ipv6_txoptions *opt = NULL;
3855 + struct ip6_flowlabel *flowlabel = NULL;
3856 + struct dst_entry *dst = NULL;
3857 +@@ -576,8 +577,10 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
3858 + opt = NULL;
3859 + }
3860 +
3861 +- if (opt == NULL)
3862 +- opt = np->opt;
3863 ++ if (!opt) {
3864 ++ opt = txopt_get(np);
3865 ++ opt_to_free = opt;
3866 ++ }
3867 + if (flowlabel)
3868 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
3869 + opt = ipv6_fixup_options(&opt_space, opt);
3870 +@@ -632,6 +635,7 @@ done:
3871 + dst_release(dst);
3872 + out:
3873 + fl6_sock_release(flowlabel);
3874 ++ txopt_put(opt_to_free);
3875 +
3876 + return err < 0 ? err : len;
3877 +
3878 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3879 +index bf6097793170..22853af1b6b7 100644
3880 +--- a/net/packet/af_packet.c
3881 ++++ b/net/packet/af_packet.c
3882 +@@ -1507,6 +1507,20 @@ static void fanout_release(struct sock *sk)
3883 + mutex_unlock(&fanout_mutex);
3884 + }
3885 +
3886 ++static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
3887 ++ struct sk_buff *skb)
3888 ++{
3889 ++ /* Earlier code assumed this would be a VLAN pkt, double-check
3890 ++ * this now that we have the actual packet in hand. We can only
3891 ++ * do this check on Ethernet devices.
3892 ++ */
3893 ++ if (unlikely(dev->type != ARPHRD_ETHER))
3894 ++ return false;
3895 ++
3896 ++ skb_reset_mac_header(skb);
3897 ++ return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
3898 ++}
3899 ++
3900 + static const struct proto_ops packet_ops;
3901 +
3902 + static const struct proto_ops packet_ops_spkt;
3903 +@@ -1668,18 +1682,10 @@ retry:
3904 + goto retry;
3905 + }
3906 +
3907 +- if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
3908 +- /* Earlier code assumed this would be a VLAN pkt,
3909 +- * double-check this now that we have the actual
3910 +- * packet in hand.
3911 +- */
3912 +- struct ethhdr *ehdr;
3913 +- skb_reset_mac_header(skb);
3914 +- ehdr = eth_hdr(skb);
3915 +- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
3916 +- err = -EMSGSIZE;
3917 +- goto out_unlock;
3918 +- }
3919 ++ if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
3920 ++ !packet_extra_vlan_len_allowed(dev, skb)) {
3921 ++ err = -EMSGSIZE;
3922 ++ goto out_unlock;
3923 + }
3924 +
3925 + skb->protocol = proto;
3926 +@@ -2081,6 +2087,15 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
3927 + sock_wfree(skb);
3928 + }
3929 +
3930 ++static void tpacket_set_protocol(const struct net_device *dev,
3931 ++ struct sk_buff *skb)
3932 ++{
3933 ++ if (dev->type == ARPHRD_ETHER) {
3934 ++ skb_reset_mac_header(skb);
3935 ++ skb->protocol = eth_hdr(skb)->h_proto;
3936 ++ }
3937 ++}
3938 ++
3939 + static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3940 + void *frame, struct net_device *dev, int size_max,
3941 + __be16 proto, unsigned char *addr, int hlen)
3942 +@@ -2117,8 +2132,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3943 + skb_reserve(skb, hlen);
3944 + skb_reset_network_header(skb);
3945 +
3946 +- if (!packet_use_direct_xmit(po))
3947 +- skb_probe_transport_header(skb, 0);
3948 + if (unlikely(po->tp_tx_has_off)) {
3949 + int off_min, off_max, off;
3950 + off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
3951 +@@ -2168,6 +2181,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3952 + dev->hard_header_len);
3953 + if (unlikely(err))
3954 + return err;
3955 ++ if (!skb->protocol)
3956 ++ tpacket_set_protocol(dev, skb);
3957 +
3958 + data += dev->hard_header_len;
3959 + to_write -= dev->hard_header_len;
3960 +@@ -2202,6 +2217,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3961 + len = ((to_write > len_max) ? len_max : to_write);
3962 + }
3963 +
3964 ++ skb_probe_transport_header(skb, 0);
3965 ++
3966 + return tp_len;
3967 + }
3968 +
3969 +@@ -2246,12 +2263,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3970 + if (unlikely(!(dev->flags & IFF_UP)))
3971 + goto out_put;
3972 +
3973 +- reserve = dev->hard_header_len + VLAN_HLEN;
3974 ++ if (po->sk.sk_socket->type == SOCK_RAW)
3975 ++ reserve = dev->hard_header_len;
3976 + size_max = po->tx_ring.frame_size
3977 + - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
3978 +
3979 +- if (size_max > dev->mtu + reserve)
3980 +- size_max = dev->mtu + reserve;
3981 ++ if (size_max > dev->mtu + reserve + VLAN_HLEN)
3982 ++ size_max = dev->mtu + reserve + VLAN_HLEN;
3983 +
3984 + do {
3985 + ph = packet_current_frame(po, &po->tx_ring,
3986 +@@ -2274,18 +2292,11 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3987 +
3988 + tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
3989 + addr, hlen);
3990 +- if (tp_len > dev->mtu + dev->hard_header_len) {
3991 +- struct ethhdr *ehdr;
3992 +- /* Earlier code assumed this would be a VLAN pkt,
3993 +- * double-check this now that we have the actual
3994 +- * packet in hand.
3995 +- */
3996 ++ if (likely(tp_len >= 0) &&
3997 ++ tp_len > dev->mtu + reserve &&
3998 ++ !packet_extra_vlan_len_allowed(dev, skb))
3999 ++ tp_len = -EMSGSIZE;
4000 +
4001 +- skb_reset_mac_header(skb);
4002 +- ehdr = eth_hdr(skb);
4003 +- if (ehdr->h_proto != htons(ETH_P_8021Q))
4004 +- tp_len = -EMSGSIZE;
4005 +- }
4006 + if (unlikely(tp_len < 0)) {
4007 + if (po->tp_loss) {
4008 + __packet_set_status(po, ph,
4009 +@@ -2497,18 +2508,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4010 +
4011 + sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
4012 +
4013 +- if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
4014 +- /* Earlier code assumed this would be a VLAN pkt,
4015 +- * double-check this now that we have the actual
4016 +- * packet in hand.
4017 +- */
4018 +- struct ethhdr *ehdr;
4019 +- skb_reset_mac_header(skb);
4020 +- ehdr = eth_hdr(skb);
4021 +- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
4022 +- err = -EMSGSIZE;
4023 +- goto out_free;
4024 +- }
4025 ++ if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
4026 ++ !packet_extra_vlan_len_allowed(dev, skb)) {
4027 ++ err = -EMSGSIZE;
4028 ++ goto out_free;
4029 + }
4030 +
4031 + skb->protocol = proto;
4032 +@@ -2537,8 +2540,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4033 + len += vnet_hdr_len;
4034 + }
4035 +
4036 +- if (!packet_use_direct_xmit(po))
4037 +- skb_probe_transport_header(skb, reserve);
4038 ++ skb_probe_transport_header(skb, reserve);
4039 ++
4040 + if (unlikely(extra_len == 4))
4041 + skb->no_fcs = 1;
4042 +
4043 +@@ -2642,22 +2645,40 @@ static int packet_release(struct socket *sock)
4044 + * Attach a packet hook.
4045 + */
4046 +
4047 +-static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
4048 ++static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
4049 ++ __be16 proto)
4050 + {
4051 + struct packet_sock *po = pkt_sk(sk);
4052 + struct net_device *dev_curr;
4053 + __be16 proto_curr;
4054 + bool need_rehook;
4055 ++ struct net_device *dev = NULL;
4056 ++ int ret = 0;
4057 ++ bool unlisted = false;
4058 +
4059 +- if (po->fanout) {
4060 +- if (dev)
4061 +- dev_put(dev);
4062 +-
4063 ++ if (po->fanout)
4064 + return -EINVAL;
4065 +- }
4066 +
4067 + lock_sock(sk);
4068 + spin_lock(&po->bind_lock);
4069 ++ rcu_read_lock();
4070 ++
4071 ++ if (name) {
4072 ++ dev = dev_get_by_name_rcu(sock_net(sk), name);
4073 ++ if (!dev) {
4074 ++ ret = -ENODEV;
4075 ++ goto out_unlock;
4076 ++ }
4077 ++ } else if (ifindex) {
4078 ++ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
4079 ++ if (!dev) {
4080 ++ ret = -ENODEV;
4081 ++ goto out_unlock;
4082 ++ }
4083 ++ }
4084 ++
4085 ++ if (dev)
4086 ++ dev_hold(dev);
4087 +
4088 + proto_curr = po->prot_hook.type;
4089 + dev_curr = po->prot_hook.dev;
4090 +@@ -2665,14 +2686,29 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
4091 + need_rehook = proto_curr != proto || dev_curr != dev;
4092 +
4093 + if (need_rehook) {
4094 +- unregister_prot_hook(sk, true);
4095 ++ if (po->running) {
4096 ++ rcu_read_unlock();
4097 ++ __unregister_prot_hook(sk, true);
4098 ++ rcu_read_lock();
4099 ++ dev_curr = po->prot_hook.dev;
4100 ++ if (dev)
4101 ++ unlisted = !dev_get_by_index_rcu(sock_net(sk),
4102 ++ dev->ifindex);
4103 ++ }
4104 +
4105 + po->num = proto;
4106 + po->prot_hook.type = proto;
4107 +- po->prot_hook.dev = dev;
4108 +
4109 +- po->ifindex = dev ? dev->ifindex : 0;
4110 +- packet_cached_dev_assign(po, dev);
4111 ++ if (unlikely(unlisted)) {
4112 ++ dev_put(dev);
4113 ++ po->prot_hook.dev = NULL;
4114 ++ po->ifindex = -1;
4115 ++ packet_cached_dev_reset(po);
4116 ++ } else {
4117 ++ po->prot_hook.dev = dev;
4118 ++ po->ifindex = dev ? dev->ifindex : 0;
4119 ++ packet_cached_dev_assign(po, dev);
4120 ++ }
4121 + }
4122 + if (dev_curr)
4123 + dev_put(dev_curr);
4124 +@@ -2680,7 +2716,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
4125 + if (proto == 0 || !need_rehook)
4126 + goto out_unlock;
4127 +
4128 +- if (!dev || (dev->flags & IFF_UP)) {
4129 ++ if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
4130 + register_prot_hook(sk);
4131 + } else {
4132 + sk->sk_err = ENETDOWN;
4133 +@@ -2689,9 +2725,10 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
4134 + }
4135 +
4136 + out_unlock:
4137 ++ rcu_read_unlock();
4138 + spin_unlock(&po->bind_lock);
4139 + release_sock(sk);
4140 +- return 0;
4141 ++ return ret;
4142 + }
4143 +
4144 + /*
4145 +@@ -2703,8 +2740,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
4146 + {
4147 + struct sock *sk = sock->sk;
4148 + char name[15];
4149 +- struct net_device *dev;
4150 +- int err = -ENODEV;
4151 +
4152 + /*
4153 + * Check legality
4154 +@@ -2714,19 +2749,13 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
4155 + return -EINVAL;
4156 + strlcpy(name, uaddr->sa_data, sizeof(name));
4157 +
4158 +- dev = dev_get_by_name(sock_net(sk), name);
4159 +- if (dev)
4160 +- err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
4161 +- return err;
4162 ++ return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
4163 + }
4164 +
4165 + static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
4166 + {
4167 + struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
4168 + struct sock *sk = sock->sk;
4169 +- struct net_device *dev = NULL;
4170 +- int err;
4171 +-
4172 +
4173 + /*
4174 + * Check legality
4175 +@@ -2737,16 +2766,8 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
4176 + if (sll->sll_family != AF_PACKET)
4177 + return -EINVAL;
4178 +
4179 +- if (sll->sll_ifindex) {
4180 +- err = -ENODEV;
4181 +- dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
4182 +- if (dev == NULL)
4183 +- goto out;
4184 +- }
4185 +- err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
4186 +-
4187 +-out:
4188 +- return err;
4189 ++ return packet_do_bind(sk, NULL, sll->sll_ifindex,
4190 ++ sll->sll_protocol ? : pkt_sk(sk)->num);
4191 + }
4192 +
4193 + static struct proto packet_proto = {
4194 +diff --git a/net/rds/connection.c b/net/rds/connection.c
4195 +index 378c3a6acf84..f5fb7d6b7c41 100644
4196 +--- a/net/rds/connection.c
4197 ++++ b/net/rds/connection.c
4198 +@@ -183,6 +183,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
4199 + }
4200 + }
4201 +
4202 ++ if (trans == NULL) {
4203 ++ kmem_cache_free(rds_conn_slab, conn);
4204 ++ conn = ERR_PTR(-ENODEV);
4205 ++ goto out;
4206 ++ }
4207 ++
4208 + conn->c_trans = trans;
4209 +
4210 + ret = trans->conn_alloc(conn, gfp);
4211 +diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
4212 +index 9ae6e0a264ec..2dd88db4a1a2 100644
4213 +--- a/net/rds/tcp_recv.c
4214 ++++ b/net/rds/tcp_recv.c
4215 +@@ -234,8 +234,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
4216 + }
4217 +
4218 + to_copy = min(tc->t_tinc_data_rem, left);
4219 +- pskb_pull(clone, offset);
4220 +- pskb_trim(clone, to_copy);
4221 ++ if (!pskb_pull(clone, offset) ||
4222 ++ pskb_trim(clone, to_copy)) {
4223 ++ pr_warn("rds_tcp_data_recv: pull/trim failed "
4224 ++ "left %zu data_rem %zu skb_len %d\n",
4225 ++ left, tc->t_tinc_data_rem, skb->len);
4226 ++ kfree_skb(clone);
4227 ++ desc->error = -ENOMEM;
4228 ++ goto out;
4229 ++ }
4230 + skb_queue_tail(&tinc->ti_skb_list, clone);
4231 +
4232 + rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
4233 +diff --git a/net/sctp/auth.c b/net/sctp/auth.c
4234 +index fb7976aee61c..603c3bbc5923 100644
4235 +--- a/net/sctp/auth.c
4236 ++++ b/net/sctp/auth.c
4237 +@@ -800,8 +800,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
4238 + if (!has_sha1)
4239 + return -EINVAL;
4240 +
4241 +- memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
4242 +- hmacs->shmac_num_idents * sizeof(__u16));
4243 ++ for (i = 0; i < hmacs->shmac_num_idents; i++)
4244 ++ ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
4245 + ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
4246 + hmacs->shmac_num_idents * sizeof(__u16));
4247 + return 0;
4248 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4249 +index 99e640c46ab2..fb082aa4d656 100644
4250 +--- a/net/sctp/socket.c
4251 ++++ b/net/sctp/socket.c
4252 +@@ -7393,6 +7393,13 @@ struct proto sctp_prot = {
4253 +
4254 + #if IS_ENABLED(CONFIG_IPV6)
4255 +
4256 ++#include <net/transp_v6.h>
4257 ++static void sctp_v6_destroy_sock(struct sock *sk)
4258 ++{
4259 ++ sctp_destroy_sock(sk);
4260 ++ inet6_destroy_sock(sk);
4261 ++}
4262 ++
4263 + struct proto sctpv6_prot = {
4264 + .name = "SCTPv6",
4265 + .owner = THIS_MODULE,
4266 +@@ -7402,7 +7409,7 @@ struct proto sctpv6_prot = {
4267 + .accept = sctp_accept,
4268 + .ioctl = sctp_ioctl,
4269 + .init = sctp_init_sock,
4270 +- .destroy = sctp_destroy_sock,
4271 ++ .destroy = sctp_v6_destroy_sock,
4272 + .shutdown = sctp_shutdown,
4273 + .setsockopt = sctp_setsockopt,
4274 + .getsockopt = sctp_getsockopt,
4275 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
4276 +index 8232118b3f82..2ae4a5915aa7 100644
4277 +--- a/net/unix/af_unix.c
4278 ++++ b/net/unix/af_unix.c
4279 +@@ -1962,6 +1962,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
4280 + goto out;
4281 + }
4282 +
4283 ++ if (flags & MSG_PEEK)
4284 ++ skip = sk_peek_offset(sk, flags);
4285 ++ else
4286 ++ skip = 0;
4287 ++
4288 + do {
4289 + int chunk;
4290 + struct sk_buff *skb, *last;
4291 +@@ -2008,7 +2013,6 @@ again:
4292 + break;
4293 + }
4294 +
4295 +- skip = sk_peek_offset(sk, flags);
4296 + while (skip >= unix_skb_len(skb)) {
4297 + skip -= unix_skb_len(skb);
4298 + last = skb;
4299 +@@ -2072,6 +2076,16 @@ again:
4300 +
4301 + sk_peek_offset_fwd(sk, chunk);
4302 +
4303 ++ if (UNIXCB(skb).fp)
4304 ++ break;
4305 ++
4306 ++ skip = 0;
4307 ++ last = skb;
4308 ++ unix_state_lock(sk);
4309 ++ skb = skb_peek_next(skb, &sk->sk_receive_queue);
4310 ++ if (skb)
4311 ++ goto again;
4312 ++ unix_state_unlock(sk);
4313 + break;
4314 + }
4315 + } while (size);
4316 +diff --git a/security/keys/gc.c b/security/keys/gc.c
4317 +index c7952375ac53..addf060399e0 100644
4318 +--- a/security/keys/gc.c
4319 ++++ b/security/keys/gc.c
4320 +@@ -134,6 +134,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
4321 + kdebug("- %u", key->serial);
4322 + key_check(key);
4323 +
4324 ++ /* Throw away the key data if the key is instantiated */
4325 ++ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
4326 ++ !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
4327 ++ key->type->destroy)
4328 ++ key->type->destroy(key);
4329 ++
4330 + security_key_free(key);
4331 +
4332 + /* deal with the user's key tracking and quota */
4333 +@@ -148,10 +154,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
4334 + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
4335 + atomic_dec(&key->user->nikeys);
4336 +
4337 +- /* now throw away the key memory */
4338 +- if (key->type->destroy)
4339 +- key->type->destroy(key);
4340 +-
4341 + key_user_put(key->user);
4342 +
4343 + kfree(key->description);
4344 +diff --git a/tools/net/Makefile b/tools/net/Makefile
4345 +index ee577ea03ba5..ddf888010652 100644
4346 +--- a/tools/net/Makefile
4347 ++++ b/tools/net/Makefile
4348 +@@ -4,6 +4,9 @@ CC = gcc
4349 + LEX = flex
4350 + YACC = bison
4351 +
4352 ++CFLAGS += -Wall -O2
4353 ++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
4354 ++
4355 + %.yacc.c: %.y
4356 + $(YACC) -o $@ -d $<
4357 +
4358 +@@ -12,15 +15,13 @@ YACC = bison
4359 +
4360 + all : bpf_jit_disasm bpf_dbg bpf_asm
4361 +
4362 +-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
4363 ++bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
4364 + bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
4365 + bpf_jit_disasm : bpf_jit_disasm.o
4366 +
4367 +-bpf_dbg : CFLAGS = -Wall -O2
4368 + bpf_dbg : LDLIBS = -lreadline
4369 + bpf_dbg : bpf_dbg.o
4370 +
4371 +-bpf_asm : CFLAGS = -Wall -O2 -I.
4372 + bpf_asm : LDLIBS =
4373 + bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
4374 + bpf_exp.lex.o : bpf_exp.yacc.c