Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu, 19 Sep 2019 10:04:53
Message-Id: 1568887469.b28b4e9d1e4258389e49dd53e4243018cdb7493a.mpagano@gentoo
1 commit: b28b4e9d1e4258389e49dd53e4243018cdb7493a
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Sep 19 10:04:29 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Sep 19 10:04:29 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b28b4e9d
7
8 Linux patch 4.19.74
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1073_linux-4.19.74.patch | 1433 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1437 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index d5d2e47..7734d78 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -331,6 +331,10 @@ Patch: 1072_linux-4.19.73.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.73
23
24 +Patch: 1073_linux-4.19.74.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.74
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1073_linux-4.19.74.patch b/1073_linux-4.19.74.patch
33 new file mode 100644
34 index 0000000..2a11d7d
35 --- /dev/null
36 +++ b/1073_linux-4.19.74.patch
37 @@ -0,0 +1,1433 @@
38 +diff --git a/Makefile b/Makefile
39 +index 9748fa3704bc..3509e0c6e5ae 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 73
47 ++SUBLEVEL = 74
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
52 +index 23bea99bf8d5..1ca9e37f7cc9 100644
53 +--- a/arch/powerpc/include/asm/uaccess.h
54 ++++ b/arch/powerpc/include/asm/uaccess.h
55 +@@ -306,6 +306,7 @@ extern unsigned long __copy_tofrom_user(void __user *to,
56 + static inline unsigned long
57 + raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
58 + {
59 ++ barrier_nospec();
60 + return __copy_tofrom_user(to, from, n);
61 + }
62 + #endif /* __powerpc64__ */
63 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
64 +index fcb55b02990e..05ea466b9e40 100644
65 +--- a/arch/s390/kvm/interrupt.c
66 ++++ b/arch/s390/kvm/interrupt.c
67 +@@ -1879,6 +1879,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
68 + case KVM_S390_MCHK:
69 + irq->u.mchk.mcic = s390int->parm64;
70 + break;
71 ++ case KVM_S390_INT_PFAULT_INIT:
72 ++ irq->u.ext.ext_params = s390int->parm;
73 ++ irq->u.ext.ext_params2 = s390int->parm64;
74 ++ break;
75 ++ case KVM_S390_RESTART:
76 ++ case KVM_S390_INT_CLOCK_COMP:
77 ++ case KVM_S390_INT_CPU_TIMER:
78 ++ break;
79 ++ default:
80 ++ return -EINVAL;
81 + }
82 + return 0;
83 + }
84 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
85 +index fc7de27960e7..e0551c948c59 100644
86 +--- a/arch/s390/kvm/kvm-s390.c
87 ++++ b/arch/s390/kvm/kvm-s390.c
88 +@@ -928,6 +928,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
89 + /* mark all the pages in active slots as dirty */
90 + for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
91 + ms = slots->memslots + slotnr;
92 ++ if (!ms->dirty_bitmap)
93 ++ return -EINVAL;
94 + /*
95 + * The second half of the bitmap is only used on x86,
96 + * and would be wasted otherwise, so we put it to good
97 +@@ -3956,7 +3958,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
98 + }
99 + case KVM_S390_INTERRUPT: {
100 + struct kvm_s390_interrupt s390int;
101 +- struct kvm_s390_irq s390irq;
102 ++ struct kvm_s390_irq s390irq = {};
103 +
104 + if (copy_from_user(&s390int, argp, sizeof(s390int)))
105 + return -EFAULT;
106 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
107 +index ce0d0424a53d..4833dd7e2cc0 100644
108 +--- a/arch/x86/Makefile
109 ++++ b/arch/x86/Makefile
110 +@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
111 +
112 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
113 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
114 ++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
115 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
116 + export REALMODE_CFLAGS
117 +
118 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
119 +index 2938b4bcc968..e83f4f6bfdac 100644
120 +--- a/arch/x86/kvm/vmx.c
121 ++++ b/arch/x86/kvm/vmx.c
122 +@@ -8757,6 +8757,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
123 + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
124 + gva_t gva = 0;
125 + struct vmcs12 *vmcs12;
126 ++ struct x86_exception e;
127 +
128 + if (!nested_vmx_check_permission(vcpu))
129 + return 1;
130 +@@ -8798,8 +8799,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
131 + vmx_instruction_info, true, &gva))
132 + return 1;
133 + /* _system ok, nested_vmx_check_permission has verified cpl=0 */
134 +- kvm_write_guest_virt_system(vcpu, gva, &field_value,
135 +- (is_long_mode(vcpu) ? 8 : 4), NULL);
136 ++ if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
137 ++ (is_long_mode(vcpu) ? 8 : 4),
138 ++ NULL))
139 ++ kvm_inject_page_fault(vcpu, &e);
140 + }
141 +
142 + nested_vmx_succeed(vcpu);
143 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
144 +index cbc39751f36b..dbae8415cf4a 100644
145 +--- a/arch/x86/kvm/x86.c
146 ++++ b/arch/x86/kvm/x86.c
147 +@@ -5016,6 +5016,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
148 + /* kvm_write_guest_virt_system can pull in tons of pages. */
149 + vcpu->arch.l1tf_flush_l1d = true;
150 +
151 ++ /*
152 ++ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
153 ++ * is returned, but our callers are not ready for that and they blindly
154 ++ * call kvm_inject_page_fault. Ensure that they at least do not leak
155 ++ * uninitialized kernel stack memory into cr2 and error code.
156 ++ */
157 ++ memset(exception, 0, sizeof(*exception));
158 + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
159 + PFERR_WRITE_MASK, exception);
160 + }
161 +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
162 +index 8901a1f89cf5..10fb42da0007 100644
163 +--- a/arch/x86/purgatory/Makefile
164 ++++ b/arch/x86/purgatory/Makefile
165 +@@ -18,37 +18,40 @@ targets += purgatory.ro
166 + KASAN_SANITIZE := n
167 + KCOV_INSTRUMENT := n
168 +
169 ++# These are adjustments to the compiler flags used for objects that
170 ++# make up the standalone purgatory.ro
171 ++
172 ++PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
173 ++PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
174 ++
175 + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
176 + # in turn leaves some undefined symbols like __fentry__ in purgatory and not
177 + # sure how to relocate those.
178 + ifdef CONFIG_FUNCTION_TRACER
179 +-CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
180 +-CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
181 +-CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
182 +-CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
183 ++PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
184 + endif
185 +
186 + ifdef CONFIG_STACKPROTECTOR
187 +-CFLAGS_REMOVE_sha256.o += -fstack-protector
188 +-CFLAGS_REMOVE_purgatory.o += -fstack-protector
189 +-CFLAGS_REMOVE_string.o += -fstack-protector
190 +-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
191 ++PURGATORY_CFLAGS_REMOVE += -fstack-protector
192 + endif
193 +
194 + ifdef CONFIG_STACKPROTECTOR_STRONG
195 +-CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
196 +-CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
197 +-CFLAGS_REMOVE_string.o += -fstack-protector-strong
198 +-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
199 ++PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
200 + endif
201 +
202 + ifdef CONFIG_RETPOLINE
203 +-CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
204 +-CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
205 +-CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
206 +-CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
207 ++PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
208 + endif
209 +
210 ++CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
211 ++CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
212 ++
213 ++CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
214 ++CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
215 ++
216 ++CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
217 ++CFLAGS_string.o += $(PURGATORY_CFLAGS)
218 ++
219 + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
220 + $(call if_changed,ld)
221 +
222 +diff --git a/drivers/base/core.c b/drivers/base/core.c
223 +index e1a8d5c06f65..fcda6313e7de 100644
224 +--- a/drivers/base/core.c
225 ++++ b/drivers/base/core.c
226 +@@ -1648,12 +1648,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
227 + */
228 + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
229 + {
230 ++ unsigned int ref;
231 ++
232 + /* see if we live in a "glue" directory */
233 + if (!live_in_glue_dir(glue_dir, dev))
234 + return;
235 +
236 + mutex_lock(&gdp_mutex);
237 +- if (!kobject_has_children(glue_dir))
238 ++ /**
239 ++ * There is a race condition between removing glue directory
240 ++ * and adding a new device under the glue directory.
241 ++ *
242 ++ * CPU1: CPU2:
243 ++ *
244 ++ * device_add()
245 ++ * get_device_parent()
246 ++ * class_dir_create_and_add()
247 ++ * kobject_add_internal()
248 ++ * create_dir() // create glue_dir
249 ++ *
250 ++ * device_add()
251 ++ * get_device_parent()
252 ++ * kobject_get() // get glue_dir
253 ++ *
254 ++ * device_del()
255 ++ * cleanup_glue_dir()
256 ++ * kobject_del(glue_dir)
257 ++ *
258 ++ * kobject_add()
259 ++ * kobject_add_internal()
260 ++ * create_dir() // in glue_dir
261 ++ * sysfs_create_dir_ns()
262 ++ * kernfs_create_dir_ns(sd)
263 ++ *
264 ++ * sysfs_remove_dir() // glue_dir->sd=NULL
265 ++ * sysfs_put() // free glue_dir->sd
266 ++ *
267 ++ * // sd is freed
268 ++ * kernfs_new_node(sd)
269 ++ * kernfs_get(glue_dir)
270 ++ * kernfs_add_one()
271 ++ * kernfs_put()
272 ++ *
273 ++ * Before CPU1 removes the last child device under the glue dir, if CPU2 adds
274 ++ * a new device under the glue dir, the glue_dir kobject reference count
275 ++ * will be increased to 2 in kobject_get(k), and CPU2 will already have called
276 ++ * kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir()
277 ++ * and sysfs_put(). This results in glue_dir->sd being freed.
278 ++ *
279 ++ * CPU2 will then see a stale "empty" but still potentially used
280 ++ * glue dir around in kernfs_new_node().
281 ++ *
282 ++ * To avoid this, we should also make sure that the
283 ++ * kernfs_node for glue_dir is released on CPU1 only when the refcount
284 ++ * for the glue_dir kobj is 1.
285 ++ */
286 ++ ref = kref_read(&glue_dir->kref);
287 ++ if (!kobject_has_children(glue_dir) && !--ref)
288 + kobject_del(glue_dir);
289 + kobject_put(glue_dir);
290 + mutex_unlock(&gdp_mutex);
291 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
292 +index 75cf605f54e5..09c83dc2ef67 100644
293 +--- a/drivers/bluetooth/btusb.c
294 ++++ b/drivers/bluetooth/btusb.c
295 +@@ -1139,10 +1139,6 @@ static int btusb_open(struct hci_dev *hdev)
296 + }
297 +
298 + data->intf->needs_remote_wakeup = 1;
299 +- /* device specific wakeup source enabled and required for USB
300 +- * remote wakeup while host is suspended
301 +- */
302 +- device_wakeup_enable(&data->udev->dev);
303 +
304 + if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
305 + goto done;
306 +@@ -1206,7 +1202,6 @@ static int btusb_close(struct hci_dev *hdev)
307 + goto failed;
308 +
309 + data->intf->needs_remote_wakeup = 0;
310 +- device_wakeup_disable(&data->udev->dev);
311 + usb_autopm_put_interface(data->intf);
312 +
313 + failed:
314 +diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
315 +index 026a26bb702d..dbec84238ecd 100644
316 +--- a/drivers/clk/rockchip/clk-mmc-phase.c
317 ++++ b/drivers/clk/rockchip/clk-mmc-phase.c
318 +@@ -61,10 +61,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
319 + u32 delay_num = 0;
320 +
321 + /* See the comment for rockchip_mmc_set_phase below */
322 +- if (!rate) {
323 +- pr_err("%s: invalid clk rate\n", __func__);
324 ++ if (!rate)
325 + return -EINVAL;
326 +- }
327 +
328 + raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
329 +
330 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
331 +index 41b288bdcdbf..064315edd289 100644
332 +--- a/drivers/crypto/talitos.c
333 ++++ b/drivers/crypto/talitos.c
334 +@@ -959,11 +959,13 @@ static void talitos_sg_unmap(struct device *dev,
335 +
336 + static void ipsec_esp_unmap(struct device *dev,
337 + struct talitos_edesc *edesc,
338 +- struct aead_request *areq)
339 ++ struct aead_request *areq, bool encrypt)
340 + {
341 + struct crypto_aead *aead = crypto_aead_reqtfm(areq);
342 + struct talitos_ctx *ctx = crypto_aead_ctx(aead);
343 + unsigned int ivsize = crypto_aead_ivsize(aead);
344 ++ unsigned int authsize = crypto_aead_authsize(aead);
345 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
346 + bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
347 + struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
348 +
349 +@@ -972,7 +974,7 @@ static void ipsec_esp_unmap(struct device *dev,
350 + DMA_FROM_DEVICE);
351 + unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
352 +
353 +- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
354 ++ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
355 + areq->assoclen);
356 +
357 + if (edesc->dma_len)
358 +@@ -983,7 +985,7 @@ static void ipsec_esp_unmap(struct device *dev,
359 + unsigned int dst_nents = edesc->dst_nents ? : 1;
360 +
361 + sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
362 +- areq->assoclen + areq->cryptlen - ivsize);
363 ++ areq->assoclen + cryptlen - ivsize);
364 + }
365 + }
366 +
367 +@@ -1005,7 +1007,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
368 +
369 + edesc = container_of(desc, struct talitos_edesc, desc);
370 +
371 +- ipsec_esp_unmap(dev, edesc, areq);
372 ++ ipsec_esp_unmap(dev, edesc, areq, true);
373 +
374 + /* copy the generated ICV to dst */
375 + if (edesc->icv_ool) {
376 +@@ -1039,7 +1041,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
377 +
378 + edesc = container_of(desc, struct talitos_edesc, desc);
379 +
380 +- ipsec_esp_unmap(dev, edesc, req);
381 ++ ipsec_esp_unmap(dev, edesc, req, false);
382 +
383 + if (!err) {
384 + char icvdata[SHA512_DIGEST_SIZE];
385 +@@ -1085,7 +1087,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
386 +
387 + edesc = container_of(desc, struct talitos_edesc, desc);
388 +
389 +- ipsec_esp_unmap(dev, edesc, req);
390 ++ ipsec_esp_unmap(dev, edesc, req, false);
391 +
392 + /* check ICV auth status */
393 + if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
394 +@@ -1188,6 +1190,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
395 + * fill in and submit ipsec_esp descriptor
396 + */
397 + static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
398 ++ bool encrypt,
399 + void (*callback)(struct device *dev,
400 + struct talitos_desc *desc,
401 + void *context, int error))
402 +@@ -1197,7 +1200,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
403 + struct talitos_ctx *ctx = crypto_aead_ctx(aead);
404 + struct device *dev = ctx->dev;
405 + struct talitos_desc *desc = &edesc->desc;
406 +- unsigned int cryptlen = areq->cryptlen;
407 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
408 + unsigned int ivsize = crypto_aead_ivsize(aead);
409 + int tbl_off = 0;
410 + int sg_count, ret;
411 +@@ -1324,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
412 +
413 + ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
414 + if (ret != -EINPROGRESS) {
415 +- ipsec_esp_unmap(dev, edesc, areq);
416 ++ ipsec_esp_unmap(dev, edesc, areq, encrypt);
417 + kfree(edesc);
418 + }
419 + return ret;
420 +@@ -1438,9 +1441,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
421 + unsigned int authsize = crypto_aead_authsize(authenc);
422 + struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
423 + unsigned int ivsize = crypto_aead_ivsize(authenc);
424 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
425 +
426 + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
427 +- iv, areq->assoclen, areq->cryptlen,
428 ++ iv, areq->assoclen, cryptlen,
429 + authsize, ivsize, icv_stashing,
430 + areq->base.flags, encrypt);
431 + }
432 +@@ -1459,7 +1463,7 @@ static int aead_encrypt(struct aead_request *req)
433 + /* set encrypt */
434 + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
435 +
436 +- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
437 ++ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
438 + }
439 +
440 + static int aead_decrypt(struct aead_request *req)
441 +@@ -1471,14 +1475,13 @@ static int aead_decrypt(struct aead_request *req)
442 + struct talitos_edesc *edesc;
443 + void *icvdata;
444 +
445 +- req->cryptlen -= authsize;
446 +-
447 + /* allocate extended descriptor */
448 + edesc = aead_edesc_alloc(req, req->iv, 1, false);
449 + if (IS_ERR(edesc))
450 + return PTR_ERR(edesc);
451 +
452 +- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
453 ++ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
454 ++ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
455 + ((!edesc->src_nents && !edesc->dst_nents) ||
456 + priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
457 +
458 +@@ -1489,7 +1492,8 @@ static int aead_decrypt(struct aead_request *req)
459 +
460 + /* reset integrity check result bits */
461 +
462 +- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
463 ++ return ipsec_esp(edesc, req, false,
464 ++ ipsec_esp_decrypt_hwauth_done);
465 + }
466 +
467 + /* Have to check the ICV with software */
468 +@@ -1505,7 +1509,7 @@ static int aead_decrypt(struct aead_request *req)
469 + sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
470 + req->assoclen + req->cryptlen - authsize);
471 +
472 +- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
473 ++ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
474 + }
475 +
476 + static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
477 +@@ -1538,6 +1542,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
478 + return 0;
479 + }
480 +
481 ++static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
482 ++ const u8 *key, unsigned int keylen)
483 ++{
484 ++ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
485 ++ keylen == AES_KEYSIZE_256)
486 ++ return ablkcipher_setkey(cipher, key, keylen);
487 ++
488 ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
489 ++
490 ++ return -EINVAL;
491 ++}
492 ++
493 + static void common_nonsnoop_unmap(struct device *dev,
494 + struct talitos_edesc *edesc,
495 + struct ablkcipher_request *areq)
496 +@@ -1660,6 +1676,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
497 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
498 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
499 + struct talitos_edesc *edesc;
500 ++ unsigned int blocksize =
501 ++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
502 ++
503 ++ if (!areq->nbytes)
504 ++ return 0;
505 ++
506 ++ if (areq->nbytes % blocksize)
507 ++ return -EINVAL;
508 +
509 + /* allocate extended descriptor */
510 + edesc = ablkcipher_edesc_alloc(areq, true);
511 +@@ -1677,6 +1701,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
512 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
513 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
514 + struct talitos_edesc *edesc;
515 ++ unsigned int blocksize =
516 ++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
517 ++
518 ++ if (!areq->nbytes)
519 ++ return 0;
520 ++
521 ++ if (areq->nbytes % blocksize)
522 ++ return -EINVAL;
523 +
524 + /* allocate extended descriptor */
525 + edesc = ablkcipher_edesc_alloc(areq, false);
526 +@@ -2705,6 +2737,7 @@ static struct talitos_alg_template driver_algs[] = {
527 + .min_keysize = AES_MIN_KEY_SIZE,
528 + .max_keysize = AES_MAX_KEY_SIZE,
529 + .ivsize = AES_BLOCK_SIZE,
530 ++ .setkey = ablkcipher_aes_setkey,
531 + }
532 + },
533 + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
534 +@@ -2715,13 +2748,13 @@ static struct talitos_alg_template driver_algs[] = {
535 + .alg.crypto = {
536 + .cra_name = "ctr(aes)",
537 + .cra_driver_name = "ctr-aes-talitos",
538 +- .cra_blocksize = AES_BLOCK_SIZE,
539 ++ .cra_blocksize = 1,
540 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
541 + CRYPTO_ALG_ASYNC,
542 + .cra_ablkcipher = {
543 + .min_keysize = AES_MIN_KEY_SIZE,
544 + .max_keysize = AES_MAX_KEY_SIZE,
545 +- .ivsize = AES_BLOCK_SIZE,
546 ++ .setkey = ablkcipher_aes_setkey,
547 + }
548 + },
549 + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
550 +diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
551 +index 7fa744793bc5..5e35a66ed0ae 100644
552 +--- a/drivers/firmware/ti_sci.c
553 ++++ b/drivers/firmware/ti_sci.c
554 +@@ -463,9 +463,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
555 + struct ti_sci_xfer *xfer;
556 + int ret;
557 +
558 +- /* No need to setup flags since it is expected to respond */
559 + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
560 +- 0x0, sizeof(struct ti_sci_msg_hdr),
561 ++ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
562 ++ sizeof(struct ti_sci_msg_hdr),
563 + sizeof(*rev_info));
564 + if (IS_ERR(xfer)) {
565 + ret = PTR_ERR(xfer);
566 +@@ -593,9 +593,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
567 + info = handle_to_ti_sci_info(handle);
568 + dev = info->dev;
569 +
570 +- /* Response is expected, so need of any flags */
571 + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
572 +- 0, sizeof(*req), sizeof(*resp));
573 ++ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
574 ++ sizeof(*req), sizeof(*resp));
575 + if (IS_ERR(xfer)) {
576 + ret = PTR_ERR(xfer);
577 + dev_err(dev, "Message alloc failed(%d)\n", ret);
578 +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
579 +index c5e009f61021..cf2604e63599 100644
580 +--- a/drivers/gpio/gpiolib-acpi.c
581 ++++ b/drivers/gpio/gpiolib-acpi.c
582 +@@ -10,6 +10,7 @@
583 + * published by the Free Software Foundation.
584 + */
585 +
586 ++#include <linux/dmi.h>
587 + #include <linux/errno.h>
588 + #include <linux/gpio.h>
589 + #include <linux/gpio/consumer.h>
590 +@@ -23,6 +24,11 @@
591 +
592 + #include "gpiolib.h"
593 +
594 ++static int run_edge_events_on_boot = -1;
595 ++module_param(run_edge_events_on_boot, int, 0444);
596 ++MODULE_PARM_DESC(run_edge_events_on_boot,
597 ++ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
598 ++
599 + /**
600 + * struct acpi_gpio_event - ACPI GPIO event handler data
601 + *
602 +@@ -174,10 +180,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
603 + event->irq_requested = true;
604 +
605 + /* Make sure we trigger the initial state of edge-triggered IRQs */
606 +- value = gpiod_get_raw_value_cansleep(event->desc);
607 +- if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
608 +- ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
609 +- event->handler(event->irq, event);
610 ++ if (run_edge_events_on_boot &&
611 ++ (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
612 ++ value = gpiod_get_raw_value_cansleep(event->desc);
613 ++ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
614 ++ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
615 ++ event->handler(event->irq, event);
616 ++ }
617 + }
618 +
619 + static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
620 +@@ -1253,3 +1262,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
621 + }
622 + /* We must use _sync so that this runs after the first deferred_probe run */
623 + late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
624 ++
625 ++static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
626 ++ {
627 ++ .matches = {
628 ++ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
629 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
630 ++ }
631 ++ },
632 ++ {} /* Terminating entry */
633 ++};
634 ++
635 ++static int acpi_gpio_setup_params(void)
636 ++{
637 ++ if (run_edge_events_on_boot < 0) {
638 ++ if (dmi_check_system(run_edge_events_on_boot_blacklist))
639 ++ run_edge_events_on_boot = 0;
640 ++ else
641 ++ run_edge_events_on_boot = 1;
642 ++ }
643 ++
644 ++ return 0;
645 ++}
646 ++
647 ++/* Directly after dmi_setup() which runs as core_initcall() */
648 ++postcore_initcall(acpi_gpio_setup_params);
649 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
650 +index 53395852f012..3289b53a7ba1 100644
651 +--- a/drivers/gpio/gpiolib.c
652 ++++ b/drivers/gpio/gpiolib.c
653 +@@ -524,6 +524,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
654 + if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
655 + return -EINVAL;
656 +
657 ++ /*
658 ++ * Do not allow both INPUT & OUTPUT flags to be set as they are
659 ++ * contradictory.
660 ++ */
661 ++ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
662 ++ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
663 ++ return -EINVAL;
664 ++
665 + /*
666 + * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
667 + * the hardware actually supports enabling both at the same time the
668 +@@ -916,7 +924,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
669 + }
670 +
671 + /* This is just wrong: we don't look for events on output lines */
672 +- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
673 ++ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
674 ++ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
675 ++ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
676 + ret = -EINVAL;
677 + goto out_free_label;
678 + }
679 +@@ -930,10 +940,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
680 +
681 + if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
682 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
683 +- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
684 +- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
685 +- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
686 +- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
687 +
688 + ret = gpiod_direction_input(desc);
689 + if (ret)
690 +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
691 +index b44bed554211..cc354b491774 100644
692 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
693 ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
694 +@@ -82,6 +82,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
695 + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
696 + };
697 +
698 ++static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
699 ++ .width = 720,
700 ++ .height = 1280,
701 ++ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
702 ++};
703 ++
704 + static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
705 + .width = 800,
706 + .height = 1280,
707 +@@ -109,6 +115,12 @@ static const struct dmi_system_id orientation_data[] = {
708 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
709 + },
710 + .driver_data = (void *)&gpd_micropc,
711 ++ }, { /* GPD MicroPC (later BIOS versions with proper DMI strings) */
712 ++ .matches = {
713 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
714 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
715 ++ },
716 ++ .driver_data = (void *)&lcd720x1280_rightside_up,
717 + }, { /*
718 + * GPD Pocket, note that the the DMI data is less generic then
719 + * it seems, devices with a board-vendor of "AMI Corporation"
720 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
721 +index f6389479fccb..947bc6d62302 100644
722 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
723 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
724 +@@ -566,12 +566,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
725 + comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
726 + if (!comp) {
727 + ret = -ENOMEM;
728 ++ of_node_put(node);
729 + goto err_node;
730 + }
731 +
732 + ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
733 +- if (ret)
734 ++ if (ret) {
735 ++ of_node_put(node);
736 + goto err_node;
737 ++ }
738 +
739 + private->ddp_comp[comp_id] = comp;
740 + }
741 +diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
742 +index 12c80dfcff59..c7daae53fa1f 100644
743 +--- a/drivers/gpu/drm/meson/meson_plane.c
744 ++++ b/drivers/gpu/drm/meson/meson_plane.c
745 +@@ -120,6 +120,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
746 + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
747 + OSD_COLOR_MATRIX_32_ARGB;
748 + break;
749 ++ case DRM_FORMAT_XBGR8888:
750 ++ /* For XBGR, replace the pixel's alpha by 0xFF */
751 ++ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
752 ++ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
753 ++ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
754 ++ OSD_COLOR_MATRIX_32_ABGR;
755 ++ break;
756 + case DRM_FORMAT_ARGB8888:
757 + /* For ARGB, use the pixel's alpha */
758 + writel_bits_relaxed(OSD_REPLACE_EN, 0,
759 +@@ -127,6 +134,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
760 + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
761 + OSD_COLOR_MATRIX_32_ARGB;
762 + break;
763 ++ case DRM_FORMAT_ABGR8888:
764 ++ /* For ABGR, use the pixel's alpha */
765 ++ writel_bits_relaxed(OSD_REPLACE_EN, 0,
766 ++ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
767 ++ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
768 ++ OSD_COLOR_MATRIX_32_ABGR;
769 ++ break;
770 + case DRM_FORMAT_RGB888:
771 + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
772 + OSD_COLOR_MATRIX_24_RGB;
773 +@@ -196,7 +210,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
774 +
775 + static const uint32_t supported_drm_formats[] = {
776 + DRM_FORMAT_ARGB8888,
777 ++ DRM_FORMAT_ABGR8888,
778 + DRM_FORMAT_XRGB8888,
779 ++ DRM_FORMAT_XBGR8888,
780 + DRM_FORMAT_RGB888,
781 + DRM_FORMAT_RGB565,
782 + };
783 +diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
784 +index 15a115210108..f5586dd6414d 100644
785 +--- a/drivers/iio/adc/stm32-dfsdm-adc.c
786 ++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
787 +@@ -981,11 +981,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
788 + ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
789 +
790 + if (adc->dev_data->type == DFSDM_AUDIO) {
791 +- ch->scan_type.sign = 's';
792 + ch->ext_info = dfsdm_adc_audio_ext_info;
793 + } else {
794 +- ch->scan_type.sign = 'u';
795 ++ ch->scan_type.shift = 8;
796 + }
797 ++ ch->scan_type.sign = 's';
798 + ch->scan_type.realbits = 24;
799 + ch->scan_type.storagebits = 32;
800 +
801 +diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
802 +index ef5560b848ab..21786a442368 100644
803 +--- a/drivers/isdn/capi/capi.c
804 ++++ b/drivers/isdn/capi/capi.c
805 +@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
806 + if (!cdev->ap.applid)
807 + return -ENODEV;
808 +
809 ++ if (count < CAPIMSG_BASELEN)
810 ++ return -EINVAL;
811 ++
812 + skb = alloc_skb(count, GFP_USER);
813 + if (!skb)
814 + return -ENOMEM;
815 +@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
816 + }
817 + mlen = CAPIMSG_LEN(skb->data);
818 + if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
819 +- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
820 ++ if (count < CAPI_DATA_B3_REQ_LEN ||
821 ++ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
822 + kfree_skb(skb);
823 + return -EINVAL;
824 + }
825 +@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
826 + CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
827 +
828 + if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
829 ++ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
830 ++ kfree_skb(skb);
831 ++ return -EINVAL;
832 ++ }
833 + mutex_lock(&cdev->lock);
834 + capincci_free(cdev, CAPIMSG_NCCI(skb->data));
835 + mutex_unlock(&cdev->lock);
836 +diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
837 +index dce5b7e44e7a..ab5a8778c4b2 100644
838 +--- a/drivers/mtd/nand/raw/mtk_nand.c
839 ++++ b/drivers/mtd/nand/raw/mtk_nand.c
840 +@@ -863,19 +863,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
841 + return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
842 + }
843 +
844 +-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
845 ++static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
846 ++ u32 sectors)
847 + {
848 + struct nand_chip *chip = mtd_to_nand(mtd);
849 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
850 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
851 + struct mtk_ecc_stats stats;
852 ++ u32 reg_size = mtk_nand->fdm.reg_size;
853 + int rc, i;
854 +
855 + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
856 + if (rc) {
857 + memset(buf, 0xff, sectors * chip->ecc.size);
858 + for (i = 0; i < sectors; i++)
859 +- memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
860 ++ memset(oob_ptr(chip, start + i), 0xff, reg_size);
861 + return 0;
862 + }
863 +
864 +@@ -895,7 +897,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
865 + u32 spare = mtk_nand->spare_per_sector;
866 + u32 column, sectors, start, end, reg;
867 + dma_addr_t addr;
868 +- int bitflips;
869 ++ int bitflips = 0;
870 + size_t len;
871 + u8 *buf;
872 + int rc;
873 +@@ -962,14 +964,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
874 + if (rc < 0) {
875 + dev_err(nfc->dev, "subpage done timeout\n");
876 + bitflips = -EIO;
877 +- } else {
878 +- bitflips = 0;
879 +- if (!raw) {
880 +- rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
881 +- bitflips = rc < 0 ? -ETIMEDOUT :
882 +- mtk_nfc_update_ecc_stats(mtd, buf, sectors);
883 +- mtk_nfc_read_fdm(chip, start, sectors);
884 +- }
885 ++ } else if (!raw) {
886 ++ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
887 ++ bitflips = rc < 0 ? -ETIMEDOUT :
888 ++ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
889 ++ mtk_nfc_read_fdm(chip, start, sectors);
890 + }
891 +
892 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
893 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
894 +index 410d5d3aa393..85280765d793 100644
895 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
896 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
897 +@@ -34,6 +34,7 @@
898 + #include <net/tc_act/tc_mirred.h>
899 + #include <net/vxlan.h>
900 + #include <net/mpls.h>
901 ++#include <net/xfrm.h>
902 +
903 + #include "ixgbe.h"
904 + #include "ixgbe_common.h"
905 +@@ -2625,7 +2626,7 @@ adjust_by_size:
906 + /* 16K ints/sec to 9.2K ints/sec */
907 + avg_wire_size *= 15;
908 + avg_wire_size += 11452;
909 +- } else if (avg_wire_size <= 1980) {
910 ++ } else if (avg_wire_size < 1968) {
911 + /* 9.2K ints/sec to 8K ints/sec */
912 + avg_wire_size *= 5;
913 + avg_wire_size += 22420;
914 +@@ -2658,6 +2659,8 @@ adjust_by_size:
915 + case IXGBE_LINK_SPEED_2_5GB_FULL:
916 + case IXGBE_LINK_SPEED_1GB_FULL:
917 + case IXGBE_LINK_SPEED_10_FULL:
918 ++ if (avg_wire_size > 8064)
919 ++ avg_wire_size = 8064;
920 + itr += DIV_ROUND_UP(avg_wire_size,
921 + IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
922 + IXGBE_ITR_ADAPTIVE_MIN_INC;
923 +@@ -8599,7 +8602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
924 + #endif /* IXGBE_FCOE */
925 +
926 + #ifdef CONFIG_XFRM_OFFLOAD
927 +- if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
928 ++ if (xfrm_offload(skb) &&
929 ++ !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
930 + goto out_drop;
931 + #endif
932 + tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
933 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
934 +index 2e8056d48f4a..723611ac9102 100644
935 +--- a/drivers/net/phy/phylink.c
936 ++++ b/drivers/net/phy/phylink.c
937 +@@ -380,8 +380,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
938 + * Local device Link partner
939 + * Pause AsymDir Pause AsymDir Result
940 + * 1 X 1 X TX+RX
941 +- * 0 1 1 1 RX
942 +- * 1 1 0 1 TX
943 ++ * 0 1 1 1 TX
944 ++ * 1 1 0 1 RX
945 + */
946 + static void phylink_resolve_flow(struct phylink *pl,
947 + struct phylink_link_state *state)
948 +@@ -402,7 +402,7 @@ static void phylink_resolve_flow(struct phylink *pl,
949 + new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
950 + else if (pause & MLO_PAUSE_ASYM)
951 + new_pause = state->pause & MLO_PAUSE_SYM ?
952 +- MLO_PAUSE_RX : MLO_PAUSE_TX;
953 ++ MLO_PAUSE_TX : MLO_PAUSE_RX;
954 + } else {
955 + new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
956 + }
957 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
958 +index 5fa7047ea361..e1ac1c57089f 100644
959 +--- a/drivers/net/tun.c
960 ++++ b/drivers/net/tun.c
961 +@@ -801,7 +801,8 @@ static void tun_detach_all(struct net_device *dev)
962 + }
963 +
964 + static int tun_attach(struct tun_struct *tun, struct file *file,
965 +- bool skip_filter, bool napi, bool napi_frags)
966 ++ bool skip_filter, bool napi, bool napi_frags,
967 ++ bool publish_tun)
968 + {
969 + struct tun_file *tfile = file->private_data;
970 + struct net_device *dev = tun->dev;
971 +@@ -881,7 +882,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
972 + * initialized tfile; otherwise we risk using half-initialized
973 + * object.
974 + */
975 +- rcu_assign_pointer(tfile->tun, tun);
976 ++ if (publish_tun)
977 ++ rcu_assign_pointer(tfile->tun, tun);
978 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
979 + tun->numqueues++;
980 + tun_set_real_num_queues(tun);
981 +@@ -2553,7 +2555,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
982 +
983 + err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
984 + ifr->ifr_flags & IFF_NAPI,
985 +- ifr->ifr_flags & IFF_NAPI_FRAGS);
986 ++ ifr->ifr_flags & IFF_NAPI_FRAGS, true);
987 + if (err < 0)
988 + return err;
989 +
990 +@@ -2652,13 +2654,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
991 +
992 + INIT_LIST_HEAD(&tun->disabled);
993 + err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
994 +- ifr->ifr_flags & IFF_NAPI_FRAGS);
995 ++ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
996 + if (err < 0)
997 + goto err_free_flow;
998 +
999 + err = register_netdevice(tun->dev);
1000 + if (err < 0)
1001 + goto err_detach;
1002 ++ /* free_netdev() won't check refcnt; to avoid a race
1003 ++ * with dev_put() we need to publish tun after registration.
1004 ++ */
1005 ++ rcu_assign_pointer(tfile->tun, tun);
1006 + }
1007 +
1008 + netif_carrier_on(tun->dev);
1009 +@@ -2802,7 +2808,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1010 + if (ret < 0)
1011 + goto unlock;
1012 + ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
1013 +- tun->flags & IFF_NAPI_FRAGS);
1014 ++ tun->flags & IFF_NAPI_FRAGS, true);
1015 + } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1016 + tun = rtnl_dereference(tfile->tun);
1017 + if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1018 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1019 +index 5c42cf81a08b..85fba64c3fcf 100644
1020 +--- a/drivers/net/usb/cdc_ether.c
1021 ++++ b/drivers/net/usb/cdc_ether.c
1022 +@@ -221,9 +221,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
1023 + goto bad_desc;
1024 + }
1025 + skip:
1026 +- if ( rndis &&
1027 +- header.usb_cdc_acm_descriptor &&
1028 +- header.usb_cdc_acm_descriptor->bmCapabilities) {
1029 ++ /* Communication class functions with bmCapabilities are not
1030 ++ * RNDIS. But some Wireless class RNDIS functions use
1031 ++ * bmCapabilities for their own purpose. The failsafe is
1032 ++ * therefore applied only to Communication class RNDIS
1033 ++ * functions. The rndis test is redundant, but a cheap
1034 ++ * optimization.
1035 ++ */
1036 ++ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
1037 ++ header.usb_cdc_acm_descriptor &&
1038 ++ header.usb_cdc_acm_descriptor->bmCapabilities) {
1039 + dev_dbg(&intf->dev,
1040 + "ACM capabilities %02x, not really RNDIS?\n",
1041 + header.usb_cdc_acm_descriptor->bmCapabilities);
1042 +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
1043 +index f360690396dd..14e56bee0548 100644
1044 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
1045 ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
1046 +@@ -643,7 +643,6 @@ fail_rx:
1047 + kfree(rsi_dev->tx_buffer);
1048 +
1049 + fail_eps:
1050 +- kfree(rsi_dev);
1051 +
1052 + return status;
1053 + }
1054 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
1055 +index 99de51e87f7f..d32eba11c000 100644
1056 +--- a/drivers/nvmem/core.c
1057 ++++ b/drivers/nvmem/core.c
1058 +@@ -415,10 +415,17 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
1059 + if (!config->base_dev)
1060 + return -EINVAL;
1061 +
1062 +- if (nvmem->read_only)
1063 +- nvmem->eeprom = bin_attr_ro_root_nvmem;
1064 +- else
1065 +- nvmem->eeprom = bin_attr_rw_root_nvmem;
1066 ++ if (nvmem->read_only) {
1067 ++ if (config->root_only)
1068 ++ nvmem->eeprom = bin_attr_ro_root_nvmem;
1069 ++ else
1070 ++ nvmem->eeprom = bin_attr_ro_nvmem;
1071 ++ } else {
1072 ++ if (config->root_only)
1073 ++ nvmem->eeprom = bin_attr_rw_root_nvmem;
1074 ++ else
1075 ++ nvmem->eeprom = bin_attr_rw_nvmem;
1076 ++ }
1077 + nvmem->eeprom.attr.name = "eeprom";
1078 + nvmem->eeprom.size = nvmem->size;
1079 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
1080 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
1081 +index 956ee7527d2c..ec317bcb1bca 100644
1082 +--- a/drivers/pci/pci-driver.c
1083 ++++ b/drivers/pci/pci-driver.c
1084 +@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
1085 + #ifdef CONFIG_PCI_IOV
1086 + static inline bool pci_device_can_probe(struct pci_dev *pdev)
1087 + {
1088 +- return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
1089 ++ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
1090 ++ pdev->driver_override);
1091 + }
1092 + #else
1093 + static inline bool pci_device_can_probe(struct pci_dev *pdev)
1094 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
1095 +index b1d804376237..6a61028cbb3c 100644
1096 +--- a/drivers/platform/x86/pmc_atom.c
1097 ++++ b/drivers/platform/x86/pmc_atom.c
1098 +@@ -421,6 +421,14 @@ static const struct dmi_system_id critclk_systems[] = {
1099 + DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
1100 + },
1101 + },
1102 ++ {
1103 ++ /* pmc_plt_clk* - are used for ethernet controllers */
1104 ++ .ident = "Beckhoff CB4063",
1105 ++ .matches = {
1106 ++ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
1107 ++ DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
1108 ++ },
1109 ++ },
1110 + {
1111 + /* pmc_plt_clk* - are used for ethernet controllers */
1112 + .ident = "Beckhoff CB6263",
1113 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1114 +index 08c5afa06aee..e561eb475339 100644
1115 +--- a/fs/btrfs/tree-log.c
1116 ++++ b/fs/btrfs/tree-log.c
1117 +@@ -5107,7 +5107,7 @@ again:
1118 + BTRFS_I(other_inode),
1119 + LOG_OTHER_INODE, 0, LLONG_MAX,
1120 + ctx);
1121 +- iput(other_inode);
1122 ++ btrfs_add_delayed_iput(other_inode);
1123 + if (err)
1124 + goto out_unlock;
1125 + else
1126 +@@ -5519,7 +5519,7 @@ process_leaf:
1127 + }
1128 +
1129 + if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
1130 +- iput(di_inode);
1131 ++ btrfs_add_delayed_iput(di_inode);
1132 + break;
1133 + }
1134 +
1135 +@@ -5531,7 +5531,7 @@ process_leaf:
1136 + if (!ret &&
1137 + btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
1138 + ret = 1;
1139 +- iput(di_inode);
1140 ++ btrfs_add_delayed_iput(di_inode);
1141 + if (ret)
1142 + goto next_dir_inode;
1143 + if (ctx->log_new_dentries) {
1144 +@@ -5678,7 +5678,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
1145 + if (!ret && ctx && ctx->log_new_dentries)
1146 + ret = log_new_dir_dentries(trans, root,
1147 + BTRFS_I(dir_inode), ctx);
1148 +- iput(dir_inode);
1149 ++ btrfs_add_delayed_iput(dir_inode);
1150 + if (ret)
1151 + goto out;
1152 + }
1153 +diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
1154 +index bf416e512743..f15ac37956e7 100644
1155 +--- a/fs/ubifs/tnc.c
1156 ++++ b/fs/ubifs/tnc.c
1157 +@@ -1165,8 +1165,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
1158 + * o exact match, i.e. the found zero-level znode contains key @key, then %1
1159 + * is returned and slot number of the matched branch is stored in @n;
1160 + * o not exact match, which means that zero-level znode does not contain
1161 +- * @key, then %0 is returned and slot number of the closest branch is stored
1162 +- * in @n;
1163 ++ * @key, then %0 is returned and slot number of the closest branch or %-1
1164 ++ * is stored in @n; in this case, calling tnc_next() is mandatory.
1165 + * o @key is so small that it is even less than the lowest key of the
1166 + * leftmost zero-level node, then %0 is returned and %0 is stored in @n.
1167 + *
1168 +@@ -1883,13 +1883,19 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1169 +
1170 + static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
1171 + struct ubifs_dent_node *dent, uint32_t cookie,
1172 +- struct ubifs_znode **zn, int *n)
1173 ++ struct ubifs_znode **zn, int *n, int exact)
1174 + {
1175 + int err;
1176 + struct ubifs_znode *znode = *zn;
1177 + struct ubifs_zbranch *zbr;
1178 + union ubifs_key *dkey;
1179 +
1180 ++ if (!exact) {
1181 ++ err = tnc_next(c, &znode, n);
1182 ++ if (err)
1183 ++ return err;
1184 ++ }
1185 ++
1186 + for (;;) {
1187 + zbr = &znode->zbranch[*n];
1188 + dkey = &zbr->key;
1189 +@@ -1931,7 +1937,7 @@ static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
1190 + if (unlikely(err < 0))
1191 + goto out_unlock;
1192 +
1193 +- err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
1194 ++ err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
1195 +
1196 + out_unlock:
1197 + mutex_unlock(&c->tnc_mutex);
1198 +@@ -2718,7 +2724,7 @@ int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
1199 + if (unlikely(err < 0))
1200 + goto out_free;
1201 +
1202 +- err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
1203 ++ err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
1204 + if (err)
1205 + goto out_free;
1206 + }
1207 +diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
1208 +index 4941628a4fb9..5ec88e7548a9 100644
1209 +--- a/include/uapi/linux/isdn/capicmd.h
1210 ++++ b/include/uapi/linux/isdn/capicmd.h
1211 +@@ -16,6 +16,7 @@
1212 + #define CAPI_MSG_BASELEN 8
1213 + #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
1214 + #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
1215 ++#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
1216 +
1217 + /*----- CAPI commands -----*/
1218 + #define CAPI_ALERT 0x01
1219 +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
1220 +index 95414ad3506a..98c04ca5fa43 100644
1221 +--- a/kernel/irq/resend.c
1222 ++++ b/kernel/irq/resend.c
1223 +@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
1224 + irq = find_first_bit(irqs_resend, nr_irqs);
1225 + clear_bit(irq, irqs_resend);
1226 + desc = irq_to_desc(irq);
1227 ++ if (!desc)
1228 ++ continue;
1229 + local_irq_disable();
1230 + desc->handle_irq(desc);
1231 + local_irq_enable();
1232 +diff --git a/kernel/module.c b/kernel/module.c
1233 +index 0d86fc73d63d..8257110bf599 100644
1234 +--- a/kernel/module.c
1235 ++++ b/kernel/module.c
1236 +@@ -1884,7 +1884,7 @@ static void mod_sysfs_teardown(struct module *mod)
1237 + mod_sysfs_fini(mod);
1238 + }
1239 +
1240 +-#ifdef CONFIG_STRICT_MODULE_RWX
1241 ++#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
1242 + /*
1243 + * LKM RO/NX protection: protect module's text/ro-data
1244 + * from modification and any data from execution.
1245 +@@ -1907,6 +1907,7 @@ static void frob_text(const struct module_layout *layout,
1246 + layout->text_size >> PAGE_SHIFT);
1247 + }
1248 +
1249 ++#ifdef CONFIG_STRICT_MODULE_RWX
1250 + static void frob_rodata(const struct module_layout *layout,
1251 + int (*set_memory)(unsigned long start, int num_pages))
1252 + {
1253 +@@ -1956,13 +1957,9 @@ void module_enable_ro(const struct module *mod, bool after_init)
1254 + return;
1255 +
1256 + frob_text(&mod->core_layout, set_memory_ro);
1257 +- frob_text(&mod->core_layout, set_memory_x);
1258 +
1259 + frob_rodata(&mod->core_layout, set_memory_ro);
1260 +-
1261 + frob_text(&mod->init_layout, set_memory_ro);
1262 +- frob_text(&mod->init_layout, set_memory_x);
1263 +-
1264 + frob_rodata(&mod->init_layout, set_memory_ro);
1265 +
1266 + if (after_init)
1267 +@@ -2043,11 +2040,23 @@ static void disable_ro_nx(const struct module_layout *layout)
1268 + frob_writable_data(layout, set_memory_x);
1269 + }
1270 +
1271 +-#else
1272 ++#else /* !CONFIG_STRICT_MODULE_RWX */
1273 + static void disable_ro_nx(const struct module_layout *layout) { }
1274 + static void module_enable_nx(const struct module *mod) { }
1275 + static void module_disable_nx(const struct module *mod) { }
1276 +-#endif
1277 ++#endif /* CONFIG_STRICT_MODULE_RWX */
1278 ++
1279 ++static void module_enable_x(const struct module *mod)
1280 ++{
1281 ++ frob_text(&mod->core_layout, set_memory_x);
1282 ++ frob_text(&mod->init_layout, set_memory_x);
1283 ++}
1284 ++#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
1285 ++static void disable_ro_nx(const struct module_layout *layout) { }
1286 ++static void module_enable_nx(const struct module *mod) { }
1287 ++static void module_disable_nx(const struct module *mod) { }
1288 ++static void module_enable_x(const struct module *mod) { }
1289 ++#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
1290 +
1291 + #ifdef CONFIG_LIVEPATCH
1292 + /*
1293 +@@ -3604,6 +3613,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
1294 +
1295 + module_enable_ro(mod, false);
1296 + module_enable_nx(mod);
1297 ++ module_enable_x(mod);
1298 +
1299 + /* Mark state as coming so strong_try_module_get() ignores us,
1300 + * but kallsyms etc. can see us. */
1301 +diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
1302 +index 6d9f48bd374a..55198818e3e5 100644
1303 +--- a/net/bridge/br_mdb.c
1304 ++++ b/net/bridge/br_mdb.c
1305 +@@ -419,7 +419,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
1306 + struct nlmsghdr *nlh;
1307 + struct nlattr *nest;
1308 +
1309 +- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
1310 ++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
1311 + if (!nlh)
1312 + return -EMSGSIZE;
1313 +
1314 +diff --git a/net/core/dev.c b/net/core/dev.c
1315 +index e4b4cb40da00..ddd8aab20adf 100644
1316 +--- a/net/core/dev.c
1317 ++++ b/net/core/dev.c
1318 +@@ -8562,6 +8562,8 @@ int register_netdevice(struct net_device *dev)
1319 + ret = notifier_to_errno(ret);
1320 + if (ret) {
1321 + rollback_registered(dev);
1322 ++ rcu_barrier();
1323 ++
1324 + dev->reg_state = NETREG_UNREGISTERED;
1325 + }
1326 + /*
1327 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1328 +index 9b9f696281a9..0629ca89ab74 100644
1329 +--- a/net/core/skbuff.c
1330 ++++ b/net/core/skbuff.c
1331 +@@ -3530,6 +3530,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
1332 + int pos;
1333 + int dummy;
1334 +
1335 ++ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
1336 ++ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
1337 ++ /* gso_size is untrusted, and we have a frag_list with a linear
1338 ++ * non head_frag head.
1339 ++ *
1340 ++ * (we assume checking the first list_skb member suffices;
1341 ++ * i.e. if any of the list_skb members has a non head_frag
1342 ++ * head, then the first one does too).
1343 ++ *
1344 ++ * If head_skb's headlen does not fit requested gso_size, it
1345 ++ * means that the frag_list members do NOT terminate on exact
1346 ++ * gso_size boundaries. Hence we cannot perform skb_frag_t page
1347 ++ * sharing. Therefore we must fallback to copying the frag_list
1348 ++ * skbs; we do so by disabling SG.
1349 ++ */
1350 ++ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
1351 ++ features &= ~NETIF_F_SG;
1352 ++ }
1353 ++
1354 + __skb_push(head_skb, doffset);
1355 + proto = skb_network_protocol(head_skb, &dummy);
1356 + if (unlikely(!proto))
1357 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1358 +index 4a8869d39662..14a6a489937c 100644
1359 +--- a/net/ipv4/tcp_input.c
1360 ++++ b/net/ipv4/tcp_input.c
1361 +@@ -260,7 +260,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
1362 +
1363 + static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
1364 + {
1365 +- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
1366 ++ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
1367 + }
1368 +
1369 + static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
1370 +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
1371 +index 4c04bccc7417..5c9be8594483 100644
1372 +--- a/net/ipv6/ping.c
1373 ++++ b/net/ipv6/ping.c
1374 +@@ -228,7 +228,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
1375 + return 0;
1376 + }
1377 +
1378 +-static void __net_init ping_v6_proc_exit_net(struct net *net)
1379 ++static void __net_exit ping_v6_proc_exit_net(struct net *net)
1380 + {
1381 + remove_proc_entry("icmp6", net->proc_net);
1382 + }
1383 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1384 +index 77b289da7763..875f521bce0d 100644
1385 +--- a/net/sched/sch_generic.c
1386 ++++ b/net/sched/sch_generic.c
1387 +@@ -49,6 +49,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
1388 + * - updates to tree and tree walking are only done under the rtnl mutex.
1389 + */
1390 +
1391 ++#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
1392 ++
1393 + static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
1394 + {
1395 + const struct netdev_queue *txq = q->dev_queue;
1396 +@@ -74,7 +76,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
1397 + q->q.qlen--;
1398 + }
1399 + } else {
1400 +- skb = NULL;
1401 ++ skb = SKB_XOFF_MAGIC;
1402 + }
1403 + }
1404 +
1405 +@@ -272,8 +274,11 @@ validate:
1406 + return skb;
1407 +
1408 + skb = qdisc_dequeue_skb_bad_txq(q);
1409 +- if (unlikely(skb))
1410 ++ if (unlikely(skb)) {
1411 ++ if (skb == SKB_XOFF_MAGIC)
1412 ++ return NULL;
1413 + goto bulk;
1414 ++ }
1415 + skb = q->dequeue(q);
1416 + if (skb) {
1417 + bulk:
1418 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
1419 +index c3a8388dcdf6..a80fe8aa8527 100644
1420 +--- a/net/sched/sch_hhf.c
1421 ++++ b/net/sched/sch_hhf.c
1422 +@@ -529,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
1423 + new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
1424 +
1425 + non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
1426 +- if (non_hh_quantum > INT_MAX)
1427 ++ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
1428 + return -EINVAL;
1429 +
1430 + sch_tree_lock(sch);
1431 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
1432 +index d97b2b4b7a8b..6d36f74ad295 100644
1433 +--- a/net/sctp/protocol.c
1434 ++++ b/net/sctp/protocol.c
1435 +@@ -1350,7 +1350,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
1436 + return status;
1437 + }
1438 +
1439 +-static void __net_init sctp_ctrlsock_exit(struct net *net)
1440 ++static void __net_exit sctp_ctrlsock_exit(struct net *net)
1441 + {
1442 + /* Free the control endpoint. */
1443 + inet_ctl_sock_destroy(net->sctp.ctl_sock);
1444 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
1445 +index 28adac31f0ff..de8a82bc6b42 100644
1446 +--- a/net/sctp/sm_sideeffect.c
1447 ++++ b/net/sctp/sm_sideeffect.c
1448 +@@ -562,7 +562,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
1449 + if (net->sctp.pf_enable &&
1450 + (transport->state == SCTP_ACTIVE) &&
1451 + (transport->error_count < transport->pathmaxrxt) &&
1452 +- (transport->error_count > asoc->pf_retrans)) {
1453 ++ (transport->error_count > transport->pf_retrans)) {
1454 +
1455 + sctp_assoc_control_transport(asoc, transport,
1456 + SCTP_TRANSPORT_PF,
1457 +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
1458 +index 3cfeb9df64b0..e0a3dd424d8c 100644
1459 +--- a/net/tipc/name_distr.c
1460 ++++ b/net/tipc/name_distr.c
1461 +@@ -221,7 +221,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
1462 + publ->key);
1463 + }
1464 +
1465 +- kfree_rcu(p, rcu);
1466 ++ if (p)
1467 ++ kfree_rcu(p, rcu);
1468 + }
1469 +
1470 + /**