Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 03 Feb 2021 23:25:56 +0000
Message-Id: 1612394740.9554fe74fb3fc553da584adb8d094e1f2abaccb4.mpagano@gentoo
1 commit: 9554fe74fb3fc553da584adb8d094e1f2abaccb4
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 3 23:25:40 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 3 23:25:40 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9554fe74fb3fc553da584adb8d094e1f2abaccb4
7
8 Linux patch 4.9.255
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1254_linux-4.9.255.patch | 1576 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1580 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index d62c153..73bc4cd 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -1059,6 +1059,10 @@ Patch: 1253_linux-4.9.254.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.254
23
24 +Patch: 1254_linux-4.9.255.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.255
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1254_linux-4.9.255.patch b/1254_linux-4.9.255.patch
33 new file mode 100644
34 index 0000000..c658410
35 --- /dev/null
36 +++ b/1254_linux-4.9.255.patch
37 @@ -0,0 +1,1576 @@
38 +diff --git a/Makefile b/Makefile
39 +index ea9ea119460d4..4780b5f80b2a8 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 254
46 ++SUBLEVEL = 255
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
51 +index 7d84b617af481..99d2e296082c7 100644
52 +--- a/arch/arm/mach-imx/suspend-imx6.S
53 ++++ b/arch/arm/mach-imx/suspend-imx6.S
54 +@@ -73,6 +73,7 @@
55 + #define MX6Q_CCM_CCR 0x0
56 +
57 + .align 3
58 ++ .arm
59 +
60 + .macro sync_l2_cache
61 +
62 +diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
63 +index 84ae4dd261caf..cafdaabf062fc 100644
64 +--- a/arch/x86/kvm/pmu_intel.c
65 ++++ b/arch/x86/kvm/pmu_intel.c
66 +@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
67 + [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
68 + [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
69 + [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
70 +- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
71 ++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
72 + };
73 +
74 + /* mapping between fixed pmc index and intel_arch_events array */
75 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
76 +index 3c0f9be107e42..98fb3a7240371 100644
77 +--- a/arch/x86/kvm/x86.c
78 ++++ b/arch/x86/kvm/x86.c
79 +@@ -97,6 +97,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
80 +
81 + static void update_cr8_intercept(struct kvm_vcpu *vcpu);
82 + static void process_nmi(struct kvm_vcpu *vcpu);
83 ++static void process_smi(struct kvm_vcpu *vcpu);
84 + static void enter_smm(struct kvm_vcpu *vcpu);
85 + static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
86 +
87 +@@ -3199,6 +3200,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
88 + struct kvm_vcpu_events *events)
89 + {
90 + process_nmi(vcpu);
91 ++
92 ++ if (kvm_check_request(KVM_REQ_SMI, vcpu))
93 ++ process_smi(vcpu);
94 ++
95 + events->exception.injected =
96 + vcpu->arch.exception.pending &&
97 + !kvm_exception_is_soft(vcpu->arch.exception.nr);
98 +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
99 +index 98b513d049f6a..fb610ad495f10 100644
100 +--- a/drivers/acpi/device_sysfs.c
101 ++++ b/drivers/acpi/device_sysfs.c
102 +@@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
103 + if (add_uevent_var(env, "MODALIAS="))
104 + return -ENOMEM;
105 +
106 +- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
107 +- sizeof(env->buf) - env->buflen);
108 +- if (len < 0)
109 +- return len;
110 +-
111 +- env->buflen += len;
112 +- if (!adev->data.of_compatible)
113 +- return 0;
114 +-
115 +- if (len > 0 && add_uevent_var(env, "MODALIAS="))
116 +- return -ENOMEM;
117 +-
118 +- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
119 +- sizeof(env->buf) - env->buflen);
120 ++ if (adev->data.of_compatible)
121 ++ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
122 ++ sizeof(env->buf) - env->buflen);
123 ++ else
124 ++ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
125 ++ sizeof(env->buf) - env->buflen);
126 + if (len < 0)
127 + return len;
128 +
129 +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
130 +index bb45eb22ba1f5..36bdb04f8f018 100644
131 +--- a/drivers/infiniband/hw/cxgb4/qp.c
132 ++++ b/drivers/infiniband/hw/cxgb4/qp.c
133 +@@ -1976,7 +1976,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
134 + init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
135 + init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
136 + init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
137 +- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
138 ++ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
139 + init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
140 + init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
141 + return 0;
142 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
143 +index 977070ce4fe97..9ad5a7019abfd 100644
144 +--- a/drivers/iommu/dmar.c
145 ++++ b/drivers/iommu/dmar.c
146 +@@ -1024,8 +1024,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
147 + {
148 + struct intel_iommu *iommu;
149 + u32 ver, sts;
150 +- int agaw = 0;
151 +- int msagaw = 0;
152 ++ int agaw = -1;
153 ++ int msagaw = -1;
154 + int err;
155 +
156 + if (!drhd->reg_base_addr) {
157 +@@ -1050,17 +1050,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
158 + }
159 +
160 + err = -EINVAL;
161 +- agaw = iommu_calculate_agaw(iommu);
162 +- if (agaw < 0) {
163 +- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
164 +- iommu->seq_id);
165 +- goto err_unmap;
166 ++ if (cap_sagaw(iommu->cap) == 0) {
167 ++ pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
168 ++ iommu->name);
169 ++ drhd->ignored = 1;
170 + }
171 +- msagaw = iommu_calculate_max_sagaw(iommu);
172 +- if (msagaw < 0) {
173 +- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
174 +- iommu->seq_id);
175 +- goto err_unmap;
176 ++
177 ++ if (!drhd->ignored) {
178 ++ agaw = iommu_calculate_agaw(iommu);
179 ++ if (agaw < 0) {
180 ++ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
181 ++ iommu->seq_id);
182 ++ drhd->ignored = 1;
183 ++ }
184 ++ }
185 ++ if (!drhd->ignored) {
186 ++ msagaw = iommu_calculate_max_sagaw(iommu);
187 ++ if (msagaw < 0) {
188 ++ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
189 ++ iommu->seq_id);
190 ++ drhd->ignored = 1;
191 ++ agaw = -1;
192 ++ }
193 + }
194 + iommu->agaw = agaw;
195 + iommu->msagaw = msagaw;
196 +@@ -1087,7 +1098,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
197 +
198 + raw_spin_lock_init(&iommu->register_lock);
199 +
200 +- if (intel_iommu_enabled) {
201 ++ if (intel_iommu_enabled && !drhd->ignored) {
202 + iommu->iommu_dev = iommu_device_create(NULL, iommu,
203 + intel_iommu_groups,
204 + "%s", iommu->name);
205 +@@ -1099,6 +1110,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
206 + }
207 +
208 + drhd->iommu = iommu;
209 ++ iommu->drhd = drhd;
210 +
211 + return 0;
212 +
213 +@@ -1113,7 +1125,8 @@ error:
214 +
215 + static void free_iommu(struct intel_iommu *iommu)
216 + {
217 +- iommu_device_destroy(iommu->iommu_dev);
218 ++ if (intel_iommu_enabled && !iommu->drhd->ignored)
219 ++ iommu_device_destroy(iommu->iommu_dev);
220 +
221 + if (iommu->irq) {
222 + if (iommu->pr_irq) {
223 +diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
224 +index 431123b048a27..573a5a80b23c6 100644
225 +--- a/drivers/leds/led-triggers.c
226 ++++ b/drivers/leds/led-triggers.c
227 +@@ -283,14 +283,15 @@ void led_trigger_event(struct led_trigger *trig,
228 + enum led_brightness brightness)
229 + {
230 + struct led_classdev *led_cdev;
231 ++ unsigned long flags;
232 +
233 + if (!trig)
234 + return;
235 +
236 +- read_lock(&trig->leddev_list_lock);
237 ++ read_lock_irqsave(&trig->leddev_list_lock, flags);
238 + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
239 + led_set_brightness(led_cdev, brightness);
240 +- read_unlock(&trig->leddev_list_lock);
241 ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
242 + }
243 + EXPORT_SYMBOL_GPL(led_trigger_event);
244 +
245 +@@ -301,11 +302,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
246 + int invert)
247 + {
248 + struct led_classdev *led_cdev;
249 ++ unsigned long flags;
250 +
251 + if (!trig)
252 + return;
253 +
254 +- read_lock(&trig->leddev_list_lock);
255 ++ read_lock_irqsave(&trig->leddev_list_lock, flags);
256 + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
257 + if (oneshot)
258 + led_blink_set_oneshot(led_cdev, delay_on, delay_off,
259 +@@ -313,7 +315,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
260 + else
261 + led_blink_set(led_cdev, delay_on, delay_off);
262 + }
263 +- read_unlock(&trig->leddev_list_lock);
264 ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
265 + }
266 +
267 + void led_trigger_blink(struct led_trigger *trig,
268 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
269 +index 164078609f98e..ea38b67d0b737 100644
270 +--- a/drivers/net/can/dev.c
271 ++++ b/drivers/net/can/dev.c
272 +@@ -1017,7 +1017,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
273 + {
274 + struct can_priv *priv = netdev_priv(dev);
275 + struct can_ctrlmode cm = {.flags = priv->ctrlmode};
276 +- struct can_berr_counter bec;
277 ++ struct can_berr_counter bec = { };
278 + enum can_state state = priv->state;
279 +
280 + if (priv->do_get_state)
281 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
282 +index a571024882d7c..1c0aec70ee5d2 100644
283 +--- a/drivers/net/usb/qmi_wwan.c
284 ++++ b/drivers/net/usb/qmi_wwan.c
285 +@@ -942,6 +942,7 @@ static const struct usb_device_id products[] = {
286 + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
287 + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
288 + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
289 ++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
290 + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
291 + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
292 + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
293 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
294 +index e1287c3421165..71edbf7a42ed4 100644
295 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
296 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
297 +@@ -1909,7 +1909,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
298 +
299 + while (offs < dwords) {
300 + /* limit the time we spin here under lock to 1/2s */
301 +- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
302 ++ unsigned long end = jiffies + HZ / 2;
303 ++ bool resched = false;
304 +
305 + if (iwl_trans_grab_nic_access(trans, &flags)) {
306 + iwl_write32(trans, HBUS_TARG_MEM_RADDR,
307 +@@ -1920,14 +1921,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
308 + HBUS_TARG_MEM_RDAT);
309 + offs++;
310 +
311 +- /* calling ktime_get is expensive so
312 +- * do it once in 128 reads
313 +- */
314 +- if (offs % 128 == 0 && ktime_after(ktime_get(),
315 +- timeout))
316 ++ if (time_after(jiffies, end)) {
317 ++ resched = true;
318 + break;
319 ++ }
320 + }
321 + iwl_trans_release_nic_access(trans, &flags);
322 ++
323 ++ if (resched)
324 ++ cond_resched();
325 + } else {
326 + return -EBUSY;
327 + }
328 +diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
329 +index 56cad16e70ca6..1b68aef03fe2e 100644
330 +--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
331 ++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
332 +@@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
333 +
334 + if (new_p) {
335 + /* we have one extra ref from the allocator */
336 +- __free_pages(e->p, MT_RX_ORDER);
337 +-
338 ++ put_page(e->p);
339 + e->p = new_p;
340 + }
341 + }
342 +@@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
343 + }
344 +
345 + e = &q->e[q->end];
346 +- e->skb = skb;
347 + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
348 + mt7601u_complete_tx, q);
349 + ret = usb_submit_urb(e->urb, GFP_ATOMIC);
350 +@@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
351 +
352 + q->end = (q->end + 1) % q->entries;
353 + q->used++;
354 ++ e->skb = skb;
355 +
356 + if (q->used >= q->entries)
357 + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
358 +diff --git a/fs/exec.c b/fs/exec.c
359 +index cd5da140f94cb..319a1f5732fa9 100644
360 +--- a/fs/exec.c
361 ++++ b/fs/exec.c
362 +@@ -1021,7 +1021,7 @@ static int exec_mmap(struct mm_struct *mm)
363 + /* Notify parent that we're no longer interested in the old VM */
364 + tsk = current;
365 + old_mm = current->mm;
366 +- mm_release(tsk, old_mm);
367 ++ exec_mm_release(tsk, old_mm);
368 +
369 + if (old_mm) {
370 + sync_mm_rss(old_mm);
371 +diff --git a/include/linux/compat.h b/include/linux/compat.h
372 +index fab35daf87596..6b9d38a7adcaf 100644
373 +--- a/include/linux/compat.h
374 ++++ b/include/linux/compat.h
375 +@@ -311,8 +311,6 @@ struct compat_kexec_segment;
376 + struct compat_mq_attr;
377 + struct compat_msgbuf;
378 +
379 +-extern void compat_exit_robust_list(struct task_struct *curr);
380 +-
381 + asmlinkage long
382 + compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
383 + compat_size_t len);
384 +diff --git a/include/linux/futex.h b/include/linux/futex.h
385 +index c015fa91e7cce..0f294ae63c78c 100644
386 +--- a/include/linux/futex.h
387 ++++ b/include/linux/futex.h
388 +@@ -1,6 +1,8 @@
389 + #ifndef _LINUX_FUTEX_H
390 + #define _LINUX_FUTEX_H
391 +
392 ++#include <linux/sched.h>
393 ++
394 + #include <uapi/linux/futex.h>
395 +
396 + struct inode;
397 +@@ -11,9 +13,6 @@ union ktime;
398 + long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
399 + u32 __user *uaddr2, u32 val2, u32 val3);
400 +
401 +-extern int
402 +-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
403 +-
404 + /*
405 + * Futexes are matched on equal values of this key.
406 + * The key type depends on whether it's a shared or private mapping.
407 +@@ -56,19 +55,34 @@ union futex_key {
408 + #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
409 +
410 + #ifdef CONFIG_FUTEX
411 +-extern void exit_robust_list(struct task_struct *curr);
412 +-extern void exit_pi_state_list(struct task_struct *curr);
413 +-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
414 +-#define futex_cmpxchg_enabled 1
415 +-#else
416 +-extern int futex_cmpxchg_enabled;
417 +-#endif
418 +-#else
419 +-static inline void exit_robust_list(struct task_struct *curr)
420 +-{
421 +-}
422 +-static inline void exit_pi_state_list(struct task_struct *curr)
423 ++enum {
424 ++ FUTEX_STATE_OK,
425 ++ FUTEX_STATE_EXITING,
426 ++ FUTEX_STATE_DEAD,
427 ++};
428 ++
429 ++static inline void futex_init_task(struct task_struct *tsk)
430 + {
431 ++ tsk->robust_list = NULL;
432 ++#ifdef CONFIG_COMPAT
433 ++ tsk->compat_robust_list = NULL;
434 ++#endif
435 ++ INIT_LIST_HEAD(&tsk->pi_state_list);
436 ++ tsk->pi_state_cache = NULL;
437 ++ tsk->futex_state = FUTEX_STATE_OK;
438 ++ mutex_init(&tsk->futex_exit_mutex);
439 + }
440 ++
441 ++void futex_exit_recursive(struct task_struct *tsk);
442 ++void futex_exit_release(struct task_struct *tsk);
443 ++void futex_exec_release(struct task_struct *tsk);
444 ++
445 ++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
446 ++ u32 __user *uaddr2, u32 val2, u32 val3);
447 ++#else
448 ++static inline void futex_init_task(struct task_struct *tsk) { }
449 ++static inline void futex_exit_recursive(struct task_struct *tsk) { }
450 ++static inline void futex_exit_release(struct task_struct *tsk) { }
451 ++static inline void futex_exec_release(struct task_struct *tsk) { }
452 + #endif
453 + #endif
454 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
455 +index d86ac620f0aac..188bd17689711 100644
456 +--- a/include/linux/intel-iommu.h
457 ++++ b/include/linux/intel-iommu.h
458 +@@ -447,6 +447,8 @@ struct intel_iommu {
459 + struct device *iommu_dev; /* IOMMU-sysfs device */
460 + int node;
461 + u32 flags; /* Software defined flags */
462 ++
463 ++ struct dmar_drhd_unit *drhd;
464 + };
465 +
466 + static inline void __iommu_flush_cache(
467 +diff --git a/include/linux/sched.h b/include/linux/sched.h
468 +index 1872d4e9acbe1..f094882822a63 100644
469 +--- a/include/linux/sched.h
470 ++++ b/include/linux/sched.h
471 +@@ -1815,6 +1815,8 @@ struct task_struct {
472 + #endif
473 + struct list_head pi_state_list;
474 + struct futex_pi_state *pi_state_cache;
475 ++ struct mutex futex_exit_mutex;
476 ++ unsigned int futex_state;
477 + #endif
478 + #ifdef CONFIG_PERF_EVENTS
479 + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
480 +@@ -2276,7 +2278,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
481 + * Per process flags
482 + */
483 + #define PF_EXITING 0x00000004 /* getting shut down */
484 +-#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
485 + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
486 + #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
487 + #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
488 +@@ -2955,8 +2956,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
489 + * succeeds.
490 + */
491 + extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
492 +-/* Remove the current tasks stale references to the old mm_struct */
493 +-extern void mm_release(struct task_struct *, struct mm_struct *);
494 ++/* Remove the current tasks stale references to the old mm_struct on exit() */
495 ++extern void exit_mm_release(struct task_struct *, struct mm_struct *);
496 ++/* Remove the current tasks stale references to the old mm_struct on exec() */
497 ++extern void exec_mm_release(struct task_struct *, struct mm_struct *);
498 +
499 + #ifdef CONFIG_HAVE_COPY_THREAD_TLS
500 + extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
501 +diff --git a/kernel/Makefile b/kernel/Makefile
502 +index 184fa9aa58027..92488cf6ad913 100644
503 +--- a/kernel/Makefile
504 ++++ b/kernel/Makefile
505 +@@ -47,9 +47,6 @@ obj-$(CONFIG_PROFILING) += profile.o
506 + obj-$(CONFIG_STACKTRACE) += stacktrace.o
507 + obj-y += time/
508 + obj-$(CONFIG_FUTEX) += futex.o
509 +-ifeq ($(CONFIG_COMPAT),y)
510 +-obj-$(CONFIG_FUTEX) += futex_compat.o
511 +-endif
512 + obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
513 + obj-$(CONFIG_SMP) += smp.o
514 + ifneq ($(CONFIG_SMP),y)
515 +diff --git a/kernel/exit.c b/kernel/exit.c
516 +index f9943ef23fa82..8716f0780fe3d 100644
517 +--- a/kernel/exit.c
518 ++++ b/kernel/exit.c
519 +@@ -464,7 +464,7 @@ static void exit_mm(struct task_struct *tsk)
520 + struct mm_struct *mm = tsk->mm;
521 + struct core_state *core_state;
522 +
523 +- mm_release(tsk, mm);
524 ++ exit_mm_release(tsk, mm);
525 + if (!mm)
526 + return;
527 + sync_mm_rss(mm);
528 +@@ -785,31 +785,12 @@ void __noreturn do_exit(long code)
529 + */
530 + if (unlikely(tsk->flags & PF_EXITING)) {
531 + pr_alert("Fixing recursive fault but reboot is needed!\n");
532 +- /*
533 +- * We can do this unlocked here. The futex code uses
534 +- * this flag just to verify whether the pi state
535 +- * cleanup has been done or not. In the worst case it
536 +- * loops once more. We pretend that the cleanup was
537 +- * done as there is no way to return. Either the
538 +- * OWNER_DIED bit is set by now or we push the blocked
539 +- * task into the wait for ever nirwana as well.
540 +- */
541 +- tsk->flags |= PF_EXITPIDONE;
542 ++ futex_exit_recursive(tsk);
543 + set_current_state(TASK_UNINTERRUPTIBLE);
544 + schedule();
545 + }
546 +
547 + exit_signals(tsk); /* sets PF_EXITING */
548 +- /*
549 +- * Ensure that all new tsk->pi_lock acquisitions must observe
550 +- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
551 +- */
552 +- smp_mb();
553 +- /*
554 +- * Ensure that we must observe the pi_state in exit_mm() ->
555 +- * mm_release() -> exit_pi_state_list().
556 +- */
557 +- raw_spin_unlock_wait(&tsk->pi_lock);
558 +
559 + /* sync mm's RSS info before statistics gathering */
560 + if (tsk->mm)
561 +@@ -876,12 +857,6 @@ void __noreturn do_exit(long code)
562 + * Make sure we are holding no locks:
563 + */
564 + debug_check_no_locks_held();
565 +- /*
566 +- * We can do this unlocked here. The futex code uses this flag
567 +- * just to verify whether the pi state cleanup has been done
568 +- * or not. In the worst case it loops once more.
569 +- */
570 +- tsk->flags |= PF_EXITPIDONE;
571 +
572 + if (tsk->io_context)
573 + exit_io_context(tsk);
574 +diff --git a/kernel/fork.c b/kernel/fork.c
575 +index b64efec4a6e6e..91349fd3e162d 100644
576 +--- a/kernel/fork.c
577 ++++ b/kernel/fork.c
578 +@@ -1082,24 +1082,8 @@ static int wait_for_vfork_done(struct task_struct *child,
579 + * restoring the old one. . .
580 + * Eric Biederman 10 January 1998
581 + */
582 +-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
583 ++static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
584 + {
585 +- /* Get rid of any futexes when releasing the mm */
586 +-#ifdef CONFIG_FUTEX
587 +- if (unlikely(tsk->robust_list)) {
588 +- exit_robust_list(tsk);
589 +- tsk->robust_list = NULL;
590 +- }
591 +-#ifdef CONFIG_COMPAT
592 +- if (unlikely(tsk->compat_robust_list)) {
593 +- compat_exit_robust_list(tsk);
594 +- tsk->compat_robust_list = NULL;
595 +- }
596 +-#endif
597 +- if (unlikely(!list_empty(&tsk->pi_state_list)))
598 +- exit_pi_state_list(tsk);
599 +-#endif
600 +-
601 + uprobe_free_utask(tsk);
602 +
603 + /* Get rid of any cached register state */
604 +@@ -1132,6 +1116,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
605 + complete_vfork_done(tsk);
606 + }
607 +
608 ++void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
609 ++{
610 ++ futex_exit_release(tsk);
611 ++ mm_release(tsk, mm);
612 ++}
613 ++
614 ++void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
615 ++{
616 ++ futex_exec_release(tsk);
617 ++ mm_release(tsk, mm);
618 ++}
619 ++
620 + /*
621 + * Allocate a new mm structure and copy contents from the
622 + * mm structure of the passed in task structure.
623 +@@ -1706,14 +1702,8 @@ static __latent_entropy struct task_struct *copy_process(
624 + #ifdef CONFIG_BLOCK
625 + p->plug = NULL;
626 + #endif
627 +-#ifdef CONFIG_FUTEX
628 +- p->robust_list = NULL;
629 +-#ifdef CONFIG_COMPAT
630 +- p->compat_robust_list = NULL;
631 +-#endif
632 +- INIT_LIST_HEAD(&p->pi_state_list);
633 +- p->pi_state_cache = NULL;
634 +-#endif
635 ++ futex_init_task(p);
636 ++
637 + /*
638 + * sigaltstack should be cleared when sharing the same VM
639 + */
640 +diff --git a/kernel/futex.c b/kernel/futex.c
641 +index 7123d9cab4568..2ef8c5aef35d0 100644
642 +--- a/kernel/futex.c
643 ++++ b/kernel/futex.c
644 +@@ -44,6 +44,7 @@
645 + * along with this program; if not, write to the Free Software
646 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
647 + */
648 ++#include <linux/compat.h>
649 + #include <linux/slab.h>
650 + #include <linux/poll.h>
651 + #include <linux/fs.h>
652 +@@ -171,8 +172,10 @@
653 + * double_lock_hb() and double_unlock_hb(), respectively.
654 + */
655 +
656 +-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
657 +-int __read_mostly futex_cmpxchg_enabled;
658 ++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
659 ++#define futex_cmpxchg_enabled 1
660 ++#else
661 ++static int __read_mostly futex_cmpxchg_enabled;
662 + #endif
663 +
664 + /*
665 +@@ -336,6 +339,12 @@ static inline bool should_fail_futex(bool fshared)
666 + }
667 + #endif /* CONFIG_FAIL_FUTEX */
668 +
669 ++#ifdef CONFIG_COMPAT
670 ++static void compat_exit_robust_list(struct task_struct *curr);
671 ++#else
672 ++static inline void compat_exit_robust_list(struct task_struct *curr) { }
673 ++#endif
674 ++
675 + static inline void futex_get_mm(union futex_key *key)
676 + {
677 + atomic_inc(&key->private.mm->mm_count);
678 +@@ -891,7 +900,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
679 + * Kernel cleans up PI-state, but userspace is likely hosed.
680 + * (Robust-futex cleanup is separate and might save the day for userspace.)
681 + */
682 +-void exit_pi_state_list(struct task_struct *curr)
683 ++static void exit_pi_state_list(struct task_struct *curr)
684 + {
685 + struct list_head *next, *head = &curr->pi_state_list;
686 + struct futex_pi_state *pi_state;
687 +@@ -1063,12 +1072,43 @@ out_state:
688 + return 0;
689 + }
690 +
691 ++/**
692 ++ * wait_for_owner_exiting - Block until the owner has exited
693 ++ * @exiting: Pointer to the exiting task
694 ++ *
695 ++ * Caller must hold a refcount on @exiting.
696 ++ */
697 ++static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
698 ++{
699 ++ if (ret != -EBUSY) {
700 ++ WARN_ON_ONCE(exiting);
701 ++ return;
702 ++ }
703 ++
704 ++ if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
705 ++ return;
706 ++
707 ++ mutex_lock(&exiting->futex_exit_mutex);
708 ++ /*
709 ++ * No point in doing state checking here. If the waiter got here
710 ++ * while the task was in exec()->exec_futex_release() then it can
711 ++ * have any FUTEX_STATE_* value when the waiter has acquired the
712 ++ * mutex. OK, if running, EXITING or DEAD if it reached exit()
713 ++ * already. Highly unlikely and not a problem. Just one more round
714 ++ * through the futex maze.
715 ++ */
716 ++ mutex_unlock(&exiting->futex_exit_mutex);
717 ++
718 ++ put_task_struct(exiting);
719 ++}
720 ++
721 + /*
722 + * Lookup the task for the TID provided from user space and attach to
723 + * it after doing proper sanity checks.
724 + */
725 + static int attach_to_pi_owner(u32 uval, union futex_key *key,
726 +- struct futex_pi_state **ps)
727 ++ struct futex_pi_state **ps,
728 ++ struct task_struct **exiting)
729 + {
730 + pid_t pid = uval & FUTEX_TID_MASK;
731 + struct futex_pi_state *pi_state;
732 +@@ -1090,22 +1130,33 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
733 + }
734 +
735 + /*
736 +- * We need to look at the task state flags to figure out,
737 +- * whether the task is exiting. To protect against the do_exit
738 +- * change of the task flags, we do this protected by
739 +- * p->pi_lock:
740 ++ * We need to look at the task state to figure out, whether the
741 ++ * task is exiting. To protect against the change of the task state
742 ++ * in futex_exit_release(), we do this protected by p->pi_lock:
743 + */
744 + raw_spin_lock_irq(&p->pi_lock);
745 +- if (unlikely(p->flags & PF_EXITING)) {
746 ++ if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
747 + /*
748 +- * The task is on the way out. When PF_EXITPIDONE is
749 +- * set, we know that the task has finished the
750 +- * cleanup:
751 ++ * The task is on the way out. When the futex state is
752 ++ * FUTEX_STATE_DEAD, we know that the task has finished
753 ++ * the cleanup:
754 + */
755 +- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
756 ++ int ret = (p->futex_state = FUTEX_STATE_DEAD) ? -ESRCH : -EAGAIN;
757 +
758 + raw_spin_unlock_irq(&p->pi_lock);
759 +- put_task_struct(p);
760 ++ /*
761 ++ * If the owner task is between FUTEX_STATE_EXITING and
762 ++ * FUTEX_STATE_DEAD then store the task pointer and keep
763 ++ * the reference on the task struct. The calling code will
764 ++ * drop all locks, wait for the task to reach
765 ++ * FUTEX_STATE_DEAD and then drop the refcount. This is
766 ++ * required to prevent a live lock when the current task
767 ++ * preempted the exiting task between the two states.
768 ++ */
769 ++ if (ret == -EBUSY)
770 ++ *exiting = p;
771 ++ else
772 ++ put_task_struct(p);
773 + return ret;
774 + }
775 +
776 +@@ -1136,7 +1187,8 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
777 + }
778 +
779 + static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
780 +- union futex_key *key, struct futex_pi_state **ps)
781 ++ union futex_key *key, struct futex_pi_state **ps,
782 ++ struct task_struct **exiting)
783 + {
784 + struct futex_q *match = futex_top_waiter(hb, key);
785 +
786 +@@ -1151,7 +1203,7 @@ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
787 + * We are the first waiter - try to look up the owner based on
788 + * @uval and attach to it.
789 + */
790 +- return attach_to_pi_owner(uval, key, ps);
791 ++ return attach_to_pi_owner(uval, key, ps, exiting);
792 + }
793 +
794 + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
795 +@@ -1177,6 +1229,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
796 + * lookup
797 + * @task: the task to perform the atomic lock work for. This will
798 + * be "current" except in the case of requeue pi.
799 ++ * @exiting: Pointer to store the task pointer of the owner task
800 ++ * which is in the middle of exiting
801 + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
802 + *
803 + * Return:
804 +@@ -1185,11 +1239,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
805 + * <0 - error
806 + *
807 + * The hb->lock and futex_key refs shall be held by the caller.
808 ++ *
809 ++ * @exiting is only set when the return value is -EBUSY. If so, this holds
810 ++ * a refcount on the exiting task on return and the caller needs to drop it
811 ++ * after waiting for the exit to complete.
812 + */
813 + static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
814 + union futex_key *key,
815 + struct futex_pi_state **ps,
816 +- struct task_struct *task, int set_waiters)
817 ++ struct task_struct *task,
818 ++ struct task_struct **exiting,
819 ++ int set_waiters)
820 + {
821 + u32 uval, newval, vpid = task_pid_vnr(task);
822 + struct futex_q *match;
823 +@@ -1259,7 +1319,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
824 + * attach to the owner. If that fails, no harm done, we only
825 + * set the FUTEX_WAITERS bit in the user space variable.
826 + */
827 +- return attach_to_pi_owner(uval, key, ps);
828 ++ return attach_to_pi_owner(uval, key, ps, exiting);
829 + }
830 +
831 + /**
832 +@@ -1685,6 +1745,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
833 + * @key1: the from futex key
834 + * @key2: the to futex key
835 + * @ps: address to store the pi_state pointer
836 ++ * @exiting: Pointer to store the task pointer of the owner task
837 ++ * which is in the middle of exiting
838 + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
839 + *
840 + * Try and get the lock on behalf of the top waiter if we can do it atomically.
841 +@@ -1692,16 +1754,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
842 + * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
843 + * hb1 and hb2 must be held by the caller.
844 + *
845 ++ * @exiting is only set when the return value is -EBUSY. If so, this holds
846 ++ * a refcount on the exiting task on return and the caller needs to drop it
847 ++ * after waiting for the exit to complete.
848 ++ *
849 + * Return:
850 + * 0 - failed to acquire the lock atomically;
851 + * >0 - acquired the lock, return value is vpid of the top_waiter
852 + * <0 - error
853 + */
854 +-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
855 +- struct futex_hash_bucket *hb1,
856 +- struct futex_hash_bucket *hb2,
857 +- union futex_key *key1, union futex_key *key2,
858 +- struct futex_pi_state **ps, int set_waiters)
859 ++static int
860 ++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
861 ++ struct futex_hash_bucket *hb2, union futex_key *key1,
862 ++ union futex_key *key2, struct futex_pi_state **ps,
863 ++ struct task_struct **exiting, int set_waiters)
864 + {
865 + struct futex_q *top_waiter = NULL;
866 + u32 curval;
867 +@@ -1738,7 +1804,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
868 + */
869 + vpid = task_pid_vnr(top_waiter->task);
870 + ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
871 +- set_waiters);
872 ++ exiting, set_waiters);
873 + if (ret == 1) {
874 + requeue_pi_wake_futex(top_waiter, key2, hb2);
875 + return vpid;
876 +@@ -1858,6 +1924,8 @@ retry_private:
877 + }
878 +
879 + if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
880 ++ struct task_struct *exiting = NULL;
881 ++
882 + /*
883 + * Attempt to acquire uaddr2 and wake the top waiter. If we
884 + * intend to requeue waiters, force setting the FUTEX_WAITERS
885 +@@ -1865,7 +1933,8 @@ retry_private:
886 + * faults rather in the requeue loop below.
887 + */
888 + ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
889 +- &key2, &pi_state, nr_requeue);
890 ++ &key2, &pi_state,
891 ++ &exiting, nr_requeue);
892 +
893 + /*
894 + * At this point the top_waiter has either taken uaddr2 or is
895 +@@ -1892,7 +1961,8 @@ retry_private:
896 + * If that call succeeds then we have pi_state and an
897 + * initial refcount on it.
898 + */
899 +- ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
900 ++ ret = lookup_pi_state(ret, hb2, &key2,
901 ++ &pi_state, &exiting);
902 + }
903 +
904 + switch (ret) {
905 +@@ -1910,17 +1980,24 @@ retry_private:
906 + if (!ret)
907 + goto retry;
908 + goto out;
909 ++ case -EBUSY:
910 + case -EAGAIN:
911 + /*
912 + * Two reasons for this:
913 +- * - Owner is exiting and we just wait for the
914 ++ * - EBUSY: Owner is exiting and we just wait for the
915 + * exit to complete.
916 +- * - The user space value changed.
917 ++ * - EAGAIN: The user space value changed.
918 + */
919 + double_unlock_hb(hb1, hb2);
920 + hb_waiters_dec(hb2);
921 + put_futex_key(&key2);
922 + put_futex_key(&key1);
923 ++ /*
924 ++ * Handle the case where the owner is in the middle of
925 ++ * exiting. Wait for the exit to complete otherwise
926 ++ * this task might loop forever, aka. live lock.
927 ++ */
928 ++ wait_for_owner_exiting(ret, exiting);
929 + cond_resched();
930 + goto retry;
931 + default:
932 +@@ -2571,6 +2648,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
933 + ktime_t *time, int trylock)
934 + {
935 + struct hrtimer_sleeper timeout, *to = NULL;
936 ++ struct task_struct *exiting = NULL;
937 + struct futex_hash_bucket *hb;
938 + struct futex_q q = futex_q_init;
939 + int res, ret;
940 +@@ -2594,7 +2672,8 @@ retry:
941 + retry_private:
942 + hb = queue_lock(&q);
943 +
944 +- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
945 ++ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
946 ++ &exiting, 0);
947 + if (unlikely(ret)) {
948 + /*
949 + * Atomic work succeeded and we got the lock,
950 +@@ -2607,15 +2686,22 @@ retry_private:
951 + goto out_unlock_put_key;
952 + case -EFAULT:
953 + goto uaddr_faulted;
954 ++ case -EBUSY:
955 + case -EAGAIN:
956 + /*
957 + * Two reasons for this:
958 +- * - Task is exiting and we just wait for the
959 ++ * - EBUSY: Task is exiting and we just wait for the
960 + * exit to complete.
961 +- * - The user space value changed.
962 ++ * - EAGAIN: The user space value changed.
963 + */
964 + queue_unlock(hb);
965 + put_futex_key(&q.key);
966 ++ /*
967 ++ * Handle the case where the owner is in the middle of
968 ++ * exiting. Wait for the exit to complete otherwise
969 ++ * this task might loop forever, aka. live lock.
970 ++ */
971 ++ wait_for_owner_exiting(ret, exiting);
972 + cond_resched();
973 + goto retry;
974 + default:
975 +@@ -3123,7 +3209,7 @@ err_unlock:
976 + * Process a futex-list entry, check whether it's owned by the
977 + * dying task, and do notification if so:
978 + */
979 +-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
980 ++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
981 + {
982 + u32 uval, uninitialized_var(nval), mval;
983 +
984 +@@ -3198,7 +3284,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
985 + *
986 + * We silently return on any sign of list-walking problem.
987 + */
988 +-void exit_robust_list(struct task_struct *curr)
989 ++static void exit_robust_list(struct task_struct *curr)
990 + {
991 + struct robust_list_head __user *head = curr->robust_list;
992 + struct robust_list __user *entry, *next_entry, *pending;
993 +@@ -3261,6 +3347,114 @@ void exit_robust_list(struct task_struct *curr)
994 + curr, pip);
995 + }
996 +
997 ++static void futex_cleanup(struct task_struct *tsk)
998 ++{
999 ++ if (unlikely(tsk->robust_list)) {
1000 ++ exit_robust_list(tsk);
1001 ++ tsk->robust_list = NULL;
1002 ++ }
1003 ++
1004 ++#ifdef CONFIG_COMPAT
1005 ++ if (unlikely(tsk->compat_robust_list)) {
1006 ++ compat_exit_robust_list(tsk);
1007 ++ tsk->compat_robust_list = NULL;
1008 ++ }
1009 ++#endif
1010 ++
1011 ++ if (unlikely(!list_empty(&tsk->pi_state_list)))
1012 ++ exit_pi_state_list(tsk);
1013 ++}
1014 ++
1015 ++/**
1016 ++ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1017 ++ * @tsk: task to set the state on
1018 ++ *
1019 ++ * Set the futex exit state of the task lockless. The futex waiter code
1020 ++ * observes that state when a task is exiting and loops until the task has
1021 ++ * actually finished the futex cleanup. The worst case for this is that the
1022 ++ * waiter runs through the wait loop until the state becomes visible.
1023 ++ *
1024 ++ * This is called from the recursive fault handling path in do_exit().
1025 ++ *
1026 ++ * This is best effort. Either the futex exit code has run already or
1027 ++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1028 ++ * take it over. If not, the problem is pushed back to user space. If the
1029 ++ * futex exit code did not run yet, then an already queued waiter might
1030 ++ * block forever, but there is nothing which can be done about that.
1031 ++ */
1032 ++void futex_exit_recursive(struct task_struct *tsk)
1033 ++{
1034 ++ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1035 ++ if (tsk->futex_state == FUTEX_STATE_EXITING)
1036 ++ mutex_unlock(&tsk->futex_exit_mutex);
1037 ++ tsk->futex_state = FUTEX_STATE_DEAD;
1038 ++}
1039 ++
1040 ++static void futex_cleanup_begin(struct task_struct *tsk)
1041 ++{
1042 ++ /*
1043 ++ * Prevent various race issues against a concurrent incoming waiter
1044 ++ * including live locks by forcing the waiter to block on
1045 ++ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1046 ++ * attach_to_pi_owner().
1047 ++ */
1048 ++ mutex_lock(&tsk->futex_exit_mutex);
1049 ++
1050 ++ /*
1051 ++ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1052 ++ *
1053 ++ * This ensures that all subsequent checks of tsk->futex_state in
1054 ++ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1055 ++ * tsk->pi_lock held.
1056 ++ *
1057 ++ * It guarantees also that a pi_state which was queued right before
1058 ++ * the state change under tsk->pi_lock by a concurrent waiter must
1059 ++ * be observed in exit_pi_state_list().
1060 ++ */
1061 ++ raw_spin_lock_irq(&tsk->pi_lock);
1062 ++ tsk->futex_state = FUTEX_STATE_EXITING;
1063 ++ raw_spin_unlock_irq(&tsk->pi_lock);
1064 ++}
1065 ++
1066 ++static void futex_cleanup_end(struct task_struct *tsk, int state)
1067 ++{
1068 ++ /*
1069 ++ * Lockless store. The only side effect is that an observer might
1070 ++ * take another loop until it becomes visible.
1071 ++ */
1072 ++ tsk->futex_state = state;
1073 ++ /*
1074 ++ * Drop the exit protection. This unblocks waiters which observed
1075 ++ * FUTEX_STATE_EXITING to reevaluate the state.
1076 ++ */
1077 ++ mutex_unlock(&tsk->futex_exit_mutex);
1078 ++}
1079 ++
1080 ++void futex_exec_release(struct task_struct *tsk)
1081 ++{
1082 ++ /*
1083 ++ * The state handling is done for consistency, but in the case of
1084 ++ * exec() there is no way to prevent futher damage as the PID stays
1085 ++ * the same. But for the unlikely and arguably buggy case that a
1086 ++ * futex is held on exec(), this provides at least as much state
1087 ++ * consistency protection which is possible.
1088 ++ */
1089 ++ futex_cleanup_begin(tsk);
1090 ++ futex_cleanup(tsk);
1091 ++ /*
1092 ++ * Reset the state to FUTEX_STATE_OK. The task is alive and about
1093 ++ * exec a new binary.
1094 ++ */
1095 ++ futex_cleanup_end(tsk, FUTEX_STATE_OK);
1096 ++}
1097 ++
1098 ++void futex_exit_release(struct task_struct *tsk)
1099 ++{
1100 ++ futex_cleanup_begin(tsk);
1101 ++ futex_cleanup(tsk);
1102 ++ futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1103 ++}
1104 ++
1105 + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
1106 + u32 __user *uaddr2, u32 val2, u32 val3)
1107 + {
1108 +@@ -3354,6 +3548,192 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
1109 + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1110 + }
1111 +
1112 ++#ifdef CONFIG_COMPAT
1113 ++/*
1114 ++ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1115 ++ */
1116 ++static inline int
1117 ++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
1118 ++ compat_uptr_t __user *head, unsigned int *pi)
1119 ++{
1120 ++ if (get_user(*uentry, head))
1121 ++ return -EFAULT;
1122 ++
1123 ++ *entry = compat_ptr((*uentry) & ~1);
1124 ++ *pi = (unsigned int)(*uentry) & 1;
1125 ++
1126 ++ return 0;
1127 ++}
1128 ++
1129 ++static void __user *futex_uaddr(struct robust_list __user *entry,
1130 ++ compat_long_t futex_offset)
1131 ++{
1132 ++ compat_uptr_t base = ptr_to_compat(entry);
1133 ++ void __user *uaddr = compat_ptr(base + futex_offset);
1134 ++
1135 ++ return uaddr;
1136 ++}
1137 ++
1138 ++/*
1139 ++ * Walk curr->robust_list (very carefully, it's a userspace list!)
1140 ++ * and mark any locks found there dead, and notify any waiters.
1141 ++ *
1142 ++ * We silently return on any sign of list-walking problem.
1143 ++ */
1144 ++void compat_exit_robust_list(struct task_struct *curr)
1145 ++{
1146 ++ struct compat_robust_list_head __user *head = curr->compat_robust_list;
1147 ++ struct robust_list __user *entry, *next_entry, *pending;
1148 ++ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1149 ++ unsigned int uninitialized_var(next_pi);
1150 ++ compat_uptr_t uentry, next_uentry, upending;
1151 ++ compat_long_t futex_offset;
1152 ++ int rc;
1153 ++
1154 ++ if (!futex_cmpxchg_enabled)
1155 ++ return;
1156 ++
1157 ++ /*
1158 ++ * Fetch the list head (which was registered earlier, via
1159 ++ * sys_set_robust_list()):
1160 ++ */
1161 ++ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
1162 ++ return;
1163 ++ /*
1164 ++ * Fetch the relative futex offset:
1165 ++ */
1166 ++ if (get_user(futex_offset, &head->futex_offset))
1167 ++ return;
1168 ++ /*
1169 ++ * Fetch any possibly pending lock-add first, and handle it
1170 ++ * if it exists:
1171 ++ */
1172 ++ if (compat_fetch_robust_entry(&upending, &pending,
1173 ++ &head->list_op_pending, &pip))
1174 ++ return;
1175 ++
1176 ++ next_entry = NULL; /* avoid warning with gcc */
1177 ++ while (entry != (struct robust_list __user *) &head->list) {
1178 ++ /*
1179 ++ * Fetch the next entry in the list before calling
1180 ++ * handle_futex_death:
1181 ++ */
1182 ++ rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
1183 ++ (compat_uptr_t __user *)&entry->next, &next_pi);
1184 ++ /*
1185 ++ * A pending lock might already be on the list, so
1186 ++ * dont process it twice:
1187 ++ */
1188 ++ if (entry != pending) {
1189 ++ void __user *uaddr = futex_uaddr(entry, futex_offset);
1190 ++
1191 ++ if (handle_futex_death(uaddr, curr, pi))
1192 ++ return;
1193 ++ }
1194 ++ if (rc)
1195 ++ return;
1196 ++ uentry = next_uentry;
1197 ++ entry = next_entry;
1198 ++ pi = next_pi;
1199 ++ /*
1200 ++ * Avoid excessively long or circular lists:
1201 ++ */
1202 ++ if (!--limit)
1203 ++ break;
1204 ++
1205 ++ cond_resched();
1206 ++ }
1207 ++ if (pending) {
1208 ++ void __user *uaddr = futex_uaddr(pending, futex_offset);
1209 ++
1210 ++ handle_futex_death(uaddr, curr, pip);
1211 ++ }
1212 ++}
1213 ++
1214 ++COMPAT_SYSCALL_DEFINE2(set_robust_list,
1215 ++ struct compat_robust_list_head __user *, head,
1216 ++ compat_size_t, len)
1217 ++{
1218 ++ if (!futex_cmpxchg_enabled)
1219 ++ return -ENOSYS;
1220 ++
1221 ++ if (unlikely(len != sizeof(*head)))
1222 ++ return -EINVAL;
1223 ++
1224 ++ current->compat_robust_list = head;
1225 ++
1226 ++ return 0;
1227 ++}
1228 ++
1229 ++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
1230 ++ compat_uptr_t __user *, head_ptr,
1231 ++ compat_size_t __user *, len_ptr)
1232 ++{
1233 ++ struct compat_robust_list_head __user *head;
1234 ++ unsigned long ret;
1235 ++ struct task_struct *p;
1236 ++
1237 ++ if (!futex_cmpxchg_enabled)
1238 ++ return -ENOSYS;
1239 ++
1240 ++ rcu_read_lock();
1241 ++
1242 ++ ret = -ESRCH;
1243 ++ if (!pid)
1244 ++ p = current;
1245 ++ else {
1246 ++ p = find_task_by_vpid(pid);
1247 ++ if (!p)
1248 ++ goto err_unlock;
1249 ++ }
1250 ++
1251 ++ ret = -EPERM;
1252 ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
1253 ++ goto err_unlock;
1254 ++
1255 ++ head = p->compat_robust_list;
1256 ++ rcu_read_unlock();
1257 ++
1258 ++ if (put_user(sizeof(*head), len_ptr))
1259 ++ return -EFAULT;
1260 ++ return put_user(ptr_to_compat(head), head_ptr);
1261 ++
1262 ++err_unlock:
1263 ++ rcu_read_unlock();
1264 ++
1265 ++ return ret;
1266 ++}
1267 ++
1268 ++COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
1269 ++ struct compat_timespec __user *, utime, u32 __user *, uaddr2,
1270 ++ u32, val3)
1271 ++{
1272 ++ struct timespec ts;
1273 ++ ktime_t t, *tp = NULL;
1274 ++ int val2 = 0;
1275 ++ int cmd = op & FUTEX_CMD_MASK;
1276 ++
1277 ++ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
1278 ++ cmd == FUTEX_WAIT_BITSET ||
1279 ++ cmd == FUTEX_WAIT_REQUEUE_PI)) {
1280 ++ if (compat_get_timespec(&ts, utime))
1281 ++ return -EFAULT;
1282 ++ if (!timespec_valid(&ts))
1283 ++ return -EINVAL;
1284 ++
1285 ++ t = timespec_to_ktime(ts);
1286 ++ if (cmd == FUTEX_WAIT)
1287 ++ t = ktime_add_safe(ktime_get(), t);
1288 ++ tp = &t;
1289 ++ }
1290 ++ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
1291 ++ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
1292 ++ val2 = (int) (unsigned long) utime;
1293 ++
1294 ++ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1295 ++}
1296 ++#endif /* CONFIG_COMPAT */
1297 ++
1298 + static void __init futex_detect_cmpxchg(void)
1299 + {
1300 + #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
1301 +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
1302 +deleted file mode 100644
1303 +index 4ae3232e7a28a..0000000000000
1304 +--- a/kernel/futex_compat.c
1305 ++++ /dev/null
1306 +@@ -1,201 +0,0 @@
1307 +-/*
1308 +- * linux/kernel/futex_compat.c
1309 +- *
1310 +- * Futex compatibililty routines.
1311 +- *
1312 +- * Copyright 2006, Red Hat, Inc., Ingo Molnar
1313 +- */
1314 +-
1315 +-#include <linux/linkage.h>
1316 +-#include <linux/compat.h>
1317 +-#include <linux/nsproxy.h>
1318 +-#include <linux/futex.h>
1319 +-#include <linux/ptrace.h>
1320 +-#include <linux/syscalls.h>
1321 +-
1322 +-#include <asm/uaccess.h>
1323 +-
1324 +-
1325 +-/*
1326 +- * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1327 +- */
1328 +-static inline int
1329 +-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
1330 +- compat_uptr_t __user *head, unsigned int *pi)
1331 +-{
1332 +- if (get_user(*uentry, head))
1333 +- return -EFAULT;
1334 +-
1335 +- *entry = compat_ptr((*uentry) & ~1);
1336 +- *pi = (unsigned int)(*uentry) & 1;
1337 +-
1338 +- return 0;
1339 +-}
1340 +-
1341 +-static void __user *futex_uaddr(struct robust_list __user *entry,
1342 +- compat_long_t futex_offset)
1343 +-{
1344 +- compat_uptr_t base = ptr_to_compat(entry);
1345 +- void __user *uaddr = compat_ptr(base + futex_offset);
1346 +-
1347 +- return uaddr;
1348 +-}
1349 +-
1350 +-/*
1351 +- * Walk curr->robust_list (very carefully, it's a userspace list!)
1352 +- * and mark any locks found there dead, and notify any waiters.
1353 +- *
1354 +- * We silently return on any sign of list-walking problem.
1355 +- */
1356 +-void compat_exit_robust_list(struct task_struct *curr)
1357 +-{
1358 +- struct compat_robust_list_head __user *head = curr->compat_robust_list;
1359 +- struct robust_list __user *entry, *next_entry, *pending;
1360 +- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1361 +- unsigned int uninitialized_var(next_pi);
1362 +- compat_uptr_t uentry, next_uentry, upending;
1363 +- compat_long_t futex_offset;
1364 +- int rc;
1365 +-
1366 +- if (!futex_cmpxchg_enabled)
1367 +- return;
1368 +-
1369 +- /*
1370 +- * Fetch the list head (which was registered earlier, via
1371 +- * sys_set_robust_list()):
1372 +- */
1373 +- if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
1374 +- return;
1375 +- /*
1376 +- * Fetch the relative futex offset:
1377 +- */
1378 +- if (get_user(futex_offset, &head->futex_offset))
1379 +- return;
1380 +- /*
1381 +- * Fetch any possibly pending lock-add first, and handle it
1382 +- * if it exists:
1383 +- */
1384 +- if (fetch_robust_entry(&upending, &pending,
1385 +- &head->list_op_pending, &pip))
1386 +- return;
1387 +-
1388 +- next_entry = NULL; /* avoid warning with gcc */
1389 +- while (entry != (struct robust_list __user *) &head->list) {
1390 +- /*
1391 +- * Fetch the next entry in the list before calling
1392 +- * handle_futex_death:
1393 +- */
1394 +- rc = fetch_robust_entry(&next_uentry, &next_entry,
1395 +- (compat_uptr_t __user *)&entry->next, &next_pi);
1396 +- /*
1397 +- * A pending lock might already be on the list, so
1398 +- * dont process it twice:
1399 +- */
1400 +- if (entry != pending) {
1401 +- void __user *uaddr = futex_uaddr(entry, futex_offset);
1402 +-
1403 +- if (handle_futex_death(uaddr, curr, pi))
1404 +- return;
1405 +- }
1406 +- if (rc)
1407 +- return;
1408 +- uentry = next_uentry;
1409 +- entry = next_entry;
1410 +- pi = next_pi;
1411 +- /*
1412 +- * Avoid excessively long or circular lists:
1413 +- */
1414 +- if (!--limit)
1415 +- break;
1416 +-
1417 +- cond_resched();
1418 +- }
1419 +- if (pending) {
1420 +- void __user *uaddr = futex_uaddr(pending, futex_offset);
1421 +-
1422 +- handle_futex_death(uaddr, curr, pip);
1423 +- }
1424 +-}
1425 +-
1426 +-COMPAT_SYSCALL_DEFINE2(set_robust_list,
1427 +- struct compat_robust_list_head __user *, head,
1428 +- compat_size_t, len)
1429 +-{
1430 +- if (!futex_cmpxchg_enabled)
1431 +- return -ENOSYS;
1432 +-
1433 +- if (unlikely(len != sizeof(*head)))
1434 +- return -EINVAL;
1435 +-
1436 +- current->compat_robust_list = head;
1437 +-
1438 +- return 0;
1439 +-}
1440 +-
1441 +-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
1442 +- compat_uptr_t __user *, head_ptr,
1443 +- compat_size_t __user *, len_ptr)
1444 +-{
1445 +- struct compat_robust_list_head __user *head;
1446 +- unsigned long ret;
1447 +- struct task_struct *p;
1448 +-
1449 +- if (!futex_cmpxchg_enabled)
1450 +- return -ENOSYS;
1451 +-
1452 +- rcu_read_lock();
1453 +-
1454 +- ret = -ESRCH;
1455 +- if (!pid)
1456 +- p = current;
1457 +- else {
1458 +- p = find_task_by_vpid(pid);
1459 +- if (!p)
1460 +- goto err_unlock;
1461 +- }
1462 +-
1463 +- ret = -EPERM;
1464 +- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
1465 +- goto err_unlock;
1466 +-
1467 +- head = p->compat_robust_list;
1468 +- rcu_read_unlock();
1469 +-
1470 +- if (put_user(sizeof(*head), len_ptr))
1471 +- return -EFAULT;
1472 +- return put_user(ptr_to_compat(head), head_ptr);
1473 +-
1474 +-err_unlock:
1475 +- rcu_read_unlock();
1476 +-
1477 +- return ret;
1478 +-}
1479 +-
1480 +-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
1481 +- struct compat_timespec __user *, utime, u32 __user *, uaddr2,
1482 +- u32, val3)
1483 +-{
1484 +- struct timespec ts;
1485 +- ktime_t t, *tp = NULL;
1486 +- int val2 = 0;
1487 +- int cmd = op & FUTEX_CMD_MASK;
1488 +-
1489 +- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
1490 +- cmd == FUTEX_WAIT_BITSET ||
1491 +- cmd == FUTEX_WAIT_REQUEUE_PI)) {
1492 +- if (compat_get_timespec(&ts, utime))
1493 +- return -EFAULT;
1494 +- if (!timespec_valid(&ts))
1495 +- return -EINVAL;
1496 +-
1497 +- t = timespec_to_ktime(ts);
1498 +- if (cmd == FUTEX_WAIT)
1499 +- t = ktime_add_safe(ktime_get(), t);
1500 +- tp = &t;
1501 +- }
1502 +- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
1503 +- cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
1504 +- val2 = (int) (unsigned long) utime;
1505 +-
1506 +- return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1507 +-}
1508 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
1509 +index 0b0de3030e0dc..9c20c53f6729e 100644
1510 +--- a/net/mac80211/ieee80211_i.h
1511 ++++ b/net/mac80211/ieee80211_i.h
1512 +@@ -1046,6 +1046,7 @@ enum queue_stop_reason {
1513 + IEEE80211_QUEUE_STOP_REASON_FLUSH,
1514 + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
1515 + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
1516 ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
1517 +
1518 + IEEE80211_QUEUE_STOP_REASONS,
1519 + };
1520 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
1521 +index ad03331ee7855..7d43e0085cfc7 100644
1522 +--- a/net/mac80211/iface.c
1523 ++++ b/net/mac80211/iface.c
1524 +@@ -1577,6 +1577,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
1525 + if (ret)
1526 + return ret;
1527 +
1528 ++ ieee80211_stop_vif_queues(local, sdata,
1529 ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
1530 ++ synchronize_net();
1531 ++
1532 + ieee80211_do_stop(sdata, false);
1533 +
1534 + ieee80211_teardown_sdata(sdata);
1535 +@@ -1597,6 +1601,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
1536 + err = ieee80211_do_open(&sdata->wdev, false);
1537 + WARN(err, "type change: do_open returned %d", err);
1538 +
1539 ++ ieee80211_wake_vif_queues(local, sdata,
1540 ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
1541 + return ret;
1542 + }
1543 +
1544 +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
1545 +index b9dd4e9604261..81adbfaffe38f 100644
1546 +--- a/net/netfilter/nft_dynset.c
1547 ++++ b/net/netfilter/nft_dynset.c
1548 +@@ -210,8 +210,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
1549 + nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
1550 + priv->expr->ops->size);
1551 + if (set->flags & NFT_SET_TIMEOUT) {
1552 +- if (timeout || set->timeout)
1553 ++ if (timeout || set->timeout) {
1554 ++ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
1555 + nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
1556 ++ }
1557 + }
1558 +
1559 + priv->timeout = timeout;
1560 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
1561 +index 0afae9f73ebb4..f326a6ea35fc7 100644
1562 +--- a/net/nfc/netlink.c
1563 ++++ b/net/nfc/netlink.c
1564 +@@ -887,6 +887,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
1565 +
1566 + if (!dev->polling) {
1567 + device_unlock(&dev->dev);
1568 ++ nfc_put_device(dev);
1569 + return -EINVAL;
1570 + }
1571 +
1572 +diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
1573 +index 574af981806fa..92a3cfae4de87 100644
1574 +--- a/net/nfc/rawsock.c
1575 ++++ b/net/nfc/rawsock.c
1576 +@@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
1577 + if (addr->target_idx > dev->target_next_idx - 1 ||
1578 + addr->target_idx < dev->target_next_idx - dev->n_targets) {
1579 + rc = -EINVAL;
1580 +- goto error;
1581 ++ goto put_dev;
1582 + }
1583 +
1584 + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
1585 +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
1586 +index 4bf0296a7c433..0e809ae17f381 100644
1587 +--- a/net/wireless/wext-core.c
1588 ++++ b/net/wireless/wext-core.c
1589 +@@ -898,8 +898,9 @@ out:
1590 + int call_commit_handler(struct net_device *dev)
1591 + {
1592 + #ifdef CONFIG_WIRELESS_EXT
1593 +- if ((netif_running(dev)) &&
1594 +- (dev->wireless_handlers->standard[0] != NULL))
1595 ++ if (netif_running(dev) &&
1596 ++ dev->wireless_handlers &&
1597 ++ dev->wireless_handlers->standard[0])
1598 + /* Call the commit handler on the driver */
1599 + return dev->wireless_handlers->standard[0](dev, NULL,
1600 + NULL, NULL);
1601 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
1602 +index 1e87639f2c270..d613bf77cc0f9 100644
1603 +--- a/net/xfrm/xfrm_input.c
1604 ++++ b/net/xfrm/xfrm_input.c
1605 +@@ -315,7 +315,7 @@ resume:
1606 + /* only the first xfrm gets the encap type */
1607 + encap_type = 0;
1608 +
1609 +- if (async && x->repl->recheck(x, skb, seq)) {
1610 ++ if (x->repl->recheck(x, skb, seq)) {
1611 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
1612 + goto drop_unlock;
1613 + }