Gentoo Archives: gentoo-commits

From: "Tom Wijsman (tomwij)" <tomwij@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2529 - in genpatches-2.6/trunk: 3.0 3.10 3.11 3.4
Date: Fri, 27 Sep 2013 17:20:15
Message-Id: 20130927172008.B25C62004C@flycatcher.gentoo.org
1 Author: tomwij
2 Date: 2013-09-27 17:20:08 +0000 (Fri, 27 Sep 2013)
3 New Revision: 2529
4
5 Added:
6 genpatches-2.6/trunk/3.0/1096_linux-3.0.97.patch
7 genpatches-2.6/trunk/3.10/1012_linux-3.10.13.patch
8 genpatches-2.6/trunk/3.11/1001_linux-3.11.2.patch
9 genpatches-2.6/trunk/3.4/1062_linux-3.4.63.patch
10 Removed:
11 genpatches-2.6/trunk/3.10/1500_CVE-2013-4300-net-Check-the-correct-namespace-when-spoofing-pid-ov.patch
12 Modified:
13 genpatches-2.6/trunk/3.0/0000_README
14 genpatches-2.6/trunk/3.10/0000_README
15 genpatches-2.6/trunk/3.11/0000_README
16 genpatches-2.6/trunk/3.4/0000_README
17 Log:
18 Linux patches 3.0.97, 3.4.63, 3.10.13 and 3.11.2.
19
20 Modified: genpatches-2.6/trunk/3.0/0000_README
21 ===================================================================
22 --- genpatches-2.6/trunk/3.0/0000_README 2013-09-25 17:13:05 UTC (rev 2528)
23 +++ genpatches-2.6/trunk/3.0/0000_README 2013-09-27 17:20:08 UTC (rev 2529)
24 @@ -419,6 +419,10 @@
25 From: http://www.kernel.org
26 Desc: Linux 3.0.96
27
28 +Patch: 1096_linux-3.0.97.patch
29 +From: http://www.kernel.org
30 +Desc: Linux 3.0.97
31 +
32 Patch: 1500_XATTR_USER_PREFIX.patch
33 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
34 Desc: Support for namespace user.pax.* on tmpfs.
35
36 Added: genpatches-2.6/trunk/3.0/1096_linux-3.0.97.patch
37 ===================================================================
38 --- genpatches-2.6/trunk/3.0/1096_linux-3.0.97.patch (rev 0)
39 +++ genpatches-2.6/trunk/3.0/1096_linux-3.0.97.patch 2013-09-27 17:20:08 UTC (rev 2529)
40 @@ -0,0 +1,597 @@
41 +diff --git a/Makefile b/Makefile
42 +index e2a73ebd..53c9a5dc 100644
43 +--- a/Makefile
44 ++++ b/Makefile
45 +@@ -1,6 +1,6 @@
46 + VERSION = 3
47 + PATCHLEVEL = 0
48 +-SUBLEVEL = 96
49 ++SUBLEVEL = 97
50 + EXTRAVERSION =
51 + NAME = Sneaky Weasel
52 +
53 +diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
54 +index 13c7e5f9..3f47259b 100644
55 +--- a/arch/arm/mach-versatile/pci.c
56 ++++ b/arch/arm/mach-versatile/pci.c
57 +@@ -43,9 +43,9 @@
58 + #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
59 + #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
60 + #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
61 +-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
62 +-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
63 +-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
64 ++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
65 ++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
66 ++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
67 + #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
68 +
69 + #define DEVICE_ID_OFFSET 0x00
70 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
71 +index 8184ee97..3fcbae0f 100644
72 +--- a/arch/powerpc/kernel/align.c
73 ++++ b/arch/powerpc/kernel/align.c
74 +@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
75 + nb = aligninfo[instr].len;
76 + flags = aligninfo[instr].flags;
77 +
78 ++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
79 ++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
80 ++ nb = 8;
81 ++ flags = LD+SW;
82 ++ } else if (IS_XFORM(instruction) &&
83 ++ ((instruction >> 1) & 0x3ff) == 660) {
84 ++ nb = 8;
85 ++ flags = ST+SW;
86 ++ }
87 ++
88 + /* Byteswap little endian loads and stores */
89 + swiz = 0;
90 + if (regs->msr & MSR_LE) {
91 +diff --git a/crypto/api.c b/crypto/api.c
92 +index 033a7147..4f98dd5b 100644
93 +--- a/crypto/api.c
94 ++++ b/crypto/api.c
95 +@@ -40,6 +40,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
96 + return alg;
97 + }
98 +
99 ++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
100 ++
101 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
102 + {
103 + return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
104 +@@ -150,8 +152,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
105 + }
106 + up_write(&crypto_alg_sem);
107 +
108 +- if (alg != &larval->alg)
109 ++ if (alg != &larval->alg) {
110 + kfree(larval);
111 ++ if (crypto_is_larval(alg))
112 ++ alg = crypto_larval_wait(alg);
113 ++ }
114 +
115 + return alg;
116 + }
117 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
118 +index 53576e7c..a9c2b689 100644
119 +--- a/drivers/hid/hid-core.c
120 ++++ b/drivers/hid/hid-core.c
121 +@@ -58,6 +58,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
122 + struct hid_report_enum *report_enum = device->report_enum + type;
123 + struct hid_report *report;
124 +
125 ++ if (id >= HID_MAX_IDS)
126 ++ return NULL;
127 + if (report_enum->report_id_hash[id])
128 + return report_enum->report_id_hash[id];
129 +
130 +@@ -379,9 +381,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
131 +
132 + case HID_GLOBAL_ITEM_TAG_REPORT_ID:
133 + parser->global.report_id = item_udata(item);
134 +- if (parser->global.report_id == 0) {
135 +- dbg_hid("report_id 0 is invalid\n");
136 +- return -1;
137 ++ if (parser->global.report_id == 0 ||
138 ++ parser->global.report_id >= HID_MAX_IDS) {
139 ++ hid_err(parser->device, "report_id %u is invalid\n",
140 ++ parser->global.report_id);
141 ++ return -1;
142 + }
143 + return 0;
144 +
145 +@@ -551,7 +555,7 @@ static void hid_device_release(struct device *dev)
146 + for (i = 0; i < HID_REPORT_TYPES; i++) {
147 + struct hid_report_enum *report_enum = device->report_enum + i;
148 +
149 +- for (j = 0; j < 256; j++) {
150 ++ for (j = 0; j < HID_MAX_IDS; j++) {
151 + struct hid_report *report = report_enum->report_id_hash[j];
152 + if (report)
153 + hid_free_report(report);
154 +@@ -989,7 +993,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
155 +
156 + int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
157 + {
158 +- unsigned size = field->report_size;
159 ++ unsigned size;
160 ++
161 ++ if (!field)
162 ++ return -1;
163 ++
164 ++ size = field->report_size;
165 +
166 + hid_dump_input(field->report->device, field->usage + offset, value);
167 +
168 +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
169 +index 9fae2ebd..48cba857 100644
170 +--- a/drivers/hid/hid-ntrig.c
171 ++++ b/drivers/hid/hid-ntrig.c
172 +@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
173 + struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
174 + report_id_hash[0x0d];
175 +
176 +- if (!report)
177 ++ if (!report || report->maxfield < 1 ||
178 ++ report->field[0]->report_count < 1)
179 + return -EINVAL;
180 +
181 + usbhid_submit_report(hdev, report, USB_DIR_IN);
182 +diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
183 +index 06e5300d..fa742323 100644
184 +--- a/drivers/hid/hid-pl.c
185 ++++ b/drivers/hid/hid-pl.c
186 +@@ -128,8 +128,14 @@ static int plff_init(struct hid_device *hid)
187 + strong = &report->field[0]->value[2];
188 + weak = &report->field[0]->value[3];
189 + debug("detected single-field device");
190 +- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
191 +- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
192 ++ } else if (report->field[0]->maxusage == 1 &&
193 ++ report->field[0]->usage[0].hid ==
194 ++ (HID_UP_LED | 0x43) &&
195 ++ report->maxfield >= 4 &&
196 ++ report->field[0]->report_count >= 1 &&
197 ++ report->field[1]->report_count >= 1 &&
198 ++ report->field[2]->report_count >= 1 &&
199 ++ report->field[3]->report_count >= 1) {
200 + report->field[0]->value[0] = 0x00;
201 + report->field[1]->value[0] = 0x00;
202 + strong = &report->field[2]->value[0];
203 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
204 +index 892c48b1..b8a26d20 100644
205 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
206 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
207 +@@ -1005,6 +1005,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
208 + * is_on == 0 means MRC CCK is OFF (more noise imm)
209 + */
210 + bool is_on = param ? 1 : 0;
211 ++
212 ++ if (ah->caps.rx_chainmask == 1)
213 ++ break;
214 ++
215 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
216 + AR_PHY_MRC_CCK_ENABLE, is_on);
217 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
218 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
219 +index 6f6f1002..e1f19719 100644
220 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
221 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
222 +@@ -2433,6 +2433,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
223 + for (acno = 0, ac = &an->ac[acno];
224 + acno < WME_NUM_AC; acno++, ac++) {
225 + ac->sched = false;
226 ++ ac->clear_ps_filter = true;
227 + ac->txq = sc->tx.txq_map[acno];
228 + INIT_LIST_HEAD(&ac->tid_q);
229 + }
230 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
231 +index 3d15a37c..cc3943a0 100644
232 +--- a/drivers/scsi/sd.c
233 ++++ b/drivers/scsi/sd.c
234 +@@ -2135,14 +2135,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
235 + }
236 + }
237 +
238 +- if (modepage == 0x3F) {
239 +- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
240 +- "present\n");
241 +- goto defaults;
242 +- } else if ((buffer[offset] & 0x3f) != modepage) {
243 +- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
244 +- goto defaults;
245 +- }
246 ++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
247 ++ goto defaults;
248 ++
249 + Page_found:
250 + if (modepage == 8) {
251 + sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
252 +diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
253 +index 8cea9dca..00a285c5 100644
254 +--- a/drivers/staging/comedi/drivers/dt282x.c
255 ++++ b/drivers/staging/comedi/drivers/dt282x.c
256 +@@ -406,8 +406,9 @@ struct dt282x_private {
257 + } \
258 + udelay(5); \
259 + } \
260 +- if (_i) \
261 ++ if (_i) { \
262 + b \
263 ++ } \
264 + } while (0)
265 +
266 + static int dt282x_attach(struct comedi_device *dev,
267 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
268 +index c84b4553..0b85e2c7 100644
269 +--- a/drivers/usb/class/cdc-wdm.c
270 ++++ b/drivers/usb/class/cdc-wdm.c
271 +@@ -171,6 +171,7 @@ skip_error:
272 + static void wdm_int_callback(struct urb *urb)
273 + {
274 + int rv = 0;
275 ++ int responding;
276 + int status = urb->status;
277 + struct wdm_device *desc;
278 + struct usb_ctrlrequest *req;
279 +@@ -244,8 +245,8 @@ static void wdm_int_callback(struct urb *urb)
280 + desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
281 + spin_lock(&desc->iuspin);
282 + clear_bit(WDM_READ, &desc->flags);
283 +- set_bit(WDM_RESPONDING, &desc->flags);
284 +- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
285 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
286 ++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
287 + && !test_bit(WDM_SUSPENDING, &desc->flags)) {
288 + rv = usb_submit_urb(desc->response, GFP_ATOMIC);
289 + dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
290 +@@ -635,16 +636,20 @@ static void wdm_rxwork(struct work_struct *work)
291 + {
292 + struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
293 + unsigned long flags;
294 +- int rv;
295 ++ int rv = 0;
296 ++ int responding;
297 +
298 + spin_lock_irqsave(&desc->iuspin, flags);
299 + if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
300 + spin_unlock_irqrestore(&desc->iuspin, flags);
301 + } else {
302 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
303 + spin_unlock_irqrestore(&desc->iuspin, flags);
304 +- rv = usb_submit_urb(desc->response, GFP_KERNEL);
305 ++ if (!responding)
306 ++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
307 + if (rv < 0 && rv != -EPERM) {
308 + spin_lock_irqsave(&desc->iuspin, flags);
309 ++ clear_bit(WDM_RESPONDING, &desc->flags);
310 + if (!test_bit(WDM_DISCONNECTING, &desc->flags))
311 + schedule_work(&desc->rxwork);
312 + spin_unlock_irqrestore(&desc->iuspin, flags);
313 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
314 +index 26678cad..c29f5214 100644
315 +--- a/drivers/usb/core/config.c
316 ++++ b/drivers/usb/core/config.c
317 +@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
318 +
319 + memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
320 + if (config->desc.bDescriptorType != USB_DT_CONFIG ||
321 +- config->desc.bLength < USB_DT_CONFIG_SIZE) {
322 ++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
323 ++ config->desc.bLength > size) {
324 + dev_err(ddev, "invalid descriptor for config index %d: "
325 + "type = 0x%X, length = %d\n", cfgidx,
326 + config->desc.bDescriptorType, config->desc.bLength);
327 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
328 +index 7f07eb8d..c95252d1 100644
329 +--- a/drivers/usb/host/xhci.c
330 ++++ b/drivers/usb/host/xhci.c
331 +@@ -2713,10 +2713,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
332 + {
333 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
334 + struct xhci_virt_device *virt_dev;
335 ++ struct device *dev = hcd->self.controller;
336 + unsigned long flags;
337 + u32 state;
338 + int i, ret;
339 +
340 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
341 ++ /*
342 ++ * We called pm_runtime_get_noresume when the device was attached.
343 ++ * Decrement the counter here to allow controller to runtime suspend
344 ++ * if no devices remain.
345 ++ */
346 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
347 ++ pm_runtime_put_noidle(dev);
348 ++#endif
349 ++
350 + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
351 + /* If the host is halted due to driver unload, we still need to free the
352 + * device.
353 +@@ -2783,6 +2794,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
354 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
355 + {
356 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
357 ++ struct device *dev = hcd->self.controller;
358 + unsigned long flags;
359 + int timeleft;
360 + int ret;
361 +@@ -2835,6 +2847,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
362 + goto disable_slot;
363 + }
364 + udev->slot_id = xhci->slot_id;
365 ++
366 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
367 ++ /*
368 ++ * If resetting upon resume, we can't put the controller into runtime
369 ++ * suspend if there is a device attached.
370 ++ */
371 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
372 ++ pm_runtime_get_noresume(dev);
373 ++#endif
374 ++
375 + /* Is this a LS or FS device under a HS hub? */
376 + /* Hub or peripherial? */
377 + return 1;
378 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
379 +index 267dff94..7ee78bd6 100644
380 +--- a/drivers/usb/serial/mos7720.c
381 ++++ b/drivers/usb/serial/mos7720.c
382 +@@ -383,7 +383,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
383 + kfree(urbtrack);
384 + return -ENOMEM;
385 + }
386 +- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
387 ++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
388 + if (!urbtrack->setup) {
389 + usb_free_urb(urbtrack->urb);
390 + kfree(urbtrack);
391 +@@ -391,8 +391,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
392 + }
393 + urbtrack->setup->bRequestType = (__u8)0x40;
394 + urbtrack->setup->bRequest = (__u8)0x0e;
395 +- urbtrack->setup->wValue = get_reg_value(reg, dummy);
396 +- urbtrack->setup->wIndex = get_reg_index(reg);
397 ++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
398 ++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
399 + urbtrack->setup->wLength = 0;
400 + usb_fill_control_urb(urbtrack->urb, usbdev,
401 + usb_sndctrlpipe(usbdev, 0),
402 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
403 +index fd725cde..949af52a 100644
404 +--- a/drivers/xen/grant-table.c
405 ++++ b/drivers/xen/grant-table.c
406 +@@ -355,9 +355,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
407 + void (*fn)(void *), void *arg, u16 count)
408 + {
409 + unsigned long flags;
410 ++ struct gnttab_free_callback *cb;
411 ++
412 + spin_lock_irqsave(&gnttab_list_lock, flags);
413 +- if (callback->next)
414 +- goto out;
415 ++
416 ++ /* Check if the callback is already on the list */
417 ++ cb = gnttab_free_callback_list;
418 ++ while (cb) {
419 ++ if (cb == callback)
420 ++ goto out;
421 ++ cb = cb->next;
422 ++ }
423 ++
424 + callback->fn = fn;
425 + callback->arg = arg;
426 + callback->count = count;
427 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
428 +index b7758094..9e6ee471 100644
429 +--- a/fs/cifs/connect.c
430 ++++ b/fs/cifs/connect.c
431 +@@ -158,6 +158,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
432 + try_to_freeze();
433 +
434 + /* we should try only the port we connected to before */
435 ++ mutex_lock(&server->srv_mutex);
436 + rc = generic_ip_connect(server);
437 + if (rc) {
438 + cFYI(1, "reconnect error %d", rc);
439 +@@ -169,6 +170,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
440 + server->tcpStatus = CifsNeedNegotiate;
441 + spin_unlock(&GlobalMid_Lock);
442 + }
443 ++ mutex_unlock(&server->srv_mutex);
444 + } while (server->tcpStatus == CifsNeedReconnect);
445 +
446 + return rc;
447 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
448 +index c04a025c..607a9736 100644
449 +--- a/fs/fuse/dir.c
450 ++++ b/fs/fuse/dir.c
451 +@@ -1439,6 +1439,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
452 + fc->no_setxattr = 1;
453 + err = -EOPNOTSUPP;
454 + }
455 ++ if (!err)
456 ++ fuse_invalidate_attr(inode);
457 + return err;
458 + }
459 +
460 +@@ -1568,6 +1570,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
461 + fc->no_removexattr = 1;
462 + err = -EOPNOTSUPP;
463 + }
464 ++ if (!err)
465 ++ fuse_invalidate_attr(inode);
466 + return err;
467 + }
468 +
469 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
470 +index 79fca8df..2e1c10fe 100644
471 +--- a/fs/fuse/file.c
472 ++++ b/fs/fuse/file.c
473 +@@ -1298,7 +1298,6 @@ static int fuse_writepage_locked(struct page *page)
474 +
475 + inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
476 + inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
477 +- end_page_writeback(page);
478 +
479 + spin_lock(&fc->lock);
480 + list_add(&req->writepages_entry, &fi->writepages);
481 +@@ -1306,6 +1305,8 @@ static int fuse_writepage_locked(struct page *page)
482 + fuse_flush_writepages(inode);
483 + spin_unlock(&fc->lock);
484 +
485 ++ end_page_writeback(page);
486 ++
487 + return 0;
488 +
489 + err_free:
490 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
491 +index b3cc8586..26f63644 100644
492 +--- a/fs/isofs/inode.c
493 ++++ b/fs/isofs/inode.c
494 +@@ -119,8 +119,8 @@ static void destroy_inodecache(void)
495 +
496 + static int isofs_remount(struct super_block *sb, int *flags, char *data)
497 + {
498 +- /* we probably want a lot more here */
499 +- *flags |= MS_RDONLY;
500 ++ if (!(*flags & MS_RDONLY))
501 ++ return -EROFS;
502 + return 0;
503 + }
504 +
505 +@@ -769,15 +769,6 @@ root_found:
506 + */
507 + s->s_maxbytes = 0x80000000000LL;
508 +
509 +- /*
510 +- * The CDROM is read-only, has no nodes (devices) on it, and since
511 +- * all of the files appear to be owned by root, we really do not want
512 +- * to allow suid. (suid or devices will not show up unless we have
513 +- * Rock Ridge extensions)
514 +- */
515 +-
516 +- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
517 +-
518 + /* Set this for reference. Its not currently used except on write
519 + which we don't have .. */
520 +
521 +@@ -1528,6 +1519,9 @@ struct inode *isofs_iget(struct super_block *sb,
522 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
523 + int flags, const char *dev_name, void *data)
524 + {
525 ++ /* We don't support read-write mounts */
526 ++ if (!(flags & MS_RDONLY))
527 ++ return ERR_PTR(-EACCES);
528 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
529 + }
530 +
531 +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
532 +index 59412840..774a032a 100644
533 +--- a/fs/ocfs2/extent_map.c
534 ++++ b/fs/ocfs2/extent_map.c
535 +@@ -782,7 +782,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
536 + cpos = map_start >> osb->s_clustersize_bits;
537 + mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
538 + map_start + map_len);
539 +- mapping_end -= cpos;
540 + is_last = 0;
541 + while (cpos < mapping_end && !is_last) {
542 + u32 fe_flags;
543 +diff --git a/include/linux/hid.h b/include/linux/hid.h
544 +index 42f7e2fb..af30c641 100644
545 +--- a/include/linux/hid.h
546 ++++ b/include/linux/hid.h
547 +@@ -414,10 +414,12 @@ struct hid_report {
548 + struct hid_device *device; /* associated device */
549 + };
550 +
551 ++#define HID_MAX_IDS 256
552 ++
553 + struct hid_report_enum {
554 + unsigned numbered;
555 + struct list_head report_list;
556 +- struct hid_report *report_id_hash[256];
557 ++ struct hid_report *report_id_hash[HID_MAX_IDS];
558 + };
559 +
560 + #define HID_REPORT_TYPES 3
561 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
562 +index c30ffd87..52d280ba 100644
563 +--- a/include/linux/rculist.h
564 ++++ b/include/linux/rculist.h
565 +@@ -254,8 +254,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
566 + */
567 + #define list_first_or_null_rcu(ptr, type, member) \
568 + ({struct list_head *__ptr = (ptr); \
569 +- struct list_head __rcu *__next = list_next_rcu(__ptr); \
570 +- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
571 ++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
572 ++ likely(__ptr != __next) ? \
573 ++ list_entry_rcu(__next, type, member) : NULL; \
574 + })
575 +
576 + /**
577 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
578 +index 983d2e11..78f71869 100644
579 +--- a/mm/huge_memory.c
580 ++++ b/mm/huge_memory.c
581 +@@ -1838,6 +1838,8 @@ static void collapse_huge_page(struct mm_struct *mm,
582 + goto out;
583 +
584 + vma = find_vma(mm, address);
585 ++ if (!vma)
586 ++ goto out;
587 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
588 + hend = vma->vm_end & HPAGE_PMD_MASK;
589 + if (address < hstart || address + HPAGE_PMD_SIZE > hend)
590 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
591 +index 57cdf5ad..d7b51d5b 100644
592 +--- a/mm/memcontrol.c
593 ++++ b/mm/memcontrol.c
594 +@@ -4433,7 +4433,13 @@ static int compare_thresholds(const void *a, const void *b)
595 + const struct mem_cgroup_threshold *_a = a;
596 + const struct mem_cgroup_threshold *_b = b;
597 +
598 +- return _a->threshold - _b->threshold;
599 ++ if (_a->threshold > _b->threshold)
600 ++ return 1;
601 ++
602 ++ if (_a->threshold < _b->threshold)
603 ++ return -1;
604 ++
605 ++ return 0;
606 + }
607 +
608 + static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
609 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
610 +index c5c97880..a9537d42 100644
611 +--- a/sound/pci/hda/hda_intel.c
612 ++++ b/sound/pci/hda/hda_intel.c
613 +@@ -2461,6 +2461,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
614 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
615 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
616 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
617 ++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
618 + SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
619 + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
620 + {}
621 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
622 +index 4393394b..ca1a90b8 100644
623 +--- a/sound/soc/codecs/wm8960.c
624 ++++ b/sound/soc/codecs/wm8960.c
625 +@@ -801,9 +801,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
626 + if (pll_div.k) {
627 + reg |= 0x20;
628 +
629 +- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
630 +- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
631 +- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
632 ++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
633 ++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
634 ++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
635 + }
636 + snd_soc_write(codec, WM8960_PLL1, reg);
637 +
638
639 Modified: genpatches-2.6/trunk/3.10/0000_README
640 ===================================================================
641 --- genpatches-2.6/trunk/3.10/0000_README 2013-09-25 17:13:05 UTC (rev 2528)
642 +++ genpatches-2.6/trunk/3.10/0000_README 2013-09-27 17:20:08 UTC (rev 2529)
643 @@ -90,9 +90,9 @@
644 From: http://www.kernel.org
645 Desc: Linux 3.10.12
646
647 -Patch: 1500_CVE-2013-4300-net-Check-the-correct-namespace-when-spoofing-pid-ov.patch
648 -From: http://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/commit/?id=d661684cf6820331feae71146c35da83d794467e
649 -Desc: CVE-2013-4300: PID Spoofing Privilege Escalation Vulnerability
650 +Patch: 1012_linux-3.10.13.patch
651 +From: http://www.kernel.org
652 +Desc: Linux 3.10.13
653
654 Patch: 1500_XATTR_USER_PREFIX.patch
655 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
656
657 Added: genpatches-2.6/trunk/3.10/1012_linux-3.10.13.patch
658 ===================================================================
659 --- genpatches-2.6/trunk/3.10/1012_linux-3.10.13.patch (rev 0)
660 +++ genpatches-2.6/trunk/3.10/1012_linux-3.10.13.patch 2013-09-27 17:20:08 UTC (rev 2529)
661 @@ -0,0 +1,3897 @@
662 +diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
663 +index 6a8b7158..9c92bb87 100644
664 +--- a/Documentation/DocBook/media_api.tmpl
665 ++++ b/Documentation/DocBook/media_api.tmpl
666 +@@ -1,6 +1,6 @@
667 + <?xml version="1.0"?>
668 +-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
669 +- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
670 ++<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
671 ++ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
672 + <!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
673 + <!ENTITY media-indices SYSTEM "./media-indices.tmpl">
674 +
675 +diff --git a/Makefile b/Makefile
676 +index afe001e3..25d38b79 100644
677 +--- a/Makefile
678 ++++ b/Makefile
679 +@@ -1,6 +1,6 @@
680 + VERSION = 3
681 + PATCHLEVEL = 10
682 +-SUBLEVEL = 12
683 ++SUBLEVEL = 13
684 + EXTRAVERSION =
685 + NAME = TOSSUG Baby Fish
686 +
687 +diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
688 +index 5d3ed5aa..0af879a4 100644
689 +--- a/arch/arm/boot/dts/at91rm9200.dtsi
690 ++++ b/arch/arm/boot/dts/at91rm9200.dtsi
691 +@@ -35,8 +35,12 @@
692 + ssc2 = &ssc2;
693 + };
694 + cpus {
695 +- cpu@0 {
696 ++ #address-cells = <0>;
697 ++ #size-cells = <0>;
698 ++
699 ++ cpu {
700 + compatible = "arm,arm920t";
701 ++ device_type = "cpu";
702 + };
703 + };
704 +
705 +diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
706 +index 43a18f74..0dbdb846 100644
707 +--- a/arch/arm/boot/dts/at91sam9260.dtsi
708 ++++ b/arch/arm/boot/dts/at91sam9260.dtsi
709 +@@ -32,8 +32,12 @@
710 + ssc0 = &ssc0;
711 + };
712 + cpus {
713 +- cpu@0 {
714 +- compatible = "arm,arm926ejs";
715 ++ #address-cells = <0>;
716 ++ #size-cells = <0>;
717 ++
718 ++ cpu {
719 ++ compatible = "arm,arm926ej-s";
720 ++ device_type = "cpu";
721 + };
722 + };
723 +
724 +diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
725 +index 94b58ab2..fcd38f89 100644
726 +--- a/arch/arm/boot/dts/at91sam9263.dtsi
727 ++++ b/arch/arm/boot/dts/at91sam9263.dtsi
728 +@@ -29,8 +29,12 @@
729 + ssc1 = &ssc1;
730 + };
731 + cpus {
732 +- cpu@0 {
733 +- compatible = "arm,arm926ejs";
734 ++ #address-cells = <0>;
735 ++ #size-cells = <0>;
736 ++
737 ++ cpu {
738 ++ compatible = "arm,arm926ej-s";
739 ++ device_type = "cpu";
740 + };
741 + };
742 +
743 +diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
744 +index bf18a735..479a0622 100644
745 +--- a/arch/arm/boot/dts/at91sam9g45.dtsi
746 ++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
747 +@@ -35,8 +35,12 @@
748 + ssc1 = &ssc1;
749 + };
750 + cpus {
751 +- cpu@0 {
752 +- compatible = "arm,arm926ejs";
753 ++ #address-cells = <0>;
754 ++ #size-cells = <0>;
755 ++
756 ++ cpu {
757 ++ compatible = "arm,arm926ej-s";
758 ++ device_type = "cpu";
759 + };
760 + };
761 +
762 +diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
763 +index 8d25f889..a92ec783 100644
764 +--- a/arch/arm/boot/dts/at91sam9n12.dtsi
765 ++++ b/arch/arm/boot/dts/at91sam9n12.dtsi
766 +@@ -31,8 +31,12 @@
767 + ssc0 = &ssc0;
768 + };
769 + cpus {
770 +- cpu@0 {
771 +- compatible = "arm,arm926ejs";
772 ++ #address-cells = <0>;
773 ++ #size-cells = <0>;
774 ++
775 ++ cpu {
776 ++ compatible = "arm,arm926ej-s";
777 ++ device_type = "cpu";
778 + };
779 + };
780 +
781 +diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
782 +index b5833d1f..2b2b6923 100644
783 +--- a/arch/arm/boot/dts/at91sam9x5.dtsi
784 ++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
785 +@@ -33,8 +33,12 @@
786 + ssc0 = &ssc0;
787 + };
788 + cpus {
789 +- cpu@0 {
790 +- compatible = "arm,arm926ejs";
791 ++ #address-cells = <0>;
792 ++ #size-cells = <0>;
793 ++
794 ++ cpu {
795 ++ compatible = "arm,arm926ej-s";
796 ++ device_type = "cpu";
797 + };
798 + };
799 +
800 +diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
801 +index 5000e0d4..642775d7 100644
802 +--- a/arch/arm/boot/dts/sama5d3.dtsi
803 ++++ b/arch/arm/boot/dts/sama5d3.dtsi
804 +@@ -35,8 +35,12 @@
805 + ssc1 = &ssc1;
806 + };
807 + cpus {
808 ++ #address-cells = <1>;
809 ++ #size-cells = <0>;
810 + cpu@0 {
811 ++ device_type = "cpu";
812 + compatible = "arm,cortex-a5";
813 ++ reg = <0x0>;
814 + };
815 + };
816 +
817 +diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
818 +index e7ef619a..06ef8b62 100644
819 +--- a/arch/arm/boot/dts/sun4i-a10.dtsi
820 ++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
821 +@@ -16,8 +16,12 @@
822 + interrupt-parent = <&intc>;
823 +
824 + cpus {
825 ++ #address-cells = <1>;
826 ++ #size-cells = <0>;
827 + cpu@0 {
828 ++ device_type = "cpu";
829 + compatible = "arm,cortex-a8";
830 ++ reg = <0x0>;
831 + };
832 + };
833 +
834 +diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
835 +index 31fa38f8..d2852547 100644
836 +--- a/arch/arm/boot/dts/sun5i-a13.dtsi
837 ++++ b/arch/arm/boot/dts/sun5i-a13.dtsi
838 +@@ -17,8 +17,12 @@
839 + interrupt-parent = <&intc>;
840 +
841 + cpus {
842 ++ #address-cells = <1>;
843 ++ #size-cells = <0>;
844 + cpu@0 {
845 ++ device_type = "cpu";
846 + compatible = "arm,cortex-a8";
847 ++ reg = <0x0>;
848 + };
849 + };
850 +
851 +diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
852 +index 4a519907..db9cf692 100644
853 +--- a/arch/arm/kvm/coproc.c
854 ++++ b/arch/arm/kvm/coproc.c
855 +@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
856 + #define access_pmintenclr pm_fake
857 +
858 + /* Architected CP15 registers.
859 +- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
860 ++ * CRn denotes the primary register number, but is copied to the CRm in the
861 ++ * user space API for 64-bit register access in line with the terminology used
862 ++ * in the ARM ARM.
863 ++ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
864 ++ * registers preceding 32-bit ones.
865 + */
866 + static const struct coproc_reg cp15_regs[] = {
867 + /* CSSELR: swapped by interrupt.S. */
868 +@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
869 + NULL, reset_unknown, c0_CSSELR },
870 +
871 + /* TTBR0/TTBR1: swapped by interrupt.S. */
872 +- { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
873 +- { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
874 ++ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
875 ++ { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
876 +
877 + /* TTBCR: swapped by interrupt.S. */
878 + { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
879 +@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
880 + NULL, reset_unknown, c6_IFAR },
881 +
882 + /* PAR swapped by interrupt.S */
883 +- { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
884 ++ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
885 +
886 + /*
887 + * DC{C,I,CI}SW operations:
888 +@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
889 + | KVM_REG_ARM_OPC1_MASK))
890 + return false;
891 + params->is_64bit = true;
892 +- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
893 ++ /* CRm to CRn: see cp15_to_index for details */
894 ++ params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
895 + >> KVM_REG_ARM_CRM_SHIFT);
896 + params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
897 + >> KVM_REG_ARM_OPC1_SHIFT);
898 + params->Op2 = 0;
899 +- params->CRn = 0;
900 ++ params->CRm = 0;
901 + return true;
902 + default:
903 + return false;
904 +@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
905 + if (reg->is_64) {
906 + val |= KVM_REG_SIZE_U64;
907 + val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
908 +- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
909 ++ /*
910 ++ * CRn always denotes the primary coproc. reg. nr. for the
911 ++ * in-kernel representation, but the user space API uses the
912 ++ * CRm for the encoding, because it is modelled after the
913 ++ * MRRC/MCRR instructions: see the ARM ARM rev. c page
914 ++ * B3-1445
915 ++ */
916 ++ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
917 + } else {
918 + val |= KVM_REG_SIZE_U32;
919 + val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
920 +diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
921 +index b7301d3e..0461d5c8 100644
922 +--- a/arch/arm/kvm/coproc.h
923 ++++ b/arch/arm/kvm/coproc.h
924 +@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
925 + return -1;
926 + if (i1->CRn != i2->CRn)
927 + return i1->CRn - i2->CRn;
928 ++ if (i1->is_64 != i2->is_64)
929 ++ return i2->is_64 - i1->is_64;
930 + if (i1->CRm != i2->CRm)
931 + return i1->CRm - i2->CRm;
932 + if (i1->Op1 != i2->Op1)
933 +@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
934 +
935 + #define CRn(_x) .CRn = _x
936 + #define CRm(_x) .CRm = _x
937 ++#define CRm64(_x) .CRn = _x, .CRm = 0
938 + #define Op1(_x) .Op1 = _x
939 + #define Op2(_x) .Op2 = _x
940 + #define is64 .is_64 = true
941 +diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
942 +index 685063a6..cf93472b 100644
943 +--- a/arch/arm/kvm/coproc_a15.c
944 ++++ b/arch/arm/kvm/coproc_a15.c
945 +@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
946 +
947 + /*
948 + * A15-specific CP15 registers.
949 +- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
950 ++ * CRn denotes the primary register number, but is copied to the CRm in the
951 ++ * user space API for 64-bit register access in line with the terminology used
952 ++ * in the ARM ARM.
953 ++ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
954 ++ * registers preceding 32-bit ones.
955 + */
956 + static const struct coproc_reg a15_regs[] = {
957 + /* MPIDR: we use VMPIDR for guest access. */
958 +diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
959 +index ec087407..6f938ccb 100644
960 +--- a/arch/arm/mach-versatile/include/mach/platform.h
961 ++++ b/arch/arm/mach-versatile/include/mach/platform.h
962 +@@ -231,12 +231,14 @@
963 + /* PCI space */
964 + #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */
965 + #define VERSATILE_PCI_CFG_BASE 0x42000000
966 ++#define VERSATILE_PCI_IO_BASE 0x43000000
967 + #define VERSATILE_PCI_MEM_BASE0 0x44000000
968 + #define VERSATILE_PCI_MEM_BASE1 0x50000000
969 + #define VERSATILE_PCI_MEM_BASE2 0x60000000
970 + /* Sizes of above maps */
971 + #define VERSATILE_PCI_BASE_SIZE 0x01000000
972 + #define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000
973 ++#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000
974 + #define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
975 + #define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
976 + #define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
977 +diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
978 +index e92e5e07..c97be4ea 100644
979 +--- a/arch/arm/mach-versatile/pci.c
980 ++++ b/arch/arm/mach-versatile/pci.c
981 +@@ -43,9 +43,9 @@
982 + #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
983 + #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
984 + #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
985 +-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
986 +-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
987 +-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
988 ++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
989 ++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
990 ++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
991 + #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
992 +
993 + #define DEVICE_ID_OFFSET 0x00
994 +@@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops = {
995 + .write = versatile_write_config,
996 + };
997 +
998 +-static struct resource io_mem = {
999 +- .name = "PCI I/O space",
1000 ++static struct resource unused_mem = {
1001 ++ .name = "PCI unused",
1002 + .start = VERSATILE_PCI_MEM_BASE0,
1003 + .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
1004 + .flags = IORESOURCE_MEM,
1005 +@@ -195,9 +195,9 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
1006 + {
1007 + int ret = 0;
1008 +
1009 +- ret = request_resource(&iomem_resource, &io_mem);
1010 ++ ret = request_resource(&iomem_resource, &unused_mem);
1011 + if (ret) {
1012 +- printk(KERN_ERR "PCI: unable to allocate I/O "
1013 ++ printk(KERN_ERR "PCI: unable to allocate unused "
1014 + "memory region (%d)\n", ret);
1015 + goto out;
1016 + }
1017 +@@ -205,7 +205,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
1018 + if (ret) {
1019 + printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
1020 + "memory region (%d)\n", ret);
1021 +- goto release_io_mem;
1022 ++ goto release_unused_mem;
1023 + }
1024 + ret = request_resource(&iomem_resource, &pre_mem);
1025 + if (ret) {
1026 +@@ -225,8 +225,8 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
1027 +
1028 + release_non_mem:
1029 + release_resource(&non_mem);
1030 +- release_io_mem:
1031 +- release_resource(&io_mem);
1032 ++ release_unused_mem:
1033 ++ release_resource(&unused_mem);
1034 + out:
1035 + return ret;
1036 + }
1037 +@@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
1038 + goto out;
1039 + }
1040 +
1041 +- ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
1042 ++ ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
1043 + if (ret)
1044 + goto out;
1045 +
1046 +@@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
1047 + __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
1048 +
1049 + /*
1050 ++ * For many years the kernel and QEMU were symbiotically buggy
1051 ++ * in that they both assumed the same broken IRQ mapping.
1052 ++ * QEMU therefore attempts to auto-detect old broken kernels
1053 ++ * so that they still work on newer QEMU as they did on old
1054 ++ * QEMU. Since we now use the correct (ie matching-hardware)
1055 ++ * IRQ mapping we write a definitely different value to a
1056 ++ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
1057 ++ * real hardware behaviour and it need not be backwards
1058 ++ * compatible for us. This write is harmless on real hardware.
1059 ++ */
1060 ++ __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
1061 ++
1062 ++ /*
1063 + * Do not to map Versatile FPGA PCI device into memory space
1064 + */
1065 + pci_slot_ignore |= (1 << myslot);
1066 +@@ -327,13 +340,13 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1067 + {
1068 + int irq;
1069 +
1070 +- /* slot, pin, irq
1071 +- * 24 1 IRQ_SIC_PCI0
1072 +- * 25 1 IRQ_SIC_PCI1
1073 +- * 26 1 IRQ_SIC_PCI2
1074 +- * 27 1 IRQ_SIC_PCI3
1075 ++ /*
1076 ++ * Slot INTA INTB INTC INTD
1077 ++ * 31 PCI1 PCI2 PCI3 PCI0
1078 ++ * 30 PCI0 PCI1 PCI2 PCI3
1079 ++ * 29 PCI3 PCI0 PCI1 PCI2
1080 + */
1081 +- irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
1082 ++ irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
1083 +
1084 + return irq;
1085 + }
1086 +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
1087 +index 05db95d0..81edd31b 100644
1088 +--- a/arch/arm/xen/enlighten.c
1089 ++++ b/arch/arm/xen/enlighten.c
1090 +@@ -273,12 +273,15 @@ core_initcall(xen_guest_init);
1091 +
1092 + static int __init xen_pm_init(void)
1093 + {
1094 ++ if (!xen_domain())
1095 ++ return -ENODEV;
1096 ++
1097 + pm_power_off = xen_power_off;
1098 + arm_pm_restart = xen_restart;
1099 +
1100 + return 0;
1101 + }
1102 +-subsys_initcall(xen_pm_init);
1103 ++late_initcall(xen_pm_init);
1104 +
1105 + static irqreturn_t xen_arm_callback(int irq, void *arg)
1106 + {
1107 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
1108 +index 12e6ccb8..cea1594f 100644
1109 +--- a/arch/arm64/kernel/perf_event.c
1110 ++++ b/arch/arm64/kernel/perf_event.c
1111 +@@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_events,
1112 + if (is_software_event(event))
1113 + return 1;
1114 +
1115 +- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
1116 ++ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
1117 ++ return 1;
1118 ++
1119 ++ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
1120 + return 1;
1121 +
1122 + return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
1123 +@@ -781,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1124 + /*
1125 + * PMXEVTYPER: Event selection reg
1126 + */
1127 +-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
1128 ++#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
1129 + #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
1130 +
1131 + /*
1132 +diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
1133 +index 765ef30e..733017b3 100644
1134 +--- a/arch/mips/ath79/clock.c
1135 ++++ b/arch/mips/ath79/clock.c
1136 +@@ -164,7 +164,7 @@ static void __init ar933x_clocks_init(void)
1137 + ath79_ahb_clk.rate = freq / t;
1138 + }
1139 +
1140 +- ath79_wdt_clk.rate = ath79_ref_clk.rate;
1141 ++ ath79_wdt_clk.rate = ath79_ahb_clk.rate;
1142 + ath79_uart_clk.rate = ath79_ref_clk.rate;
1143 + }
1144 +
1145 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
1146 +index ee5b690a..52e5758e 100644
1147 +--- a/arch/powerpc/kernel/align.c
1148 ++++ b/arch/powerpc/kernel/align.c
1149 +@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
1150 + nb = aligninfo[instr].len;
1151 + flags = aligninfo[instr].flags;
1152 +
1153 ++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
1154 ++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
1155 ++ nb = 8;
1156 ++ flags = LD+SW;
1157 ++ } else if (IS_XFORM(instruction) &&
1158 ++ ((instruction >> 1) & 0x3ff) == 660) {
1159 ++ nb = 8;
1160 ++ flags = ST+SW;
1161 ++ }
1162 ++
1163 + /* Byteswap little endian loads and stores */
1164 + swiz = 0;
1165 + if (regs->msr & MSR_LE) {
1166 +diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
1167 +index 94c1dd46..a3a5cb8e 100644
1168 +--- a/arch/powerpc/kvm/book3s_xics.c
1169 ++++ b/arch/powerpc/kvm/book3s_xics.c
1170 +@@ -19,6 +19,7 @@
1171 + #include <asm/hvcall.h>
1172 + #include <asm/xics.h>
1173 + #include <asm/debug.h>
1174 ++#include <asm/time.h>
1175 +
1176 + #include <linux/debugfs.h>
1177 + #include <linux/seq_file.h>
1178 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
1179 +index c11c8238..54b998f2 100644
1180 +--- a/arch/powerpc/platforms/pseries/setup.c
1181 ++++ b/arch/powerpc/platforms/pseries/setup.c
1182 +@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
1183 + }
1184 + early_initcall(alloc_dispatch_log_kmem_cache);
1185 +
1186 +-static void pSeries_idle(void)
1187 ++static void pseries_lpar_idle(void)
1188 + {
1189 + /* This would call on the cpuidle framework, and the back-end pseries
1190 + * driver to go to idle states
1191 +@@ -362,10 +362,22 @@ static void pSeries_idle(void)
1192 + if (cpuidle_idle_call()) {
1193 + /* On error, execute default handler
1194 + * to go into low thread priority and possibly
1195 +- * low power mode.
1196 ++ * low power mode by cedeing processor to hypervisor
1197 + */
1198 +- HMT_low();
1199 +- HMT_very_low();
1200 ++
1201 ++ /* Indicate to hypervisor that we are idle. */
1202 ++ get_lppaca()->idle = 1;
1203 ++
1204 ++ /*
1205 ++ * Yield the processor to the hypervisor. We return if
1206 ++ * an external interrupt occurs (which are driven prior
1207 ++ * to returning here) or if a prod occurs from another
1208 ++ * processor. When returning here, external interrupts
1209 ++ * are enabled.
1210 ++ */
1211 ++ cede_processor();
1212 ++
1213 ++ get_lppaca()->idle = 0;
1214 + }
1215 + }
1216 +
1217 +@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
1218 +
1219 + pSeries_nvram_init();
1220 +
1221 +- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1222 ++ if (firmware_has_feature(FW_FEATURE_LPAR)) {
1223 + vpa_init(boot_cpuid);
1224 +- ppc_md.power_save = pSeries_idle;
1225 +- }
1226 +-
1227 +- if (firmware_has_feature(FW_FEATURE_LPAR))
1228 ++ ppc_md.power_save = pseries_lpar_idle;
1229 + ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
1230 +- else
1231 ++ } else {
1232 ++ /* No special idle routine */
1233 + ppc_md.enable_pmcs = power4_enable_pmcs;
1234 ++ }
1235 +
1236 + ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
1237 +
1238 +diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
1239 +index 95feaa47..c70a234a 100644
1240 +--- a/arch/um/include/shared/os.h
1241 ++++ b/arch/um/include/shared/os.h
1242 +@@ -200,6 +200,7 @@ extern int os_unmap_memory(void *addr, int len);
1243 + extern int os_drop_memory(void *addr, int length);
1244 + extern int can_drop_memory(void);
1245 + extern void os_flush_stdout(void);
1246 ++extern int os_mincore(void *addr, unsigned long len);
1247 +
1248 + /* execvp.c */
1249 + extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
1250 +diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
1251 +index babe2182..d8b78a03 100644
1252 +--- a/arch/um/kernel/Makefile
1253 ++++ b/arch/um/kernel/Makefile
1254 +@@ -13,7 +13,7 @@ clean-files :=
1255 + obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
1256 + physmem.o process.o ptrace.o reboot.o sigio.o \
1257 + signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
1258 +- um_arch.o umid.o skas/
1259 ++ um_arch.o umid.o maccess.o skas/
1260 +
1261 + obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
1262 + obj-$(CONFIG_GPROF) += gprof_syms.o
1263 +diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
1264 +new file mode 100644
1265 +index 00000000..1f3d5c49
1266 +--- /dev/null
1267 ++++ b/arch/um/kernel/maccess.c
1268 +@@ -0,0 +1,24 @@
1269 ++/*
1270 ++ * Copyright (C) 2013 Richard Weinberger <richrd@×××.at>
1271 ++ *
1272 ++ * This program is free software; you can redistribute it and/or modify
1273 ++ * it under the terms of the GNU General Public License version 2 as
1274 ++ * published by the Free Software Foundation.
1275 ++ */
1276 ++
1277 ++#include <linux/uaccess.h>
1278 ++#include <linux/kernel.h>
1279 ++#include <os.h>
1280 ++
1281 ++long probe_kernel_read(void *dst, const void *src, size_t size)
1282 ++{
1283 ++ void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
1284 ++
1285 ++ if ((unsigned long)src < PAGE_SIZE || size <= 0)
1286 ++ return -EFAULT;
1287 ++
1288 ++ if (os_mincore(psrc, size + src - psrc) <= 0)
1289 ++ return -EFAULT;
1290 ++
1291 ++ return __probe_kernel_read(dst, src, size);
1292 ++}
1293 +diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
1294 +index b8f34c9e..67b9c8f5 100644
1295 +--- a/arch/um/os-Linux/process.c
1296 ++++ b/arch/um/os-Linux/process.c
1297 +@@ -4,6 +4,7 @@
1298 + */
1299 +
1300 + #include <stdio.h>
1301 ++#include <stdlib.h>
1302 + #include <unistd.h>
1303 + #include <errno.h>
1304 + #include <signal.h>
1305 +@@ -232,6 +233,57 @@ out:
1306 + return ok;
1307 + }
1308 +
1309 ++static int os_page_mincore(void *addr)
1310 ++{
1311 ++ char vec[2];
1312 ++ int ret;
1313 ++
1314 ++ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
1315 ++ if (ret < 0) {
1316 ++ if (errno == ENOMEM || errno == EINVAL)
1317 ++ return 0;
1318 ++ else
1319 ++ return -errno;
1320 ++ }
1321 ++
1322 ++ return vec[0] & 1;
1323 ++}
1324 ++
1325 ++int os_mincore(void *addr, unsigned long len)
1326 ++{
1327 ++ char *vec;
1328 ++ int ret, i;
1329 ++
1330 ++ if (len <= UM_KERN_PAGE_SIZE)
1331 ++ return os_page_mincore(addr);
1332 ++
1333 ++ vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
1334 ++ if (!vec)
1335 ++ return -ENOMEM;
1336 ++
1337 ++ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
1338 ++ if (ret < 0) {
1339 ++ if (errno == ENOMEM || errno == EINVAL)
1340 ++ ret = 0;
1341 ++ else
1342 ++ ret = -errno;
1343 ++
1344 ++ goto out;
1345 ++ }
1346 ++
1347 ++ for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
1348 ++ if (!(vec[i] & 1)) {
1349 ++ ret = 0;
1350 ++ goto out;
1351 ++ }
1352 ++ }
1353 ++
1354 ++ ret = 1;
1355 ++out:
1356 ++ free(vec);
1357 ++ return ret;
1358 ++}
1359 ++
1360 + void init_new_thread_signals(void)
1361 + {
1362 + set_handler(SIGSEGV);
1363 +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
1364 +index cf1a471a..10adb41f 100644
1365 +--- a/arch/x86/ia32/ia32_signal.c
1366 ++++ b/arch/x86/ia32/ia32_signal.c
1367 +@@ -459,7 +459,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
1368 + else
1369 + put_user_ex(0, &frame->uc.uc_flags);
1370 + put_user_ex(0, &frame->uc.uc_link);
1371 +- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
1372 ++ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1373 +
1374 + if (ksig->ka.sa.sa_flags & SA_RESTORER)
1375 + restorer = ksig->ka.sa.sa_restorer;
1376 +diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
1377 +index 46fc474f..f50de695 100644
1378 +--- a/arch/x86/include/asm/checksum_32.h
1379 ++++ b/arch/x86/include/asm/checksum_32.h
1380 +@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
1381 + int len, __wsum sum,
1382 + int *err_ptr)
1383 + {
1384 ++ __wsum ret;
1385 ++
1386 + might_sleep();
1387 +- return csum_partial_copy_generic((__force void *)src, dst,
1388 +- len, sum, err_ptr, NULL);
1389 ++ stac();
1390 ++ ret = csum_partial_copy_generic((__force void *)src, dst,
1391 ++ len, sum, err_ptr, NULL);
1392 ++ clac();
1393 ++
1394 ++ return ret;
1395 + }
1396 +
1397 + /*
1398 +@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
1399 + int len, __wsum sum,
1400 + int *err_ptr)
1401 + {
1402 ++ __wsum ret;
1403 ++
1404 + might_sleep();
1405 +- if (access_ok(VERIFY_WRITE, dst, len))
1406 +- return csum_partial_copy_generic(src, (__force void *)dst,
1407 +- len, sum, NULL, err_ptr);
1408 ++ if (access_ok(VERIFY_WRITE, dst, len)) {
1409 ++ stac();
1410 ++ ret = csum_partial_copy_generic(src, (__force void *)dst,
1411 ++ len, sum, NULL, err_ptr);
1412 ++ clac();
1413 ++ return ret;
1414 ++ }
1415 +
1416 + if (len)
1417 + *err_ptr = -EFAULT;
1418 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
1419 +index fa5f71e0..e6833c65 100644
1420 +--- a/arch/x86/include/asm/mce.h
1421 ++++ b/arch/x86/include/asm/mce.h
1422 +@@ -32,11 +32,20 @@
1423 + #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
1424 + #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
1425 + #define MCI_STATUS_AR (1ULL<<55) /* Action required */
1426 +-#define MCACOD 0xffff /* MCA Error Code */
1427 ++
1428 ++/*
1429 ++ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
1430 ++ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
1431 ++ * errors to indicate that errors are being filtered by hardware.
1432 ++ * We should mask out bit 12 when looking for specific signatures
1433 ++ * of uncorrected errors - so the F bit is deliberately skipped
1434 ++ * in this #define.
1435 ++ */
1436 ++#define MCACOD 0xefff /* MCA Error Code */
1437 +
1438 + /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
1439 + #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
1440 +-#define MCACOD_SCRUBMSK 0xfff0
1441 ++#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
1442 + #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
1443 + #define MCACOD_DATA 0x0134 /* Data Load */
1444 + #define MCACOD_INSTR 0x0150 /* Instruction Fetch */
1445 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
1446 +index cdbf3677..be12c534 100644
1447 +--- a/arch/x86/include/asm/mmu_context.h
1448 ++++ b/arch/x86/include/asm/mmu_context.h
1449 +@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1450 + /* Re-load page tables */
1451 + load_cr3(next->pgd);
1452 +
1453 +- /* stop flush ipis for the previous mm */
1454 ++ /* Stop flush ipis for the previous mm */
1455 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
1456 +
1457 +- /*
1458 +- * load the LDT, if the LDT is different:
1459 +- */
1460 ++ /* Load the LDT, if the LDT is different: */
1461 + if (unlikely(prev->context.ldt != next->context.ldt))
1462 + load_LDT_nolock(&next->context);
1463 + }
1464 + #ifdef CONFIG_SMP
1465 +- else {
1466 ++ else {
1467 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
1468 + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
1469 +
1470 +- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
1471 +- /* We were in lazy tlb mode and leave_mm disabled
1472 ++ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
1473 ++ /*
1474 ++ * On established mms, the mm_cpumask is only changed
1475 ++ * from irq context, from ptep_clear_flush() while in
1476 ++ * lazy tlb mode, and here. Irqs are blocked during
1477 ++ * schedule, protecting us from simultaneous changes.
1478 ++ */
1479 ++ cpumask_set_cpu(cpu, mm_cpumask(next));
1480 ++ /*
1481 ++ * We were in lazy tlb mode and leave_mm disabled
1482 + * tlb flush IPI delivery. We must reload CR3
1483 + * to make sure to use no freed page tables.
1484 + */
1485 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1486 +index 3048ded1..59554dca 100644
1487 +--- a/arch/x86/kernel/amd_nb.c
1488 ++++ b/arch/x86/kernel/amd_nb.c
1489 +@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
1490 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
1491 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
1492 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
1493 ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
1494 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
1495 + {}
1496 + };
1497 +@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
1498 +
1499 + static const struct pci_device_id amd_nb_link_ids[] = {
1500 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
1501 ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
1502 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
1503 + {}
1504 + };
1505 +@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
1506 + next_northbridge(misc, amd_nb_misc_ids);
1507 + node_to_amd_nb(i)->link = link =
1508 + next_northbridge(link, amd_nb_link_ids);
1509 +- }
1510 ++ }
1511 +
1512 ++ /* GART present only on Fam15h upto model 0fh */
1513 + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
1514 +- boot_cpu_data.x86 == 0x15)
1515 ++ (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
1516 + amd_northbridges.flags |= AMD_NB_GART;
1517 +
1518 + /*
1519 ++ * Check for L3 cache presence.
1520 ++ */
1521 ++ if (!cpuid_edx(0x80000006))
1522 ++ return 0;
1523 ++
1524 ++ /*
1525 + * Some CPU families support L3 Cache Index Disable. There are some
1526 + * limitations because of E382 and E388 on family 0x10.
1527 + */
1528 +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
1529 +index 69562992..087ab2af 100644
1530 +--- a/arch/x86/kernel/signal.c
1531 ++++ b/arch/x86/kernel/signal.c
1532 +@@ -364,7 +364,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1533 + else
1534 + put_user_ex(0, &frame->uc.uc_flags);
1535 + put_user_ex(0, &frame->uc.uc_link);
1536 +- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
1537 ++ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1538 +
1539 + /* Set up to return from userspace. */
1540 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
1541 +@@ -429,7 +429,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1542 + else
1543 + put_user_ex(0, &frame->uc.uc_flags);
1544 + put_user_ex(0, &frame->uc.uc_link);
1545 +- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
1546 ++ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1547 +
1548 + /* Set up to return from userspace. If provided, use a stub
1549 + already in userspace. */
1550 +@@ -496,7 +496,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
1551 + else
1552 + put_user_ex(0, &frame->uc.uc_flags);
1553 + put_user_ex(0, &frame->uc.uc_link);
1554 +- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
1555 ++ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1556 + put_user_ex(0, &frame->uc.uc__pad0);
1557 +
1558 + if (ksig->ka.sa.sa_flags & SA_RESTORER) {
1559 +diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
1560 +index 25b7ae8d..7609e0e4 100644
1561 +--- a/arch/x86/lib/csum-wrappers_64.c
1562 ++++ b/arch/x86/lib/csum-wrappers_64.c
1563 +@@ -6,6 +6,7 @@
1564 + */
1565 + #include <asm/checksum.h>
1566 + #include <linux/module.h>
1567 ++#include <asm/smap.h>
1568 +
1569 + /**
1570 + * csum_partial_copy_from_user - Copy and checksum from user space.
1571 +@@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
1572 + len -= 2;
1573 + }
1574 + }
1575 ++ stac();
1576 + isum = csum_partial_copy_generic((__force const void *)src,
1577 + dst, len, isum, errp, NULL);
1578 ++ clac();
1579 + if (unlikely(*errp))
1580 + goto out_err;
1581 +
1582 +@@ -82,6 +85,8 @@ __wsum
1583 + csum_partial_copy_to_user(const void *src, void __user *dst,
1584 + int len, __wsum isum, int *errp)
1585 + {
1586 ++ __wsum ret;
1587 ++
1588 + might_sleep();
1589 +
1590 + if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
1591 +@@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
1592 + }
1593 +
1594 + *errp = 0;
1595 +- return csum_partial_copy_generic(src, (void __force *)dst,
1596 +- len, isum, NULL, errp);
1597 ++ stac();
1598 ++ ret = csum_partial_copy_generic(src, (void __force *)dst,
1599 ++ len, isum, NULL, errp);
1600 ++ clac();
1601 ++ return ret;
1602 + }
1603 + EXPORT_SYMBOL(csum_partial_copy_to_user);
1604 +
1605 +diff --git a/crypto/api.c b/crypto/api.c
1606 +index 3b618033..37c4c721 100644
1607 +--- a/crypto/api.c
1608 ++++ b/crypto/api.c
1609 +@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
1610 + BLOCKING_NOTIFIER_HEAD(crypto_chain);
1611 + EXPORT_SYMBOL_GPL(crypto_chain);
1612 +
1613 ++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
1614 ++
1615 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
1616 + {
1617 + return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
1618 +@@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
1619 + }
1620 + up_write(&crypto_alg_sem);
1621 +
1622 +- if (alg != &larval->alg)
1623 ++ if (alg != &larval->alg) {
1624 + kfree(larval);
1625 ++ if (crypto_is_larval(alg))
1626 ++ alg = crypto_larval_wait(alg);
1627 ++ }
1628 +
1629 + return alg;
1630 + }
1631 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1632 +index cab13f2f..7c451cb2 100644
1633 +--- a/drivers/acpi/acpi_lpss.c
1634 ++++ b/drivers/acpi/acpi_lpss.c
1635 +@@ -155,12 +155,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
1636 + pdata->mmio_size = resource_size(&rentry->res);
1637 + pdata->mmio_base = ioremap(rentry->res.start,
1638 + pdata->mmio_size);
1639 +- pdata->dev_desc = dev_desc;
1640 + break;
1641 + }
1642 +
1643 + acpi_dev_free_resource_list(&resource_list);
1644 +
1645 ++ pdata->dev_desc = dev_desc;
1646 ++
1647 + if (dev_desc->clk_required) {
1648 + ret = register_device_clock(adev, pdata);
1649 + if (ret) {
1650 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1651 +index aff789d6..8c7421af 100644
1652 +--- a/drivers/block/rbd.c
1653 ++++ b/drivers/block/rbd.c
1654 +@@ -1565,11 +1565,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1655 + obj_request, obj_request->img_request, obj_request->result,
1656 + xferred, length);
1657 + /*
1658 +- * ENOENT means a hole in the image. We zero-fill the
1659 +- * entire length of the request. A short read also implies
1660 +- * zero-fill to the end of the request. Either way we
1661 +- * update the xferred count to indicate the whole request
1662 +- * was satisfied.
1663 ++ * ENOENT means a hole in the image. We zero-fill the entire
1664 ++ * length of the request. A short read also implies zero-fill
1665 ++ * to the end of the request. An error requires the whole
1666 ++ * length of the request to be reported finished with an error
1667 ++ * to the block layer. In each case we update the xferred
1668 ++ * count to indicate the whole request was satisfied.
1669 + */
1670 + rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1671 + if (obj_request->result == -ENOENT) {
1672 +@@ -1578,14 +1579,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1673 + else
1674 + zero_pages(obj_request->pages, 0, length);
1675 + obj_request->result = 0;
1676 +- obj_request->xferred = length;
1677 + } else if (xferred < length && !obj_request->result) {
1678 + if (obj_request->type == OBJ_REQUEST_BIO)
1679 + zero_bio_chain(obj_request->bio_list, xferred);
1680 + else
1681 + zero_pages(obj_request->pages, xferred, length);
1682 +- obj_request->xferred = length;
1683 + }
1684 ++ obj_request->xferred = length;
1685 + obj_request_done_set(obj_request);
1686 + }
1687 +
1688 +diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
1689 +index 16ed0680..917a3ab4 100644
1690 +--- a/drivers/clk/clk-wm831x.c
1691 ++++ b/drivers/clk/clk-wm831x.c
1692 +@@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
1693 + if (!clkdata)
1694 + return -ENOMEM;
1695 +
1696 ++ clkdata->wm831x = wm831x;
1697 ++
1698 + /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
1699 + ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
1700 + if (ret < 0) {
1701 +diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
1702 +index 2a297f86..fe853903 100644
1703 +--- a/drivers/cpuidle/coupled.c
1704 ++++ b/drivers/cpuidle/coupled.c
1705 +@@ -106,6 +106,7 @@ struct cpuidle_coupled {
1706 + cpumask_t coupled_cpus;
1707 + int requested_state[NR_CPUS];
1708 + atomic_t ready_waiting_counts;
1709 ++ atomic_t abort_barrier;
1710 + int online_count;
1711 + int refcnt;
1712 + int prevent;
1713 +@@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock);
1714 + static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
1715 +
1716 + /*
1717 +- * The cpuidle_coupled_poked_mask mask is used to avoid calling
1718 ++ * The cpuidle_coupled_poke_pending mask is used to avoid calling
1719 + * __smp_call_function_single with the per cpu call_single_data struct already
1720 + * in use. This prevents a deadlock where two cpus are waiting for each others
1721 + * call_single_data struct to be available
1722 + */
1723 +-static cpumask_t cpuidle_coupled_poked_mask;
1724 ++static cpumask_t cpuidle_coupled_poke_pending;
1725 ++
1726 ++/*
1727 ++ * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
1728 ++ * once to minimize entering the ready loop with a poke pending, which would
1729 ++ * require aborting and retrying.
1730 ++ */
1731 ++static cpumask_t cpuidle_coupled_poked;
1732 +
1733 + /**
1734 + * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
1735 +@@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
1736 + return state;
1737 + }
1738 +
1739 +-static void cpuidle_coupled_poked(void *info)
1740 ++static void cpuidle_coupled_handle_poke(void *info)
1741 + {
1742 + int cpu = (unsigned long)info;
1743 +- cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
1744 ++ cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
1745 ++ cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
1746 + }
1747 +
1748 + /**
1749 +@@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu)
1750 + {
1751 + struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
1752 +
1753 +- if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
1754 ++ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
1755 + __smp_call_function_single(cpu, csd, 0);
1756 + }
1757 +
1758 +@@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(int this_cpu,
1759 + * @coupled: the struct coupled that contains the current cpu
1760 + * @next_state: the index in drv->states of the requested state for this cpu
1761 + *
1762 +- * Updates the requested idle state for the specified cpuidle device,
1763 +- * poking all coupled cpus out of idle if necessary to let them see the new
1764 +- * state.
1765 ++ * Updates the requested idle state for the specified cpuidle device.
1766 ++ * Returns the number of waiting cpus.
1767 + */
1768 +-static void cpuidle_coupled_set_waiting(int cpu,
1769 ++static int cpuidle_coupled_set_waiting(int cpu,
1770 + struct cpuidle_coupled *coupled, int next_state)
1771 + {
1772 +- int w;
1773 +-
1774 + coupled->requested_state[cpu] = next_state;
1775 +
1776 + /*
1777 +- * If this is the last cpu to enter the waiting state, poke
1778 +- * all the other cpus out of their waiting state so they can
1779 +- * enter a deeper state. This can race with one of the cpus
1780 +- * exiting the waiting state due to an interrupt and
1781 +- * decrementing waiting_count, see comment below.
1782 +- *
1783 + * The atomic_inc_return provides a write barrier to order the write
1784 + * to requested_state with the later write that increments ready_count.
1785 + */
1786 +- w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
1787 +- if (w == coupled->online_count)
1788 +- cpuidle_coupled_poke_others(cpu, coupled);
1789 ++ return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
1790 + }
1791 +
1792 + /**
1793 +@@ -410,19 +408,33 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
1794 + * been processed and the poke bit has been cleared.
1795 + *
1796 + * Other interrupts may also be processed while interrupts are enabled, so
1797 +- * need_resched() must be tested after turning interrupts off again to make sure
1798 ++ * need_resched() must be tested after this function returns to make sure
1799 + * the interrupt didn't schedule work that should take the cpu out of idle.
1800 + *
1801 +- * Returns 0 if need_resched was false, -EINTR if need_resched was true.
1802 ++ * Returns 0 if no poke was pending, 1 if a poke was cleared.
1803 + */
1804 + static int cpuidle_coupled_clear_pokes(int cpu)
1805 + {
1806 ++ if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
1807 ++ return 0;
1808 ++
1809 + local_irq_enable();
1810 +- while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
1811 ++ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
1812 + cpu_relax();
1813 + local_irq_disable();
1814 +
1815 +- return need_resched() ? -EINTR : 0;
1816 ++ return 1;
1817 ++}
1818 ++
1819 ++static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
1820 ++{
1821 ++ cpumask_t cpus;
1822 ++ int ret;
1823 ++
1824 ++ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
1825 ++ ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
1826 ++
1827 ++ return ret;
1828 + }
1829 +
1830 + /**
1831 +@@ -449,12 +461,14 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
1832 + {
1833 + int entered_state = -1;
1834 + struct cpuidle_coupled *coupled = dev->coupled;
1835 ++ int w;
1836 +
1837 + if (!coupled)
1838 + return -EINVAL;
1839 +
1840 + while (coupled->prevent) {
1841 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1842 ++ cpuidle_coupled_clear_pokes(dev->cpu);
1843 ++ if (need_resched()) {
1844 + local_irq_enable();
1845 + return entered_state;
1846 + }
1847 +@@ -465,15 +479,37 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
1848 + /* Read barrier ensures online_count is read after prevent is cleared */
1849 + smp_rmb();
1850 +
1851 +- cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
1852 ++reset:
1853 ++ cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
1854 ++
1855 ++ w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
1856 ++ /*
1857 ++ * If this is the last cpu to enter the waiting state, poke
1858 ++ * all the other cpus out of their waiting state so they can
1859 ++ * enter a deeper state. This can race with one of the cpus
1860 ++ * exiting the waiting state due to an interrupt and
1861 ++ * decrementing waiting_count, see comment below.
1862 ++ */
1863 ++ if (w == coupled->online_count) {
1864 ++ cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
1865 ++ cpuidle_coupled_poke_others(dev->cpu, coupled);
1866 ++ }
1867 +
1868 + retry:
1869 + /*
1870 + * Wait for all coupled cpus to be idle, using the deepest state
1871 +- * allowed for a single cpu.
1872 ++ * allowed for a single cpu. If this was not the poking cpu, wait
1873 ++ * for at least one poke before leaving to avoid a race where
1874 ++ * two cpus could arrive at the waiting loop at the same time,
1875 ++ * but the first of the two to arrive could skip the loop without
1876 ++ * processing the pokes from the last to arrive.
1877 + */
1878 +- while (!cpuidle_coupled_cpus_waiting(coupled)) {
1879 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1880 ++ while (!cpuidle_coupled_cpus_waiting(coupled) ||
1881 ++ !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
1882 ++ if (cpuidle_coupled_clear_pokes(dev->cpu))
1883 ++ continue;
1884 ++
1885 ++ if (need_resched()) {
1886 + cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
1887 + goto out;
1888 + }
1889 +@@ -487,12 +523,19 @@ retry:
1890 + dev->safe_state_index);
1891 + }
1892 +
1893 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1894 ++ cpuidle_coupled_clear_pokes(dev->cpu);
1895 ++ if (need_resched()) {
1896 + cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
1897 + goto out;
1898 + }
1899 +
1900 + /*
1901 ++ * Make sure final poke status for this cpu is visible before setting
1902 ++ * cpu as ready.
1903 ++ */
1904 ++ smp_wmb();
1905 ++
1906 ++ /*
1907 + * All coupled cpus are probably idle. There is a small chance that
1908 + * one of the other cpus just became active. Increment the ready count,
1909 + * and spin until all coupled cpus have incremented the counter. Once a
1910 +@@ -511,6 +554,28 @@ retry:
1911 + cpu_relax();
1912 + }
1913 +
1914 ++ /*
1915 ++ * Make sure read of all cpus ready is done before reading pending pokes
1916 ++ */
1917 ++ smp_rmb();
1918 ++
1919 ++ /*
1920 ++ * There is a small chance that a cpu left and reentered idle after this
1921 ++ * cpu saw that all cpus were waiting. The cpu that reentered idle will
1922 ++ * have sent this cpu a poke, which will still be pending after the
1923 ++ * ready loop. The pending interrupt may be lost by the interrupt
1924 ++ * controller when entering the deep idle state. It's not possible to
1925 ++ * clear a pending interrupt without turning interrupts on and handling
1926 ++ * it, and it's too late to turn on interrupts here, so reset the
1927 ++ * coupled idle state of all cpus and retry.
1928 ++ */
1929 ++ if (cpuidle_coupled_any_pokes_pending(coupled)) {
1930 ++ cpuidle_coupled_set_done(dev->cpu, coupled);
1931 ++ /* Wait for all cpus to see the pending pokes */
1932 ++ cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
1933 ++ goto reset;
1934 ++ }
1935 ++
1936 + /* all cpus have acked the coupled state */
1937 + next_state = cpuidle_coupled_get_state(dev, coupled);
1938 +
1939 +@@ -596,7 +661,7 @@ have_coupled:
1940 + coupled->refcnt++;
1941 +
1942 + csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
1943 +- csd->func = cpuidle_coupled_poked;
1944 ++ csd->func = cpuidle_coupled_handle_poke;
1945 + csd->info = (void *)(unsigned long)dev->cpu;
1946 +
1947 + return 0;
1948 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1949 +index 8b6a0343..8b3d9014 100644
1950 +--- a/drivers/edac/amd64_edac.c
1951 ++++ b/drivers/edac/amd64_edac.c
1952 +@@ -2470,8 +2470,15 @@ static int amd64_init_one_instance(struct pci_dev *F2)
1953 + layers[0].size = pvt->csels[0].b_cnt;
1954 + layers[0].is_virt_csrow = true;
1955 + layers[1].type = EDAC_MC_LAYER_CHANNEL;
1956 +- layers[1].size = pvt->channel_count;
1957 ++
1958 ++ /*
1959 ++ * Always allocate two channels since we can have setups with DIMMs on
1960 ++ * only one channel. Also, this simplifies handling later for the price
1961 ++ * of a couple of KBs tops.
1962 ++ */
1963 ++ layers[1].size = 2;
1964 + layers[1].is_virt_csrow = false;
1965 ++
1966 + mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
1967 + if (!mci)
1968 + goto err_siblings;
1969 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1970 +index 9e62bbed..0cb9b5d8 100644
1971 +--- a/drivers/gpu/drm/drm_edid.c
1972 ++++ b/drivers/gpu/drm/drm_edid.c
1973 +@@ -125,6 +125,9 @@ static struct edid_quirk {
1974 +
1975 + /* ViewSonic VA2026w */
1976 + { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
1977 ++
1978 ++ /* Medion MD 30217 PG */
1979 ++ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
1980 + };
1981 +
1982 + /*
1983 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1984 +index 402f4868..ed626e01 100644
1985 +--- a/drivers/hid/hid-core.c
1986 ++++ b/drivers/hid/hid-core.c
1987 +@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
1988 + struct hid_report_enum *report_enum = device->report_enum + type;
1989 + struct hid_report *report;
1990 +
1991 ++ if (id >= HID_MAX_IDS)
1992 ++ return NULL;
1993 + if (report_enum->report_id_hash[id])
1994 + return report_enum->report_id_hash[id];
1995 +
1996 +@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
1997 +
1998 + case HID_GLOBAL_ITEM_TAG_REPORT_ID:
1999 + parser->global.report_id = item_udata(item);
2000 +- if (parser->global.report_id == 0) {
2001 +- hid_err(parser->device, "report_id 0 is invalid\n");
2002 ++ if (parser->global.report_id == 0 ||
2003 ++ parser->global.report_id >= HID_MAX_IDS) {
2004 ++ hid_err(parser->device, "report_id %u is invalid\n",
2005 ++ parser->global.report_id);
2006 + return -1;
2007 + }
2008 + return 0;
2009 +@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
2010 + for (i = 0; i < HID_REPORT_TYPES; i++) {
2011 + struct hid_report_enum *report_enum = device->report_enum + i;
2012 +
2013 +- for (j = 0; j < 256; j++) {
2014 ++ for (j = 0; j < HID_MAX_IDS; j++) {
2015 + struct hid_report *report = report_enum->report_id_hash[j];
2016 + if (report)
2017 + hid_free_report(report);
2018 +@@ -1152,7 +1156,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
2019 +
2020 + int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
2021 + {
2022 +- unsigned size = field->report_size;
2023 ++ unsigned size;
2024 ++
2025 ++ if (!field)
2026 ++ return -1;
2027 ++
2028 ++ size = field->report_size;
2029 +
2030 + hid_dump_input(field->report->device, field->usage + offset, value);
2031 +
2032 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2033 +index 21688853..ca0219f9 100644
2034 +--- a/drivers/hid/hid-ids.h
2035 ++++ b/drivers/hid/hid-ids.h
2036 +@@ -135,9 +135,9 @@
2037 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
2038 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
2039 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
2040 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
2041 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
2042 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
2043 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
2044 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
2045 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
2046 + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
2047 + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
2048 + #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
2049 +@@ -646,6 +646,7 @@
2050 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
2051 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
2052 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
2053 ++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
2054 +
2055 + #define USB_VENDOR_ID_ONTRAK 0x0a07
2056 + #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
2057 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
2058 +index 945b8158..ac5e9352 100644
2059 +--- a/drivers/hid/hid-input.c
2060 ++++ b/drivers/hid/hid-input.c
2061 +@@ -340,7 +340,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
2062 + {
2063 + struct hid_device *dev = container_of(psy, struct hid_device, battery);
2064 + int ret = 0;
2065 +- __u8 buf[2] = {};
2066 ++ __u8 *buf;
2067 +
2068 + switch (prop) {
2069 + case POWER_SUPPLY_PROP_PRESENT:
2070 +@@ -349,13 +349,20 @@ static int hidinput_get_battery_property(struct power_supply *psy,
2071 + break;
2072 +
2073 + case POWER_SUPPLY_PROP_CAPACITY:
2074 ++
2075 ++ buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
2076 ++ if (!buf) {
2077 ++ ret = -ENOMEM;
2078 ++ break;
2079 ++ }
2080 + ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
2081 +- buf, sizeof(buf),
2082 ++ buf, 2,
2083 + dev->battery_report_type);
2084 +
2085 + if (ret != 2) {
2086 + if (ret >= 0)
2087 + ret = -EINVAL;
2088 ++ kfree(buf);
2089 + break;
2090 + }
2091 +
2092 +@@ -364,6 +371,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
2093 + buf[1] <= dev->battery_max)
2094 + val->intval = (100 * (buf[1] - dev->battery_min)) /
2095 + (dev->battery_max - dev->battery_min);
2096 ++ kfree(buf);
2097 + break;
2098 +
2099 + case POWER_SUPPLY_PROP_MODEL_NAME:
2100 +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
2101 +index ef951025..5482156a 100644
2102 +--- a/drivers/hid/hid-ntrig.c
2103 ++++ b/drivers/hid/hid-ntrig.c
2104 +@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
2105 + struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
2106 + report_id_hash[0x0d];
2107 +
2108 +- if (!report)
2109 ++ if (!report || report->maxfield < 1 ||
2110 ++ report->field[0]->report_count < 1)
2111 + return -EINVAL;
2112 +
2113 + hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
2114 +diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
2115 +index e346038f..59d5eb1e 100644
2116 +--- a/drivers/hid/hid-picolcd_cir.c
2117 ++++ b/drivers/hid/hid-picolcd_cir.c
2118 +@@ -145,6 +145,7 @@ void picolcd_exit_cir(struct picolcd_data *data)
2119 + struct rc_dev *rdev = data->rc_dev;
2120 +
2121 + data->rc_dev = NULL;
2122 +- rc_unregister_device(rdev);
2123 ++ if (rdev)
2124 ++ rc_unregister_device(rdev);
2125 + }
2126 +
2127 +diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
2128 +index b48092d0..acbb0210 100644
2129 +--- a/drivers/hid/hid-picolcd_core.c
2130 ++++ b/drivers/hid/hid-picolcd_core.c
2131 +@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
2132 + buf += 10;
2133 + cnt -= 10;
2134 + }
2135 +- if (!report)
2136 ++ if (!report || report->maxfield != 1)
2137 + return -EINVAL;
2138 +
2139 + while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
2140 +diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
2141 +index 591f6b22..c930ab85 100644
2142 +--- a/drivers/hid/hid-picolcd_fb.c
2143 ++++ b/drivers/hid/hid-picolcd_fb.c
2144 +@@ -593,10 +593,14 @@ err_nomem:
2145 + void picolcd_exit_framebuffer(struct picolcd_data *data)
2146 + {
2147 + struct fb_info *info = data->fb_info;
2148 +- struct picolcd_fb_data *fbdata = info->par;
2149 ++ struct picolcd_fb_data *fbdata;
2150 + unsigned long flags;
2151 +
2152 ++ if (!info)
2153 ++ return;
2154 ++
2155 + device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
2156 ++ fbdata = info->par;
2157 +
2158 + /* disconnect framebuffer from HID dev */
2159 + spin_lock_irqsave(&fbdata->lock, flags);
2160 +diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
2161 +index d29112fa..2dcd7d98 100644
2162 +--- a/drivers/hid/hid-pl.c
2163 ++++ b/drivers/hid/hid-pl.c
2164 +@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
2165 + strong = &report->field[0]->value[2];
2166 + weak = &report->field[0]->value[3];
2167 + debug("detected single-field device");
2168 +- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
2169 +- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
2170 ++ } else if (report->field[0]->maxusage == 1 &&
2171 ++ report->field[0]->usage[0].hid ==
2172 ++ (HID_UP_LED | 0x43) &&
2173 ++ report->maxfield >= 4 &&
2174 ++ report->field[0]->report_count >= 1 &&
2175 ++ report->field[1]->report_count >= 1 &&
2176 ++ report->field[2]->report_count >= 1 &&
2177 ++ report->field[3]->report_count >= 1) {
2178 + report->field[0]->value[0] = 0x00;
2179 + report->field[1]->value[0] = 0x00;
2180 + strong = &report->field[2]->value[0];
2181 +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
2182 +index ca749810..aa34755c 100644
2183 +--- a/drivers/hid/hid-sensor-hub.c
2184 ++++ b/drivers/hid/hid-sensor-hub.c
2185 +@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
2186 +
2187 + mutex_lock(&data->mutex);
2188 + report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
2189 +- if (!report || (field_index >= report->maxfield)) {
2190 ++ if (!report || (field_index >= report->maxfield) ||
2191 ++ report->field[field_index]->report_count < 1) {
2192 + ret = -EINVAL;
2193 + goto done_proc;
2194 + }
2195 +diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
2196 +index a2f587d0..7112f3e8 100644
2197 +--- a/drivers/hid/hid-speedlink.c
2198 ++++ b/drivers/hid/hid-speedlink.c
2199 +@@ -3,7 +3,7 @@
2200 + * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
2201 + * the HID descriptor.
2202 + *
2203 +- * Copyright (c) 2011 Stefan Kriwanek <mail@××××××××××××××.de>
2204 ++ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@××××××××××××××.de>
2205 + */
2206 +
2207 + /*
2208 +@@ -46,8 +46,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
2209 + struct hid_usage *usage, __s32 value)
2210 + {
2211 + /* No other conditions due to usage_table. */
2212 +- /* Fix "jumpy" cursor (invalid events sent by device). */
2213 +- if (value == 256)
2214 ++
2215 ++ /* This fixes the "jumpy" cursor occuring due to invalid events sent
2216 ++ * by the device. Some devices only send them with value==+256, others
2217 ++ * don't. However, catching abs(value)>=256 is restrictive enough not
2218 ++ * to interfere with devices that were bug-free (has been tested).
2219 ++ */
2220 ++ if (abs(value) >= 256)
2221 + return 1;
2222 + /* Drop useless distance 0 events (on button clicks etc.) as well */
2223 + if (value == 0)
2224 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
2225 +index a7451632..612a655b 100644
2226 +--- a/drivers/hid/hidraw.c
2227 ++++ b/drivers/hid/hidraw.c
2228 +@@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
2229 + __u8 *buf;
2230 + int ret = 0;
2231 +
2232 +- if (!hidraw_table[minor]) {
2233 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
2234 + ret = -ENODEV;
2235 + goto out;
2236 + }
2237 +@@ -261,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
2238 + }
2239 +
2240 + mutex_lock(&minors_lock);
2241 +- if (!hidraw_table[minor]) {
2242 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
2243 + err = -ENODEV;
2244 + goto out_unlock;
2245 + }
2246 +@@ -302,39 +302,38 @@ static int hidraw_fasync(int fd, struct file *file, int on)
2247 + return fasync_helper(fd, file, on, &list->fasync);
2248 + }
2249 +
2250 ++static void drop_ref(struct hidraw *hidraw, int exists_bit)
2251 ++{
2252 ++ if (exists_bit) {
2253 ++ hid_hw_close(hidraw->hid);
2254 ++ hidraw->exist = 0;
2255 ++ if (hidraw->open)
2256 ++ wake_up_interruptible(&hidraw->wait);
2257 ++ } else {
2258 ++ --hidraw->open;
2259 ++ }
2260 ++
2261 ++ if (!hidraw->open && !hidraw->exist) {
2262 ++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
2263 ++ hidraw_table[hidraw->minor] = NULL;
2264 ++ kfree(hidraw);
2265 ++ }
2266 ++}
2267 ++
2268 + static int hidraw_release(struct inode * inode, struct file * file)
2269 + {
2270 + unsigned int minor = iminor(inode);
2271 +- struct hidraw *dev;
2272 + struct hidraw_list *list = file->private_data;
2273 +- int ret;
2274 +- int i;
2275 +
2276 + mutex_lock(&minors_lock);
2277 +- if (!hidraw_table[minor]) {
2278 +- ret = -ENODEV;
2279 +- goto unlock;
2280 +- }
2281 +
2282 + list_del(&list->node);
2283 +- dev = hidraw_table[minor];
2284 +- if (!--dev->open) {
2285 +- if (list->hidraw->exist) {
2286 +- hid_hw_power(dev->hid, PM_HINT_NORMAL);
2287 +- hid_hw_close(dev->hid);
2288 +- } else {
2289 +- kfree(list->hidraw);
2290 +- }
2291 +- }
2292 +-
2293 +- for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
2294 +- kfree(list->buffer[i].value);
2295 + kfree(list);
2296 +- ret = 0;
2297 +-unlock:
2298 +- mutex_unlock(&minors_lock);
2299 +
2300 +- return ret;
2301 ++ drop_ref(hidraw_table[minor], 0);
2302 ++
2303 ++ mutex_unlock(&minors_lock);
2304 ++ return 0;
2305 + }
2306 +
2307 + static long hidraw_ioctl(struct file *file, unsigned int cmd,
2308 +@@ -539,18 +538,9 @@ void hidraw_disconnect(struct hid_device *hid)
2309 + struct hidraw *hidraw = hid->hidraw;
2310 +
2311 + mutex_lock(&minors_lock);
2312 +- hidraw->exist = 0;
2313 +-
2314 +- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
2315 +
2316 +- hidraw_table[hidraw->minor] = NULL;
2317 ++ drop_ref(hidraw, 1);
2318 +
2319 +- if (hidraw->open) {
2320 +- hid_hw_close(hid);
2321 +- wake_up_interruptible(&hidraw->wait);
2322 +- } else {
2323 +- kfree(hidraw);
2324 +- }
2325 + mutex_unlock(&minors_lock);
2326 + }
2327 + EXPORT_SYMBOL_GPL(hidraw_disconnect);
2328 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
2329 +index 19b8360f..07345521 100644
2330 +--- a/drivers/hid/usbhid/hid-quirks.c
2331 ++++ b/drivers/hid/usbhid/hid-quirks.c
2332 +@@ -109,6 +109,8 @@ static const struct hid_blacklist {
2333 + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
2334 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
2335 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
2336 ++ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
2337 ++
2338 + { 0, 0 }
2339 + };
2340 +
2341 +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
2342 +index 4ef4d5e1..a73f9618 100644
2343 +--- a/drivers/input/mouse/bcm5974.c
2344 ++++ b/drivers/input/mouse/bcm5974.c
2345 +@@ -89,9 +89,9 @@
2346 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
2347 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
2348 + /* MacbookAir6,2 (unibody, June 2013) */
2349 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
2350 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
2351 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
2352 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
2353 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
2354 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
2355 +
2356 + #define BCM5974_DEVICE(prod) { \
2357 + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
2358 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2359 +index b4f0e28d..fa004b11 100644
2360 +--- a/drivers/iommu/intel-iommu.c
2361 ++++ b/drivers/iommu/intel-iommu.c
2362 +@@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
2363 + return order;
2364 + }
2365 +
2366 ++static void dma_pte_free_level(struct dmar_domain *domain, int level,
2367 ++ struct dma_pte *pte, unsigned long pfn,
2368 ++ unsigned long start_pfn, unsigned long last_pfn)
2369 ++{
2370 ++ pfn = max(start_pfn, pfn);
2371 ++ pte = &pte[pfn_level_offset(pfn, level)];
2372 ++
2373 ++ do {
2374 ++ unsigned long level_pfn;
2375 ++ struct dma_pte *level_pte;
2376 ++
2377 ++ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
2378 ++ goto next;
2379 ++
2380 ++ level_pfn = pfn & level_mask(level - 1);
2381 ++ level_pte = phys_to_virt(dma_pte_addr(pte));
2382 ++
2383 ++ if (level > 2)
2384 ++ dma_pte_free_level(domain, level - 1, level_pte,
2385 ++ level_pfn, start_pfn, last_pfn);
2386 ++
2387 ++ /* If range covers entire pagetable, free it */
2388 ++ if (!(start_pfn > level_pfn ||
2389 ++ last_pfn < level_pfn + level_size(level))) {
2390 ++ dma_clear_pte(pte);
2391 ++ domain_flush_cache(domain, pte, sizeof(*pte));
2392 ++ free_pgtable_page(level_pte);
2393 ++ }
2394 ++next:
2395 ++ pfn += level_size(level);
2396 ++ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
2397 ++}
2398 ++
2399 + /* free page table pages. last level pte should already be cleared */
2400 + static void dma_pte_free_pagetable(struct dmar_domain *domain,
2401 + unsigned long start_pfn,
2402 + unsigned long last_pfn)
2403 + {
2404 + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
2405 +- struct dma_pte *first_pte, *pte;
2406 +- int total = agaw_to_level(domain->agaw);
2407 +- int level;
2408 +- unsigned long tmp;
2409 +- int large_page = 2;
2410 +
2411 + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
2412 + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
2413 + BUG_ON(start_pfn > last_pfn);
2414 +
2415 + /* We don't need lock here; nobody else touches the iova range */
2416 +- level = 2;
2417 +- while (level <= total) {
2418 +- tmp = align_to_level(start_pfn, level);
2419 +-
2420 +- /* If we can't even clear one PTE at this level, we're done */
2421 +- if (tmp + level_size(level) - 1 > last_pfn)
2422 +- return;
2423 +-
2424 +- do {
2425 +- large_page = level;
2426 +- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
2427 +- if (large_page > level)
2428 +- level = large_page + 1;
2429 +- if (!pte) {
2430 +- tmp = align_to_level(tmp + 1, level + 1);
2431 +- continue;
2432 +- }
2433 +- do {
2434 +- if (dma_pte_present(pte)) {
2435 +- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
2436 +- dma_clear_pte(pte);
2437 +- }
2438 +- pte++;
2439 +- tmp += level_size(level);
2440 +- } while (!first_pte_in_page(pte) &&
2441 +- tmp + level_size(level) - 1 <= last_pfn);
2442 ++ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
2443 ++ domain->pgd, 0, start_pfn, last_pfn);
2444 +
2445 +- domain_flush_cache(domain, first_pte,
2446 +- (void *)pte - (void *)first_pte);
2447 +-
2448 +- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
2449 +- level++;
2450 +- }
2451 + /* free pgd */
2452 + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
2453 + free_pgtable_page(domain->pgd);
2454 +diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
2455 +index 6bd5c679..b7d83d68 100644
2456 +--- a/drivers/leds/leds-wm831x-status.c
2457 ++++ b/drivers/leds/leds-wm831x-status.c
2458 +@@ -230,9 +230,9 @@ static int wm831x_status_probe(struct platform_device *pdev)
2459 + int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
2460 + int ret;
2461 +
2462 +- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
2463 ++ res = platform_get_resource(pdev, IORESOURCE_REG, 0);
2464 + if (res == NULL) {
2465 +- dev_err(&pdev->dev, "No I/O resource\n");
2466 ++ dev_err(&pdev->dev, "No register resource\n");
2467 + ret = -EINVAL;
2468 + goto err;
2469 + }
2470 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
2471 +index 297f1b2f..8df1aea5 100644
2472 +--- a/drivers/media/common/siano/smsdvb-main.c
2473 ++++ b/drivers/media/common/siano/smsdvb-main.c
2474 +@@ -275,7 +275,8 @@ static void smsdvb_update_per_slices(struct smsdvb_client_t *client,
2475 +
2476 + /* Legacy PER/BER */
2477 + tmp = p->ets_packets * 65535;
2478 +- do_div(tmp, p->ts_packets + p->ets_packets);
2479 ++ if (p->ts_packets + p->ets_packets)
2480 ++ do_div(tmp, p->ts_packets + p->ets_packets);
2481 + client->legacy_per = tmp;
2482 + }
2483 +
2484 +diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
2485 +index 856374bd..2c7217fb 100644
2486 +--- a/drivers/media/dvb-frontends/mb86a20s.c
2487 ++++ b/drivers/media/dvb-frontends/mb86a20s.c
2488 +@@ -157,7 +157,6 @@ static struct regdata mb86a20s_init2[] = {
2489 + { 0x45, 0x04 }, /* CN symbol 4 */
2490 + { 0x48, 0x04 }, /* CN manual mode */
2491 +
2492 +- { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
2493 + { 0x50, 0xd6 }, { 0x51, 0x1f },
2494 + { 0x50, 0xd2 }, { 0x51, 0x03 },
2495 + { 0x50, 0xd7 }, { 0x51, 0xbf },
2496 +@@ -1860,16 +1859,15 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
2497 + dev_dbg(&state->i2c->dev, "%s: IF=%d, IF reg=0x%06llx\n",
2498 + __func__, state->if_freq, (long long)pll);
2499 +
2500 +- if (!state->config->is_serial) {
2501 ++ if (!state->config->is_serial)
2502 + regD5 &= ~1;
2503 +
2504 +- rc = mb86a20s_writereg(state, 0x50, 0xd5);
2505 +- if (rc < 0)
2506 +- goto err;
2507 +- rc = mb86a20s_writereg(state, 0x51, regD5);
2508 +- if (rc < 0)
2509 +- goto err;
2510 +- }
2511 ++ rc = mb86a20s_writereg(state, 0x50, 0xd5);
2512 ++ if (rc < 0)
2513 ++ goto err;
2514 ++ rc = mb86a20s_writereg(state, 0x51, regD5);
2515 ++ if (rc < 0)
2516 ++ goto err;
2517 +
2518 + rc = mb86a20s_writeregdata(state, mb86a20s_init2);
2519 + if (rc < 0)
2520 +diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
2521 +index 9d1481a6..c504f70d 100644
2522 +--- a/drivers/media/platform/coda.c
2523 ++++ b/drivers/media/platform/coda.c
2524 +@@ -1933,7 +1933,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids);
2525 +
2526 + #ifdef CONFIG_OF
2527 + static const struct of_device_id coda_dt_ids[] = {
2528 +- { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
2529 ++ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
2530 + { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
2531 + { /* sentinel */ }
2532 + };
2533 +diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
2534 +index 33b5ffc8..f45b940d 100644
2535 +--- a/drivers/media/platform/exynos-gsc/gsc-core.c
2536 ++++ b/drivers/media/platform/exynos-gsc/gsc-core.c
2537 +@@ -1122,10 +1122,14 @@ static int gsc_probe(struct platform_device *pdev)
2538 + goto err_clk;
2539 + }
2540 +
2541 +- ret = gsc_register_m2m_device(gsc);
2542 ++ ret = v4l2_device_register(dev, &gsc->v4l2_dev);
2543 + if (ret)
2544 + goto err_clk;
2545 +
2546 ++ ret = gsc_register_m2m_device(gsc);
2547 ++ if (ret)
2548 ++ goto err_v4l2;
2549 ++
2550 + platform_set_drvdata(pdev, gsc);
2551 + pm_runtime_enable(dev);
2552 + ret = pm_runtime_get_sync(&pdev->dev);
2553 +@@ -1147,6 +1151,8 @@ err_pm:
2554 + pm_runtime_put(dev);
2555 + err_m2m:
2556 + gsc_unregister_m2m_device(gsc);
2557 ++err_v4l2:
2558 ++ v4l2_device_unregister(&gsc->v4l2_dev);
2559 + err_clk:
2560 + gsc_clk_put(gsc);
2561 + return ret;
2562 +@@ -1157,6 +1163,7 @@ static int gsc_remove(struct platform_device *pdev)
2563 + struct gsc_dev *gsc = platform_get_drvdata(pdev);
2564 +
2565 + gsc_unregister_m2m_device(gsc);
2566 ++ v4l2_device_unregister(&gsc->v4l2_dev);
2567 +
2568 + vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
2569 + pm_runtime_disable(&pdev->dev);
2570 +diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
2571 +index cc19bba0..76435d3b 100644
2572 +--- a/drivers/media/platform/exynos-gsc/gsc-core.h
2573 ++++ b/drivers/media/platform/exynos-gsc/gsc-core.h
2574 +@@ -343,6 +343,7 @@ struct gsc_dev {
2575 + unsigned long state;
2576 + struct vb2_alloc_ctx *alloc_ctx;
2577 + struct video_device vdev;
2578 ++ struct v4l2_device v4l2_dev;
2579 + };
2580 +
2581 + /**
2582 +diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
2583 +index 40a73f7d..e576ff2d 100644
2584 +--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
2585 ++++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
2586 +@@ -751,6 +751,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
2587 + gsc->vdev.release = video_device_release_empty;
2588 + gsc->vdev.lock = &gsc->lock;
2589 + gsc->vdev.vfl_dir = VFL_DIR_M2M;
2590 ++ gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
2591 + snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
2592 + GSC_MODULE_NAME, gsc->id);
2593 +
2594 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
2595 +index 15ef8f28..b5b480be 100644
2596 +--- a/drivers/media/platform/exynos4-is/media-dev.c
2597 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
2598 +@@ -1441,9 +1441,9 @@ static int fimc_md_probe(struct platform_device *pdev)
2599 + err_unlock:
2600 + mutex_unlock(&fmd->media_dev.graph_mutex);
2601 + err_clk:
2602 +- media_device_unregister(&fmd->media_dev);
2603 + fimc_md_put_clocks(fmd);
2604 + fimc_md_unregister_entities(fmd);
2605 ++ media_device_unregister(&fmd->media_dev);
2606 + err_md:
2607 + v4l2_device_unregister(&fmd->v4l2_dev);
2608 + return ret;
2609 +diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
2610 +index 553d87e5..fd6289d6 100644
2611 +--- a/drivers/media/platform/s5p-g2d/g2d.c
2612 ++++ b/drivers/media/platform/s5p-g2d/g2d.c
2613 +@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev)
2614 + }
2615 + *vfd = g2d_videodev;
2616 + vfd->lock = &dev->mutex;
2617 ++ vfd->v4l2_dev = &dev->v4l2_dev;
2618 + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
2619 + if (ret) {
2620 + v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
2621 +diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
2622 +index 8247c19d..77d7b7fb 100644
2623 +--- a/drivers/media/usb/hdpvr/hdpvr-core.c
2624 ++++ b/drivers/media/usb/hdpvr/hdpvr-core.c
2625 +@@ -311,6 +311,11 @@ static int hdpvr_probe(struct usb_interface *interface,
2626 +
2627 + dev->workqueue = 0;
2628 +
2629 ++ /* init video transfer queues first of all */
2630 ++ /* to prevent oops in hdpvr_delete() on error paths */
2631 ++ INIT_LIST_HEAD(&dev->free_buff_list);
2632 ++ INIT_LIST_HEAD(&dev->rec_buff_list);
2633 ++
2634 + /* register v4l2_device early so it can be used for printks */
2635 + if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
2636 + dev_err(&interface->dev, "v4l2_device_register failed\n");
2637 +@@ -333,10 +338,6 @@ static int hdpvr_probe(struct usb_interface *interface,
2638 + if (!dev->workqueue)
2639 + goto error;
2640 +
2641 +- /* init video transfer queues */
2642 +- INIT_LIST_HEAD(&dev->free_buff_list);
2643 +- INIT_LIST_HEAD(&dev->rec_buff_list);
2644 +-
2645 + dev->options = hdpvr_default_options;
2646 +
2647 + if (default_video_input < HDPVR_VIDEO_INPUTS)
2648 +@@ -413,7 +414,7 @@ static int hdpvr_probe(struct usb_interface *interface,
2649 + video_nr[atomic_inc_return(&dev_nr)]);
2650 + if (retval < 0) {
2651 + v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
2652 +- goto error;
2653 ++ goto reg_fail;
2654 + }
2655 +
2656 + /* let the user know what node this device is now attached to */
2657 +diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
2658 +index fff92860..491e9ecc 100644
2659 +--- a/drivers/mmc/host/tmio_mmc_dma.c
2660 ++++ b/drivers/mmc/host/tmio_mmc_dma.c
2661 +@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
2662 + pio:
2663 + if (!desc) {
2664 + /* DMA failed, fall back to PIO */
2665 ++ tmio_mmc_enable_dma(host, false);
2666 + if (ret >= 0)
2667 + ret = -EIO;
2668 + host->chan_rx = NULL;
2669 +@@ -116,7 +117,6 @@ pio:
2670 + }
2671 + dev_warn(&host->pdev->dev,
2672 + "DMA failed: %d, falling back to PIO\n", ret);
2673 +- tmio_mmc_enable_dma(host, false);
2674 + }
2675 +
2676 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
2677 +@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
2678 + pio:
2679 + if (!desc) {
2680 + /* DMA failed, fall back to PIO */
2681 ++ tmio_mmc_enable_dma(host, false);
2682 + if (ret >= 0)
2683 + ret = -EIO;
2684 + host->chan_tx = NULL;
2685 +@@ -197,7 +198,6 @@ pio:
2686 + }
2687 + dev_warn(&host->pdev->dev,
2688 + "DMA failed: %d, falling back to PIO\n", ret);
2689 +- tmio_mmc_enable_dma(host, false);
2690 + }
2691 +
2692 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
2693 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
2694 +index dfcd0a56..fb8c4dea 100644
2695 +--- a/drivers/mtd/nand/nand_base.c
2696 ++++ b/drivers/mtd/nand/nand_base.c
2697 +@@ -2793,7 +2793,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2698 +
2699 + if (!chip->select_chip)
2700 + chip->select_chip = nand_select_chip;
2701 +- if (!chip->read_byte)
2702 ++
2703 ++ /* If called twice, pointers that depend on busw may need to be reset */
2704 ++ if (!chip->read_byte || chip->read_byte == nand_read_byte)
2705 + chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
2706 + if (!chip->read_word)
2707 + chip->read_word = nand_read_word;
2708 +@@ -2801,9 +2803,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2709 + chip->block_bad = nand_block_bad;
2710 + if (!chip->block_markbad)
2711 + chip->block_markbad = nand_default_block_markbad;
2712 +- if (!chip->write_buf)
2713 ++ if (!chip->write_buf || chip->write_buf == nand_write_buf)
2714 + chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2715 +- if (!chip->read_buf)
2716 ++ if (!chip->read_buf || chip->read_buf == nand_read_buf)
2717 + chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2718 + if (!chip->scan_bbt)
2719 + chip->scan_bbt = nand_default_bbt;
2720 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2721 +index 5df49d3c..c95bfb18 100644
2722 +--- a/drivers/mtd/ubi/wl.c
2723 ++++ b/drivers/mtd/ubi/wl.c
2724 +@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2725 + if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
2726 + dbg_wl("no WL needed: min used EC %d, max free EC %d",
2727 + e1->ec, e2->ec);
2728 ++
2729 ++ /* Give the unused PEB back */
2730 ++ wl_tree_add(e2, &ubi->free);
2731 + goto out_cancel;
2732 + }
2733 + self_check_in_wl_tree(ubi, e1, &ubi->used);
2734 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2735 +index e1714d7c..3457ca53 100644
2736 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2737 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2738 +@@ -1076,6 +1076,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
2739 + * is_on == 0 means MRC CCK is OFF (more noise imm)
2740 + */
2741 + bool is_on = param ? 1 : 0;
2742 ++
2743 ++ if (ah->caps.rx_chainmask == 1)
2744 ++ break;
2745 ++
2746 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
2747 + AR_PHY_MRC_CCK_ENABLE, is_on);
2748 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
2749 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
2750 +index 42b03dc3..4ebd9fd8 100644
2751 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
2752 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
2753 +@@ -79,10 +79,6 @@ struct ath_config {
2754 + sizeof(struct ath_buf_state)); \
2755 + } while (0)
2756 +
2757 +-#define ATH_RXBUF_RESET(_bf) do { \
2758 +- (_bf)->bf_stale = false; \
2759 +- } while (0)
2760 +-
2761 + /**
2762 + * enum buffer_type - Buffer type flags
2763 + *
2764 +@@ -316,6 +312,7 @@ struct ath_rx {
2765 + struct ath_descdma rxdma;
2766 + struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
2767 +
2768 ++ struct ath_buf *buf_hold;
2769 + struct sk_buff *frag;
2770 +
2771 + u32 ampdu_ref;
2772 +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
2773 +index 8be2b5d8..f53dbd11 100644
2774 +--- a/drivers/net/wireless/ath/ath9k/recv.c
2775 ++++ b/drivers/net/wireless/ath/ath9k/recv.c
2776 +@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
2777 + struct ath_desc *ds;
2778 + struct sk_buff *skb;
2779 +
2780 +- ATH_RXBUF_RESET(bf);
2781 +-
2782 + ds = bf->bf_desc;
2783 + ds->ds_link = 0; /* link to null */
2784 + ds->ds_data = bf->bf_buf_addr;
2785 +@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
2786 + sc->rx.rxlink = &ds->ds_link;
2787 + }
2788 +
2789 ++static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
2790 ++{
2791 ++ if (sc->rx.buf_hold)
2792 ++ ath_rx_buf_link(sc, sc->rx.buf_hold);
2793 ++
2794 ++ sc->rx.buf_hold = bf;
2795 ++}
2796 ++
2797 + static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
2798 + {
2799 + /* XXX block beacon interrupts */
2800 +@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
2801 +
2802 + skb = bf->bf_mpdu;
2803 +
2804 +- ATH_RXBUF_RESET(bf);
2805 + memset(skb->data, 0, ah->caps.rx_status_len);
2806 + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
2807 + ah->caps.rx_status_len, DMA_TO_DEVICE);
2808 +@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
2809 + if (list_empty(&sc->rx.rxbuf))
2810 + goto start_recv;
2811 +
2812 ++ sc->rx.buf_hold = NULL;
2813 + sc->rx.rxlink = NULL;
2814 + list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
2815 + ath_rx_buf_link(sc, bf);
2816 +@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
2817 + }
2818 +
2819 + bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
2820 ++ if (bf == sc->rx.buf_hold)
2821 ++ return NULL;
2822 ++
2823 + ds = bf->bf_desc;
2824 +
2825 + /*
2826 +@@ -1378,7 +1387,7 @@ requeue:
2827 + if (edma) {
2828 + ath_rx_edma_buf_link(sc, qtype);
2829 + } else {
2830 +- ath_rx_buf_link(sc, bf);
2831 ++ ath_rx_buf_relink(sc, bf);
2832 + ath9k_hw_rxena(ah);
2833 + }
2834 + } while (1);
2835 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2836 +index 83ab6be3..e752f5d4 100644
2837 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2838 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2839 +@@ -2387,6 +2387,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2840 + for (acno = 0, ac = &an->ac[acno];
2841 + acno < IEEE80211_NUM_ACS; acno++, ac++) {
2842 + ac->sched = false;
2843 ++ ac->clear_ps_filter = true;
2844 + ac->txq = sc->tx.txq_map[acno];
2845 + INIT_LIST_HEAD(&ac->tid_q);
2846 + }
2847 +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2848 +index 1860c572..4fb9635d 100644
2849 +--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2850 ++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2851 +@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
2852 +
2853 + /*
2854 + * post receive buffers
2855 +- * return false is refill failed completely and ring is empty this will stall
2856 +- * the rx dma and user might want to call rxfill again asap. This unlikely
2857 +- * happens on memory-rich NIC, but often on memory-constrained dongle
2858 ++ * Return false if refill failed completely or dma mapping failed. The ring
2859 ++ * is empty, which will stall the rx dma and user might want to call rxfill
2860 ++ * again asap. This is unlikely to happen on a memory-rich NIC, but often on
2861 ++ * memory-constrained dongle.
2862 + */
2863 + bool dma_rxfill(struct dma_pub *pub)
2864 + {
2865 +@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
2866 +
2867 + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
2868 + DMA_FROM_DEVICE);
2869 ++ if (dma_mapping_error(di->dmadev, pa))
2870 ++ return false;
2871 +
2872 + /* save the free packet pointer */
2873 + di->rxp[rxout] = p;
2874 +@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
2875 +
2876 + /* get physical address of buffer start */
2877 + pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
2878 +-
2879 ++ /* if mapping failed, free skb */
2880 ++ if (dma_mapping_error(di->dmadev, pa)) {
2881 ++ brcmu_pkt_buf_free_skb(p);
2882 ++ return;
2883 ++ }
2884 + /* With a DMA segment list, Descriptor table is filled
2885 + * using the segment list instead of looping over
2886 + * buffers in multi-chain DMA. Therefore, EOF for SGLIST
2887 +diff --git a/drivers/of/base.c b/drivers/of/base.c
2888 +index a6f584a7..1d10b4ec 100644
2889 +--- a/drivers/of/base.c
2890 ++++ b/drivers/of/base.c
2891 +@@ -1629,6 +1629,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
2892 + ap = dt_alloc(sizeof(*ap) + len + 1, 4);
2893 + if (!ap)
2894 + continue;
2895 ++ memset(ap, 0, sizeof(*ap) + len + 1);
2896 + ap->alias = start;
2897 + of_alias_add(ap, np, id, start, len);
2898 + }
2899 +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
2900 +index 5d7529ed..314e5e8e 100644
2901 +--- a/drivers/pinctrl/pinctrl-at91.c
2902 ++++ b/drivers/pinctrl/pinctrl-at91.c
2903 +@@ -325,7 +325,7 @@ static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask)
2904 +
2905 + static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
2906 + {
2907 +- return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1;
2908 ++ return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1);
2909 + }
2910 +
2911 + static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
2912 +@@ -445,7 +445,7 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
2913 +
2914 + static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
2915 + {
2916 +- return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1;
2917 ++ return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
2918 + }
2919 +
2920 + static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
2921 +diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
2922 +index 4c1d2e7a..efb0c4c2 100644
2923 +--- a/drivers/scsi/mpt3sas/Makefile
2924 ++++ b/drivers/scsi/mpt3sas/Makefile
2925 +@@ -1,5 +1,5 @@
2926 + # mpt3sas makefile
2927 +-obj-m += mpt3sas.o
2928 ++obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
2929 + mpt3sas-y += mpt3sas_base.o \
2930 + mpt3sas_config.o \
2931 + mpt3sas_scsih.o \
2932 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2933 +index 610417ec..c3986344 100644
2934 +--- a/drivers/scsi/sd.c
2935 ++++ b/drivers/scsi/sd.c
2936 +@@ -2409,14 +2409,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2937 + }
2938 + }
2939 +
2940 +- if (modepage == 0x3F) {
2941 +- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
2942 +- "present\n");
2943 +- goto defaults;
2944 +- } else if ((buffer[offset] & 0x3f) != modepage) {
2945 +- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
2946 +- goto defaults;
2947 +- }
2948 ++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
2949 ++ goto defaults;
2950 ++
2951 + Page_found:
2952 + if (modepage == 8) {
2953 + sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2954 +diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
2955 +index 90f2de9b..f4c1e998 100644
2956 +--- a/drivers/staging/comedi/drivers/dt282x.c
2957 ++++ b/drivers/staging/comedi/drivers/dt282x.c
2958 +@@ -269,8 +269,9 @@ struct dt282x_private {
2959 + } \
2960 + udelay(5); \
2961 + } \
2962 +- if (_i) \
2963 ++ if (_i) { \
2964 + b \
2965 ++ } \
2966 + } while (0)
2967 +
2968 + static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
2969 +diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
2970 +index 163c638e..972a0723 100644
2971 +--- a/drivers/staging/iio/adc/mxs-lradc.c
2972 ++++ b/drivers/staging/iio/adc/mxs-lradc.c
2973 +@@ -234,7 +234,6 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
2974 + {
2975 + struct mxs_lradc *lradc = iio_priv(iio_dev);
2976 + int ret;
2977 +- unsigned long mask;
2978 +
2979 + if (m != IIO_CHAN_INFO_RAW)
2980 + return -EINVAL;
2981 +@@ -243,12 +242,6 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
2982 + if (chan->channel > LRADC_MAX_TOTAL_CHANS)
2983 + return -EINVAL;
2984 +
2985 +- /* Validate the channel if it doesn't intersect with reserved chans. */
2986 +- bitmap_set(&mask, chan->channel, 1);
2987 +- ret = iio_validate_scan_mask_onehot(iio_dev, &mask);
2988 +- if (ret)
2989 +- return -EINVAL;
2990 +-
2991 + /*
2992 + * See if there is no buffered operation in progess. If there is, simply
2993 + * bail out. This can be improved to support both buffered and raw IO at
2994 +@@ -661,12 +654,13 @@ static int mxs_lradc_trigger_init(struct iio_dev *iio)
2995 + {
2996 + int ret;
2997 + struct iio_trigger *trig;
2998 ++ struct mxs_lradc *lradc = iio_priv(iio);
2999 +
3000 + trig = iio_trigger_alloc("%s-dev%i", iio->name, iio->id);
3001 + if (trig == NULL)
3002 + return -ENOMEM;
3003 +
3004 +- trig->dev.parent = iio->dev.parent;
3005 ++ trig->dev.parent = lradc->dev;
3006 + iio_trigger_set_drvdata(trig, iio);
3007 + trig->ops = &mxs_lradc_trigger_ops;
3008 +
3009 +@@ -676,15 +670,17 @@ static int mxs_lradc_trigger_init(struct iio_dev *iio)
3010 + return ret;
3011 + }
3012 +
3013 +- iio->trig = trig;
3014 ++ lradc->trig = trig;
3015 +
3016 + return 0;
3017 + }
3018 +
3019 + static void mxs_lradc_trigger_remove(struct iio_dev *iio)
3020 + {
3021 +- iio_trigger_unregister(iio->trig);
3022 +- iio_trigger_free(iio->trig);
3023 ++ struct mxs_lradc *lradc = iio_priv(iio);
3024 ++
3025 ++ iio_trigger_unregister(lradc->trig);
3026 ++ iio_trigger_free(lradc->trig);
3027 + }
3028 +
3029 + static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
3030 +diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
3031 +index cbe48ab4..f608fbc1 100644
3032 +--- a/drivers/target/target_core_alua.c
3033 ++++ b/drivers/target/target_core_alua.c
3034 +@@ -730,7 +730,7 @@ static int core_alua_write_tpg_metadata(
3035 + if (ret < 0)
3036 + pr_err("Error writing ALUA metadata file: %s\n", path);
3037 + fput(file);
3038 +- return ret ? -EIO : 0;
3039 ++ return (ret < 0) ? -EIO : 0;
3040 + }
3041 +
3042 + /*
3043 +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
3044 +index 3240f2cc..04a74938 100644
3045 +--- a/drivers/target/target_core_pr.c
3046 ++++ b/drivers/target/target_core_pr.c
3047 +@@ -1987,7 +1987,7 @@ static int __core_scsi3_write_aptpl_to_file(
3048 + pr_debug("Error writing APTPL metadata file: %s\n", path);
3049 + fput(file);
3050 +
3051 +- return ret ? -EIO : 0;
3052 ++ return (ret < 0) ? -EIO : 0;
3053 + }
3054 +
3055 + static int
3056 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3057 +index 44766821..59d26ef5 100644
3058 +--- a/drivers/tty/tty_io.c
3059 ++++ b/drivers/tty/tty_io.c
3060 +@@ -850,7 +850,8 @@ void disassociate_ctty(int on_exit)
3061 + struct pid *tty_pgrp = tty_get_pgrp(tty);
3062 + if (tty_pgrp) {
3063 + kill_pgrp(tty_pgrp, SIGHUP, on_exit);
3064 +- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
3065 ++ if (!on_exit)
3066 ++ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
3067 + put_pid(tty_pgrp);
3068 + }
3069 + }
3070 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
3071 +index 8a230f0e..d3318a0d 100644
3072 +--- a/drivers/usb/class/cdc-wdm.c
3073 ++++ b/drivers/usb/class/cdc-wdm.c
3074 +@@ -209,6 +209,7 @@ skip_error:
3075 + static void wdm_int_callback(struct urb *urb)
3076 + {
3077 + int rv = 0;
3078 ++ int responding;
3079 + int status = urb->status;
3080 + struct wdm_device *desc;
3081 + struct usb_cdc_notification *dr;
3082 +@@ -262,8 +263,8 @@ static void wdm_int_callback(struct urb *urb)
3083 +
3084 + spin_lock(&desc->iuspin);
3085 + clear_bit(WDM_READ, &desc->flags);
3086 +- set_bit(WDM_RESPONDING, &desc->flags);
3087 +- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
3088 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
3089 ++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
3090 + && !test_bit(WDM_SUSPENDING, &desc->flags)) {
3091 + rv = usb_submit_urb(desc->response, GFP_ATOMIC);
3092 + dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
3093 +@@ -685,16 +686,20 @@ static void wdm_rxwork(struct work_struct *work)
3094 + {
3095 + struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
3096 + unsigned long flags;
3097 +- int rv;
3098 ++ int rv = 0;
3099 ++ int responding;
3100 +
3101 + spin_lock_irqsave(&desc->iuspin, flags);
3102 + if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
3103 + spin_unlock_irqrestore(&desc->iuspin, flags);
3104 + } else {
3105 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
3106 + spin_unlock_irqrestore(&desc->iuspin, flags);
3107 +- rv = usb_submit_urb(desc->response, GFP_KERNEL);
3108 ++ if (!responding)
3109 ++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
3110 + if (rv < 0 && rv != -EPERM) {
3111 + spin_lock_irqsave(&desc->iuspin, flags);
3112 ++ clear_bit(WDM_RESPONDING, &desc->flags);
3113 + if (!test_bit(WDM_DISCONNECTING, &desc->flags))
3114 + schedule_work(&desc->rxwork);
3115 + spin_unlock_irqrestore(&desc->iuspin, flags);
3116 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
3117 +index 7199adcc..a6b2cabe 100644
3118 +--- a/drivers/usb/core/config.c
3119 ++++ b/drivers/usb/core/config.c
3120 +@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
3121 +
3122 + memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
3123 + if (config->desc.bDescriptorType != USB_DT_CONFIG ||
3124 +- config->desc.bLength < USB_DT_CONFIG_SIZE) {
3125 ++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
3126 ++ config->desc.bLength > size) {
3127 + dev_err(ddev, "invalid descriptor for config index %d: "
3128 + "type = 0x%X, length = %d\n", cfgidx,
3129 + config->desc.bDescriptorType, config->desc.bLength);
3130 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3131 +index da2905a1..6cf2ae0a 100644
3132 +--- a/drivers/usb/core/hub.c
3133 ++++ b/drivers/usb/core/hub.c
3134 +@@ -2916,7 +2916,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3135 + {
3136 + struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
3137 + struct usb_port *port_dev = hub->ports[udev->portnum - 1];
3138 +- enum pm_qos_flags_status pm_qos_stat;
3139 + int port1 = udev->portnum;
3140 + int status;
3141 + bool really_suspend = true;
3142 +@@ -2954,7 +2953,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3143 + status);
3144 + /* bail if autosuspend is requested */
3145 + if (PMSG_IS_AUTO(msg))
3146 +- return status;
3147 ++ goto err_wakeup;
3148 + }
3149 + }
3150 +
3151 +@@ -2963,14 +2962,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3152 + usb_set_usb2_hardware_lpm(udev, 0);
3153 +
3154 + if (usb_disable_ltm(udev)) {
3155 +- dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.",
3156 +- __func__);
3157 +- return -ENOMEM;
3158 ++ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
3159 ++ status = -ENOMEM;
3160 ++ if (PMSG_IS_AUTO(msg))
3161 ++ goto err_ltm;
3162 + }
3163 + if (usb_unlocked_disable_lpm(udev)) {
3164 +- dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.",
3165 +- __func__);
3166 +- return -ENOMEM;
3167 ++ dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
3168 ++ status = -ENOMEM;
3169 ++ if (PMSG_IS_AUTO(msg))
3170 ++ goto err_lpm3;
3171 + }
3172 +
3173 + /* see 7.1.7.6 */
3174 +@@ -2998,28 +2999,31 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3175 + if (status) {
3176 + dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
3177 + port1, status);
3178 +- /* paranoia: "should not happen" */
3179 +- if (udev->do_remote_wakeup) {
3180 +- if (!hub_is_superspeed(hub->hdev)) {
3181 +- (void) usb_control_msg(udev,
3182 +- usb_sndctrlpipe(udev, 0),
3183 +- USB_REQ_CLEAR_FEATURE,
3184 +- USB_RECIP_DEVICE,
3185 +- USB_DEVICE_REMOTE_WAKEUP, 0,
3186 +- NULL, 0,
3187 +- USB_CTRL_SET_TIMEOUT);
3188 +- } else
3189 +- (void) usb_disable_function_remotewakeup(udev);
3190 +-
3191 +- }
3192 +
3193 ++ /* Try to enable USB3 LPM and LTM again */
3194 ++ usb_unlocked_enable_lpm(udev);
3195 ++ err_lpm3:
3196 ++ usb_enable_ltm(udev);
3197 ++ err_ltm:
3198 + /* Try to enable USB2 hardware LPM again */
3199 + if (udev->usb2_hw_lpm_capable == 1)
3200 + usb_set_usb2_hardware_lpm(udev, 1);
3201 +
3202 +- /* Try to enable USB3 LTM and LPM again */
3203 +- usb_enable_ltm(udev);
3204 +- usb_unlocked_enable_lpm(udev);
3205 ++ if (udev->do_remote_wakeup) {
3206 ++ if (udev->speed < USB_SPEED_SUPER)
3207 ++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
3208 ++ USB_REQ_CLEAR_FEATURE,
3209 ++ USB_RECIP_DEVICE,
3210 ++ USB_DEVICE_REMOTE_WAKEUP, 0,
3211 ++ NULL, 0, USB_CTRL_SET_TIMEOUT);
3212 ++ else
3213 ++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
3214 ++ USB_REQ_CLEAR_FEATURE,
3215 ++ USB_RECIP_INTERFACE,
3216 ++ USB_INTRF_FUNC_SUSPEND, 0,
3217 ++ NULL, 0, USB_CTRL_SET_TIMEOUT);
3218 ++ }
3219 ++ err_wakeup:
3220 +
3221 + /* System sleep transitions should never fail */
3222 + if (!PMSG_IS_AUTO(msg))
3223 +@@ -3037,16 +3041,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3224 + usb_set_device_state(udev, USB_STATE_SUSPENDED);
3225 + }
3226 +
3227 +- /*
3228 +- * Check whether current status meets the requirement of
3229 +- * usb port power off mechanism
3230 +- */
3231 +- pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
3232 +- PM_QOS_FLAG_NO_POWER_OFF);
3233 +- if (!udev->do_remote_wakeup
3234 +- && pm_qos_stat != PM_QOS_FLAGS_ALL
3235 +- && udev->persist_enabled
3236 +- && !status) {
3237 ++ if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
3238 + pm_runtime_put_sync(&port_dev->dev);
3239 + port_dev->did_runtime_put = true;
3240 + }
3241 +diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
3242 +index b8bad294..ef07b359 100644
3243 +--- a/drivers/usb/core/port.c
3244 ++++ b/drivers/usb/core/port.c
3245 +@@ -89,22 +89,19 @@ static int usb_port_runtime_resume(struct device *dev)
3246 + retval = usb_hub_set_port_power(hdev, port1, true);
3247 + if (port_dev->child && !retval) {
3248 + /*
3249 +- * Wait for usb hub port to be reconnected in order to make
3250 +- * the resume procedure successful.
3251 ++ * Attempt to wait for usb hub port to be reconnected in order
3252 ++ * to make the resume procedure successful. The device may have
3253 ++ * disconnected while the port was powered off, so ignore the
3254 ++ * return status.
3255 + */
3256 + retval = hub_port_debounce_be_connected(hub, port1);
3257 +- if (retval < 0) {
3258 ++ if (retval < 0)
3259 + dev_dbg(&port_dev->dev, "can't get reconnection after setting port power on, status %d\n",
3260 + retval);
3261 +- goto out;
3262 +- }
3263 + usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
3264 +-
3265 +- /* Set return value to 0 if debounce successful */
3266 + retval = 0;
3267 + }
3268 +
3269 +-out:
3270 + clear_bit(port1, hub->busy_bits);
3271 + usb_autopm_put_interface(intf);
3272 + return retval;
3273 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3274 +index f77083fe..14d28d61 100644
3275 +--- a/drivers/usb/dwc3/gadget.c
3276 ++++ b/drivers/usb/dwc3/gadget.c
3277 +@@ -1508,6 +1508,15 @@ static int dwc3_gadget_start(struct usb_gadget *g,
3278 + int irq;
3279 + u32 reg;
3280 +
3281 ++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3282 ++ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
3283 ++ IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
3284 ++ if (ret) {
3285 ++ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
3286 ++ irq, ret);
3287 ++ goto err0;
3288 ++ }
3289 ++
3290 + spin_lock_irqsave(&dwc->lock, flags);
3291 +
3292 + if (dwc->gadget_driver) {
3293 +@@ -1515,7 +1524,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
3294 + dwc->gadget.name,
3295 + dwc->gadget_driver->driver.name);
3296 + ret = -EBUSY;
3297 +- goto err0;
3298 ++ goto err1;
3299 + }
3300 +
3301 + dwc->gadget_driver = driver;
3302 +@@ -1551,42 +1560,38 @@ static int dwc3_gadget_start(struct usb_gadget *g,
3303 + ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
3304 + if (ret) {
3305 + dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3306 +- goto err0;
3307 ++ goto err2;
3308 + }
3309 +
3310 + dep = dwc->eps[1];
3311 + ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
3312 + if (ret) {
3313 + dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3314 +- goto err1;
3315 ++ goto err3;
3316 + }
3317 +
3318 + /* begin to receive SETUP packets */
3319 + dwc->ep0state = EP0_SETUP_PHASE;
3320 + dwc3_ep0_out_start(dwc);
3321 +
3322 +- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3323 +- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
3324 +- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
3325 +- if (ret) {
3326 +- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
3327 +- irq, ret);
3328 +- goto err1;
3329 +- }
3330 +-
3331 + dwc3_gadget_enable_irq(dwc);
3332 +
3333 + spin_unlock_irqrestore(&dwc->lock, flags);
3334 +
3335 + return 0;
3336 +
3337 +-err1:
3338 ++err3:
3339 + __dwc3_gadget_ep_disable(dwc->eps[0]);
3340 +
3341 +-err0:
3342 ++err2:
3343 + dwc->gadget_driver = NULL;
3344 ++
3345 ++err1:
3346 + spin_unlock_irqrestore(&dwc->lock, flags);
3347 +
3348 ++ free_irq(irq, dwc);
3349 ++
3350 ++err0:
3351 + return ret;
3352 + }
3353 +
3354 +@@ -1600,9 +1605,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
3355 + spin_lock_irqsave(&dwc->lock, flags);
3356 +
3357 + dwc3_gadget_disable_irq(dwc);
3358 +- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3359 +- free_irq(irq, dwc);
3360 +-
3361 + __dwc3_gadget_ep_disable(dwc->eps[0]);
3362 + __dwc3_gadget_ep_disable(dwc->eps[1]);
3363 +
3364 +@@ -1610,6 +1612,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
3365 +
3366 + spin_unlock_irqrestore(&dwc->lock, flags);
3367 +
3368 ++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3369 ++ free_irq(irq, dwc);
3370 ++
3371 + return 0;
3372 + }
3373 +
3374 +diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
3375 +index 7ce27e35..de456a5a 100644
3376 +--- a/drivers/usb/gadget/uvc_queue.c
3377 ++++ b/drivers/usb/gadget/uvc_queue.c
3378 +@@ -177,12 +177,16 @@ static int uvc_queue_buffer(struct uvc_video_queue *queue,
3379 +
3380 + mutex_lock(&queue->mutex);
3381 + ret = vb2_qbuf(&queue->queue, buf);
3382 ++ if (ret < 0)
3383 ++ goto done;
3384 ++
3385 + spin_lock_irqsave(&queue->irqlock, flags);
3386 + ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
3387 + queue->flags &= ~UVC_QUEUE_PAUSED;
3388 + spin_unlock_irqrestore(&queue->irqlock, flags);
3389 +- mutex_unlock(&queue->mutex);
3390 +
3391 ++done:
3392 ++ mutex_unlock(&queue->mutex);
3393 + return ret;
3394 + }
3395 +
3396 +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
3397 +index c369767b..ec128bc7 100644
3398 +--- a/drivers/usb/host/ehci-mxc.c
3399 ++++ b/drivers/usb/host/ehci-mxc.c
3400 +@@ -184,7 +184,7 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
3401 + if (pdata && pdata->exit)
3402 + pdata->exit(pdev);
3403 +
3404 +- if (pdata->otg)
3405 ++ if (pdata && pdata->otg)
3406 + usb_phy_shutdown(pdata->otg);
3407 +
3408 + clk_disable_unprepare(priv->usbclk);
3409 +diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
3410 +index 951514ef..ef6782bd 100644
3411 +--- a/drivers/usb/host/ohci-pci.c
3412 ++++ b/drivers/usb/host/ohci-pci.c
3413 +@@ -371,7 +371,7 @@ static struct pci_driver ohci_pci_driver = {
3414 + .remove = usb_hcd_pci_remove,
3415 + .shutdown = usb_hcd_pci_shutdown,
3416 +
3417 +-#ifdef CONFIG_PM_SLEEP
3418 ++#ifdef CONFIG_PM
3419 + .driver = {
3420 + .pm = &usb_hcd_pci_pm_ops
3421 + },
3422 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
3423 +index 93ad67ec..6e70ce97 100644
3424 +--- a/drivers/usb/host/xhci-plat.c
3425 ++++ b/drivers/usb/host/xhci-plat.c
3426 +@@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
3427 + * here that the generic code does not try to make a pci_dev from our
3428 + * dev struct in order to setup MSI
3429 + */
3430 +- xhci->quirks |= XHCI_BROKEN_MSI;
3431 ++ xhci->quirks |= XHCI_PLAT;
3432 + }
3433 +
3434 + /* called during probe() after chip reset completes */
3435 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3436 +index 9a550b6a..f4a49c45 100644
3437 +--- a/drivers/usb/host/xhci.c
3438 ++++ b/drivers/usb/host/xhci.c
3439 +@@ -342,9 +342,14 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
3440 + static int xhci_try_enable_msi(struct usb_hcd *hcd)
3441 + {
3442 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3443 +- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
3444 ++ struct pci_dev *pdev;
3445 + int ret;
3446 +
3447 ++ /* The xhci platform device has set up IRQs through usb_add_hcd. */
3448 ++ if (xhci->quirks & XHCI_PLAT)
3449 ++ return 0;
3450 ++
3451 ++ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
3452 + /*
3453 + * Some Fresco Logic host controllers advertise MSI, but fail to
3454 + * generate interrupts. Don't even try to enable MSI.
3455 +@@ -3506,10 +3511,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3456 + {
3457 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3458 + struct xhci_virt_device *virt_dev;
3459 ++ struct device *dev = hcd->self.controller;
3460 + unsigned long flags;
3461 + u32 state;
3462 + int i, ret;
3463 +
3464 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
3465 ++ /*
3466 ++ * We called pm_runtime_get_noresume when the device was attached.
3467 ++ * Decrement the counter here to allow controller to runtime suspend
3468 ++ * if no devices remain.
3469 ++ */
3470 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
3471 ++ pm_runtime_put_noidle(dev);
3472 ++#endif
3473 ++
3474 + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3475 + /* If the host is halted due to driver unload, we still need to free the
3476 + * device.
3477 +@@ -3581,6 +3597,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3478 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3479 + {
3480 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3481 ++ struct device *dev = hcd->self.controller;
3482 + unsigned long flags;
3483 + int timeleft;
3484 + int ret;
3485 +@@ -3633,6 +3650,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3486 + goto disable_slot;
3487 + }
3488 + udev->slot_id = xhci->slot_id;
3489 ++
3490 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
3491 ++ /*
3492 ++ * If resetting upon resume, we can't put the controller into runtime
3493 ++ * suspend if there is a device attached.
3494 ++ */
3495 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
3496 ++ pm_runtime_get_noresume(dev);
3497 ++#endif
3498 ++
3499 + /* Is this a LS or FS device under a HS hub? */
3500 + /* Hub or peripherial? */
3501 + return 1;
3502 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3503 +index 77600cef..3638f1d7 100644
3504 +--- a/drivers/usb/host/xhci.h
3505 ++++ b/drivers/usb/host/xhci.h
3506 +@@ -1516,6 +1516,7 @@ struct xhci_hcd {
3507 + #define XHCI_SPURIOUS_REBOOT (1 << 13)
3508 + #define XHCI_COMP_MODE_QUIRK (1 << 14)
3509 + #define XHCI_AVOID_BEI (1 << 15)
3510 ++#define XHCI_PLAT (1 << 16)
3511 + unsigned int num_active_eps;
3512 + unsigned int limit_active_eps;
3513 + /* There are two roothubs to keep track of bus suspend info for */
3514 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
3515 +index 5050cc85..0f16bf6e 100644
3516 +--- a/drivers/usb/serial/mos7720.c
3517 ++++ b/drivers/usb/serial/mos7720.c
3518 +@@ -374,7 +374,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
3519 + kfree(urbtrack);
3520 + return -ENOMEM;
3521 + }
3522 +- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
3523 ++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
3524 + if (!urbtrack->setup) {
3525 + usb_free_urb(urbtrack->urb);
3526 + kfree(urbtrack);
3527 +@@ -382,8 +382,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
3528 + }
3529 + urbtrack->setup->bRequestType = (__u8)0x40;
3530 + urbtrack->setup->bRequest = (__u8)0x0e;
3531 +- urbtrack->setup->wValue = get_reg_value(reg, dummy);
3532 +- urbtrack->setup->wIndex = get_reg_index(reg);
3533 ++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
3534 ++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
3535 + urbtrack->setup->wLength = 0;
3536 + usb_fill_control_urb(urbtrack->urb, usbdev,
3537 + usb_sndctrlpipe(usbdev, 0),
3538 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
3539 +index 04c1b2d9..d5418c1b 100644
3540 +--- a/drivers/xen/grant-table.c
3541 ++++ b/drivers/xen/grant-table.c
3542 +@@ -729,9 +729,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
3543 + void (*fn)(void *), void *arg, u16 count)
3544 + {
3545 + unsigned long flags;
3546 ++ struct gnttab_free_callback *cb;
3547 ++
3548 + spin_lock_irqsave(&gnttab_list_lock, flags);
3549 +- if (callback->next)
3550 +- goto out;
3551 ++
3552 ++ /* Check if the callback is already on the list */
3553 ++ cb = gnttab_free_callback_list;
3554 ++ while (cb) {
3555 ++ if (cb == callback)
3556 ++ goto out;
3557 ++ cb = cb->next;
3558 ++ }
3559 ++
3560 + callback->fn = fn;
3561 + callback->arg = arg;
3562 + callback->count = count;
3563 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3564 +index 0f81d67c..8dedf401 100644
3565 +--- a/fs/btrfs/ioctl.c
3566 ++++ b/fs/btrfs/ioctl.c
3567 +@@ -3299,6 +3299,9 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
3568 +
3569 + switch (p->cmd) {
3570 + case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
3571 ++ if (root->fs_info->sb->s_flags & MS_RDONLY)
3572 ++ return -EROFS;
3573 ++
3574 + if (atomic_xchg(
3575 + &root->fs_info->mutually_exclusive_operation_running,
3576 + 1)) {
3577 +diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
3578 +index e0b4ef31..a5ce62eb 100644
3579 +--- a/fs/ceph/ioctl.c
3580 ++++ b/fs/ceph/ioctl.c
3581 +@@ -196,8 +196,10 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
3582 + r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
3583 + &dl.object_no, &dl.object_offset,
3584 + &olen);
3585 +- if (r < 0)
3586 ++ if (r < 0) {
3587 ++ up_read(&osdc->map_sem);
3588 + return -EIO;
3589 ++ }
3590 + dl.file_offset -= dl.object_offset;
3591 + dl.object_size = ceph_file_layout_object_size(ci->i_layout);
3592 + dl.block_size = ceph_file_layout_su(ci->i_layout);
3593 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3594 +index d6a5c5ac..d05a3007 100644
3595 +--- a/fs/cifs/connect.c
3596 ++++ b/fs/cifs/connect.c
3597 +@@ -377,6 +377,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
3598 + try_to_freeze();
3599 +
3600 + /* we should try only the port we connected to before */
3601 ++ mutex_lock(&server->srv_mutex);
3602 + rc = generic_ip_connect(server);
3603 + if (rc) {
3604 + cifs_dbg(FYI, "reconnect error %d\n", rc);
3605 +@@ -388,6 +389,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
3606 + server->tcpStatus = CifsNeedNegotiate;
3607 + spin_unlock(&GlobalMid_Lock);
3608 + }
3609 ++ mutex_unlock(&server->srv_mutex);
3610 + } while (server->tcpStatus == CifsNeedReconnect);
3611 +
3612 + return rc;
3613 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3614 +index 10383d8c..4f791e0e 100644
3615 +--- a/fs/cifs/smb2misc.c
3616 ++++ b/fs/cifs/smb2misc.c
3617 +@@ -413,96 +413,108 @@ cifs_ses_oplock_break(struct work_struct *work)
3618 + }
3619 +
3620 + static bool
3621 +-smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
3622 ++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3623 ++ struct smb2_lease_break_work *lw)
3624 + {
3625 +- struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
3626 +- struct list_head *tmp, *tmp1, *tmp2;
3627 +- struct cifs_ses *ses;
3628 +- struct cifs_tcon *tcon;
3629 +- struct cifsInodeInfo *cinode;
3630 ++ bool found;
3631 ++ __u8 lease_state;
3632 ++ struct list_head *tmp;
3633 + struct cifsFileInfo *cfile;
3634 + struct cifs_pending_open *open;
3635 +- struct smb2_lease_break_work *lw;
3636 +- bool found;
3637 ++ struct cifsInodeInfo *cinode;
3638 + int ack_req = le32_to_cpu(rsp->Flags &
3639 + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3640 +
3641 +- lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3642 +- if (!lw)
3643 +- return false;
3644 ++ lease_state = smb2_map_lease_to_oplock(rsp->NewLeaseState);
3645 +
3646 +- INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3647 +- lw->lease_state = rsp->NewLeaseState;
3648 ++ list_for_each(tmp, &tcon->openFileList) {
3649 ++ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
3650 ++ cinode = CIFS_I(cfile->dentry->d_inode);
3651 +
3652 +- cifs_dbg(FYI, "Checking for lease break\n");
3653 ++ if (memcmp(cinode->lease_key, rsp->LeaseKey,
3654 ++ SMB2_LEASE_KEY_SIZE))
3655 ++ continue;
3656 +
3657 +- /* look up tcon based on tid & uid */
3658 +- spin_lock(&cifs_tcp_ses_lock);
3659 +- list_for_each(tmp, &server->smb_ses_list) {
3660 +- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
3661 ++ cifs_dbg(FYI, "found in the open list\n");
3662 ++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3663 ++ le32_to_cpu(rsp->NewLeaseState));
3664 +
3665 +- spin_lock(&cifs_file_list_lock);
3666 +- list_for_each(tmp1, &ses->tcon_list) {
3667 +- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
3668 ++ smb2_set_oplock_level(cinode, lease_state);
3669 +
3670 +- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3671 +- list_for_each(tmp2, &tcon->openFileList) {
3672 +- cfile = list_entry(tmp2, struct cifsFileInfo,
3673 +- tlist);
3674 +- cinode = CIFS_I(cfile->dentry->d_inode);
3675 ++ if (ack_req)
3676 ++ cfile->oplock_break_cancelled = false;
3677 ++ else
3678 ++ cfile->oplock_break_cancelled = true;
3679 +
3680 +- if (memcmp(cinode->lease_key, rsp->LeaseKey,
3681 +- SMB2_LEASE_KEY_SIZE))
3682 +- continue;
3683 ++ queue_work(cifsiod_wq, &cfile->oplock_break);
3684 ++ kfree(lw);
3685 ++ return true;
3686 ++ }
3687 +
3688 +- cifs_dbg(FYI, "found in the open list\n");
3689 +- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3690 +- le32_to_cpu(rsp->NewLeaseState));
3691 ++ found = false;
3692 ++ list_for_each_entry(open, &tcon->pending_opens, olist) {
3693 ++ if (memcmp(open->lease_key, rsp->LeaseKey,
3694 ++ SMB2_LEASE_KEY_SIZE))
3695 ++ continue;
3696 ++
3697 ++ if (!found && ack_req) {
3698 ++ found = true;
3699 ++ memcpy(lw->lease_key, open->lease_key,
3700 ++ SMB2_LEASE_KEY_SIZE);
3701 ++ lw->tlink = cifs_get_tlink(open->tlink);
3702 ++ queue_work(cifsiod_wq, &lw->lease_break);
3703 ++ }
3704 +
3705 +- smb2_set_oplock_level(cinode,
3706 +- smb2_map_lease_to_oplock(rsp->NewLeaseState));
3707 ++ cifs_dbg(FYI, "found in the pending open list\n");
3708 ++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3709 ++ le32_to_cpu(rsp->NewLeaseState));
3710 +
3711 +- if (ack_req)
3712 +- cfile->oplock_break_cancelled = false;
3713 +- else
3714 +- cfile->oplock_break_cancelled = true;
3715 ++ open->oplock = lease_state;
3716 ++ }
3717 ++ return found;
3718 ++}
3719 +
3720 +- queue_work(cifsiod_wq, &cfile->oplock_break);
3721 ++static bool
3722 ++smb2_is_valid_lease_break(char *buffer)
3723 ++{
3724 ++ struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
3725 ++ struct list_head *tmp, *tmp1, *tmp2;
3726 ++ struct TCP_Server_Info *server;
3727 ++ struct cifs_ses *ses;
3728 ++ struct cifs_tcon *tcon;
3729 ++ struct smb2_lease_break_work *lw;
3730 +
3731 +- spin_unlock(&cifs_file_list_lock);
3732 +- spin_unlock(&cifs_tcp_ses_lock);
3733 +- return true;
3734 +- }
3735 ++ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3736 ++ if (!lw)
3737 ++ return false;
3738 +
3739 +- found = false;
3740 +- list_for_each_entry(open, &tcon->pending_opens, olist) {
3741 +- if (memcmp(open->lease_key, rsp->LeaseKey,
3742 +- SMB2_LEASE_KEY_SIZE))
3743 +- continue;
3744 ++ INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3745 ++ lw->lease_state = rsp->NewLeaseState;
3746 +
3747 +- if (!found && ack_req) {
3748 +- found = true;
3749 +- memcpy(lw->lease_key, open->lease_key,
3750 +- SMB2_LEASE_KEY_SIZE);
3751 +- lw->tlink = cifs_get_tlink(open->tlink);
3752 +- queue_work(cifsiod_wq,
3753 +- &lw->lease_break);
3754 +- }
3755 ++ cifs_dbg(FYI, "Checking for lease break\n");
3756 ++
3757 ++ /* look up tcon based on tid & uid */
3758 ++ spin_lock(&cifs_tcp_ses_lock);
3759 ++ list_for_each(tmp, &cifs_tcp_ses_list) {
3760 ++ server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
3761 +
3762 +- cifs_dbg(FYI, "found in the pending open list\n");
3763 +- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3764 +- le32_to_cpu(rsp->NewLeaseState));
3765 ++ list_for_each(tmp1, &server->smb_ses_list) {
3766 ++ ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
3767 +
3768 +- open->oplock =
3769 +- smb2_map_lease_to_oplock(rsp->NewLeaseState);
3770 +- }
3771 +- if (found) {
3772 +- spin_unlock(&cifs_file_list_lock);
3773 +- spin_unlock(&cifs_tcp_ses_lock);
3774 +- return true;
3775 ++ spin_lock(&cifs_file_list_lock);
3776 ++ list_for_each(tmp2, &ses->tcon_list) {
3777 ++ tcon = list_entry(tmp2, struct cifs_tcon,
3778 ++ tcon_list);
3779 ++ cifs_stats_inc(
3780 ++ &tcon->stats.cifs_stats.num_oplock_brks);
3781 ++ if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3782 ++ spin_unlock(&cifs_file_list_lock);
3783 ++ spin_unlock(&cifs_tcp_ses_lock);
3784 ++ return true;
3785 ++ }
3786 + }
3787 ++ spin_unlock(&cifs_file_list_lock);
3788 + }
3789 +- spin_unlock(&cifs_file_list_lock);
3790 + }
3791 + spin_unlock(&cifs_tcp_ses_lock);
3792 + kfree(lw);
3793 +@@ -528,7 +540,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3794 + if (rsp->StructureSize !=
3795 + smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
3796 + if (le16_to_cpu(rsp->StructureSize) == 44)
3797 +- return smb2_is_valid_lease_break(buffer, server);
3798 ++ return smb2_is_valid_lease_break(buffer);
3799 + else
3800 + return false;
3801 + }
3802 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3803 +index ccbfbbb1..904ca1a2 100644
3804 +--- a/fs/ext4/inode.c
3805 ++++ b/fs/ext4/inode.c
3806 +@@ -4706,7 +4706,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3807 + ext4_journal_stop(handle);
3808 + }
3809 +
3810 +- if (attr->ia_valid & ATTR_SIZE) {
3811 ++ if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
3812 ++ handle_t *handle;
3813 ++ loff_t oldsize = inode->i_size;
3814 +
3815 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3816 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3817 +@@ -4714,73 +4716,60 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3818 + if (attr->ia_size > sbi->s_bitmap_maxbytes)
3819 + return -EFBIG;
3820 + }
3821 +- }
3822 +-
3823 +- if (S_ISREG(inode->i_mode) &&
3824 +- attr->ia_valid & ATTR_SIZE &&
3825 +- (attr->ia_size < inode->i_size)) {
3826 +- handle_t *handle;
3827 +-
3828 +- handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
3829 +- if (IS_ERR(handle)) {
3830 +- error = PTR_ERR(handle);
3831 +- goto err_out;
3832 +- }
3833 +- if (ext4_handle_valid(handle)) {
3834 +- error = ext4_orphan_add(handle, inode);
3835 +- orphan = 1;
3836 +- }
3837 +- EXT4_I(inode)->i_disksize = attr->ia_size;
3838 +- rc = ext4_mark_inode_dirty(handle, inode);
3839 +- if (!error)
3840 +- error = rc;
3841 +- ext4_journal_stop(handle);
3842 +-
3843 +- if (ext4_should_order_data(inode)) {
3844 +- error = ext4_begin_ordered_truncate(inode,
3845 ++ if (S_ISREG(inode->i_mode) &&
3846 ++ (attr->ia_size < inode->i_size)) {
3847 ++ if (ext4_should_order_data(inode)) {
3848 ++ error = ext4_begin_ordered_truncate(inode,
3849 + attr->ia_size);
3850 +- if (error) {
3851 +- /* Do as much error cleanup as possible */
3852 +- handle = ext4_journal_start(inode,
3853 +- EXT4_HT_INODE, 3);
3854 +- if (IS_ERR(handle)) {
3855 +- ext4_orphan_del(NULL, inode);
3856 ++ if (error)
3857 + goto err_out;
3858 +- }
3859 +- ext4_orphan_del(handle, inode);
3860 +- orphan = 0;
3861 +- ext4_journal_stop(handle);
3862 ++ }
3863 ++ handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
3864 ++ if (IS_ERR(handle)) {
3865 ++ error = PTR_ERR(handle);
3866 ++ goto err_out;
3867 ++ }
3868 ++ if (ext4_handle_valid(handle)) {
3869 ++ error = ext4_orphan_add(handle, inode);
3870 ++ orphan = 1;
3871 ++ }
3872 ++ EXT4_I(inode)->i_disksize = attr->ia_size;
3873 ++ rc = ext4_mark_inode_dirty(handle, inode);
3874 ++ if (!error)
3875 ++ error = rc;
3876 ++ ext4_journal_stop(handle);
3877 ++ if (error) {
3878 ++ ext4_orphan_del(NULL, inode);
3879 + goto err_out;
3880 + }
3881 + }
3882 +- }
3883 +-
3884 +- if (attr->ia_valid & ATTR_SIZE) {
3885 +- if (attr->ia_size != inode->i_size) {
3886 +- loff_t oldsize = inode->i_size;
3887 +
3888 +- i_size_write(inode, attr->ia_size);
3889 +- /*
3890 +- * Blocks are going to be removed from the inode. Wait
3891 +- * for dio in flight. Temporarily disable
3892 +- * dioread_nolock to prevent livelock.
3893 +- */
3894 +- if (orphan) {
3895 +- if (!ext4_should_journal_data(inode)) {
3896 +- ext4_inode_block_unlocked_dio(inode);
3897 +- inode_dio_wait(inode);
3898 +- ext4_inode_resume_unlocked_dio(inode);
3899 +- } else
3900 +- ext4_wait_for_tail_page_commit(inode);
3901 +- }
3902 +- /*
3903 +- * Truncate pagecache after we've waited for commit
3904 +- * in data=journal mode to make pages freeable.
3905 +- */
3906 +- truncate_pagecache(inode, oldsize, inode->i_size);
3907 ++ i_size_write(inode, attr->ia_size);
3908 ++ /*
3909 ++ * Blocks are going to be removed from the inode. Wait
3910 ++ * for dio in flight. Temporarily disable
3911 ++ * dioread_nolock to prevent livelock.
3912 ++ */
3913 ++ if (orphan) {
3914 ++ if (!ext4_should_journal_data(inode)) {
3915 ++ ext4_inode_block_unlocked_dio(inode);
3916 ++ inode_dio_wait(inode);
3917 ++ ext4_inode_resume_unlocked_dio(inode);
3918 ++ } else
3919 ++ ext4_wait_for_tail_page_commit(inode);
3920 + }
3921 +- ext4_truncate(inode);
3922 ++ /*
3923 ++ * Truncate pagecache after we've waited for commit
3924 ++ * in data=journal mode to make pages freeable.
3925 ++ */
3926 ++ truncate_pagecache(inode, oldsize, inode->i_size);
3927 + }
3928 ++ /*
3929 ++ * We want to call ext4_truncate() even if attr->ia_size ==
3930 ++ * inode->i_size for cases like truncation of fallocated space
3931 ++ */
3932 ++ if (attr->ia_valid & ATTR_SIZE)
3933 ++ ext4_truncate(inode);
3934 +
3935 + if (!rc) {
3936 + setattr_copy(inode, attr);
3937 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3938 +index 5b127469..e67b13de 100644
3939 +--- a/fs/fuse/dir.c
3940 ++++ b/fs/fuse/dir.c
3941 +@@ -1175,6 +1175,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
3942 + return -EIO;
3943 + if (reclen > nbytes)
3944 + break;
3945 ++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
3946 ++ return -EIO;
3947 +
3948 + over = filldir(dstbuf, dirent->name, dirent->namelen,
3949 + file->f_pos, dirent->ino, dirent->type);
3950 +@@ -1323,6 +1325,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
3951 + return -EIO;
3952 + if (reclen > nbytes)
3953 + break;
3954 ++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
3955 ++ return -EIO;
3956 +
3957 + if (!over) {
3958 + /* We fill entries into dstbuf only as much as
3959 +@@ -1594,6 +1598,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
3960 + struct file *file)
3961 + {
3962 + struct fuse_conn *fc = get_fuse_conn(inode);
3963 ++ struct fuse_inode *fi = get_fuse_inode(inode);
3964 + struct fuse_req *req;
3965 + struct fuse_setattr_in inarg;
3966 + struct fuse_attr_out outarg;
3967 +@@ -1621,8 +1626,10 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
3968 + if (IS_ERR(req))
3969 + return PTR_ERR(req);
3970 +
3971 +- if (is_truncate)
3972 ++ if (is_truncate) {
3973 + fuse_set_nowrite(inode);
3974 ++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3975 ++ }
3976 +
3977 + memset(&inarg, 0, sizeof(inarg));
3978 + memset(&outarg, 0, sizeof(outarg));
3979 +@@ -1684,12 +1691,14 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
3980 + invalidate_inode_pages2(inode->i_mapping);
3981 + }
3982 +
3983 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3984 + return 0;
3985 +
3986 + error:
3987 + if (is_truncate)
3988 + fuse_release_nowrite(inode);
3989 +
3990 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3991 + return err;
3992 + }
3993 +
3994 +@@ -1753,6 +1762,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
3995 + fc->no_setxattr = 1;
3996 + err = -EOPNOTSUPP;
3997 + }
3998 ++ if (!err)
3999 ++ fuse_invalidate_attr(inode);
4000 + return err;
4001 + }
4002 +
4003 +@@ -1882,6 +1893,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
4004 + fc->no_removexattr = 1;
4005 + err = -EOPNOTSUPP;
4006 + }
4007 ++ if (!err)
4008 ++ fuse_invalidate_attr(inode);
4009 + return err;
4010 + }
4011 +
4012 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
4013 +index 35f28103..473e8453 100644
4014 +--- a/fs/fuse/file.c
4015 ++++ b/fs/fuse/file.c
4016 +@@ -630,7 +630,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
4017 + struct fuse_inode *fi = get_fuse_inode(inode);
4018 +
4019 + spin_lock(&fc->lock);
4020 +- if (attr_ver == fi->attr_version && size < inode->i_size) {
4021 ++ if (attr_ver == fi->attr_version && size < inode->i_size &&
4022 ++ !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
4023 + fi->attr_version = ++fc->attr_version;
4024 + i_size_write(inode, size);
4025 + }
4026 +@@ -1033,12 +1034,16 @@ static ssize_t fuse_perform_write(struct file *file,
4027 + {
4028 + struct inode *inode = mapping->host;
4029 + struct fuse_conn *fc = get_fuse_conn(inode);
4030 ++ struct fuse_inode *fi = get_fuse_inode(inode);
4031 + int err = 0;
4032 + ssize_t res = 0;
4033 +
4034 + if (is_bad_inode(inode))
4035 + return -EIO;
4036 +
4037 ++ if (inode->i_size < pos + iov_iter_count(ii))
4038 ++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4039 ++
4040 + do {
4041 + struct fuse_req *req;
4042 + ssize_t count;
4043 +@@ -1074,6 +1079,7 @@ static ssize_t fuse_perform_write(struct file *file,
4044 + if (res > 0)
4045 + fuse_write_update_size(inode, pos);
4046 +
4047 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4048 + fuse_invalidate_attr(inode);
4049 +
4050 + return res > 0 ? res : err;
4051 +@@ -1530,7 +1536,6 @@ static int fuse_writepage_locked(struct page *page)
4052 +
4053 + inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
4054 + inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
4055 +- end_page_writeback(page);
4056 +
4057 + spin_lock(&fc->lock);
4058 + list_add(&req->writepages_entry, &fi->writepages);
4059 +@@ -1538,6 +1543,8 @@ static int fuse_writepage_locked(struct page *page)
4060 + fuse_flush_writepages(inode);
4061 + spin_unlock(&fc->lock);
4062 +
4063 ++ end_page_writeback(page);
4064 ++
4065 + return 0;
4066 +
4067 + err_free:
4068 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
4069 +index fde7249a..5ced199b 100644
4070 +--- a/fs/fuse/fuse_i.h
4071 ++++ b/fs/fuse/fuse_i.h
4072 +@@ -115,6 +115,8 @@ struct fuse_inode {
4073 + enum {
4074 + /** Advise readdirplus */
4075 + FUSE_I_ADVISE_RDPLUS,
4076 ++ /** An operation changing file size is in progress */
4077 ++ FUSE_I_SIZE_UNSTABLE,
4078 + };
4079 +
4080 + struct fuse_conn;
4081 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
4082 +index 9a0cdde1..b5718516 100644
4083 +--- a/fs/fuse/inode.c
4084 ++++ b/fs/fuse/inode.c
4085 +@@ -201,7 +201,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
4086 + struct timespec old_mtime;
4087 +
4088 + spin_lock(&fc->lock);
4089 +- if (attr_version != 0 && fi->attr_version > attr_version) {
4090 ++ if ((attr_version != 0 && fi->attr_version > attr_version) ||
4091 ++ test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
4092 + spin_unlock(&fc->lock);
4093 + return;
4094 + }
4095 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
4096 +index d9b8aebd..d3705490 100644
4097 +--- a/fs/isofs/inode.c
4098 ++++ b/fs/isofs/inode.c
4099 +@@ -125,8 +125,8 @@ static void destroy_inodecache(void)
4100 +
4101 + static int isofs_remount(struct super_block *sb, int *flags, char *data)
4102 + {
4103 +- /* we probably want a lot more here */
4104 +- *flags |= MS_RDONLY;
4105 ++ if (!(*flags & MS_RDONLY))
4106 ++ return -EROFS;
4107 + return 0;
4108 + }
4109 +
4110 +@@ -779,15 +779,6 @@ root_found:
4111 + */
4112 + s->s_maxbytes = 0x80000000000LL;
4113 +
4114 +- /*
4115 +- * The CDROM is read-only, has no nodes (devices) on it, and since
4116 +- * all of the files appear to be owned by root, we really do not want
4117 +- * to allow suid. (suid or devices will not show up unless we have
4118 +- * Rock Ridge extensions)
4119 +- */
4120 +-
4121 +- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
4122 +-
4123 + /* Set this for reference. Its not currently used except on write
4124 + which we don't have .. */
4125 +
4126 +@@ -1546,6 +1537,9 @@ struct inode *isofs_iget(struct super_block *sb,
4127 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
4128 + int flags, const char *dev_name, void *data)
4129 + {
4130 ++ /* We don't support read-write mounts */
4131 ++ if (!(flags & MS_RDONLY))
4132 ++ return ERR_PTR(-EACCES);
4133 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
4134 + }
4135 +
4136 +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
4137 +index 2487116d..84606472 100644
4138 +--- a/fs/ocfs2/extent_map.c
4139 ++++ b/fs/ocfs2/extent_map.c
4140 +@@ -781,7 +781,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4141 + cpos = map_start >> osb->s_clustersize_bits;
4142 + mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
4143 + map_start + map_len);
4144 +- mapping_end -= cpos;
4145 + is_last = 0;
4146 + while (cpos < mapping_end && !is_last) {
4147 + u32 fe_flags;
4148 +diff --git a/fs/proc/root.c b/fs/proc/root.c
4149 +index 41a6ea93..04ec276c 100644
4150 +--- a/fs/proc/root.c
4151 ++++ b/fs/proc/root.c
4152 +@@ -110,7 +110,8 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
4153 + ns = task_active_pid_ns(current);
4154 + options = data;
4155 +
4156 +- if (!current_user_ns()->may_mount_proc)
4157 ++ if (!current_user_ns()->may_mount_proc ||
4158 ++ !ns_capable(ns->user_ns, CAP_SYS_ADMIN))
4159 + return ERR_PTR(-EPERM);
4160 + }
4161 +
4162 +diff --git a/include/linux/compat.h b/include/linux/compat.h
4163 +index 7f0c1dd0..ec1aee4a 100644
4164 +--- a/include/linux/compat.h
4165 ++++ b/include/linux/compat.h
4166 +@@ -669,6 +669,13 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
4167 +
4168 + int compat_restore_altstack(const compat_stack_t __user *uss);
4169 + int __compat_save_altstack(compat_stack_t __user *, unsigned long);
4170 ++#define compat_save_altstack_ex(uss, sp) do { \
4171 ++ compat_stack_t __user *__uss = uss; \
4172 ++ struct task_struct *t = current; \
4173 ++ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
4174 ++ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
4175 ++ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
4176 ++} while (0);
4177 +
4178 + asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
4179 + struct compat_timespec __user *interval);
4180 +diff --git a/include/linux/hid.h b/include/linux/hid.h
4181 +index 0c48991b..ff545cc3 100644
4182 +--- a/include/linux/hid.h
4183 ++++ b/include/linux/hid.h
4184 +@@ -393,10 +393,12 @@ struct hid_report {
4185 + struct hid_device *device; /* associated device */
4186 + };
4187 +
4188 ++#define HID_MAX_IDS 256
4189 ++
4190 + struct hid_report_enum {
4191 + unsigned numbered;
4192 + struct list_head report_list;
4193 +- struct hid_report *report_id_hash[256];
4194 ++ struct hid_report *report_id_hash[HID_MAX_IDS];
4195 + };
4196 +
4197 + #define HID_REPORT_TYPES 3
4198 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4199 +index c1291624..6c7bb35a 100644
4200 +--- a/include/linux/pci_ids.h
4201 ++++ b/include/linux/pci_ids.h
4202 +@@ -518,6 +518,8 @@
4203 + #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
4204 + #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
4205 + #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
4206 ++#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
4207 ++#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
4208 + #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
4209 + #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
4210 + #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
4211 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
4212 +index f4b1001a..4106721c 100644
4213 +--- a/include/linux/rculist.h
4214 ++++ b/include/linux/rculist.h
4215 +@@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
4216 + */
4217 + #define list_first_or_null_rcu(ptr, type, member) \
4218 + ({struct list_head *__ptr = (ptr); \
4219 +- struct list_head __rcu *__next = list_next_rcu(__ptr); \
4220 +- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
4221 ++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
4222 ++ likely(__ptr != __next) ? \
4223 ++ list_entry_rcu(__next, type, member) : NULL; \
4224 + })
4225 +
4226 + /**
4227 +diff --git a/include/linux/signal.h b/include/linux/signal.h
4228 +index d8974847..2ac423bd 100644
4229 +--- a/include/linux/signal.h
4230 ++++ b/include/linux/signal.h
4231 +@@ -434,6 +434,14 @@ void signals_init(void);
4232 + int restore_altstack(const stack_t __user *);
4233 + int __save_altstack(stack_t __user *, unsigned long);
4234 +
4235 ++#define save_altstack_ex(uss, sp) do { \
4236 ++ stack_t __user *__uss = uss; \
4237 ++ struct task_struct *t = current; \
4238 ++ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
4239 ++ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
4240 ++ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
4241 ++} while (0);
4242 ++
4243 + #ifdef CONFIG_PROC_FS
4244 + struct seq_file;
4245 + extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
4246 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
4247 +index f5f5c7df..0fdff28d 100644
4248 +--- a/include/linux/usb/hcd.h
4249 ++++ b/include/linux/usb/hcd.h
4250 +@@ -410,7 +410,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
4251 + extern void usb_hcd_pci_remove(struct pci_dev *dev);
4252 + extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
4253 +
4254 +-#ifdef CONFIG_PM_SLEEP
4255 ++#ifdef CONFIG_PM
4256 + extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
4257 + #endif
4258 + #endif /* CONFIG_PCI */
4259 +diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
4260 +index 7343a27f..47ada233 100644
4261 +--- a/include/media/v4l2-ctrls.h
4262 ++++ b/include/media/v4l2-ctrls.h
4263 +@@ -22,6 +22,7 @@
4264 + #define _V4L2_CTRLS_H
4265 +
4266 + #include <linux/list.h>
4267 ++#include <linux/mutex.h>
4268 + #include <linux/videodev2.h>
4269 +
4270 + /* forward references */
4271 +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
4272 +index f3569747..ad8e1bdc 100644
4273 +--- a/kernel/events/uprobes.c
4274 ++++ b/kernel/events/uprobes.c
4275 +@@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
4276 + tmp = ri;
4277 + ri = ri->next;
4278 + kfree(tmp);
4279 ++ utask->depth--;
4280 +
4281 + if (!chained)
4282 + break;
4283 +-
4284 +- utask->depth--;
4285 +-
4286 + BUG_ON(!ri);
4287 + }
4288 +
4289 +diff --git a/kernel/fork.c b/kernel/fork.c
4290 +index ffbc0904..80d92e98 100644
4291 +--- a/kernel/fork.c
4292 ++++ b/kernel/fork.c
4293 +@@ -1171,10 +1171,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
4294 + return ERR_PTR(-EINVAL);
4295 +
4296 + /*
4297 +- * If the new process will be in a different pid namespace
4298 +- * don't allow the creation of threads.
4299 ++ * If the new process will be in a different pid namespace don't
4300 ++ * allow it to share a thread group or signal handlers with the
4301 ++ * forking task.
4302 + */
4303 +- if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
4304 ++ if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
4305 + (task_active_pid_ns(current) != current->nsproxy->pid_ns))
4306 + return ERR_PTR(-EINVAL);
4307 +
4308 +diff --git a/kernel/pid.c b/kernel/pid.c
4309 +index 0db3e791..0eb6d8e8 100644
4310 +--- a/kernel/pid.c
4311 ++++ b/kernel/pid.c
4312 +@@ -264,6 +264,7 @@ void free_pid(struct pid *pid)
4313 + struct pid_namespace *ns = upid->ns;
4314 + hlist_del_rcu(&upid->pid_chain);
4315 + switch(--ns->nr_hashed) {
4316 ++ case 2:
4317 + case 1:
4318 + /* When all that is left in the pid namespace
4319 + * is the reaper wake up the reaper. The reaper
4320 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4321 +index 362c329b..b92d0ce4 100644
4322 +--- a/mm/huge_memory.c
4323 ++++ b/mm/huge_memory.c
4324 +@@ -2286,6 +2286,8 @@ static void collapse_huge_page(struct mm_struct *mm,
4325 + goto out;
4326 +
4327 + vma = find_vma(mm, address);
4328 ++ if (!vma)
4329 ++ goto out;
4330 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
4331 + hend = vma->vm_end & HPAGE_PMD_MASK;
4332 + if (address < hstart || address + HPAGE_PMD_SIZE > hend)
4333 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4334 +index 82a187ae..905ce72c 100644
4335 +--- a/mm/memcontrol.c
4336 ++++ b/mm/memcontrol.c
4337 +@@ -5584,7 +5584,13 @@ static int compare_thresholds(const void *a, const void *b)
4338 + const struct mem_cgroup_threshold *_a = a;
4339 + const struct mem_cgroup_threshold *_b = b;
4340 +
4341 +- return _a->threshold - _b->threshold;
4342 ++ if (_a->threshold > _b->threshold)
4343 ++ return 1;
4344 ++
4345 ++ if (_a->threshold < _b->threshold)
4346 ++ return -1;
4347 ++
4348 ++ return 0;
4349 + }
4350 +
4351 + static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4352 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
4353 +index 3a246a6c..46ec7672 100644
4354 +--- a/net/ceph/osd_client.c
4355 ++++ b/net/ceph/osd_client.c
4356 +@@ -2130,6 +2130,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4357 + dout("osdc_start_request failed map, "
4358 + " will retry %lld\n", req->r_tid);
4359 + rc = 0;
4360 ++ } else {
4361 ++ __unregister_request(osdc, req);
4362 + }
4363 + goto out_unlock;
4364 + }
4365 +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
4366 +index 603ddd92..dbd9a479 100644
4367 +--- a/net/ceph/osdmap.c
4368 ++++ b/net/ceph/osdmap.c
4369 +@@ -1129,7 +1129,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
4370 +
4371 + /* pg_temp? */
4372 + pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
4373 +- pool->pgp_num_mask);
4374 ++ pool->pg_num_mask);
4375 + pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
4376 + if (pg) {
4377 + *num = pg->len;
4378 +diff --git a/net/core/scm.c b/net/core/scm.c
4379 +index 03795d01..b4da80b1 100644
4380 +--- a/net/core/scm.c
4381 ++++ b/net/core/scm.c
4382 +@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
4383 + return -EINVAL;
4384 +
4385 + if ((creds->pid == task_tgid_vnr(current) ||
4386 +- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
4387 ++ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
4388 + ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
4389 + uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
4390 + ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
4391 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4392 +index de18722c..624e6c04 100644
4393 +--- a/sound/pci/hda/hda_intel.c
4394 ++++ b/sound/pci/hda/hda_intel.c
4395 +@@ -3335,6 +3335,7 @@ static struct snd_pci_quirk msi_black_list[] = {
4396 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
4397 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
4398 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
4399 ++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
4400 + SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
4401 + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
4402 + {}
4403 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4404 +index 5bc41945..b5375ed0 100644
4405 +--- a/sound/pci/hda/patch_hdmi.c
4406 ++++ b/sound/pci/hda/patch_hdmi.c
4407 +@@ -67,6 +67,8 @@ struct hdmi_spec_per_pin {
4408 + struct delayed_work work;
4409 + struct snd_kcontrol *eld_ctl;
4410 + int repoll_count;
4411 ++ bool setup; /* the stream has been set up by prepare callback */
4412 ++ int channels; /* current number of channels */
4413 + bool non_pcm;
4414 + bool chmap_set; /* channel-map override by ALSA API? */
4415 + unsigned char chmap[8]; /* ALSA API channel-map */
4416 +@@ -551,6 +553,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
4417 + }
4418 + }
4419 +
4420 ++ if (!ca) {
4421 ++ /* if there was no match, select the regular ALSA channel
4422 ++ * allocation with the matching number of channels */
4423 ++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
4424 ++ if (channels == channel_allocations[i].channels) {
4425 ++ ca = channel_allocations[i].ca_index;
4426 ++ break;
4427 ++ }
4428 ++ }
4429 ++ }
4430 ++
4431 + snd_print_channel_allocation(eld->info.spk_alloc, buf, sizeof(buf));
4432 + snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
4433 + ca, channels, buf);
4434 +@@ -868,18 +881,19 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
4435 + return true;
4436 + }
4437 +
4438 +-static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
4439 +- bool non_pcm,
4440 +- struct snd_pcm_substream *substream)
4441 ++static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
4442 ++ struct hdmi_spec_per_pin *per_pin,
4443 ++ bool non_pcm)
4444 + {
4445 +- struct hdmi_spec *spec = codec->spec;
4446 +- struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4447 + hda_nid_t pin_nid = per_pin->pin_nid;
4448 +- int channels = substream->runtime->channels;
4449 ++ int channels = per_pin->channels;
4450 + struct hdmi_eld *eld;
4451 + int ca;
4452 + union audio_infoframe ai;
4453 +
4454 ++ if (!channels)
4455 ++ return;
4456 ++
4457 + eld = &per_pin->sink_eld;
4458 + if (!eld->monitor_present)
4459 + return;
4460 +@@ -1263,6 +1277,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
4461 + eld_changed = true;
4462 + }
4463 + if (update_eld) {
4464 ++ bool old_eld_valid = pin_eld->eld_valid;
4465 + pin_eld->eld_valid = eld->eld_valid;
4466 + eld_changed = pin_eld->eld_size != eld->eld_size ||
4467 + memcmp(pin_eld->eld_buffer, eld->eld_buffer,
4468 +@@ -1272,6 +1287,18 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
4469 + eld->eld_size);
4470 + pin_eld->eld_size = eld->eld_size;
4471 + pin_eld->info = eld->info;
4472 ++
4473 ++ /* Haswell-specific workaround: re-setup when the transcoder is
4474 ++ * changed during the stream playback
4475 ++ */
4476 ++ if (codec->vendor_id == 0x80862807 &&
4477 ++ eld->eld_valid && !old_eld_valid && per_pin->setup) {
4478 ++ snd_hda_codec_write(codec, pin_nid, 0,
4479 ++ AC_VERB_SET_AMP_GAIN_MUTE,
4480 ++ AMP_OUT_UNMUTE);
4481 ++ hdmi_setup_audio_infoframe(codec, per_pin,
4482 ++ per_pin->non_pcm);
4483 ++ }
4484 + }
4485 + mutex_unlock(&pin_eld->lock);
4486 +
4487 +@@ -1444,14 +1471,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
4488 + hda_nid_t cvt_nid = hinfo->nid;
4489 + struct hdmi_spec *spec = codec->spec;
4490 + int pin_idx = hinfo_to_pin_index(spec, hinfo);
4491 +- hda_nid_t pin_nid = get_pin(spec, pin_idx)->pin_nid;
4492 ++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4493 ++ hda_nid_t pin_nid = per_pin->pin_nid;
4494 + bool non_pcm;
4495 +
4496 + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
4497 ++ per_pin->channels = substream->runtime->channels;
4498 ++ per_pin->setup = true;
4499 +
4500 + hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
4501 +
4502 +- hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
4503 ++ hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
4504 +
4505 + return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
4506 + }
4507 +@@ -1491,6 +1521,9 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
4508 + snd_hda_spdif_ctls_unassign(codec, pin_idx);
4509 + per_pin->chmap_set = false;
4510 + memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
4511 ++
4512 ++ per_pin->setup = false;
4513 ++ per_pin->channels = 0;
4514 + }
4515 +
4516 + return 0;
4517 +@@ -1626,8 +1659,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
4518 + per_pin->chmap_set = true;
4519 + memcpy(per_pin->chmap, chmap, sizeof(chmap));
4520 + if (prepared)
4521 +- hdmi_setup_audio_infoframe(codec, pin_idx, per_pin->non_pcm,
4522 +- substream);
4523 ++ hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
4524 +
4525 + return 0;
4526 + }
4527 +diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
4528 +index 5402dfbb..8a8d9364 100644
4529 +--- a/sound/soc/codecs/mc13783.c
4530 ++++ b/sound/soc/codecs/mc13783.c
4531 +@@ -126,6 +126,10 @@ static int mc13783_write(struct snd_soc_codec *codec,
4532 +
4533 + ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
4534 +
4535 ++ /* include errata fix for spi audio problems */
4536 ++ if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
4537 ++ ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
4538 ++
4539 + mc13xxx_unlock(priv->mc13xxx);
4540 +
4541 + return ret;
4542 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
4543 +index 0a4ffdd1..5e5af898 100644
4544 +--- a/sound/soc/codecs/wm8960.c
4545 ++++ b/sound/soc/codecs/wm8960.c
4546 +@@ -857,9 +857,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
4547 + if (pll_div.k) {
4548 + reg |= 0x20;
4549 +
4550 +- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
4551 +- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
4552 +- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
4553 ++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
4554 ++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
4555 ++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
4556 + }
4557 + snd_soc_write(codec, WM8960_PLL1, reg);
4558 +
4559
4560 Deleted: genpatches-2.6/trunk/3.10/1500_CVE-2013-4300-net-Check-the-correct-namespace-when-spoofing-pid-ov.patch
4561 ===================================================================
4562 --- genpatches-2.6/trunk/3.10/1500_CVE-2013-4300-net-Check-the-correct-namespace-when-spoofing-pid-ov.patch 2013-09-25 17:13:05 UTC (rev 2528)
4563 +++ genpatches-2.6/trunk/3.10/1500_CVE-2013-4300-net-Check-the-correct-namespace-when-spoofing-pid-ov.patch 2013-09-27 17:20:08 UTC (rev 2529)
4564 @@ -1,35 +0,0 @@
4565 -From d661684cf6820331feae71146c35da83d794467e Mon Sep 17 00:00:00 2001
4566 -From: Andy Lutomirski <luto@amacapital.net>
4567 -Date: Thu, 22 Aug 2013 11:39:15 -0700
4568 -Subject: [PATCH] net: Check the correct namespace when spoofing pid over
4569 - SCM_RIGHTS
4570 -
4571 -This is a security bug.
4572 -
4573 -The follow-up will fix nsproxy to discourage this type of issue from
4574 -happening again.
4575 -
4576 -Cc: stable@vger.kernel.org
4577 -Signed-off-by: Andy Lutomirski <luto@amacapital.net>
4578 -Reviewed-by: "Eric W. Biederman" <ebiederm@xmission.com>
4579 -Signed-off-by: David S. Miller <davem@davemloft.net>
4580 ----
4581 - net/core/scm.c | 2 +-
4582 - 1 file changed, 1 insertion(+), 1 deletion(-)
4583 -
4584 -diff --git a/net/core/scm.c b/net/core/scm.c
4585 -index 03795d0..b4da80b 100644
4586 ---- a/net/core/scm.c
4587 -+++ b/net/core/scm.c
4588 -@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
4589 - return -EINVAL;
4590 -
4591 - if ((creds->pid == task_tgid_vnr(current) ||
4592 -- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
4593 -+ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
4594 - ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
4595 - uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
4596 - ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
4597 ---
4598 -1.8.3.2
4599 -
4600
4601 Modified: genpatches-2.6/trunk/3.11/0000_README
4602 ===================================================================
4603 --- genpatches-2.6/trunk/3.11/0000_README 2013-09-25 17:13:05 UTC (rev 2528)
4604 +++ genpatches-2.6/trunk/3.11/0000_README 2013-09-27 17:20:08 UTC (rev 2529)
4605 @@ -46,6 +46,10 @@
4606 From: http://www.kernel.org
4607 Desc: Linux 3.11.1
4608
4609 +Patch: 1001_linux-3.11.2.patch
4610 +From: http://www.kernel.org
4611 +Desc: Linux 3.11.2
4612 +
4613 Patch: 1500_XATTR_USER_PREFIX.patch
4614 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
4615 Desc: Support for namespace user.pax.* on tmpfs.
4616
4617 Added: genpatches-2.6/trunk/3.11/1001_linux-3.11.2.patch
4618 ===================================================================
4619 --- genpatches-2.6/trunk/3.11/1001_linux-3.11.2.patch (rev 0)
4620 +++ genpatches-2.6/trunk/3.11/1001_linux-3.11.2.patch 2013-09-27 17:20:08 UTC (rev 2529)
4621 @@ -0,0 +1,4419 @@
4622 +diff --git a/Makefile b/Makefile
4623 +index efd23961..aede3194 100644
4624 +--- a/Makefile
4625 ++++ b/Makefile
4626 +@@ -1,6 +1,6 @@
4627 + VERSION = 3
4628 + PATCHLEVEL = 11
4629 +-SUBLEVEL = 1
4630 ++SUBLEVEL = 2
4631 + EXTRAVERSION =
4632 + NAME = Linux for Workgroups
4633 +
4634 +diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
4635 +index 6fc1159d..764f1e3b 100644
4636 +--- a/arch/arc/include/asm/sections.h
4637 ++++ b/arch/arc/include/asm/sections.h
4638 +@@ -11,7 +11,6 @@
4639 +
4640 + #include <asm-generic/sections.h>
4641 +
4642 +-extern char _int_vec_base_lds[];
4643 + extern char __arc_dccm_base[];
4644 + extern char __dtb_start[];
4645 +
4646 +diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
4647 +index 2a913f85..0f944f02 100644
4648 +--- a/arch/arc/kernel/head.S
4649 ++++ b/arch/arc/kernel/head.S
4650 +@@ -34,6 +34,9 @@ stext:
4651 + ; IDENTITY Reg [ 3 2 1 0 ]
4652 + ; (cpu-id) ^^^ => Zero for UP ARC700
4653 + ; => #Core-ID if SMP (Master 0)
4654 ++ ; Note that non-boot CPUs might not land here if halt-on-reset and
4655 ++ ; instead breath life from @first_lines_of_secondary, but we still
4656 ++ ; need to make sure only boot cpu takes this path.
4657 + GET_CPU_ID r5
4658 + cmp r5, 0
4659 + jnz arc_platform_smp_wait_to_boot
4660 +@@ -98,6 +101,8 @@ stext:
4661 +
4662 + first_lines_of_secondary:
4663 +
4664 ++ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
4665 ++
4666 + ; setup per-cpu idle task as "current" on this CPU
4667 + ld r0, [@secondary_idle_tsk]
4668 + SET_CURR_TASK_ON_CPU r0, r1
4669 +diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
4670 +index 305b3f86..5fc92455 100644
4671 +--- a/arch/arc/kernel/irq.c
4672 ++++ b/arch/arc/kernel/irq.c
4673 +@@ -24,7 +24,6 @@
4674 + * -Needed for each CPU (hence not foldable into init_IRQ)
4675 + *
4676 + * what it does ?
4677 +- * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
4678 + * -Disable all IRQs (on CPU side)
4679 + * -Optionally, setup the High priority Interrupts as Level 2 IRQs
4680 + */
4681 +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
4682 +index 6b083454..e8185631 100644
4683 +--- a/arch/arc/kernel/setup.c
4684 ++++ b/arch/arc/kernel/setup.c
4685 +@@ -47,10 +47,7 @@ void read_arc_build_cfg_regs(void)
4686 + READ_BCR(AUX_IDENTITY, cpu->core);
4687 +
4688 + cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
4689 +-
4690 + cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
4691 +- if (cpu->vec_base == 0)
4692 +- cpu->vec_base = (unsigned int)_int_vec_base_lds;
4693 +
4694 + READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
4695 + cpu->uncached_base = uncached_space.start << 24;
4696 +diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
4697 +index ec087407..6f938ccb 100644
4698 +--- a/arch/arm/mach-versatile/include/mach/platform.h
4699 ++++ b/arch/arm/mach-versatile/include/mach/platform.h
4700 +@@ -231,12 +231,14 @@
4701 + /* PCI space */
4702 + #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */
4703 + #define VERSATILE_PCI_CFG_BASE 0x42000000
4704 ++#define VERSATILE_PCI_IO_BASE 0x43000000
4705 + #define VERSATILE_PCI_MEM_BASE0 0x44000000
4706 + #define VERSATILE_PCI_MEM_BASE1 0x50000000
4707 + #define VERSATILE_PCI_MEM_BASE2 0x60000000
4708 + /* Sizes of above maps */
4709 + #define VERSATILE_PCI_BASE_SIZE 0x01000000
4710 + #define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000
4711 ++#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000
4712 + #define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
4713 + #define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
4714 + #define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
4715 +diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
4716 +index e92e5e07..c97be4ea 100644
4717 +--- a/arch/arm/mach-versatile/pci.c
4718 ++++ b/arch/arm/mach-versatile/pci.c
4719 +@@ -43,9 +43,9 @@
4720 + #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
4721 + #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
4722 + #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
4723 +-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
4724 +-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
4725 +-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
4726 ++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
4727 ++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
4728 ++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
4729 + #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
4730 +
4731 + #define DEVICE_ID_OFFSET 0x00
4732 +@@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops = {
4733 + .write = versatile_write_config,
4734 + };
4735 +
4736 +-static struct resource io_mem = {
4737 +- .name = "PCI I/O space",
4738 ++static struct resource unused_mem = {
4739 ++ .name = "PCI unused",
4740 + .start = VERSATILE_PCI_MEM_BASE0,
4741 + .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
4742 + .flags = IORESOURCE_MEM,
4743 +@@ -195,9 +195,9 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
4744 + {
4745 + int ret = 0;
4746 +
4747 +- ret = request_resource(&iomem_resource, &io_mem);
4748 ++ ret = request_resource(&iomem_resource, &unused_mem);
4749 + if (ret) {
4750 +- printk(KERN_ERR "PCI: unable to allocate I/O "
4751 ++ printk(KERN_ERR "PCI: unable to allocate unused "
4752 + "memory region (%d)\n", ret);
4753 + goto out;
4754 + }
4755 +@@ -205,7 +205,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
4756 + if (ret) {
4757 + printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
4758 + "memory region (%d)\n", ret);
4759 +- goto release_io_mem;
4760 ++ goto release_unused_mem;
4761 + }
4762 + ret = request_resource(&iomem_resource, &pre_mem);
4763 + if (ret) {
4764 +@@ -225,8 +225,8 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
4765 +
4766 + release_non_mem:
4767 + release_resource(&non_mem);
4768 +- release_io_mem:
4769 +- release_resource(&io_mem);
4770 ++ release_unused_mem:
4771 ++ release_resource(&unused_mem);
4772 + out:
4773 + return ret;
4774 + }
4775 +@@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
4776 + goto out;
4777 + }
4778 +
4779 +- ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
4780 ++ ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
4781 + if (ret)
4782 + goto out;
4783 +
4784 +@@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
4785 + __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
4786 +
4787 + /*
4788 ++ * For many years the kernel and QEMU were symbiotically buggy
4789 ++ * in that they both assumed the same broken IRQ mapping.
4790 ++ * QEMU therefore attempts to auto-detect old broken kernels
4791 ++ * so that they still work on newer QEMU as they did on old
4792 ++ * QEMU. Since we now use the correct (ie matching-hardware)
4793 ++ * IRQ mapping we write a definitely different value to a
4794 ++ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
4795 ++ * real hardware behaviour and it need not be backwards
4796 ++ * compatible for us. This write is harmless on real hardware.
4797 ++ */
4798 ++ __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
4799 ++
4800 ++ /*
4801 + * Do not to map Versatile FPGA PCI device into memory space
4802 + */
4803 + pci_slot_ignore |= (1 << myslot);
4804 +@@ -327,13 +340,13 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
4805 + {
4806 + int irq;
4807 +
4808 +- /* slot, pin, irq
4809 +- * 24 1 IRQ_SIC_PCI0
4810 +- * 25 1 IRQ_SIC_PCI1
4811 +- * 26 1 IRQ_SIC_PCI2
4812 +- * 27 1 IRQ_SIC_PCI3
4813 ++ /*
4814 ++ * Slot INTA INTB INTC INTD
4815 ++ * 31 PCI1 PCI2 PCI3 PCI0
4816 ++ * 30 PCI0 PCI1 PCI2 PCI3
4817 ++ * 29 PCI3 PCI0 PCI1 PCI2
4818 + */
4819 +- irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
4820 ++ irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
4821 +
4822 + return irq;
4823 + }
4824 +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
4825 +index 8a6295c8..7071fcac 100644
4826 +--- a/arch/arm/xen/enlighten.c
4827 ++++ b/arch/arm/xen/enlighten.c
4828 +@@ -273,12 +273,15 @@ core_initcall(xen_guest_init);
4829 +
4830 + static int __init xen_pm_init(void)
4831 + {
4832 ++ if (!xen_domain())
4833 ++ return -ENODEV;
4834 ++
4835 + pm_power_off = xen_power_off;
4836 + arm_pm_restart = xen_restart;
4837 +
4838 + return 0;
4839 + }
4840 +-subsys_initcall(xen_pm_init);
4841 ++late_initcall(xen_pm_init);
4842 +
4843 + static irqreturn_t xen_arm_callback(int irq, void *arg)
4844 + {
4845 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
4846 +index 12e6ccb8..cea1594f 100644
4847 +--- a/arch/arm64/kernel/perf_event.c
4848 ++++ b/arch/arm64/kernel/perf_event.c
4849 +@@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_events,
4850 + if (is_software_event(event))
4851 + return 1;
4852 +
4853 +- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
4854 ++ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
4855 ++ return 1;
4856 ++
4857 ++ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
4858 + return 1;
4859 +
4860 + return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
4861 +@@ -781,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
4862 + /*
4863 + * PMXEVTYPER: Event selection reg
4864 + */
4865 +-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
4866 ++#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
4867 + #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
4868 +
4869 + /*
4870 +diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
4871 +index 765ef30e..733017b3 100644
4872 +--- a/arch/mips/ath79/clock.c
4873 ++++ b/arch/mips/ath79/clock.c
4874 +@@ -164,7 +164,7 @@ static void __init ar933x_clocks_init(void)
4875 + ath79_ahb_clk.rate = freq / t;
4876 + }
4877 +
4878 +- ath79_wdt_clk.rate = ath79_ref_clk.rate;
4879 ++ ath79_wdt_clk.rate = ath79_ahb_clk.rate;
4880 + ath79_uart_clk.rate = ath79_ref_clk.rate;
4881 + }
4882 +
4883 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
4884 +index ee5b690a..52e5758e 100644
4885 +--- a/arch/powerpc/kernel/align.c
4886 ++++ b/arch/powerpc/kernel/align.c
4887 +@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
4888 + nb = aligninfo[instr].len;
4889 + flags = aligninfo[instr].flags;
4890 +
4891 ++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
4892 ++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
4893 ++ nb = 8;
4894 ++ flags = LD+SW;
4895 ++ } else if (IS_XFORM(instruction) &&
4896 ++ ((instruction >> 1) & 0x3ff) == 660) {
4897 ++ nb = 8;
4898 ++ flags = ST+SW;
4899 ++ }
4900 ++
4901 + /* Byteswap little endian loads and stores */
4902 + swiz = 0;
4903 + if (regs->msr & MSR_LE) {
4904 +diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
4905 +index 94c1dd46..a3a5cb8e 100644
4906 +--- a/arch/powerpc/kvm/book3s_xics.c
4907 ++++ b/arch/powerpc/kvm/book3s_xics.c
4908 +@@ -19,6 +19,7 @@
4909 + #include <asm/hvcall.h>
4910 + #include <asm/xics.h>
4911 + #include <asm/debug.h>
4912 ++#include <asm/time.h>
4913 +
4914 + #include <linux/debugfs.h>
4915 + #include <linux/seq_file.h>
4916 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
4917 +index c11c8238..54b998f2 100644
4918 +--- a/arch/powerpc/platforms/pseries/setup.c
4919 ++++ b/arch/powerpc/platforms/pseries/setup.c
4920 +@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
4921 + }
4922 + early_initcall(alloc_dispatch_log_kmem_cache);
4923 +
4924 +-static void pSeries_idle(void)
4925 ++static void pseries_lpar_idle(void)
4926 + {
4927 + /* This would call on the cpuidle framework, and the back-end pseries
4928 + * driver to go to idle states
4929 +@@ -362,10 +362,22 @@ static void pSeries_idle(void)
4930 + if (cpuidle_idle_call()) {
4931 + /* On error, execute default handler
4932 + * to go into low thread priority and possibly
4933 +- * low power mode.
4934 ++ * low power mode by cedeing processor to hypervisor
4935 + */
4936 +- HMT_low();
4937 +- HMT_very_low();
4938 ++
4939 ++ /* Indicate to hypervisor that we are idle. */
4940 ++ get_lppaca()->idle = 1;
4941 ++
4942 ++ /*
4943 ++ * Yield the processor to the hypervisor. We return if
4944 ++ * an external interrupt occurs (which are driven prior
4945 ++ * to returning here) or if a prod occurs from another
4946 ++ * processor. When returning here, external interrupts
4947 ++ * are enabled.
4948 ++ */
4949 ++ cede_processor();
4950 ++
4951 ++ get_lppaca()->idle = 0;
4952 + }
4953 + }
4954 +
4955 +@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
4956 +
4957 + pSeries_nvram_init();
4958 +
4959 +- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
4960 ++ if (firmware_has_feature(FW_FEATURE_LPAR)) {
4961 + vpa_init(boot_cpuid);
4962 +- ppc_md.power_save = pSeries_idle;
4963 +- }
4964 +-
4965 +- if (firmware_has_feature(FW_FEATURE_LPAR))
4966 ++ ppc_md.power_save = pseries_lpar_idle;
4967 + ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
4968 +- else
4969 ++ } else {
4970 ++ /* No special idle routine */
4971 + ppc_md.enable_pmcs = power4_enable_pmcs;
4972 ++ }
4973 +
4974 + ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
4975 +
4976 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
4977 +index d5f10a43..70923928 100644
4978 +--- a/arch/s390/net/bpf_jit_comp.c
4979 ++++ b/arch/s390/net/bpf_jit_comp.c
4980 +@@ -805,7 +805,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
4981 + return NULL;
4982 + memset(header, 0, sz);
4983 + header->pages = sz / PAGE_SIZE;
4984 +- hole = sz - bpfsize + sizeof(*header);
4985 ++ hole = sz - (bpfsize + sizeof(*header));
4986 + /* Insert random number of illegal instructions before BPF code
4987 + * and make sure the first instruction starts at an even address.
4988 + */
4989 +diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
4990 +index 95feaa47..c70a234a 100644
4991 +--- a/arch/um/include/shared/os.h
4992 ++++ b/arch/um/include/shared/os.h
4993 +@@ -200,6 +200,7 @@ extern int os_unmap_memory(void *addr, int len);
4994 + extern int os_drop_memory(void *addr, int length);
4995 + extern int can_drop_memory(void);
4996 + extern void os_flush_stdout(void);
4997 ++extern int os_mincore(void *addr, unsigned long len);
4998 +
4999 + /* execvp.c */
5000 + extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
5001 +diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
5002 +index babe2182..d8b78a03 100644
5003 +--- a/arch/um/kernel/Makefile
5004 ++++ b/arch/um/kernel/Makefile
5005 +@@ -13,7 +13,7 @@ clean-files :=
5006 + obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
5007 + physmem.o process.o ptrace.o reboot.o sigio.o \
5008 + signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
5009 +- um_arch.o umid.o skas/
5010 ++ um_arch.o umid.o maccess.o skas/
5011 +
5012 + obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
5013 + obj-$(CONFIG_GPROF) += gprof_syms.o
5014 +diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
5015 +new file mode 100644
5016 +index 00000000..1f3d5c49
5017 +--- /dev/null
5018 ++++ b/arch/um/kernel/maccess.c
5019 +@@ -0,0 +1,24 @@
5020 ++/*
5021 ++ * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
5022 ++ *
5023 ++ * This program is free software; you can redistribute it and/or modify
5024 ++ * it under the terms of the GNU General Public License version 2 as
5025 ++ * published by the Free Software Foundation.
5026 ++ */
5027 ++
5028 ++#include <linux/uaccess.h>
5029 ++#include <linux/kernel.h>
5030 ++#include <os.h>
5031 ++
5032 ++long probe_kernel_read(void *dst, const void *src, size_t size)
5033 ++{
5034 ++ void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
5035 ++
5036 ++ if ((unsigned long)src < PAGE_SIZE || size <= 0)
5037 ++ return -EFAULT;
5038 ++
5039 ++ if (os_mincore(psrc, size + src - psrc) <= 0)
5040 ++ return -EFAULT;
5041 ++
5042 ++ return __probe_kernel_read(dst, src, size);
5043 ++}
5044 +diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
5045 +index b8f34c9e..67b9c8f5 100644
5046 +--- a/arch/um/os-Linux/process.c
5047 ++++ b/arch/um/os-Linux/process.c
5048 +@@ -4,6 +4,7 @@
5049 + */
5050 +
5051 + #include <stdio.h>
5052 ++#include <stdlib.h>
5053 + #include <unistd.h>
5054 + #include <errno.h>
5055 + #include <signal.h>
5056 +@@ -232,6 +233,57 @@ out:
5057 + return ok;
5058 + }
5059 +
5060 ++static int os_page_mincore(void *addr)
5061 ++{
5062 ++ char vec[2];
5063 ++ int ret;
5064 ++
5065 ++ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
5066 ++ if (ret < 0) {
5067 ++ if (errno == ENOMEM || errno == EINVAL)
5068 ++ return 0;
5069 ++ else
5070 ++ return -errno;
5071 ++ }
5072 ++
5073 ++ return vec[0] & 1;
5074 ++}
5075 ++
5076 ++int os_mincore(void *addr, unsigned long len)
5077 ++{
5078 ++ char *vec;
5079 ++ int ret, i;
5080 ++
5081 ++ if (len <= UM_KERN_PAGE_SIZE)
5082 ++ return os_page_mincore(addr);
5083 ++
5084 ++ vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
5085 ++ if (!vec)
5086 ++ return -ENOMEM;
5087 ++
5088 ++ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
5089 ++ if (ret < 0) {
5090 ++ if (errno == ENOMEM || errno == EINVAL)
5091 ++ ret = 0;
5092 ++ else
5093 ++ ret = -errno;
5094 ++
5095 ++ goto out;
5096 ++ }
5097 ++
5098 ++ for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
5099 ++ if (!(vec[i] & 1)) {
5100 ++ ret = 0;
5101 ++ goto out;
5102 ++ }
5103 ++ }
5104 ++
5105 ++ ret = 1;
5106 ++out:
5107 ++ free(vec);
5108 ++ return ret;
5109 ++}
5110 ++
5111 + void init_new_thread_signals(void)
5112 + {
5113 + set_handler(SIGSEGV);
5114 +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
5115 +index bccfca68..665a7303 100644
5116 +--- a/arch/x86/ia32/ia32_signal.c
5117 ++++ b/arch/x86/ia32/ia32_signal.c
5118 +@@ -457,7 +457,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
5119 + else
5120 + put_user_ex(0, &frame->uc.uc_flags);
5121 + put_user_ex(0, &frame->uc.uc_link);
5122 +- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
5123 ++ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5124 +
5125 + if (ksig->ka.sa.sa_flags & SA_RESTORER)
5126 + restorer = ksig->ka.sa.sa_restorer;
5127 +diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
5128 +index 46fc474f..f50de695 100644
5129 +--- a/arch/x86/include/asm/checksum_32.h
5130 ++++ b/arch/x86/include/asm/checksum_32.h
5131 +@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
5132 + int len, __wsum sum,
5133 + int *err_ptr)
5134 + {
5135 ++ __wsum ret;
5136 ++
5137 + might_sleep();
5138 +- return csum_partial_copy_generic((__force void *)src, dst,
5139 +- len, sum, err_ptr, NULL);
5140 ++ stac();
5141 ++ ret = csum_partial_copy_generic((__force void *)src, dst,
5142 ++ len, sum, err_ptr, NULL);
5143 ++ clac();
5144 ++
5145 ++ return ret;
5146 + }
5147 +
5148 + /*
5149 +@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
5150 + int len, __wsum sum,
5151 + int *err_ptr)
5152 + {
5153 ++ __wsum ret;
5154 ++
5155 + might_sleep();
5156 +- if (access_ok(VERIFY_WRITE, dst, len))
5157 +- return csum_partial_copy_generic(src, (__force void *)dst,
5158 +- len, sum, NULL, err_ptr);
5159 ++ if (access_ok(VERIFY_WRITE, dst, len)) {
5160 ++ stac();
5161 ++ ret = csum_partial_copy_generic(src, (__force void *)dst,
5162 ++ len, sum, NULL, err_ptr);
5163 ++ clac();
5164 ++ return ret;
5165 ++ }
5166 +
5167 + if (len)
5168 + *err_ptr = -EFAULT;
5169 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
5170 +index 29e3093b..aa97342e 100644
5171 +--- a/arch/x86/include/asm/mce.h
5172 ++++ b/arch/x86/include/asm/mce.h
5173 +@@ -32,11 +32,20 @@
5174 + #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
5175 + #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
5176 + #define MCI_STATUS_AR (1ULL<<55) /* Action required */
5177 +-#define MCACOD 0xffff /* MCA Error Code */
5178 ++
5179 ++/*
5180 ++ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
5181 ++ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
5182 ++ * errors to indicate that errors are being filtered by hardware.
5183 ++ * We should mask out bit 12 when looking for specific signatures
5184 ++ * of uncorrected errors - so the F bit is deliberately skipped
5185 ++ * in this #define.
5186 ++ */
5187 ++#define MCACOD 0xefff /* MCA Error Code */
5188 +
5189 + /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
5190 + #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
5191 +-#define MCACOD_SCRUBMSK 0xfff0
5192 ++#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
5193 + #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
5194 + #define MCACOD_DATA 0x0134 /* Data Load */
5195 + #define MCACOD_INSTR 0x0150 /* Instruction Fetch */
5196 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
5197 +index cdbf3677..be12c534 100644
5198 +--- a/arch/x86/include/asm/mmu_context.h
5199 ++++ b/arch/x86/include/asm/mmu_context.h
5200 +@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
5201 + /* Re-load page tables */
5202 + load_cr3(next->pgd);
5203 +
5204 +- /* stop flush ipis for the previous mm */
5205 ++ /* Stop flush ipis for the previous mm */
5206 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
5207 +
5208 +- /*
5209 +- * load the LDT, if the LDT is different:
5210 +- */
5211 ++ /* Load the LDT, if the LDT is different: */
5212 + if (unlikely(prev->context.ldt != next->context.ldt))
5213 + load_LDT_nolock(&next->context);
5214 + }
5215 + #ifdef CONFIG_SMP
5216 +- else {
5217 ++ else {
5218 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
5219 + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
5220 +
5221 +- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
5222 +- /* We were in lazy tlb mode and leave_mm disabled
5223 ++ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
5224 ++ /*
5225 ++ * On established mms, the mm_cpumask is only changed
5226 ++ * from irq context, from ptep_clear_flush() while in
5227 ++ * lazy tlb mode, and here. Irqs are blocked during
5228 ++ * schedule, protecting us from simultaneous changes.
5229 ++ */
5230 ++ cpumask_set_cpu(cpu, mm_cpumask(next));
5231 ++ /*
5232 ++ * We were in lazy tlb mode and leave_mm disabled
5233 + * tlb flush IPI delivery. We must reload CR3
5234 + * to make sure to use no freed page tables.
5235 + */
5236 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
5237 +index 3048ded1..59554dca 100644
5238 +--- a/arch/x86/kernel/amd_nb.c
5239 ++++ b/arch/x86/kernel/amd_nb.c
5240 +@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
5241 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
5242 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
5243 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
5244 ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
5245 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
5246 + {}
5247 + };
5248 +@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
5249 +
5250 + static const struct pci_device_id amd_nb_link_ids[] = {
5251 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
5252 ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
5253 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
5254 + {}
5255 + };
5256 +@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
5257 + next_northbridge(misc, amd_nb_misc_ids);
5258 + node_to_amd_nb(i)->link = link =
5259 + next_northbridge(link, amd_nb_link_ids);
5260 +- }
5261 ++ }
5262 +
5263 ++ /* GART present only on Fam15h upto model 0fh */
5264 + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
5265 +- boot_cpu_data.x86 == 0x15)
5266 ++ (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
5267 + amd_northbridges.flags |= AMD_NB_GART;
5268 +
5269 + /*
5270 ++ * Check for L3 cache presence.
5271 ++ */
5272 ++ if (!cpuid_edx(0x80000006))
5273 ++ return 0;
5274 ++
5275 ++ /*
5276 + * Some CPU families support L3 Cache Index Disable. There are some
5277 + * limitations because of E382 and E388 on family 0x10.
5278 + */
5279 +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
5280 +index cf913587..d859eea0 100644
5281 +--- a/arch/x86/kernel/signal.c
5282 ++++ b/arch/x86/kernel/signal.c
5283 +@@ -358,7 +358,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
5284 + else
5285 + put_user_ex(0, &frame->uc.uc_flags);
5286 + put_user_ex(0, &frame->uc.uc_link);
5287 +- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
5288 ++ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5289 +
5290 + /* Set up to return from userspace. */
5291 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5292 +@@ -423,7 +423,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
5293 + else
5294 + put_user_ex(0, &frame->uc.uc_flags);
5295 + put_user_ex(0, &frame->uc.uc_link);
5296 +- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
5297 ++ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5298 +
5299 + /* Set up to return from userspace. If provided, use a stub
5300 + already in userspace. */
5301 +@@ -490,7 +490,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
5302 + else
5303 + put_user_ex(0, &frame->uc.uc_flags);
5304 + put_user_ex(0, &frame->uc.uc_link);
5305 +- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
5306 ++ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5307 + put_user_ex(0, &frame->uc.uc__pad0);
5308 +
5309 + if (ksig->ka.sa.sa_flags & SA_RESTORER) {
5310 +diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
5311 +index 25b7ae8d..7609e0e4 100644
5312 +--- a/arch/x86/lib/csum-wrappers_64.c
5313 ++++ b/arch/x86/lib/csum-wrappers_64.c
5314 +@@ -6,6 +6,7 @@
5315 + */
5316 + #include <asm/checksum.h>
5317 + #include <linux/module.h>
5318 ++#include <asm/smap.h>
5319 +
5320 + /**
5321 + * csum_partial_copy_from_user - Copy and checksum from user space.
5322 +@@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
5323 + len -= 2;
5324 + }
5325 + }
5326 ++ stac();
5327 + isum = csum_partial_copy_generic((__force const void *)src,
5328 + dst, len, isum, errp, NULL);
5329 ++ clac();
5330 + if (unlikely(*errp))
5331 + goto out_err;
5332 +
5333 +@@ -82,6 +85,8 @@ __wsum
5334 + csum_partial_copy_to_user(const void *src, void __user *dst,
5335 + int len, __wsum isum, int *errp)
5336 + {
5337 ++ __wsum ret;
5338 ++
5339 + might_sleep();
5340 +
5341 + if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
5342 +@@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
5343 + }
5344 +
5345 + *errp = 0;
5346 +- return csum_partial_copy_generic(src, (void __force *)dst,
5347 +- len, isum, NULL, errp);
5348 ++ stac();
5349 ++ ret = csum_partial_copy_generic(src, (void __force *)dst,
5350 ++ len, isum, NULL, errp);
5351 ++ clac();
5352 ++ return ret;
5353 + }
5354 + EXPORT_SYMBOL(csum_partial_copy_to_user);
5355 +
5356 +diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
5357 +index d8507f81..74a60c7e 100644
5358 +--- a/arch/xtensa/kernel/xtensa_ksyms.c
5359 ++++ b/arch/xtensa/kernel/xtensa_ksyms.c
5360 +@@ -25,6 +25,7 @@
5361 + #include <asm/io.h>
5362 + #include <asm/page.h>
5363 + #include <asm/pgalloc.h>
5364 ++#include <asm/ftrace.h>
5365 + #ifdef CONFIG_BLK_DEV_FD
5366 + #include <asm/floppy.h>
5367 + #endif
5368 +diff --git a/crypto/api.c b/crypto/api.c
5369 +index 3b618033..37c4c721 100644
5370 +--- a/crypto/api.c
5371 ++++ b/crypto/api.c
5372 +@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
5373 + BLOCKING_NOTIFIER_HEAD(crypto_chain);
5374 + EXPORT_SYMBOL_GPL(crypto_chain);
5375 +
5376 ++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
5377 ++
5378 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
5379 + {
5380 + return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
5381 +@@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
5382 + }
5383 + up_write(&crypto_alg_sem);
5384 +
5385 +- if (alg != &larval->alg)
5386 ++ if (alg != &larval->alg) {
5387 + kfree(larval);
5388 ++ if (crypto_is_larval(alg))
5389 ++ alg = crypto_larval_wait(alg);
5390 ++ }
5391 +
5392 + return alg;
5393 + }
5394 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
5395 +index 6a382188..fb78bb9a 100644
5396 +--- a/drivers/acpi/acpi_lpss.c
5397 ++++ b/drivers/acpi/acpi_lpss.c
5398 +@@ -257,12 +257,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
5399 + pdata->mmio_size = resource_size(&rentry->res);
5400 + pdata->mmio_base = ioremap(rentry->res.start,
5401 + pdata->mmio_size);
5402 +- pdata->dev_desc = dev_desc;
5403 + break;
5404 + }
5405 +
5406 + acpi_dev_free_resource_list(&resource_list);
5407 +
5408 ++ pdata->dev_desc = dev_desc;
5409 ++
5410 + if (dev_desc->clk_required) {
5411 + ret = register_device_clock(adev, pdata);
5412 + if (ret) {
5413 +diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
5414 +index 59178393..a67853e3 100644
5415 +--- a/drivers/acpi/pci_root.c
5416 ++++ b/drivers/acpi/pci_root.c
5417 +@@ -378,6 +378,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
5418 + struct acpi_pci_root *root;
5419 + u32 flags, base_flags;
5420 + acpi_handle handle = device->handle;
5421 ++ bool no_aspm = false, clear_aspm = false;
5422 +
5423 + root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
5424 + if (!root)
5425 +@@ -437,27 +438,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
5426 + flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
5427 + acpi_pci_osc_support(root, flags);
5428 +
5429 +- /*
5430 +- * TBD: Need PCI interface for enumeration/configuration of roots.
5431 +- */
5432 +-
5433 +- /*
5434 +- * Scan the Root Bridge
5435 +- * --------------------
5436 +- * Must do this prior to any attempt to bind the root device, as the
5437 +- * PCI namespace does not get created until this call is made (and
5438 +- * thus the root bridge's pci_dev does not exist).
5439 +- */
5440 +- root->bus = pci_acpi_scan_root(root);
5441 +- if (!root->bus) {
5442 +- dev_err(&device->dev,
5443 +- "Bus %04x:%02x not present in PCI namespace\n",
5444 +- root->segment, (unsigned int)root->secondary.start);
5445 +- result = -ENODEV;
5446 +- goto end;
5447 +- }
5448 +-
5449 +- /* Indicate support for various _OSC capabilities. */
5450 + if (pci_ext_cfg_avail())
5451 + flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
5452 + if (pcie_aspm_support_enabled()) {
5453 +@@ -471,7 +451,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
5454 + if (ACPI_FAILURE(status)) {
5455 + dev_info(&device->dev, "ACPI _OSC support "
5456 + "notification failed, disabling PCIe ASPM\n");
5457 +- pcie_no_aspm();
5458 ++ no_aspm = true;
5459 + flags = base_flags;
5460 + }
5461 + }
5462 +@@ -503,7 +483,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
5463 + * We have ASPM control, but the FADT indicates
5464 + * that it's unsupported. Clear it.
5465 + */
5466 +- pcie_clear_aspm(root->bus);
5467 ++ clear_aspm = true;
5468 + }
5469 + } else {
5470 + dev_info(&device->dev,
5471 +@@ -512,7 +492,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
5472 + acpi_format_exception(status), flags);
5473 + dev_info(&device->dev,
5474 + "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
5475 +- pcie_no_aspm();
5476 ++ /*
5477 ++ * We want to disable ASPM here, but aspm_disabled
5478 ++ * needs to remain in its state from boot so that we
5479 ++ * properly handle PCIe 1.1 devices. So we set this
5480 ++ * flag here, to defer the action until after the ACPI
5481 ++ * root scan.
5482 ++ */
5483 ++ no_aspm = true;
5484 + }
5485 + } else {
5486 + dev_info(&device->dev,
5487 +@@ -520,6 +507,33 @@ static int acpi_pci_root_add(struct acpi_device *device,
5488 + "(_OSC support mask: 0x%02x)\n", flags);
5489 + }
5490 +
5491 ++ /*
5492 ++ * TBD: Need PCI interface for enumeration/configuration of roots.
5493 ++ */
5494 ++
5495 ++ /*
5496 ++ * Scan the Root Bridge
5497 ++ * --------------------
5498 ++ * Must do this prior to any attempt to bind the root device, as the
5499 ++ * PCI namespace does not get created until this call is made (and
5500 ++ * thus the root bridge's pci_dev does not exist).
5501 ++ */
5502 ++ root->bus = pci_acpi_scan_root(root);
5503 ++ if (!root->bus) {
5504 ++ dev_err(&device->dev,
5505 ++ "Bus %04x:%02x not present in PCI namespace\n",
5506 ++ root->segment, (unsigned int)root->secondary.start);
5507 ++ result = -ENODEV;
5508 ++ goto end;
5509 ++ }
5510 ++
5511 ++ if (clear_aspm) {
5512 ++ dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
5513 ++ pcie_clear_aspm(root->bus);
5514 ++ }
5515 ++ if (no_aspm)
5516 ++ pcie_no_aspm();
5517 ++
5518 + pci_acpi_add_bus_pm_notifier(device, root->bus);
5519 + if (device->wakeup.flags.run_wake)
5520 + device_set_run_wake(root->bus->bridge, true);
5521 +diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
5522 +index a439602e..c8dac745 100644
5523 +--- a/drivers/base/firmware_class.c
5524 ++++ b/drivers/base/firmware_class.c
5525 +@@ -868,8 +868,15 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
5526 + goto err_del_dev;
5527 + }
5528 +
5529 ++ mutex_lock(&fw_lock);
5530 ++ list_add(&buf->pending_list, &pending_fw_head);
5531 ++ mutex_unlock(&fw_lock);
5532 ++
5533 + retval = device_create_file(f_dev, &dev_attr_loading);
5534 + if (retval) {
5535 ++ mutex_lock(&fw_lock);
5536 ++ list_del_init(&buf->pending_list);
5537 ++ mutex_unlock(&fw_lock);
5538 + dev_err(f_dev, "%s: device_create_file failed\n", __func__);
5539 + goto err_del_bin_attr;
5540 + }
5541 +@@ -884,10 +891,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
5542 + kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
5543 + }
5544 +
5545 +- mutex_lock(&fw_lock);
5546 +- list_add(&buf->pending_list, &pending_fw_head);
5547 +- mutex_unlock(&fw_lock);
5548 +-
5549 + wait_for_completion(&buf->completion);
5550 +
5551 + cancel_delayed_work_sync(&fw_priv->timeout_work);
5552 +diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
5553 +index 53495753..6c2652a8 100644
5554 +--- a/drivers/base/regmap/regmap-debugfs.c
5555 ++++ b/drivers/base/regmap/regmap-debugfs.c
5556 +@@ -85,8 +85,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
5557 + unsigned int reg_offset;
5558 +
5559 + /* Suppress the cache if we're using a subrange */
5560 +- if (from)
5561 +- return from;
5562 ++ if (base)
5563 ++ return base;
5564 +
5565 + /*
5566 + * If we don't have a cache build one so we don't have to do a
5567 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
5568 +index 4ad2ad9a..45aa20aa 100644
5569 +--- a/drivers/block/rbd.c
5570 ++++ b/drivers/block/rbd.c
5571 +@@ -1557,11 +1557,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
5572 + obj_request, obj_request->img_request, obj_request->result,
5573 + xferred, length);
5574 + /*
5575 +- * ENOENT means a hole in the image. We zero-fill the
5576 +- * entire length of the request. A short read also implies
5577 +- * zero-fill to the end of the request. Either way we
5578 +- * update the xferred count to indicate the whole request
5579 +- * was satisfied.
5580 ++ * ENOENT means a hole in the image. We zero-fill the entire
5581 ++ * length of the request. A short read also implies zero-fill
5582 ++ * to the end of the request. An error requires the whole
5583 ++ * length of the request to be reported finished with an error
5584 ++ * to the block layer. In each case we update the xferred
5585 ++ * count to indicate the whole request was satisfied.
5586 + */
5587 + rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
5588 + if (obj_request->result == -ENOENT) {
5589 +@@ -1570,14 +1571,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
5590 + else
5591 + zero_pages(obj_request->pages, 0, length);
5592 + obj_request->result = 0;
5593 +- obj_request->xferred = length;
5594 + } else if (xferred < length && !obj_request->result) {
5595 + if (obj_request->type == OBJ_REQUEST_BIO)
5596 + zero_bio_chain(obj_request->bio_list, xferred);
5597 + else
5598 + zero_pages(obj_request->pages, xferred, length);
5599 +- obj_request->xferred = length;
5600 + }
5601 ++ obj_request->xferred = length;
5602 + obj_request_done_set(obj_request);
5603 + }
5604 +
5605 +diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
5606 +index 1b3f8c9b..1d5af3f7 100644
5607 +--- a/drivers/clk/clk-wm831x.c
5608 ++++ b/drivers/clk/clk-wm831x.c
5609 +@@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
5610 + if (!clkdata)
5611 + return -ENOMEM;
5612 +
5613 ++ clkdata->wm831x = wm831x;
5614 ++
5615 + /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
5616 + ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
5617 + if (ret < 0) {
5618 +diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
5619 +index 2a297f86..fe853903 100644
5620 +--- a/drivers/cpuidle/coupled.c
5621 ++++ b/drivers/cpuidle/coupled.c
5622 +@@ -106,6 +106,7 @@ struct cpuidle_coupled {
5623 + cpumask_t coupled_cpus;
5624 + int requested_state[NR_CPUS];
5625 + atomic_t ready_waiting_counts;
5626 ++ atomic_t abort_barrier;
5627 + int online_count;
5628 + int refcnt;
5629 + int prevent;
5630 +@@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock);
5631 + static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
5632 +
5633 + /*
5634 +- * The cpuidle_coupled_poked_mask mask is used to avoid calling
5635 ++ * The cpuidle_coupled_poke_pending mask is used to avoid calling
5636 + * __smp_call_function_single with the per cpu call_single_data struct already
5637 + * in use. This prevents a deadlock where two cpus are waiting for each others
5638 + * call_single_data struct to be available
5639 + */
5640 +-static cpumask_t cpuidle_coupled_poked_mask;
5641 ++static cpumask_t cpuidle_coupled_poke_pending;
5642 ++
5643 ++/*
5644 ++ * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
5645 ++ * once to minimize entering the ready loop with a poke pending, which would
5646 ++ * require aborting and retrying.
5647 ++ */
5648 ++static cpumask_t cpuidle_coupled_poked;
5649 +
5650 + /**
5651 + * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
5652 +@@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
5653 + return state;
5654 + }
5655 +
5656 +-static void cpuidle_coupled_poked(void *info)
5657 ++static void cpuidle_coupled_handle_poke(void *info)
5658 + {
5659 + int cpu = (unsigned long)info;
5660 +- cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
5661 ++ cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
5662 ++ cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
5663 + }
5664 +
5665 + /**
5666 +@@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu)
5667 + {
5668 + struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
5669 +
5670 +- if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
5671 ++ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
5672 + __smp_call_function_single(cpu, csd, 0);
5673 + }
5674 +
5675 +@@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(int this_cpu,
5676 + * @coupled: the struct coupled that contains the current cpu
5677 + * @next_state: the index in drv->states of the requested state for this cpu
5678 + *
5679 +- * Updates the requested idle state for the specified cpuidle device,
5680 +- * poking all coupled cpus out of idle if necessary to let them see the new
5681 +- * state.
5682 ++ * Updates the requested idle state for the specified cpuidle device.
5683 ++ * Returns the number of waiting cpus.
5684 + */
5685 +-static void cpuidle_coupled_set_waiting(int cpu,
5686 ++static int cpuidle_coupled_set_waiting(int cpu,
5687 + struct cpuidle_coupled *coupled, int next_state)
5688 + {
5689 +- int w;
5690 +-
5691 + coupled->requested_state[cpu] = next_state;
5692 +
5693 + /*
5694 +- * If this is the last cpu to enter the waiting state, poke
5695 +- * all the other cpus out of their waiting state so they can
5696 +- * enter a deeper state. This can race with one of the cpus
5697 +- * exiting the waiting state due to an interrupt and
5698 +- * decrementing waiting_count, see comment below.
5699 +- *
5700 + * The atomic_inc_return provides a write barrier to order the write
5701 + * to requested_state with the later write that increments ready_count.
5702 + */
5703 +- w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
5704 +- if (w == coupled->online_count)
5705 +- cpuidle_coupled_poke_others(cpu, coupled);
5706 ++ return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
5707 + }
5708 +
5709 + /**
5710 +@@ -410,19 +408,33 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
5711 + * been processed and the poke bit has been cleared.
5712 + *
5713 + * Other interrupts may also be processed while interrupts are enabled, so
5714 +- * need_resched() must be tested after turning interrupts off again to make sure
5715 ++ * need_resched() must be tested after this function returns to make sure
5716 + * the interrupt didn't schedule work that should take the cpu out of idle.
5717 + *
5718 +- * Returns 0 if need_resched was false, -EINTR if need_resched was true.
5719 ++ * Returns 0 if no poke was pending, 1 if a poke was cleared.
5720 + */
5721 + static int cpuidle_coupled_clear_pokes(int cpu)
5722 + {
5723 ++ if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
5724 ++ return 0;
5725 ++
5726 + local_irq_enable();
5727 +- while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
5728 ++ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
5729 + cpu_relax();
5730 + local_irq_disable();
5731 +
5732 +- return need_resched() ? -EINTR : 0;
5733 ++ return 1;
5734 ++}
5735 ++
5736 ++static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
5737 ++{
5738 ++ cpumask_t cpus;
5739 ++ int ret;
5740 ++
5741 ++ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
5742 ++ ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
5743 ++
5744 ++ return ret;
5745 + }
5746 +
5747 + /**
5748 +@@ -449,12 +461,14 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
5749 + {
5750 + int entered_state = -1;
5751 + struct cpuidle_coupled *coupled = dev->coupled;
5752 ++ int w;
5753 +
5754 + if (!coupled)
5755 + return -EINVAL;
5756 +
5757 + while (coupled->prevent) {
5758 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
5759 ++ cpuidle_coupled_clear_pokes(dev->cpu);
5760 ++ if (need_resched()) {
5761 + local_irq_enable();
5762 + return entered_state;
5763 + }
5764 +@@ -465,15 +479,37 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
5765 + /* Read barrier ensures online_count is read after prevent is cleared */
5766 + smp_rmb();
5767 +
5768 +- cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
5769 ++reset:
5770 ++ cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
5771 ++
5772 ++ w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
5773 ++ /*
5774 ++ * If this is the last cpu to enter the waiting state, poke
5775 ++ * all the other cpus out of their waiting state so they can
5776 ++ * enter a deeper state. This can race with one of the cpus
5777 ++ * exiting the waiting state due to an interrupt and
5778 ++ * decrementing waiting_count, see comment below.
5779 ++ */
5780 ++ if (w == coupled->online_count) {
5781 ++ cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
5782 ++ cpuidle_coupled_poke_others(dev->cpu, coupled);
5783 ++ }
5784 +
5785 + retry:
5786 + /*
5787 + * Wait for all coupled cpus to be idle, using the deepest state
5788 +- * allowed for a single cpu.
5789 ++ * allowed for a single cpu. If this was not the poking cpu, wait
5790 ++ * for at least one poke before leaving to avoid a race where
5791 ++ * two cpus could arrive at the waiting loop at the same time,
5792 ++ * but the first of the two to arrive could skip the loop without
5793 ++ * processing the pokes from the last to arrive.
5794 + */
5795 +- while (!cpuidle_coupled_cpus_waiting(coupled)) {
5796 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
5797 ++ while (!cpuidle_coupled_cpus_waiting(coupled) ||
5798 ++ !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
5799 ++ if (cpuidle_coupled_clear_pokes(dev->cpu))
5800 ++ continue;
5801 ++
5802 ++ if (need_resched()) {
5803 + cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
5804 + goto out;
5805 + }
5806 +@@ -487,12 +523,19 @@ retry:
5807 + dev->safe_state_index);
5808 + }
5809 +
5810 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
5811 ++ cpuidle_coupled_clear_pokes(dev->cpu);
5812 ++ if (need_resched()) {
5813 + cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
5814 + goto out;
5815 + }
5816 +
5817 + /*
5818 ++ * Make sure final poke status for this cpu is visible before setting
5819 ++ * cpu as ready.
5820 ++ */
5821 ++ smp_wmb();
5822 ++
5823 ++ /*
5824 + * All coupled cpus are probably idle. There is a small chance that
5825 + * one of the other cpus just became active. Increment the ready count,
5826 + * and spin until all coupled cpus have incremented the counter. Once a
5827 +@@ -511,6 +554,28 @@ retry:
5828 + cpu_relax();
5829 + }
5830 +
5831 ++ /*
5832 ++ * Make sure read of all cpus ready is done before reading pending pokes
5833 ++ */
5834 ++ smp_rmb();
5835 ++
5836 ++ /*
5837 ++ * There is a small chance that a cpu left and reentered idle after this
5838 ++ * cpu saw that all cpus were waiting. The cpu that reentered idle will
5839 ++ * have sent this cpu a poke, which will still be pending after the
5840 ++ * ready loop. The pending interrupt may be lost by the interrupt
5841 ++ * controller when entering the deep idle state. It's not possible to
5842 ++ * clear a pending interrupt without turning interrupts on and handling
5843 ++ * it, and it's too late to turn on interrupts here, so reset the
5844 ++ * coupled idle state of all cpus and retry.
5845 ++ */
5846 ++ if (cpuidle_coupled_any_pokes_pending(coupled)) {
5847 ++ cpuidle_coupled_set_done(dev->cpu, coupled);
5848 ++ /* Wait for all cpus to see the pending pokes */
5849 ++ cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
5850 ++ goto reset;
5851 ++ }
5852 ++
5853 + /* all cpus have acked the coupled state */
5854 + next_state = cpuidle_coupled_get_state(dev, coupled);
5855 +
5856 +@@ -596,7 +661,7 @@ have_coupled:
5857 + coupled->refcnt++;
5858 +
5859 + csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
5860 +- csd->func = cpuidle_coupled_poked;
5861 ++ csd->func = cpuidle_coupled_handle_poke;
5862 + csd->info = (void *)(unsigned long)dev->cpu;
5863 +
5864 + return 0;
5865 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
5866 +index 8b6a0343..8b3d9014 100644
5867 +--- a/drivers/edac/amd64_edac.c
5868 ++++ b/drivers/edac/amd64_edac.c
5869 +@@ -2470,8 +2470,15 @@ static int amd64_init_one_instance(struct pci_dev *F2)
5870 + layers[0].size = pvt->csels[0].b_cnt;
5871 + layers[0].is_virt_csrow = true;
5872 + layers[1].type = EDAC_MC_LAYER_CHANNEL;
5873 +- layers[1].size = pvt->channel_count;
5874 ++
5875 ++ /*
5876 ++ * Always allocate two channels since we can have setups with DIMMs on
5877 ++ * only one channel. Also, this simplifies handling later for the price
5878 ++ * of a couple of KBs tops.
5879 ++ */
5880 ++ layers[1].size = 2;
5881 + layers[1].is_virt_csrow = false;
5882 ++
5883 + mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
5884 + if (!mci)
5885 + goto err_siblings;
5886 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
5887 +index 95d6f4b6..70fc1335 100644
5888 +--- a/drivers/gpu/drm/drm_edid.c
5889 ++++ b/drivers/gpu/drm/drm_edid.c
5890 +@@ -125,6 +125,9 @@ static struct edid_quirk {
5891 +
5892 + /* ViewSonic VA2026w */
5893 + { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
5894 ++
5895 ++ /* Medion MD 30217 PG */
5896 ++ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
5897 + };
5898 +
5899 + /*
5900 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
5901 +index be79f477..ca40d1b1 100644
5902 +--- a/drivers/gpu/drm/i915/intel_display.c
5903 ++++ b/drivers/gpu/drm/i915/intel_display.c
5904 +@@ -7809,6 +7809,19 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
5905 + pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
5906 + pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5907 +
5908 ++ /*
5909 ++ * Sanitize sync polarity flags based on requested ones. If neither
5910 ++ * positive or negative polarity is requested, treat this as meaning
5911 ++ * negative polarity.
5912 ++ */
5913 ++ if (!(pipe_config->adjusted_mode.flags &
5914 ++ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5915 ++ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5916 ++
5917 ++ if (!(pipe_config->adjusted_mode.flags &
5918 ++ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5919 ++ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5920 ++
5921 + /* Compute a starting value for pipe_config->pipe_bpp taking the source
5922 + * plane pixel format and any sink constraints into account. Returns the
5923 + * source plane bpp so that dithering can be selected on mismatches
5924 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
5925 +index 36668d1a..5956445d 100644
5926 +--- a/drivers/hid/hid-core.c
5927 ++++ b/drivers/hid/hid-core.c
5928 +@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
5929 + struct hid_report_enum *report_enum = device->report_enum + type;
5930 + struct hid_report *report;
5931 +
5932 ++ if (id >= HID_MAX_IDS)
5933 ++ return NULL;
5934 + if (report_enum->report_id_hash[id])
5935 + return report_enum->report_id_hash[id];
5936 +
5937 +@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
5938 +
5939 + case HID_GLOBAL_ITEM_TAG_REPORT_ID:
5940 + parser->global.report_id = item_udata(item);
5941 +- if (parser->global.report_id == 0) {
5942 +- hid_err(parser->device, "report_id 0 is invalid\n");
5943 ++ if (parser->global.report_id == 0 ||
5944 ++ parser->global.report_id >= HID_MAX_IDS) {
5945 ++ hid_err(parser->device, "report_id %u is invalid\n",
5946 ++ parser->global.report_id);
5947 + return -1;
5948 + }
5949 + return 0;
5950 +@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
5951 + for (i = 0; i < HID_REPORT_TYPES; i++) {
5952 + struct hid_report_enum *report_enum = device->report_enum + i;
5953 +
5954 +- for (j = 0; j < 256; j++) {
5955 ++ for (j = 0; j < HID_MAX_IDS; j++) {
5956 + struct hid_report *report = report_enum->report_id_hash[j];
5957 + if (report)
5958 + hid_free_report(report);
5959 +@@ -1152,7 +1156,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
5960 +
5961 + int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
5962 + {
5963 +- unsigned size = field->report_size;
5964 ++ unsigned size;
5965 ++
5966 ++ if (!field)
5967 ++ return -1;
5968 ++
5969 ++ size = field->report_size;
5970 +
5971 + hid_dump_input(field->report->device, field->usage + offset, value);
5972 +
5973 +@@ -1597,6 +1606,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
5974 + { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
5975 + { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
5976 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
5977 ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
5978 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
5979 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
5980 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
5981 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
5982 +index ffe4c7ae..22134d4b 100644
5983 +--- a/drivers/hid/hid-ids.h
5984 ++++ b/drivers/hid/hid-ids.h
5985 +@@ -135,9 +135,9 @@
5986 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
5987 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
5988 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
5989 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
5990 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
5991 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
5992 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
5993 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
5994 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
5995 + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
5996 + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
5997 + #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
5998 +@@ -482,6 +482,7 @@
5999 + #define USB_VENDOR_ID_KYE 0x0458
6000 + #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
6001 + #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
6002 ++#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
6003 + #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
6004 + #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
6005 + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
6006 +@@ -658,6 +659,7 @@
6007 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
6008 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
6009 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
6010 ++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
6011 +
6012 + #define USB_VENDOR_ID_ONTRAK 0x0a07
6013 + #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
6014 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
6015 +index 7480799e..3fc4034a 100644
6016 +--- a/drivers/hid/hid-input.c
6017 ++++ b/drivers/hid/hid-input.c
6018 +@@ -340,7 +340,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
6019 + {
6020 + struct hid_device *dev = container_of(psy, struct hid_device, battery);
6021 + int ret = 0;
6022 +- __u8 buf[2] = {};
6023 ++ __u8 *buf;
6024 +
6025 + switch (prop) {
6026 + case POWER_SUPPLY_PROP_PRESENT:
6027 +@@ -349,12 +349,19 @@ static int hidinput_get_battery_property(struct power_supply *psy,
6028 + break;
6029 +
6030 + case POWER_SUPPLY_PROP_CAPACITY:
6031 ++
6032 ++ buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
6033 ++ if (!buf) {
6034 ++ ret = -ENOMEM;
6035 ++ break;
6036 ++ }
6037 + ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
6038 +- buf, sizeof(buf),
6039 ++ buf, 2,
6040 + dev->battery_report_type);
6041 +
6042 + if (ret != 2) {
6043 + ret = -ENODATA;
6044 ++ kfree(buf);
6045 + break;
6046 + }
6047 + ret = 0;
6048 +@@ -364,6 +371,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
6049 + buf[1] <= dev->battery_max)
6050 + val->intval = (100 * (buf[1] - dev->battery_min)) /
6051 + (dev->battery_max - dev->battery_min);
6052 ++ kfree(buf);
6053 + break;
6054 +
6055 + case POWER_SUPPLY_PROP_MODEL_NAME:
6056 +diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
6057 +index 1e2ee2aa..73845120 100644
6058 +--- a/drivers/hid/hid-kye.c
6059 ++++ b/drivers/hid/hid-kye.c
6060 +@@ -268,6 +268,26 @@ static __u8 easypen_m610x_rdesc_fixed[] = {
6061 + 0xC0 /* End Collection */
6062 + };
6063 +
6064 ++static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
6065 ++ unsigned int *rsize, int offset, const char *device_name) {
6066 ++ /*
6067 ++ * the fixup that need to be done:
6068 ++ * - change Usage Maximum in the Comsumer Control
6069 ++ * (report ID 3) to a reasonable value
6070 ++ */
6071 ++ if (*rsize >= offset + 31 &&
6072 ++ /* Usage Page (Consumer Devices) */
6073 ++ rdesc[offset] == 0x05 && rdesc[offset + 1] == 0x0c &&
6074 ++ /* Usage (Consumer Control) */
6075 ++ rdesc[offset + 2] == 0x09 && rdesc[offset + 3] == 0x01 &&
6076 ++ /* Usage Maximum > 12287 */
6077 ++ rdesc[offset + 10] == 0x2a && rdesc[offset + 12] > 0x2f) {
6078 ++ hid_info(hdev, "fixing up %s report descriptor\n", device_name);
6079 ++ rdesc[offset + 12] = 0x2f;
6080 ++ }
6081 ++ return rdesc;
6082 ++}
6083 ++
6084 + static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
6085 + unsigned int *rsize)
6086 + {
6087 +@@ -315,23 +335,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
6088 + }
6089 + break;
6090 + case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
6091 +- /*
6092 +- * the fixup that need to be done:
6093 +- * - change Usage Maximum in the Comsumer Control
6094 +- * (report ID 3) to a reasonable value
6095 +- */
6096 +- if (*rsize >= 135 &&
6097 +- /* Usage Page (Consumer Devices) */
6098 +- rdesc[104] == 0x05 && rdesc[105] == 0x0c &&
6099 +- /* Usage (Consumer Control) */
6100 +- rdesc[106] == 0x09 && rdesc[107] == 0x01 &&
6101 +- /* Usage Maximum > 12287 */
6102 +- rdesc[114] == 0x2a && rdesc[116] > 0x2f) {
6103 +- hid_info(hdev,
6104 +- "fixing up Genius Gila Gaming Mouse "
6105 +- "report descriptor\n");
6106 +- rdesc[116] = 0x2f;
6107 +- }
6108 ++ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
6109 ++ "Genius Gila Gaming Mouse");
6110 ++ break;
6111 ++ case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
6112 ++ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
6113 ++ "Genius Gx Imperator Keyboard");
6114 + break;
6115 + }
6116 + return rdesc;
6117 +@@ -428,6 +437,8 @@ static const struct hid_device_id kye_devices[] = {
6118 + USB_DEVICE_ID_KYE_EASYPEN_M610X) },
6119 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
6120 + USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
6121 ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
6122 ++ USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
6123 + { }
6124 + };
6125 + MODULE_DEVICE_TABLE(hid, kye_devices);
6126 +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
6127 +index ef951025..5482156a 100644
6128 +--- a/drivers/hid/hid-ntrig.c
6129 ++++ b/drivers/hid/hid-ntrig.c
6130 +@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
6131 + struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
6132 + report_id_hash[0x0d];
6133 +
6134 +- if (!report)
6135 ++ if (!report || report->maxfield < 1 ||
6136 ++ report->field[0]->report_count < 1)
6137 + return -EINVAL;
6138 +
6139 + hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
6140 +diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
6141 +index e346038f..59d5eb1e 100644
6142 +--- a/drivers/hid/hid-picolcd_cir.c
6143 ++++ b/drivers/hid/hid-picolcd_cir.c
6144 +@@ -145,6 +145,7 @@ void picolcd_exit_cir(struct picolcd_data *data)
6145 + struct rc_dev *rdev = data->rc_dev;
6146 +
6147 + data->rc_dev = NULL;
6148 +- rc_unregister_device(rdev);
6149 ++ if (rdev)
6150 ++ rc_unregister_device(rdev);
6151 + }
6152 +
6153 +diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
6154 +index b48092d0..acbb0210 100644
6155 +--- a/drivers/hid/hid-picolcd_core.c
6156 ++++ b/drivers/hid/hid-picolcd_core.c
6157 +@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
6158 + buf += 10;
6159 + cnt -= 10;
6160 + }
6161 +- if (!report)
6162 ++ if (!report || report->maxfield != 1)
6163 + return -EINVAL;
6164 +
6165 + while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
6166 +diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
6167 +index 591f6b22..c930ab85 100644
6168 +--- a/drivers/hid/hid-picolcd_fb.c
6169 ++++ b/drivers/hid/hid-picolcd_fb.c
6170 +@@ -593,10 +593,14 @@ err_nomem:
6171 + void picolcd_exit_framebuffer(struct picolcd_data *data)
6172 + {
6173 + struct fb_info *info = data->fb_info;
6174 +- struct picolcd_fb_data *fbdata = info->par;
6175 ++ struct picolcd_fb_data *fbdata;
6176 + unsigned long flags;
6177 +
6178 ++ if (!info)
6179 ++ return;
6180 ++
6181 + device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
6182 ++ fbdata = info->par;
6183 +
6184 + /* disconnect framebuffer from HID dev */
6185 + spin_lock_irqsave(&fbdata->lock, flags);
6186 +diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
6187 +index d29112fa..2dcd7d98 100644
6188 +--- a/drivers/hid/hid-pl.c
6189 ++++ b/drivers/hid/hid-pl.c
6190 +@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
6191 + strong = &report->field[0]->value[2];
6192 + weak = &report->field[0]->value[3];
6193 + debug("detected single-field device");
6194 +- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
6195 +- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
6196 ++ } else if (report->field[0]->maxusage == 1 &&
6197 ++ report->field[0]->usage[0].hid ==
6198 ++ (HID_UP_LED | 0x43) &&
6199 ++ report->maxfield >= 4 &&
6200 ++ report->field[0]->report_count >= 1 &&
6201 ++ report->field[1]->report_count >= 1 &&
6202 ++ report->field[2]->report_count >= 1 &&
6203 ++ report->field[3]->report_count >= 1) {
6204 + report->field[0]->value[0] = 0x00;
6205 + report->field[1]->value[0] = 0x00;
6206 + strong = &report->field[2]->value[0];
6207 +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
6208 +index ca749810..aa34755c 100644
6209 +--- a/drivers/hid/hid-sensor-hub.c
6210 ++++ b/drivers/hid/hid-sensor-hub.c
6211 +@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
6212 +
6213 + mutex_lock(&data->mutex);
6214 + report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
6215 +- if (!report || (field_index >= report->maxfield)) {
6216 ++ if (!report || (field_index >= report->maxfield) ||
6217 ++ report->field[field_index]->report_count < 1) {
6218 + ret = -EINVAL;
6219 + goto done_proc;
6220 + }
6221 +diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
6222 +index a2f587d0..7112f3e8 100644
6223 +--- a/drivers/hid/hid-speedlink.c
6224 ++++ b/drivers/hid/hid-speedlink.c
6225 +@@ -3,7 +3,7 @@
6226 + * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
6227 + * the HID descriptor.
6228 + *
6229 +- * Copyright (c) 2011 Stefan Kriwanek <mail@××××××××××××××.de>
6230 ++ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@××××××××××××××.de>
6231 + */
6232 +
6233 + /*
6234 +@@ -46,8 +46,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
6235 + struct hid_usage *usage, __s32 value)
6236 + {
6237 + /* No other conditions due to usage_table. */
6238 +- /* Fix "jumpy" cursor (invalid events sent by device). */
6239 +- if (value == 256)
6240 ++
6241 ++ /* This fixes the "jumpy" cursor occuring due to invalid events sent
6242 ++ * by the device. Some devices only send them with value==+256, others
6243 ++ * don't. However, catching abs(value)>=256 is restrictive enough not
6244 ++ * to interfere with devices that were bug-free (has been tested).
6245 ++ */
6246 ++ if (abs(value) >= 256)
6247 + return 1;
6248 + /* Drop useless distance 0 events (on button clicks etc.) as well */
6249 + if (value == 0)
6250 +diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
6251 +index 0c06054c..66020982 100644
6252 +--- a/drivers/hid/hid-wiimote-core.c
6253 ++++ b/drivers/hid/hid-wiimote-core.c
6254 +@@ -212,10 +212,12 @@ static __u8 select_drm(struct wiimote_data *wdata)
6255 +
6256 + if (ir == WIIPROTO_FLAG_IR_BASIC) {
6257 + if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
6258 +- if (ext)
6259 +- return WIIPROTO_REQ_DRM_KAIE;
6260 +- else
6261 +- return WIIPROTO_REQ_DRM_KAI;
6262 ++ /* GEN10 and ealier devices bind IR formats to DRMs.
6263 ++ * Hence, we cannot use DRM_KAI here as it might be
6264 ++ * bound to IR_EXT. Use DRM_KAIE unconditionally so we
6265 ++ * work with all devices and our parsers can use the
6266 ++ * fixed formats, too. */
6267 ++ return WIIPROTO_REQ_DRM_KAIE;
6268 + } else {
6269 + return WIIPROTO_REQ_DRM_KIE;
6270 + }
6271 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
6272 +index 6f1feb2c..dbfe3007 100644
6273 +--- a/drivers/hid/hidraw.c
6274 ++++ b/drivers/hid/hidraw.c
6275 +@@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
6276 + __u8 *buf;
6277 + int ret = 0;
6278 +
6279 +- if (!hidraw_table[minor]) {
6280 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
6281 + ret = -ENODEV;
6282 + goto out;
6283 + }
6284 +@@ -261,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
6285 + }
6286 +
6287 + mutex_lock(&minors_lock);
6288 +- if (!hidraw_table[minor]) {
6289 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
6290 + err = -ENODEV;
6291 + goto out_unlock;
6292 + }
6293 +@@ -302,39 +302,38 @@ static int hidraw_fasync(int fd, struct file *file, int on)
6294 + return fasync_helper(fd, file, on, &list->fasync);
6295 + }
6296 +
6297 ++static void drop_ref(struct hidraw *hidraw, int exists_bit)
6298 ++{
6299 ++ if (exists_bit) {
6300 ++ hid_hw_close(hidraw->hid);
6301 ++ hidraw->exist = 0;
6302 ++ if (hidraw->open)
6303 ++ wake_up_interruptible(&hidraw->wait);
6304 ++ } else {
6305 ++ --hidraw->open;
6306 ++ }
6307 ++
6308 ++ if (!hidraw->open && !hidraw->exist) {
6309 ++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
6310 ++ hidraw_table[hidraw->minor] = NULL;
6311 ++ kfree(hidraw);
6312 ++ }
6313 ++}
6314 ++
6315 + static int hidraw_release(struct inode * inode, struct file * file)
6316 + {
6317 + unsigned int minor = iminor(inode);
6318 +- struct hidraw *dev;
6319 + struct hidraw_list *list = file->private_data;
6320 +- int ret;
6321 +- int i;
6322 +
6323 + mutex_lock(&minors_lock);
6324 +- if (!hidraw_table[minor]) {
6325 +- ret = -ENODEV;
6326 +- goto unlock;
6327 +- }
6328 +
6329 + list_del(&list->node);
6330 +- dev = hidraw_table[minor];
6331 +- if (!--dev->open) {
6332 +- if (list->hidraw->exist) {
6333 +- hid_hw_power(dev->hid, PM_HINT_NORMAL);
6334 +- hid_hw_close(dev->hid);
6335 +- } else {
6336 +- kfree(list->hidraw);
6337 +- }
6338 +- }
6339 +-
6340 +- for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
6341 +- kfree(list->buffer[i].value);
6342 + kfree(list);
6343 +- ret = 0;
6344 +-unlock:
6345 +- mutex_unlock(&minors_lock);
6346 +
6347 +- return ret;
6348 ++ drop_ref(hidraw_table[minor], 0);
6349 ++
6350 ++ mutex_unlock(&minors_lock);
6351 ++ return 0;
6352 + }
6353 +
6354 + static long hidraw_ioctl(struct file *file, unsigned int cmd,
6355 +@@ -539,18 +538,9 @@ void hidraw_disconnect(struct hid_device *hid)
6356 + struct hidraw *hidraw = hid->hidraw;
6357 +
6358 + mutex_lock(&minors_lock);
6359 +- hidraw->exist = 0;
6360 +-
6361 +- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
6362 +
6363 +- hidraw_table[hidraw->minor] = NULL;
6364 ++ drop_ref(hidraw, 1);
6365 +
6366 +- if (hidraw->open) {
6367 +- hid_hw_close(hid);
6368 +- wake_up_interruptible(&hidraw->wait);
6369 +- } else {
6370 +- kfree(hidraw);
6371 +- }
6372 + mutex_unlock(&minors_lock);
6373 + }
6374 + EXPORT_SYMBOL_GPL(hidraw_disconnect);
6375 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
6376 +index 19b8360f..07345521 100644
6377 +--- a/drivers/hid/usbhid/hid-quirks.c
6378 ++++ b/drivers/hid/usbhid/hid-quirks.c
6379 +@@ -109,6 +109,8 @@ static const struct hid_blacklist {
6380 + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
6381 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
6382 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
6383 ++ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
6384 ++
6385 + { 0, 0 }
6386 + };
6387 +
6388 +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
6389 +index 4ef4d5e1..a73f9618 100644
6390 +--- a/drivers/input/mouse/bcm5974.c
6391 ++++ b/drivers/input/mouse/bcm5974.c
6392 +@@ -89,9 +89,9 @@
6393 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
6394 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
6395 + /* MacbookAir6,2 (unibody, June 2013) */
6396 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
6397 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
6398 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
6399 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
6400 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
6401 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
6402 +
6403 + #define BCM5974_DEVICE(prod) { \
6404 + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
6405 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
6406 +index eec0d3e0..15e9b57e 100644
6407 +--- a/drivers/iommu/intel-iommu.c
6408 ++++ b/drivers/iommu/intel-iommu.c
6409 +@@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
6410 + return order;
6411 + }
6412 +
6413 ++static void dma_pte_free_level(struct dmar_domain *domain, int level,
6414 ++ struct dma_pte *pte, unsigned long pfn,
6415 ++ unsigned long start_pfn, unsigned long last_pfn)
6416 ++{
6417 ++ pfn = max(start_pfn, pfn);
6418 ++ pte = &pte[pfn_level_offset(pfn, level)];
6419 ++
6420 ++ do {
6421 ++ unsigned long level_pfn;
6422 ++ struct dma_pte *level_pte;
6423 ++
6424 ++ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
6425 ++ goto next;
6426 ++
6427 ++ level_pfn = pfn & level_mask(level - 1);
6428 ++ level_pte = phys_to_virt(dma_pte_addr(pte));
6429 ++
6430 ++ if (level > 2)
6431 ++ dma_pte_free_level(domain, level - 1, level_pte,
6432 ++ level_pfn, start_pfn, last_pfn);
6433 ++
6434 ++ /* If range covers entire pagetable, free it */
6435 ++ if (!(start_pfn > level_pfn ||
6436 ++ last_pfn < level_pfn + level_size(level))) {
6437 ++ dma_clear_pte(pte);
6438 ++ domain_flush_cache(domain, pte, sizeof(*pte));
6439 ++ free_pgtable_page(level_pte);
6440 ++ }
6441 ++next:
6442 ++ pfn += level_size(level);
6443 ++ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
6444 ++}
6445 ++
6446 + /* free page table pages. last level pte should already be cleared */
6447 + static void dma_pte_free_pagetable(struct dmar_domain *domain,
6448 + unsigned long start_pfn,
6449 + unsigned long last_pfn)
6450 + {
6451 + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
6452 +- struct dma_pte *first_pte, *pte;
6453 +- int total = agaw_to_level(domain->agaw);
6454 +- int level;
6455 +- unsigned long tmp;
6456 +- int large_page = 2;
6457 +
6458 + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
6459 + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
6460 + BUG_ON(start_pfn > last_pfn);
6461 +
6462 + /* We don't need lock here; nobody else touches the iova range */
6463 +- level = 2;
6464 +- while (level <= total) {
6465 +- tmp = align_to_level(start_pfn, level);
6466 +-
6467 +- /* If we can't even clear one PTE at this level, we're done */
6468 +- if (tmp + level_size(level) - 1 > last_pfn)
6469 +- return;
6470 +-
6471 +- do {
6472 +- large_page = level;
6473 +- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
6474 +- if (large_page > level)
6475 +- level = large_page + 1;
6476 +- if (!pte) {
6477 +- tmp = align_to_level(tmp + 1, level + 1);
6478 +- continue;
6479 +- }
6480 +- do {
6481 +- if (dma_pte_present(pte)) {
6482 +- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
6483 +- dma_clear_pte(pte);
6484 +- }
6485 +- pte++;
6486 +- tmp += level_size(level);
6487 +- } while (!first_pte_in_page(pte) &&
6488 +- tmp + level_size(level) - 1 <= last_pfn);
6489 ++ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
6490 ++ domain->pgd, 0, start_pfn, last_pfn);
6491 +
6492 +- domain_flush_cache(domain, first_pte,
6493 +- (void *)pte - (void *)first_pte);
6494 +-
6495 +- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
6496 +- level++;
6497 +- }
6498 + /* free pgd */
6499 + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
6500 + free_pgtable_page(domain->pgd);
6501 +diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
6502 +index 120815a4..5a19abde 100644
6503 +--- a/drivers/leds/leds-wm831x-status.c
6504 ++++ b/drivers/leds/leds-wm831x-status.c
6505 +@@ -230,9 +230,9 @@ static int wm831x_status_probe(struct platform_device *pdev)
6506 + int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
6507 + int ret;
6508 +
6509 +- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
6510 ++ res = platform_get_resource(pdev, IORESOURCE_REG, 0);
6511 + if (res == NULL) {
6512 +- dev_err(&pdev->dev, "No I/O resource\n");
6513 ++ dev_err(&pdev->dev, "No register resource\n");
6514 + ret = -EINVAL;
6515 + goto err;
6516 + }
6517 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
6518 +index 08626225..63676a8b 100644
6519 +--- a/drivers/media/common/siano/smsdvb-main.c
6520 ++++ b/drivers/media/common/siano/smsdvb-main.c
6521 +@@ -276,7 +276,8 @@ static void smsdvb_update_per_slices(struct smsdvb_client_t *client,
6522 +
6523 + /* Legacy PER/BER */
6524 + tmp = p->ets_packets * 65535;
6525 +- do_div(tmp, p->ts_packets + p->ets_packets);
6526 ++ if (p->ts_packets + p->ets_packets)
6527 ++ do_div(tmp, p->ts_packets + p->ets_packets);
6528 + client->legacy_per = tmp;
6529 + }
6530 +
6531 +diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
6532 +index 856374bd..2c7217fb 100644
6533 +--- a/drivers/media/dvb-frontends/mb86a20s.c
6534 ++++ b/drivers/media/dvb-frontends/mb86a20s.c
6535 +@@ -157,7 +157,6 @@ static struct regdata mb86a20s_init2[] = {
6536 + { 0x45, 0x04 }, /* CN symbol 4 */
6537 + { 0x48, 0x04 }, /* CN manual mode */
6538 +
6539 +- { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
6540 + { 0x50, 0xd6 }, { 0x51, 0x1f },
6541 + { 0x50, 0xd2 }, { 0x51, 0x03 },
6542 + { 0x50, 0xd7 }, { 0x51, 0xbf },
6543 +@@ -1860,16 +1859,15 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
6544 + dev_dbg(&state->i2c->dev, "%s: IF=%d, IF reg=0x%06llx\n",
6545 + __func__, state->if_freq, (long long)pll);
6546 +
6547 +- if (!state->config->is_serial) {
6548 ++ if (!state->config->is_serial)
6549 + regD5 &= ~1;
6550 +
6551 +- rc = mb86a20s_writereg(state, 0x50, 0xd5);
6552 +- if (rc < 0)
6553 +- goto err;
6554 +- rc = mb86a20s_writereg(state, 0x51, regD5);
6555 +- if (rc < 0)
6556 +- goto err;
6557 +- }
6558 ++ rc = mb86a20s_writereg(state, 0x50, 0xd5);
6559 ++ if (rc < 0)
6560 ++ goto err;
6561 ++ rc = mb86a20s_writereg(state, 0x51, regD5);
6562 ++ if (rc < 0)
6563 ++ goto err;
6564 +
6565 + rc = mb86a20s_writeregdata(state, mb86a20s_init2);
6566 + if (rc < 0)
6567 +diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
6568 +index afe0eaea..28893a6b 100644
6569 +--- a/drivers/media/pci/cx88/cx88.h
6570 ++++ b/drivers/media/pci/cx88/cx88.h
6571 +@@ -259,7 +259,7 @@ struct cx88_input {
6572 + };
6573 +
6574 + enum cx88_audio_chip {
6575 +- CX88_AUDIO_WM8775,
6576 ++ CX88_AUDIO_WM8775 = 1,
6577 + CX88_AUDIO_TVAUDIO,
6578 + };
6579 +
6580 +diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
6581 +index 559fab2a..1ec60264 100644
6582 +--- a/drivers/media/platform/exynos-gsc/gsc-core.c
6583 ++++ b/drivers/media/platform/exynos-gsc/gsc-core.c
6584 +@@ -1122,10 +1122,14 @@ static int gsc_probe(struct platform_device *pdev)
6585 + goto err_clk;
6586 + }
6587 +
6588 +- ret = gsc_register_m2m_device(gsc);
6589 ++ ret = v4l2_device_register(dev, &gsc->v4l2_dev);
6590 + if (ret)
6591 + goto err_clk;
6592 +
6593 ++ ret = gsc_register_m2m_device(gsc);
6594 ++ if (ret)
6595 ++ goto err_v4l2;
6596 ++
6597 + platform_set_drvdata(pdev, gsc);
6598 + pm_runtime_enable(dev);
6599 + ret = pm_runtime_get_sync(&pdev->dev);
6600 +@@ -1147,6 +1151,8 @@ err_pm:
6601 + pm_runtime_put(dev);
6602 + err_m2m:
6603 + gsc_unregister_m2m_device(gsc);
6604 ++err_v4l2:
6605 ++ v4l2_device_unregister(&gsc->v4l2_dev);
6606 + err_clk:
6607 + gsc_clk_put(gsc);
6608 + return ret;
6609 +@@ -1157,6 +1163,7 @@ static int gsc_remove(struct platform_device *pdev)
6610 + struct gsc_dev *gsc = platform_get_drvdata(pdev);
6611 +
6612 + gsc_unregister_m2m_device(gsc);
6613 ++ v4l2_device_unregister(&gsc->v4l2_dev);
6614 +
6615 + vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
6616 + pm_runtime_disable(&pdev->dev);
6617 +diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
6618 +index cc19bba0..76435d3b 100644
6619 +--- a/drivers/media/platform/exynos-gsc/gsc-core.h
6620 ++++ b/drivers/media/platform/exynos-gsc/gsc-core.h
6621 +@@ -343,6 +343,7 @@ struct gsc_dev {
6622 + unsigned long state;
6623 + struct vb2_alloc_ctx *alloc_ctx;
6624 + struct video_device vdev;
6625 ++ struct v4l2_device v4l2_dev;
6626 + };
6627 +
6628 + /**
6629 +diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
6630 +index 40a73f7d..e576ff2d 100644
6631 +--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
6632 ++++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
6633 +@@ -751,6 +751,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
6634 + gsc->vdev.release = video_device_release_empty;
6635 + gsc->vdev.lock = &gsc->lock;
6636 + gsc->vdev.vfl_dir = VFL_DIR_M2M;
6637 ++ gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
6638 + snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
6639 + GSC_MODULE_NAME, gsc->id);
6640 +
6641 +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
6642 +index 08fbfede..e85dc4f2 100644
6643 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c
6644 ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
6645 +@@ -90,7 +90,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
6646 + .name = "RAW10 (GRBG)",
6647 + .fourcc = V4L2_PIX_FMT_SGRBG10,
6648 + .colorspace = V4L2_COLORSPACE_SRGB,
6649 +- .depth = { 10 },
6650 ++ .depth = { 16 },
6651 + .color = FIMC_FMT_RAW10,
6652 + .memplanes = 1,
6653 + .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
6654 +@@ -99,7 +99,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
6655 + .name = "RAW12 (GRBG)",
6656 + .fourcc = V4L2_PIX_FMT_SGRBG12,
6657 + .colorspace = V4L2_COLORSPACE_SRGB,
6658 +- .depth = { 12 },
6659 ++ .depth = { 16 },
6660 + .color = FIMC_FMT_RAW12,
6661 + .memplanes = 1,
6662 + .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
6663 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
6664 +index 19f556c5..91f21e27 100644
6665 +--- a/drivers/media/platform/exynos4-is/media-dev.c
6666 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
6667 +@@ -1530,9 +1530,9 @@ static int fimc_md_probe(struct platform_device *pdev)
6668 + err_unlock:
6669 + mutex_unlock(&fmd->media_dev.graph_mutex);
6670 + err_clk:
6671 +- media_device_unregister(&fmd->media_dev);
6672 + fimc_md_put_clocks(fmd);
6673 + fimc_md_unregister_entities(fmd);
6674 ++ media_device_unregister(&fmd->media_dev);
6675 + err_md:
6676 + v4l2_device_unregister(&fmd->v4l2_dev);
6677 + return ret;
6678 +diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
6679 +index 47bdb8fa..65edb4a6 100644
6680 +--- a/drivers/mmc/host/tmio_mmc_dma.c
6681 ++++ b/drivers/mmc/host/tmio_mmc_dma.c
6682 +@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
6683 + pio:
6684 + if (!desc) {
6685 + /* DMA failed, fall back to PIO */
6686 ++ tmio_mmc_enable_dma(host, false);
6687 + if (ret >= 0)
6688 + ret = -EIO;
6689 + host->chan_rx = NULL;
6690 +@@ -116,7 +117,6 @@ pio:
6691 + }
6692 + dev_warn(&host->pdev->dev,
6693 + "DMA failed: %d, falling back to PIO\n", ret);
6694 +- tmio_mmc_enable_dma(host, false);
6695 + }
6696 +
6697 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
6698 +@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
6699 + pio:
6700 + if (!desc) {
6701 + /* DMA failed, fall back to PIO */
6702 ++ tmio_mmc_enable_dma(host, false);
6703 + if (ret >= 0)
6704 + ret = -EIO;
6705 + host->chan_tx = NULL;
6706 +@@ -197,7 +198,6 @@ pio:
6707 + }
6708 + dev_warn(&host->pdev->dev,
6709 + "DMA failed: %d, falling back to PIO\n", ret);
6710 +- tmio_mmc_enable_dma(host, false);
6711 + }
6712 +
6713 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
6714 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
6715 +index dfcd0a56..fb8c4dea 100644
6716 +--- a/drivers/mtd/nand/nand_base.c
6717 ++++ b/drivers/mtd/nand/nand_base.c
6718 +@@ -2793,7 +2793,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
6719 +
6720 + if (!chip->select_chip)
6721 + chip->select_chip = nand_select_chip;
6722 +- if (!chip->read_byte)
6723 ++
6724 ++ /* If called twice, pointers that depend on busw may need to be reset */
6725 ++ if (!chip->read_byte || chip->read_byte == nand_read_byte)
6726 + chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
6727 + if (!chip->read_word)
6728 + chip->read_word = nand_read_word;
6729 +@@ -2801,9 +2803,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
6730 + chip->block_bad = nand_block_bad;
6731 + if (!chip->block_markbad)
6732 + chip->block_markbad = nand_default_block_markbad;
6733 +- if (!chip->write_buf)
6734 ++ if (!chip->write_buf || chip->write_buf == nand_write_buf)
6735 + chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
6736 +- if (!chip->read_buf)
6737 ++ if (!chip->read_buf || chip->read_buf == nand_read_buf)
6738 + chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
6739 + if (!chip->scan_bbt)
6740 + chip->scan_bbt = nand_default_bbt;
6741 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
6742 +index 5df49d3c..c95bfb18 100644
6743 +--- a/drivers/mtd/ubi/wl.c
6744 ++++ b/drivers/mtd/ubi/wl.c
6745 +@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
6746 + if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
6747 + dbg_wl("no WL needed: min used EC %d, max free EC %d",
6748 + e1->ec, e2->ec);
6749 ++
6750 ++ /* Give the unused PEB back */
6751 ++ wl_tree_add(e2, &ubi->free);
6752 + goto out_cancel;
6753 + }
6754 + self_check_in_wl_tree(ubi, e1, &ubi->used);
6755 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
6756 +index b017818b..90ab2928 100644
6757 +--- a/drivers/net/ethernet/marvell/mvneta.c
6758 ++++ b/drivers/net/ethernet/marvell/mvneta.c
6759 +@@ -138,7 +138,9 @@
6760 + #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
6761 + #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
6762 + #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
6763 ++#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
6764 + #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
6765 ++#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
6766 + #define MVNETA_MIB_COUNTERS_BASE 0x3080
6767 + #define MVNETA_MIB_LATE_COLLISION 0x7c
6768 + #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
6769 +@@ -915,6 +917,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
6770 + /* Assign port SDMA configuration */
6771 + mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
6772 +
6773 ++ /* Disable PHY polling in hardware, since we're using the
6774 ++ * kernel phylib to do this.
6775 ++ */
6776 ++ val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
6777 ++ val &= ~MVNETA_PHY_POLLING_ENABLE;
6778 ++ mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
6779 ++
6780 + mvneta_set_ucast_table(pp, -1);
6781 + mvneta_set_special_mcast_table(pp, -1);
6782 + mvneta_set_other_mcast_table(pp, -1);
6783 +@@ -2307,7 +2316,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
6784 + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
6785 + val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
6786 + MVNETA_GMAC_CONFIG_GMII_SPEED |
6787 +- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
6788 ++ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
6789 ++ MVNETA_GMAC_AN_SPEED_EN |
6790 ++ MVNETA_GMAC_AN_DUPLEX_EN);
6791 +
6792 + if (phydev->duplex)
6793 + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
6794 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
6795 +index 1f694ab3..77d3a705 100644
6796 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
6797 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
6798 +@@ -1173,6 +1173,10 @@ skip_ws_det:
6799 + * is_on == 0 means MRC CCK is OFF (more noise imm)
6800 + */
6801 + bool is_on = param ? 1 : 0;
6802 ++
6803 ++ if (ah->caps.rx_chainmask == 1)
6804 ++ break;
6805 ++
6806 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
6807 + AR_PHY_MRC_CCK_ENABLE, is_on);
6808 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
6809 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
6810 +index c1224b5a..020b9b37 100644
6811 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
6812 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
6813 +@@ -79,10 +79,6 @@ struct ath_config {
6814 + sizeof(struct ath_buf_state)); \
6815 + } while (0)
6816 +
6817 +-#define ATH_RXBUF_RESET(_bf) do { \
6818 +- (_bf)->bf_stale = false; \
6819 +- } while (0)
6820 +-
6821 + /**
6822 + * enum buffer_type - Buffer type flags
6823 + *
6824 +@@ -317,6 +313,7 @@ struct ath_rx {
6825 + struct ath_descdma rxdma;
6826 + struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
6827 +
6828 ++ struct ath_buf *buf_hold;
6829 + struct sk_buff *frag;
6830 +
6831 + u32 ampdu_ref;
6832 +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
6833 +index 865e043e..b4902b34 100644
6834 +--- a/drivers/net/wireless/ath/ath9k/recv.c
6835 ++++ b/drivers/net/wireless/ath/ath9k/recv.c
6836 +@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
6837 + struct ath_desc *ds;
6838 + struct sk_buff *skb;
6839 +
6840 +- ATH_RXBUF_RESET(bf);
6841 +-
6842 + ds = bf->bf_desc;
6843 + ds->ds_link = 0; /* link to null */
6844 + ds->ds_data = bf->bf_buf_addr;
6845 +@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
6846 + sc->rx.rxlink = &ds->ds_link;
6847 + }
6848 +
6849 ++static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
6850 ++{
6851 ++ if (sc->rx.buf_hold)
6852 ++ ath_rx_buf_link(sc, sc->rx.buf_hold);
6853 ++
6854 ++ sc->rx.buf_hold = bf;
6855 ++}
6856 ++
6857 + static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
6858 + {
6859 + /* XXX block beacon interrupts */
6860 +@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
6861 +
6862 + skb = bf->bf_mpdu;
6863 +
6864 +- ATH_RXBUF_RESET(bf);
6865 + memset(skb->data, 0, ah->caps.rx_status_len);
6866 + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
6867 + ah->caps.rx_status_len, DMA_TO_DEVICE);
6868 +@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
6869 + if (list_empty(&sc->rx.rxbuf))
6870 + goto start_recv;
6871 +
6872 ++ sc->rx.buf_hold = NULL;
6873 + sc->rx.rxlink = NULL;
6874 + list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
6875 + ath_rx_buf_link(sc, bf);
6876 +@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
6877 + }
6878 +
6879 + bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
6880 ++ if (bf == sc->rx.buf_hold)
6881 ++ return NULL;
6882 ++
6883 + ds = bf->bf_desc;
6884 +
6885 + /*
6886 +@@ -1375,7 +1384,7 @@ requeue:
6887 + if (edma) {
6888 + ath_rx_edma_buf_link(sc, qtype);
6889 + } else {
6890 +- ath_rx_buf_link(sc, bf);
6891 ++ ath_rx_buf_relink(sc, bf);
6892 + ath9k_hw_rxena(ah);
6893 + }
6894 + } while (1);
6895 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
6896 +index 92799273..ab646838 100644
6897 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
6898 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
6899 +@@ -2602,6 +2602,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
6900 + for (acno = 0, ac = &an->ac[acno];
6901 + acno < IEEE80211_NUM_ACS; acno++, ac++) {
6902 + ac->sched = false;
6903 ++ ac->clear_ps_filter = true;
6904 + ac->txq = sc->tx.txq_map[acno];
6905 + INIT_LIST_HEAD(&ac->tid_q);
6906 + }
6907 +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
6908 +index 1860c572..4fb9635d 100644
6909 +--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
6910 ++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
6911 +@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
6912 +
6913 + /*
6914 + * post receive buffers
6915 +- * return false is refill failed completely and ring is empty this will stall
6916 +- * the rx dma and user might want to call rxfill again asap. This unlikely
6917 +- * happens on memory-rich NIC, but often on memory-constrained dongle
6918 ++ * Return false if refill failed completely or dma mapping failed. The ring
6919 ++ * is empty, which will stall the rx dma and user might want to call rxfill
6920 ++ * again asap. This is unlikely to happen on a memory-rich NIC, but often on
6921 ++ * memory-constrained dongle.
6922 + */
6923 + bool dma_rxfill(struct dma_pub *pub)
6924 + {
6925 +@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
6926 +
6927 + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
6928 + DMA_FROM_DEVICE);
6929 ++ if (dma_mapping_error(di->dmadev, pa))
6930 ++ return false;
6931 +
6932 + /* save the free packet pointer */
6933 + di->rxp[rxout] = p;
6934 +@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
6935 +
6936 + /* get physical address of buffer start */
6937 + pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
6938 +-
6939 ++ /* if mapping failed, free skb */
6940 ++ if (dma_mapping_error(di->dmadev, pa)) {
6941 ++ brcmu_pkt_buf_free_skb(p);
6942 ++ return;
6943 ++ }
6944 + /* With a DMA segment list, Descriptor table is filled
6945 + * using the segment list instead of looping over
6946 + * buffers in multi-chain DMA. Therefore, EOF for SGLIST
6947 +diff --git a/drivers/of/base.c b/drivers/of/base.c
6948 +index 5c542791..bf8432f5 100644
6949 +--- a/drivers/of/base.c
6950 ++++ b/drivers/of/base.c
6951 +@@ -1629,6 +1629,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
6952 + ap = dt_alloc(sizeof(*ap) + len + 1, 4);
6953 + if (!ap)
6954 + continue;
6955 ++ memset(ap, 0, sizeof(*ap) + len + 1);
6956 + ap->alias = start;
6957 + of_alias_add(ap, np, id, start, len);
6958 + }
6959 +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
6960 +index b90a3a0a..19afb9a7 100644
6961 +--- a/drivers/pinctrl/pinctrl-at91.c
6962 ++++ b/drivers/pinctrl/pinctrl-at91.c
6963 +@@ -325,7 +325,7 @@ static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask)
6964 +
6965 + static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
6966 + {
6967 +- return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1;
6968 ++ return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1);
6969 + }
6970 +
6971 + static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
6972 +@@ -445,7 +445,7 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
6973 +
6974 + static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
6975 + {
6976 +- return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1;
6977 ++ return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
6978 + }
6979 +
6980 + static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
6981 +diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
6982 +index 4c1d2e7a..efb0c4c2 100644
6983 +--- a/drivers/scsi/mpt3sas/Makefile
6984 ++++ b/drivers/scsi/mpt3sas/Makefile
6985 +@@ -1,5 +1,5 @@
6986 + # mpt3sas makefile
6987 +-obj-m += mpt3sas.o
6988 ++obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
6989 + mpt3sas-y += mpt3sas_base.o \
6990 + mpt3sas_config.o \
6991 + mpt3sas_scsih.o \
6992 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
6993 +index 86fcf2c3..2783dd70 100644
6994 +--- a/drivers/scsi/sd.c
6995 ++++ b/drivers/scsi/sd.c
6996 +@@ -2419,14 +2419,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
6997 + }
6998 + }
6999 +
7000 +- if (modepage == 0x3F) {
7001 +- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
7002 +- "present\n");
7003 +- goto defaults;
7004 +- } else if ((buffer[offset] & 0x3f) != modepage) {
7005 +- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
7006 +- goto defaults;
7007 +- }
7008 ++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
7009 ++ goto defaults;
7010 ++
7011 + Page_found:
7012 + if (modepage == 8) {
7013 + sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
7014 +diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
7015 +index c1950e3b..674b236f 100644
7016 +--- a/drivers/staging/comedi/drivers/dt282x.c
7017 ++++ b/drivers/staging/comedi/drivers/dt282x.c
7018 +@@ -264,8 +264,9 @@ struct dt282x_private {
7019 + } \
7020 + udelay(5); \
7021 + } \
7022 +- if (_i) \
7023 ++ if (_i) { \
7024 + b \
7025 ++ } \
7026 + } while (0)
7027 +
7028 + static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
7029 +diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
7030 +index e77fb6ea..8f54c503 100644
7031 +--- a/drivers/staging/zram/zram_drv.c
7032 ++++ b/drivers/staging/zram/zram_drv.c
7033 +@@ -445,6 +445,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
7034 + goto out;
7035 + }
7036 +
7037 ++ /*
7038 ++ * zram_slot_free_notify could miss free so that let's
7039 ++ * double check.
7040 ++ */
7041 ++ if (unlikely(meta->table[index].handle ||
7042 ++ zram_test_flag(meta, index, ZRAM_ZERO)))
7043 ++ zram_free_page(zram, index);
7044 ++
7045 + ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
7046 + meta->compress_workmem);
7047 +
7048 +@@ -504,6 +512,20 @@ out:
7049 + return ret;
7050 + }
7051 +
7052 ++static void handle_pending_slot_free(struct zram *zram)
7053 ++{
7054 ++ struct zram_slot_free *free_rq;
7055 ++
7056 ++ spin_lock(&zram->slot_free_lock);
7057 ++ while (zram->slot_free_rq) {
7058 ++ free_rq = zram->slot_free_rq;
7059 ++ zram->slot_free_rq = free_rq->next;
7060 ++ zram_free_page(zram, free_rq->index);
7061 ++ kfree(free_rq);
7062 ++ }
7063 ++ spin_unlock(&zram->slot_free_lock);
7064 ++}
7065 ++
7066 + static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
7067 + int offset, struct bio *bio, int rw)
7068 + {
7069 +@@ -511,10 +533,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
7070 +
7071 + if (rw == READ) {
7072 + down_read(&zram->lock);
7073 ++ handle_pending_slot_free(zram);
7074 + ret = zram_bvec_read(zram, bvec, index, offset, bio);
7075 + up_read(&zram->lock);
7076 + } else {
7077 + down_write(&zram->lock);
7078 ++ handle_pending_slot_free(zram);
7079 + ret = zram_bvec_write(zram, bvec, index, offset);
7080 + up_write(&zram->lock);
7081 + }
7082 +@@ -522,11 +546,13 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
7083 + return ret;
7084 + }
7085 +
7086 +-static void zram_reset_device(struct zram *zram)
7087 ++static void zram_reset_device(struct zram *zram, bool reset_capacity)
7088 + {
7089 + size_t index;
7090 + struct zram_meta *meta;
7091 +
7092 ++ flush_work(&zram->free_work);
7093 ++
7094 + down_write(&zram->init_lock);
7095 + if (!zram->init_done) {
7096 + up_write(&zram->init_lock);
7097 +@@ -551,7 +577,8 @@ static void zram_reset_device(struct zram *zram)
7098 + memset(&zram->stats, 0, sizeof(zram->stats));
7099 +
7100 + zram->disksize = 0;
7101 +- set_capacity(zram->disk, 0);
7102 ++ if (reset_capacity)
7103 ++ set_capacity(zram->disk, 0);
7104 + up_write(&zram->init_lock);
7105 + }
7106 +
7107 +@@ -635,7 +662,7 @@ static ssize_t reset_store(struct device *dev,
7108 + if (bdev)
7109 + fsync_bdev(bdev);
7110 +
7111 +- zram_reset_device(zram);
7112 ++ zram_reset_device(zram, true);
7113 + return len;
7114 + }
7115 +
7116 +@@ -720,16 +747,40 @@ error:
7117 + bio_io_error(bio);
7118 + }
7119 +
7120 ++static void zram_slot_free(struct work_struct *work)
7121 ++{
7122 ++ struct zram *zram;
7123 ++
7124 ++ zram = container_of(work, struct zram, free_work);
7125 ++ down_write(&zram->lock);
7126 ++ handle_pending_slot_free(zram);
7127 ++ up_write(&zram->lock);
7128 ++}
7129 ++
7130 ++static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
7131 ++{
7132 ++ spin_lock(&zram->slot_free_lock);
7133 ++ free_rq->next = zram->slot_free_rq;
7134 ++ zram->slot_free_rq = free_rq;
7135 ++ spin_unlock(&zram->slot_free_lock);
7136 ++}
7137 ++
7138 + static void zram_slot_free_notify(struct block_device *bdev,
7139 + unsigned long index)
7140 + {
7141 + struct zram *zram;
7142 ++ struct zram_slot_free *free_rq;
7143 +
7144 + zram = bdev->bd_disk->private_data;
7145 +- down_write(&zram->lock);
7146 +- zram_free_page(zram, index);
7147 +- up_write(&zram->lock);
7148 + atomic64_inc(&zram->stats.notify_free);
7149 ++
7150 ++ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
7151 ++ if (!free_rq)
7152 ++ return;
7153 ++
7154 ++ free_rq->index = index;
7155 ++ add_slot_free(zram, free_rq);
7156 ++ schedule_work(&zram->free_work);
7157 + }
7158 +
7159 + static const struct block_device_operations zram_devops = {
7160 +@@ -776,6 +827,10 @@ static int create_device(struct zram *zram, int device_id)
7161 + init_rwsem(&zram->lock);
7162 + init_rwsem(&zram->init_lock);
7163 +
7164 ++ INIT_WORK(&zram->free_work, zram_slot_free);
7165 ++ spin_lock_init(&zram->slot_free_lock);
7166 ++ zram->slot_free_rq = NULL;
7167 ++
7168 + zram->queue = blk_alloc_queue(GFP_KERNEL);
7169 + if (!zram->queue) {
7170 + pr_err("Error allocating disk queue for device %d\n",
7171 +@@ -902,10 +957,12 @@ static void __exit zram_exit(void)
7172 + for (i = 0; i < num_devices; i++) {
7173 + zram = &zram_devices[i];
7174 +
7175 +- get_disk(zram->disk);
7176 + destroy_device(zram);
7177 +- zram_reset_device(zram);
7178 +- put_disk(zram->disk);
7179 ++ /*
7180 ++ * Shouldn't access zram->disk after destroy_device
7181 ++ * because destroy_device already released zram->disk.
7182 ++ */
7183 ++ zram_reset_device(zram, false);
7184 + }
7185 +
7186 + unregister_blkdev(zram_major, "zram");
7187 +diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
7188 +index 9e57bfb2..97a3acf6 100644
7189 +--- a/drivers/staging/zram/zram_drv.h
7190 ++++ b/drivers/staging/zram/zram_drv.h
7191 +@@ -94,11 +94,20 @@ struct zram_meta {
7192 + struct zs_pool *mem_pool;
7193 + };
7194 +
7195 ++struct zram_slot_free {
7196 ++ unsigned long index;
7197 ++ struct zram_slot_free *next;
7198 ++};
7199 ++
7200 + struct zram {
7201 + struct zram_meta *meta;
7202 + struct rw_semaphore lock; /* protect compression buffers, table,
7203 + * 32bit stat counters against concurrent
7204 + * notifications, reads and writes */
7205 ++
7206 ++ struct work_struct free_work; /* handle pending free request */
7207 ++ struct zram_slot_free *slot_free_rq; /* list head of free request */
7208 ++
7209 + struct request_queue *queue;
7210 + struct gendisk *disk;
7211 + int init_done;
7212 +@@ -109,6 +118,7 @@ struct zram {
7213 + * we can store in a disk.
7214 + */
7215 + u64 disksize; /* bytes */
7216 ++ spinlock_t slot_free_lock;
7217 +
7218 + struct zram_stats stats;
7219 + };
7220 +diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
7221 +index cbe48ab4..f608fbc1 100644
7222 +--- a/drivers/target/target_core_alua.c
7223 ++++ b/drivers/target/target_core_alua.c
7224 +@@ -730,7 +730,7 @@ static int core_alua_write_tpg_metadata(
7225 + if (ret < 0)
7226 + pr_err("Error writing ALUA metadata file: %s\n", path);
7227 + fput(file);
7228 +- return ret ? -EIO : 0;
7229 ++ return (ret < 0) ? -EIO : 0;
7230 + }
7231 +
7232 + /*
7233 +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
7234 +index bd78faf6..adec5a82 100644
7235 +--- a/drivers/target/target_core_pr.c
7236 ++++ b/drivers/target/target_core_pr.c
7237 +@@ -1949,7 +1949,7 @@ static int __core_scsi3_write_aptpl_to_file(
7238 + pr_debug("Error writing APTPL metadata file: %s\n", path);
7239 + fput(file);
7240 +
7241 +- return ret ? -EIO : 0;
7242 ++ return (ret < 0) ? -EIO : 0;
7243 + }
7244 +
7245 + /*
7246 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
7247 +index 366af832..20689b95 100644
7248 +--- a/drivers/tty/tty_io.c
7249 ++++ b/drivers/tty/tty_io.c
7250 +@@ -850,7 +850,8 @@ void disassociate_ctty(int on_exit)
7251 + struct pid *tty_pgrp = tty_get_pgrp(tty);
7252 + if (tty_pgrp) {
7253 + kill_pgrp(tty_pgrp, SIGHUP, on_exit);
7254 +- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
7255 ++ if (!on_exit)
7256 ++ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
7257 + put_pid(tty_pgrp);
7258 + }
7259 + }
7260 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
7261 +index 8a230f0e..d3318a0d 100644
7262 +--- a/drivers/usb/class/cdc-wdm.c
7263 ++++ b/drivers/usb/class/cdc-wdm.c
7264 +@@ -209,6 +209,7 @@ skip_error:
7265 + static void wdm_int_callback(struct urb *urb)
7266 + {
7267 + int rv = 0;
7268 ++ int responding;
7269 + int status = urb->status;
7270 + struct wdm_device *desc;
7271 + struct usb_cdc_notification *dr;
7272 +@@ -262,8 +263,8 @@ static void wdm_int_callback(struct urb *urb)
7273 +
7274 + spin_lock(&desc->iuspin);
7275 + clear_bit(WDM_READ, &desc->flags);
7276 +- set_bit(WDM_RESPONDING, &desc->flags);
7277 +- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
7278 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
7279 ++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
7280 + && !test_bit(WDM_SUSPENDING, &desc->flags)) {
7281 + rv = usb_submit_urb(desc->response, GFP_ATOMIC);
7282 + dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
7283 +@@ -685,16 +686,20 @@ static void wdm_rxwork(struct work_struct *work)
7284 + {
7285 + struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
7286 + unsigned long flags;
7287 +- int rv;
7288 ++ int rv = 0;
7289 ++ int responding;
7290 +
7291 + spin_lock_irqsave(&desc->iuspin, flags);
7292 + if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
7293 + spin_unlock_irqrestore(&desc->iuspin, flags);
7294 + } else {
7295 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
7296 + spin_unlock_irqrestore(&desc->iuspin, flags);
7297 +- rv = usb_submit_urb(desc->response, GFP_KERNEL);
7298 ++ if (!responding)
7299 ++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
7300 + if (rv < 0 && rv != -EPERM) {
7301 + spin_lock_irqsave(&desc->iuspin, flags);
7302 ++ clear_bit(WDM_RESPONDING, &desc->flags);
7303 + if (!test_bit(WDM_DISCONNECTING, &desc->flags))
7304 + schedule_work(&desc->rxwork);
7305 + spin_unlock_irqrestore(&desc->iuspin, flags);
7306 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
7307 +index 7199adcc..a6b2cabe 100644
7308 +--- a/drivers/usb/core/config.c
7309 ++++ b/drivers/usb/core/config.c
7310 +@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
7311 +
7312 + memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
7313 + if (config->desc.bDescriptorType != USB_DT_CONFIG ||
7314 +- config->desc.bLength < USB_DT_CONFIG_SIZE) {
7315 ++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
7316 ++ config->desc.bLength > size) {
7317 + dev_err(ddev, "invalid descriptor for config index %d: "
7318 + "type = 0x%X, length = %d\n", cfgidx,
7319 + config->desc.bDescriptorType, config->desc.bLength);
7320 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
7321 +index 558313de..17c37852 100644
7322 +--- a/drivers/usb/core/hub.c
7323 ++++ b/drivers/usb/core/hub.c
7324 +@@ -2918,7 +2918,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
7325 + {
7326 + struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
7327 + struct usb_port *port_dev = hub->ports[udev->portnum - 1];
7328 +- enum pm_qos_flags_status pm_qos_stat;
7329 + int port1 = udev->portnum;
7330 + int status;
7331 + bool really_suspend = true;
7332 +@@ -2956,7 +2955,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
7333 + status);
7334 + /* bail if autosuspend is requested */
7335 + if (PMSG_IS_AUTO(msg))
7336 +- return status;
7337 ++ goto err_wakeup;
7338 + }
7339 + }
7340 +
7341 +@@ -2965,14 +2964,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
7342 + usb_set_usb2_hardware_lpm(udev, 0);
7343 +
7344 + if (usb_disable_ltm(udev)) {
7345 +- dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.",
7346 +- __func__);
7347 +- return -ENOMEM;
7348 ++ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
7349 ++ status = -ENOMEM;
7350 ++ if (PMSG_IS_AUTO(msg))
7351 ++ goto err_ltm;
7352 + }
7353 + if (usb_unlocked_disable_lpm(udev)) {
7354 +- dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.",
7355 +- __func__);
7356 +- return -ENOMEM;
7357 ++ dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
7358 ++ status = -ENOMEM;
7359 ++ if (PMSG_IS_AUTO(msg))
7360 ++ goto err_lpm3;
7361 + }
7362 +
7363 + /* see 7.1.7.6 */
7364 +@@ -3000,28 +3001,31 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
7365 + if (status) {
7366 + dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
7367 + port1, status);
7368 +- /* paranoia: "should not happen" */
7369 +- if (udev->do_remote_wakeup) {
7370 +- if (!hub_is_superspeed(hub->hdev)) {
7371 +- (void) usb_control_msg(udev,
7372 +- usb_sndctrlpipe(udev, 0),
7373 +- USB_REQ_CLEAR_FEATURE,
7374 +- USB_RECIP_DEVICE,
7375 +- USB_DEVICE_REMOTE_WAKEUP, 0,
7376 +- NULL, 0,
7377 +- USB_CTRL_SET_TIMEOUT);
7378 +- } else
7379 +- (void) usb_disable_function_remotewakeup(udev);
7380 +-
7381 +- }
7382 +
7383 ++ /* Try to enable USB3 LPM and LTM again */
7384 ++ usb_unlocked_enable_lpm(udev);
7385 ++ err_lpm3:
7386 ++ usb_enable_ltm(udev);
7387 ++ err_ltm:
7388 + /* Try to enable USB2 hardware LPM again */
7389 + if (udev->usb2_hw_lpm_capable == 1)
7390 + usb_set_usb2_hardware_lpm(udev, 1);
7391 +
7392 +- /* Try to enable USB3 LTM and LPM again */
7393 +- usb_enable_ltm(udev);
7394 +- usb_unlocked_enable_lpm(udev);
7395 ++ if (udev->do_remote_wakeup) {
7396 ++ if (udev->speed < USB_SPEED_SUPER)
7397 ++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
7398 ++ USB_REQ_CLEAR_FEATURE,
7399 ++ USB_RECIP_DEVICE,
7400 ++ USB_DEVICE_REMOTE_WAKEUP, 0,
7401 ++ NULL, 0, USB_CTRL_SET_TIMEOUT);
7402 ++ else
7403 ++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
7404 ++ USB_REQ_CLEAR_FEATURE,
7405 ++ USB_RECIP_INTERFACE,
7406 ++ USB_INTRF_FUNC_SUSPEND, 0,
7407 ++ NULL, 0, USB_CTRL_SET_TIMEOUT);
7408 ++ }
7409 ++ err_wakeup:
7410 +
7411 + /* System sleep transitions should never fail */
7412 + if (!PMSG_IS_AUTO(msg))
7413 +@@ -3039,16 +3043,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
7414 + usb_set_device_state(udev, USB_STATE_SUSPENDED);
7415 + }
7416 +
7417 +- /*
7418 +- * Check whether current status meets the requirement of
7419 +- * usb port power off mechanism
7420 +- */
7421 +- pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
7422 +- PM_QOS_FLAG_NO_POWER_OFF);
7423 +- if (!udev->do_remote_wakeup
7424 +- && pm_qos_stat != PM_QOS_FLAGS_ALL
7425 +- && udev->persist_enabled
7426 +- && !status) {
7427 ++ if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
7428 + pm_runtime_put_sync(&port_dev->dev);
7429 + port_dev->did_runtime_put = true;
7430 + }
7431 +diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
7432 +index d6b0fadf..99099116 100644
7433 +--- a/drivers/usb/core/port.c
7434 ++++ b/drivers/usb/core/port.c
7435 +@@ -89,22 +89,19 @@ static int usb_port_runtime_resume(struct device *dev)
7436 + retval = usb_hub_set_port_power(hdev, hub, port1, true);
7437 + if (port_dev->child && !retval) {
7438 + /*
7439 +- * Wait for usb hub port to be reconnected in order to make
7440 +- * the resume procedure successful.
7441 ++ * Attempt to wait for usb hub port to be reconnected in order
7442 ++ * to make the resume procedure successful. The device may have
7443 ++ * disconnected while the port was powered off, so ignore the
7444 ++ * return status.
7445 + */
7446 + retval = hub_port_debounce_be_connected(hub, port1);
7447 +- if (retval < 0) {
7448 ++ if (retval < 0)
7449 + dev_dbg(&port_dev->dev, "can't get reconnection after setting port power on, status %d\n",
7450 + retval);
7451 +- goto out;
7452 +- }
7453 + usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
7454 +-
7455 +- /* Set return value to 0 if debounce successful */
7456 + retval = 0;
7457 + }
7458 +
7459 +-out:
7460 + clear_bit(port1, hub->busy_bits);
7461 + usb_autopm_put_interface(intf);
7462 + return retval;
7463 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
7464 +index f77083fe..14d28d61 100644
7465 +--- a/drivers/usb/dwc3/gadget.c
7466 ++++ b/drivers/usb/dwc3/gadget.c
7467 +@@ -1508,6 +1508,15 @@ static int dwc3_gadget_start(struct usb_gadget *g,
7468 + int irq;
7469 + u32 reg;
7470 +
7471 ++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
7472 ++ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
7473 ++ IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
7474 ++ if (ret) {
7475 ++ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
7476 ++ irq, ret);
7477 ++ goto err0;
7478 ++ }
7479 ++
7480 + spin_lock_irqsave(&dwc->lock, flags);
7481 +
7482 + if (dwc->gadget_driver) {
7483 +@@ -1515,7 +1524,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
7484 + dwc->gadget.name,
7485 + dwc->gadget_driver->driver.name);
7486 + ret = -EBUSY;
7487 +- goto err0;
7488 ++ goto err1;
7489 + }
7490 +
7491 + dwc->gadget_driver = driver;
7492 +@@ -1551,42 +1560,38 @@ static int dwc3_gadget_start(struct usb_gadget *g,
7493 + ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
7494 + if (ret) {
7495 + dev_err(dwc->dev, "failed to enable %s\n", dep->name);
7496 +- goto err0;
7497 ++ goto err2;
7498 + }
7499 +
7500 + dep = dwc->eps[1];
7501 + ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
7502 + if (ret) {
7503 + dev_err(dwc->dev, "failed to enable %s\n", dep->name);
7504 +- goto err1;
7505 ++ goto err3;
7506 + }
7507 +
7508 + /* begin to receive SETUP packets */
7509 + dwc->ep0state = EP0_SETUP_PHASE;
7510 + dwc3_ep0_out_start(dwc);
7511 +
7512 +- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
7513 +- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
7514 +- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
7515 +- if (ret) {
7516 +- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
7517 +- irq, ret);
7518 +- goto err1;
7519 +- }
7520 +-
7521 + dwc3_gadget_enable_irq(dwc);
7522 +
7523 + spin_unlock_irqrestore(&dwc->lock, flags);
7524 +
7525 + return 0;
7526 +
7527 +-err1:
7528 ++err3:
7529 + __dwc3_gadget_ep_disable(dwc->eps[0]);
7530 +
7531 +-err0:
7532 ++err2:
7533 + dwc->gadget_driver = NULL;
7534 ++
7535 ++err1:
7536 + spin_unlock_irqrestore(&dwc->lock, flags);
7537 +
7538 ++ free_irq(irq, dwc);
7539 ++
7540 ++err0:
7541 + return ret;
7542 + }
7543 +
7544 +@@ -1600,9 +1605,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
7545 + spin_lock_irqsave(&dwc->lock, flags);
7546 +
7547 + dwc3_gadget_disable_irq(dwc);
7548 +- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
7549 +- free_irq(irq, dwc);
7550 +-
7551 + __dwc3_gadget_ep_disable(dwc->eps[0]);
7552 + __dwc3_gadget_ep_disable(dwc->eps[1]);
7553 +
7554 +@@ -1610,6 +1612,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
7555 +
7556 + spin_unlock_irqrestore(&dwc->lock, flags);
7557 +
7558 ++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
7559 ++ free_irq(irq, dwc);
7560 ++
7561 + return 0;
7562 + }
7563 +
7564 +diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
7565 +index e6170478..0bb5d500 100644
7566 +--- a/drivers/usb/gadget/uvc_queue.c
7567 ++++ b/drivers/usb/gadget/uvc_queue.c
7568 +@@ -193,12 +193,16 @@ static int uvc_queue_buffer(struct uvc_video_queue *queue,
7569 +
7570 + mutex_lock(&queue->mutex);
7571 + ret = vb2_qbuf(&queue->queue, buf);
7572 ++ if (ret < 0)
7573 ++ goto done;
7574 ++
7575 + spin_lock_irqsave(&queue->irqlock, flags);
7576 + ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
7577 + queue->flags &= ~UVC_QUEUE_PAUSED;
7578 + spin_unlock_irqrestore(&queue->irqlock, flags);
7579 +- mutex_unlock(&queue->mutex);
7580 +
7581 ++done:
7582 ++ mutex_unlock(&queue->mutex);
7583 + return ret;
7584 + }
7585 +
7586 +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
7587 +index e4c34ac3..4c166e1e 100644
7588 +--- a/drivers/usb/host/ehci-mxc.c
7589 ++++ b/drivers/usb/host/ehci-mxc.c
7590 +@@ -184,7 +184,7 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
7591 + if (pdata && pdata->exit)
7592 + pdata->exit(pdev);
7593 +
7594 +- if (pdata->otg)
7595 ++ if (pdata && pdata->otg)
7596 + usb_phy_shutdown(pdata->otg);
7597 +
7598 + clk_disable_unprepare(priv->usbclk);
7599 +diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
7600 +index 279b0491..ec337c2b 100644
7601 +--- a/drivers/usb/host/ohci-pci.c
7602 ++++ b/drivers/usb/host/ohci-pci.c
7603 +@@ -289,7 +289,7 @@ static struct pci_driver ohci_pci_driver = {
7604 + .remove = usb_hcd_pci_remove,
7605 + .shutdown = usb_hcd_pci_shutdown,
7606 +
7607 +-#ifdef CONFIG_PM_SLEEP
7608 ++#ifdef CONFIG_PM
7609 + .driver = {
7610 + .pm = &usb_hcd_pci_pm_ops
7611 + },
7612 +diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
7613 +index 8d7a1324..9fe3225e 100644
7614 +--- a/drivers/usb/host/xhci-ext-caps.h
7615 ++++ b/drivers/usb/host/xhci-ext-caps.h
7616 +@@ -71,7 +71,7 @@
7617 +
7618 + /* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
7619 + #define XHCI_HLC (1 << 19)
7620 +-#define XHCI_BLC (1 << 19)
7621 ++#define XHCI_BLC (1 << 20)
7622 +
7623 + /* command register values to disable interrupts and halt the HC */
7624 + /* start/stop HC execution - do not write unless HC is halted*/
7625 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
7626 +index 51e22bf8..6eca5a53 100644
7627 +--- a/drivers/usb/host/xhci-plat.c
7628 ++++ b/drivers/usb/host/xhci-plat.c
7629 +@@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
7630 + * here that the generic code does not try to make a pci_dev from our
7631 + * dev struct in order to setup MSI
7632 + */
7633 +- xhci->quirks |= XHCI_BROKEN_MSI;
7634 ++ xhci->quirks |= XHCI_PLAT;
7635 + }
7636 +
7637 + /* called during probe() after chip reset completes */
7638 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
7639 +index 9478caa2..b3c4162c 100644
7640 +--- a/drivers/usb/host/xhci.c
7641 ++++ b/drivers/usb/host/xhci.c
7642 +@@ -343,9 +343,14 @@ static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
7643 + static int xhci_try_enable_msi(struct usb_hcd *hcd)
7644 + {
7645 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
7646 +- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
7647 ++ struct pci_dev *pdev;
7648 + int ret;
7649 +
7650 ++ /* The xhci platform device has set up IRQs through usb_add_hcd. */
7651 ++ if (xhci->quirks & XHCI_PLAT)
7652 ++ return 0;
7653 ++
7654 ++ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
7655 + /*
7656 + * Some Fresco Logic host controllers advertise MSI, but fail to
7657 + * generate interrupts. Don't even try to enable MSI.
7658 +@@ -3581,10 +3586,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
7659 + {
7660 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
7661 + struct xhci_virt_device *virt_dev;
7662 ++ struct device *dev = hcd->self.controller;
7663 + unsigned long flags;
7664 + u32 state;
7665 + int i, ret;
7666 +
7667 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
7668 ++ /*
7669 ++ * We called pm_runtime_get_noresume when the device was attached.
7670 ++ * Decrement the counter here to allow controller to runtime suspend
7671 ++ * if no devices remain.
7672 ++ */
7673 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
7674 ++ pm_runtime_put_noidle(dev);
7675 ++#endif
7676 ++
7677 + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
7678 + /* If the host is halted due to driver unload, we still need to free the
7679 + * device.
7680 +@@ -3656,6 +3672,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
7681 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
7682 + {
7683 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
7684 ++ struct device *dev = hcd->self.controller;
7685 + unsigned long flags;
7686 + int timeleft;
7687 + int ret;
7688 +@@ -3708,6 +3725,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
7689 + goto disable_slot;
7690 + }
7691 + udev->slot_id = xhci->slot_id;
7692 ++
7693 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
7694 ++ /*
7695 ++ * If resetting upon resume, we can't put the controller into runtime
7696 ++ * suspend if there is a device attached.
7697 ++ */
7698 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
7699 ++ pm_runtime_get_noresume(dev);
7700 ++#endif
7701 ++
7702 + /* Is this a LS or FS device under a HS hub? */
7703 + /* Hub or peripherial? */
7704 + return 1;
7705 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
7706 +index c338741a..6ab1e600 100644
7707 +--- a/drivers/usb/host/xhci.h
7708 ++++ b/drivers/usb/host/xhci.h
7709 +@@ -1542,6 +1542,7 @@ struct xhci_hcd {
7710 + #define XHCI_SPURIOUS_REBOOT (1 << 13)
7711 + #define XHCI_COMP_MODE_QUIRK (1 << 14)
7712 + #define XHCI_AVOID_BEI (1 << 15)
7713 ++#define XHCI_PLAT (1 << 16)
7714 + unsigned int num_active_eps;
7715 + unsigned int limit_active_eps;
7716 + /* There are two roothubs to keep track of bus suspend info for */
7717 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
7718 +index b0130016..84657e07 100644
7719 +--- a/drivers/usb/serial/mos7720.c
7720 ++++ b/drivers/usb/serial/mos7720.c
7721 +@@ -374,7 +374,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
7722 + kfree(urbtrack);
7723 + return -ENOMEM;
7724 + }
7725 +- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
7726 ++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
7727 + if (!urbtrack->setup) {
7728 + usb_free_urb(urbtrack->urb);
7729 + kfree(urbtrack);
7730 +@@ -382,8 +382,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
7731 + }
7732 + urbtrack->setup->bRequestType = (__u8)0x40;
7733 + urbtrack->setup->bRequest = (__u8)0x0e;
7734 +- urbtrack->setup->wValue = get_reg_value(reg, dummy);
7735 +- urbtrack->setup->wIndex = get_reg_index(reg);
7736 ++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
7737 ++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
7738 + urbtrack->setup->wLength = 0;
7739 + usb_fill_control_urb(urbtrack->urb, usbdev,
7740 + usb_sndctrlpipe(usbdev, 0),
7741 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
7742 +index 04cdeb8e..c4d22988 100644
7743 +--- a/drivers/xen/grant-table.c
7744 ++++ b/drivers/xen/grant-table.c
7745 +@@ -730,9 +730,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
7746 + void (*fn)(void *), void *arg, u16 count)
7747 + {
7748 + unsigned long flags;
7749 ++ struct gnttab_free_callback *cb;
7750 ++
7751 + spin_lock_irqsave(&gnttab_list_lock, flags);
7752 +- if (callback->next)
7753 +- goto out;
7754 ++
7755 ++ /* Check if the callback is already on the list */
7756 ++ cb = gnttab_free_callback_list;
7757 ++ while (cb) {
7758 ++ if (cb == callback)
7759 ++ goto out;
7760 ++ cb = cb->next;
7761 ++ }
7762 ++
7763 + callback->fn = fn;
7764 + callback->arg = arg;
7765 + callback->count = count;
7766 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
7767 +index 238a0554..9877a2a2 100644
7768 +--- a/fs/btrfs/ioctl.c
7769 ++++ b/fs/btrfs/ioctl.c
7770 +@@ -3312,6 +3312,9 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
7771 +
7772 + switch (p->cmd) {
7773 + case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
7774 ++ if (root->fs_info->sb->s_flags & MS_RDONLY)
7775 ++ return -EROFS;
7776 ++
7777 + if (atomic_xchg(
7778 + &root->fs_info->mutually_exclusive_operation_running,
7779 + 1)) {
7780 +diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
7781 +index e0b4ef31..a5ce62eb 100644
7782 +--- a/fs/ceph/ioctl.c
7783 ++++ b/fs/ceph/ioctl.c
7784 +@@ -196,8 +196,10 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
7785 + r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
7786 + &dl.object_no, &dl.object_offset,
7787 + &olen);
7788 +- if (r < 0)
7789 ++ if (r < 0) {
7790 ++ up_read(&osdc->map_sem);
7791 + return -EIO;
7792 ++ }
7793 + dl.file_offset -= dl.object_offset;
7794 + dl.object_size = ceph_file_layout_object_size(ci->i_layout);
7795 + dl.block_size = ceph_file_layout_su(ci->i_layout);
7796 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
7797 +index d67c550c..37950c65 100644
7798 +--- a/fs/cifs/connect.c
7799 ++++ b/fs/cifs/connect.c
7800 +@@ -379,6 +379,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
7801 + try_to_freeze();
7802 +
7803 + /* we should try only the port we connected to before */
7804 ++ mutex_lock(&server->srv_mutex);
7805 + rc = generic_ip_connect(server);
7806 + if (rc) {
7807 + cifs_dbg(FYI, "reconnect error %d\n", rc);
7808 +@@ -390,6 +391,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
7809 + server->tcpStatus = CifsNeedNegotiate;
7810 + spin_unlock(&GlobalMid_Lock);
7811 + }
7812 ++ mutex_unlock(&server->srv_mutex);
7813 + } while (server->tcpStatus == CifsNeedReconnect);
7814 +
7815 + return rc;
7816 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
7817 +index b0c43345..f851d03f 100644
7818 +--- a/fs/cifs/smb2misc.c
7819 ++++ b/fs/cifs/smb2misc.c
7820 +@@ -417,96 +417,108 @@ cifs_ses_oplock_break(struct work_struct *work)
7821 + }
7822 +
7823 + static bool
7824 +-smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
7825 ++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
7826 ++ struct smb2_lease_break_work *lw)
7827 + {
7828 +- struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
7829 +- struct list_head *tmp, *tmp1, *tmp2;
7830 +- struct cifs_ses *ses;
7831 +- struct cifs_tcon *tcon;
7832 +- struct cifsInodeInfo *cinode;
7833 ++ bool found;
7834 ++ __u8 lease_state;
7835 ++ struct list_head *tmp;
7836 + struct cifsFileInfo *cfile;
7837 + struct cifs_pending_open *open;
7838 +- struct smb2_lease_break_work *lw;
7839 +- bool found;
7840 ++ struct cifsInodeInfo *cinode;
7841 + int ack_req = le32_to_cpu(rsp->Flags &
7842 + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
7843 +
7844 +- lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
7845 +- if (!lw)
7846 +- return false;
7847 ++ lease_state = smb2_map_lease_to_oplock(rsp->NewLeaseState);
7848 +
7849 +- INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
7850 +- lw->lease_state = rsp->NewLeaseState;
7851 ++ list_for_each(tmp, &tcon->openFileList) {
7852 ++ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
7853 ++ cinode = CIFS_I(cfile->dentry->d_inode);
7854 +
7855 +- cifs_dbg(FYI, "Checking for lease break\n");
7856 ++ if (memcmp(cinode->lease_key, rsp->LeaseKey,
7857 ++ SMB2_LEASE_KEY_SIZE))
7858 ++ continue;
7859 +
7860 +- /* look up tcon based on tid & uid */
7861 +- spin_lock(&cifs_tcp_ses_lock);
7862 +- list_for_each(tmp, &server->smb_ses_list) {
7863 +- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
7864 ++ cifs_dbg(FYI, "found in the open list\n");
7865 ++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
7866 ++ le32_to_cpu(rsp->NewLeaseState));
7867 +
7868 +- spin_lock(&cifs_file_list_lock);
7869 +- list_for_each(tmp1, &ses->tcon_list) {
7870 +- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
7871 ++ smb2_set_oplock_level(cinode, lease_state);
7872 +
7873 +- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
7874 +- list_for_each(tmp2, &tcon->openFileList) {
7875 +- cfile = list_entry(tmp2, struct cifsFileInfo,
7876 +- tlist);
7877 +- cinode = CIFS_I(cfile->dentry->d_inode);
7878 ++ if (ack_req)
7879 ++ cfile->oplock_break_cancelled = false;
7880 ++ else
7881 ++ cfile->oplock_break_cancelled = true;
7882 +
7883 +- if (memcmp(cinode->lease_key, rsp->LeaseKey,
7884 +- SMB2_LEASE_KEY_SIZE))
7885 +- continue;
7886 ++ queue_work(cifsiod_wq, &cfile->oplock_break);
7887 ++ kfree(lw);
7888 ++ return true;
7889 ++ }
7890 +
7891 +- cifs_dbg(FYI, "found in the open list\n");
7892 +- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
7893 +- le32_to_cpu(rsp->NewLeaseState));
7894 ++ found = false;
7895 ++ list_for_each_entry(open, &tcon->pending_opens, olist) {
7896 ++ if (memcmp(open->lease_key, rsp->LeaseKey,
7897 ++ SMB2_LEASE_KEY_SIZE))
7898 ++ continue;
7899 ++
7900 ++ if (!found && ack_req) {
7901 ++ found = true;
7902 ++ memcpy(lw->lease_key, open->lease_key,
7903 ++ SMB2_LEASE_KEY_SIZE);
7904 ++ lw->tlink = cifs_get_tlink(open->tlink);
7905 ++ queue_work(cifsiod_wq, &lw->lease_break);
7906 ++ }
7907 +
7908 +- smb2_set_oplock_level(cinode,
7909 +- smb2_map_lease_to_oplock(rsp->NewLeaseState));
7910 ++ cifs_dbg(FYI, "found in the pending open list\n");
7911 ++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
7912 ++ le32_to_cpu(rsp->NewLeaseState));
7913 +
7914 +- if (ack_req)
7915 +- cfile->oplock_break_cancelled = false;
7916 +- else
7917 +- cfile->oplock_break_cancelled = true;
7918 ++ open->oplock = lease_state;
7919 ++ }
7920 ++ return found;
7921 ++}
7922 +
7923 +- queue_work(cifsiod_wq, &cfile->oplock_break);
7924 ++static bool
7925 ++smb2_is_valid_lease_break(char *buffer)
7926 ++{
7927 ++ struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
7928 ++ struct list_head *tmp, *tmp1, *tmp2;
7929 ++ struct TCP_Server_Info *server;
7930 ++ struct cifs_ses *ses;
7931 ++ struct cifs_tcon *tcon;
7932 ++ struct smb2_lease_break_work *lw;
7933 +
7934 +- spin_unlock(&cifs_file_list_lock);
7935 +- spin_unlock(&cifs_tcp_ses_lock);
7936 +- return true;
7937 +- }
7938 ++ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
7939 ++ if (!lw)
7940 ++ return false;
7941 +
7942 +- found = false;
7943 +- list_for_each_entry(open, &tcon->pending_opens, olist) {
7944 +- if (memcmp(open->lease_key, rsp->LeaseKey,
7945 +- SMB2_LEASE_KEY_SIZE))
7946 +- continue;
7947 ++ INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
7948 ++ lw->lease_state = rsp->NewLeaseState;
7949 +
7950 +- if (!found && ack_req) {
7951 +- found = true;
7952 +- memcpy(lw->lease_key, open->lease_key,
7953 +- SMB2_LEASE_KEY_SIZE);
7954 +- lw->tlink = cifs_get_tlink(open->tlink);
7955 +- queue_work(cifsiod_wq,
7956 +- &lw->lease_break);
7957 +- }
7958 ++ cifs_dbg(FYI, "Checking for lease break\n");
7959 ++
7960 ++ /* look up tcon based on tid & uid */
7961 ++ spin_lock(&cifs_tcp_ses_lock);
7962 ++ list_for_each(tmp, &cifs_tcp_ses_list) {
7963 ++ server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
7964 +
7965 +- cifs_dbg(FYI, "found in the pending open list\n");
7966 +- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
7967 +- le32_to_cpu(rsp->NewLeaseState));
7968 ++ list_for_each(tmp1, &server->smb_ses_list) {
7969 ++ ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
7970 +
7971 +- open->oplock =
7972 +- smb2_map_lease_to_oplock(rsp->NewLeaseState);
7973 +- }
7974 +- if (found) {
7975 +- spin_unlock(&cifs_file_list_lock);
7976 +- spin_unlock(&cifs_tcp_ses_lock);
7977 +- return true;
7978 ++ spin_lock(&cifs_file_list_lock);
7979 ++ list_for_each(tmp2, &ses->tcon_list) {
7980 ++ tcon = list_entry(tmp2, struct cifs_tcon,
7981 ++ tcon_list);
7982 ++ cifs_stats_inc(
7983 ++ &tcon->stats.cifs_stats.num_oplock_brks);
7984 ++ if (smb2_tcon_has_lease(tcon, rsp, lw)) {
7985 ++ spin_unlock(&cifs_file_list_lock);
7986 ++ spin_unlock(&cifs_tcp_ses_lock);
7987 ++ return true;
7988 ++ }
7989 + }
7990 ++ spin_unlock(&cifs_file_list_lock);
7991 + }
7992 +- spin_unlock(&cifs_file_list_lock);
7993 + }
7994 + spin_unlock(&cifs_tcp_ses_lock);
7995 + kfree(lw);
7996 +@@ -532,7 +544,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
7997 + if (rsp->StructureSize !=
7998 + smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
7999 + if (le16_to_cpu(rsp->StructureSize) == 44)
8000 +- return smb2_is_valid_lease_break(buffer, server);
8001 ++ return smb2_is_valid_lease_break(buffer);
8002 + else
8003 + return false;
8004 + }
8005 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
8006 +index c2ca04e6..ea4d1884 100644
8007 +--- a/fs/ext4/inode.c
8008 ++++ b/fs/ext4/inode.c
8009 +@@ -1890,6 +1890,26 @@ static int ext4_writepage(struct page *page,
8010 + return ret;
8011 + }
8012 +
8013 ++static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
8014 ++{
8015 ++ int len;
8016 ++ loff_t size = i_size_read(mpd->inode);
8017 ++ int err;
8018 ++
8019 ++ BUG_ON(page->index != mpd->first_page);
8020 ++ if (page->index == size >> PAGE_CACHE_SHIFT)
8021 ++ len = size & ~PAGE_CACHE_MASK;
8022 ++ else
8023 ++ len = PAGE_CACHE_SIZE;
8024 ++ clear_page_dirty_for_io(page);
8025 ++ err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
8026 ++ if (!err)
8027 ++ mpd->wbc->nr_to_write--;
8028 ++ mpd->first_page++;
8029 ++
8030 ++ return err;
8031 ++}
8032 ++
8033 + #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
8034 +
8035 + /*
8036 +@@ -1904,82 +1924,94 @@ static int ext4_writepage(struct page *page,
8037 + *
8038 + * @mpd - extent of blocks
8039 + * @lblk - logical number of the block in the file
8040 +- * @b_state - b_state of the buffer head added
8041 ++ * @bh - buffer head we want to add to the extent
8042 + *
8043 +- * the function is used to collect contig. blocks in same state
8044 ++ * The function is used to collect contig. blocks in the same state. If the
8045 ++ * buffer doesn't require mapping for writeback and we haven't started the
8046 ++ * extent of buffers to map yet, the function returns 'true' immediately - the
8047 ++ * caller can write the buffer right away. Otherwise the function returns true
8048 ++ * if the block has been added to the extent, false if the block couldn't be
8049 ++ * added.
8050 + */
8051 +-static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
8052 +- unsigned long b_state)
8053 ++static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
8054 ++ struct buffer_head *bh)
8055 + {
8056 + struct ext4_map_blocks *map = &mpd->map;
8057 +
8058 +- /* Don't go larger than mballoc is willing to allocate */
8059 +- if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
8060 +- return 0;
8061 ++ /* Buffer that doesn't need mapping for writeback? */
8062 ++ if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
8063 ++ (!buffer_delay(bh) && !buffer_unwritten(bh))) {
8064 ++ /* So far no extent to map => we write the buffer right away */
8065 ++ if (map->m_len == 0)
8066 ++ return true;
8067 ++ return false;
8068 ++ }
8069 +
8070 + /* First block in the extent? */
8071 + if (map->m_len == 0) {
8072 + map->m_lblk = lblk;
8073 + map->m_len = 1;
8074 +- map->m_flags = b_state & BH_FLAGS;
8075 +- return 1;
8076 ++ map->m_flags = bh->b_state & BH_FLAGS;
8077 ++ return true;
8078 + }
8079 +
8080 ++ /* Don't go larger than mballoc is willing to allocate */
8081 ++ if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
8082 ++ return false;
8083 ++
8084 + /* Can we merge the block to our big extent? */
8085 + if (lblk == map->m_lblk + map->m_len &&
8086 +- (b_state & BH_FLAGS) == map->m_flags) {
8087 ++ (bh->b_state & BH_FLAGS) == map->m_flags) {
8088 + map->m_len++;
8089 +- return 1;
8090 ++ return true;
8091 + }
8092 +- return 0;
8093 ++ return false;
8094 + }
8095 +
8096 +-static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
8097 +- struct buffer_head *head,
8098 +- struct buffer_head *bh,
8099 +- ext4_lblk_t lblk)
8100 ++/*
8101 ++ * mpage_process_page_bufs - submit page buffers for IO or add them to extent
8102 ++ *
8103 ++ * @mpd - extent of blocks for mapping
8104 ++ * @head - the first buffer in the page
8105 ++ * @bh - buffer we should start processing from
8106 ++ * @lblk - logical number of the block in the file corresponding to @bh
8107 ++ *
8108 ++ * Walk through page buffers from @bh upto @head (exclusive) and either submit
8109 ++ * the page for IO if all buffers in this page were mapped and there's no
8110 ++ * accumulated extent of buffers to map or add buffers in the page to the
8111 ++ * extent of buffers to map. The function returns 1 if the caller can continue
8112 ++ * by processing the next page, 0 if it should stop adding buffers to the
8113 ++ * extent to map because we cannot extend it anymore. It can also return value
8114 ++ * < 0 in case of error during IO submission.
8115 ++ */
8116 ++static int mpage_process_page_bufs(struct mpage_da_data *mpd,
8117 ++ struct buffer_head *head,
8118 ++ struct buffer_head *bh,
8119 ++ ext4_lblk_t lblk)
8120 + {
8121 + struct inode *inode = mpd->inode;
8122 ++ int err;
8123 + ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
8124 + >> inode->i_blkbits;
8125 +
8126 + do {
8127 + BUG_ON(buffer_locked(bh));
8128 +
8129 +- if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
8130 +- (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
8131 +- lblk >= blocks) {
8132 ++ if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
8133 + /* Found extent to map? */
8134 + if (mpd->map.m_len)
8135 +- return false;
8136 +- if (lblk >= blocks)
8137 +- return true;
8138 +- continue;
8139 ++ return 0;
8140 ++ /* Everything mapped so far and we hit EOF */
8141 ++ break;
8142 + }
8143 +- if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
8144 +- return false;
8145 + } while (lblk++, (bh = bh->b_this_page) != head);
8146 +- return true;
8147 +-}
8148 +-
8149 +-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
8150 +-{
8151 +- int len;
8152 +- loff_t size = i_size_read(mpd->inode);
8153 +- int err;
8154 +-
8155 +- BUG_ON(page->index != mpd->first_page);
8156 +- if (page->index == size >> PAGE_CACHE_SHIFT)
8157 +- len = size & ~PAGE_CACHE_MASK;
8158 +- else
8159 +- len = PAGE_CACHE_SIZE;
8160 +- clear_page_dirty_for_io(page);
8161 +- err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
8162 +- if (!err)
8163 +- mpd->wbc->nr_to_write--;
8164 +- mpd->first_page++;
8165 +-
8166 +- return err;
8167 ++ /* So far everything mapped? Submit the page for IO. */
8168 ++ if (mpd->map.m_len == 0) {
8169 ++ err = mpage_submit_page(mpd, head->b_page);
8170 ++ if (err < 0)
8171 ++ return err;
8172 ++ }
8173 ++ return lblk < blocks;
8174 + }
8175 +
8176 + /*
8177 +@@ -2003,8 +2035,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
8178 + struct inode *inode = mpd->inode;
8179 + struct buffer_head *head, *bh;
8180 + int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
8181 +- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
8182 +- >> inode->i_blkbits;
8183 + pgoff_t start, end;
8184 + ext4_lblk_t lblk;
8185 + sector_t pblock;
8186 +@@ -2039,18 +2069,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
8187 + */
8188 + mpd->map.m_len = 0;
8189 + mpd->map.m_flags = 0;
8190 +- add_page_bufs_to_extent(mpd, head, bh,
8191 +- lblk);
8192 ++ /*
8193 ++ * FIXME: If dioread_nolock supports
8194 ++ * blocksize < pagesize, we need to make
8195 ++ * sure we add size mapped so far to
8196 ++ * io_end->size as the following call
8197 ++ * can submit the page for IO.
8198 ++ */
8199 ++ err = mpage_process_page_bufs(mpd, head,
8200 ++ bh, lblk);
8201 + pagevec_release(&pvec);
8202 +- return 0;
8203 ++ if (err > 0)
8204 ++ err = 0;
8205 ++ return err;
8206 + }
8207 + if (buffer_delay(bh)) {
8208 + clear_buffer_delay(bh);
8209 + bh->b_blocknr = pblock++;
8210 + }
8211 + clear_buffer_unwritten(bh);
8212 +- } while (++lblk < blocks &&
8213 +- (bh = bh->b_this_page) != head);
8214 ++ } while (lblk++, (bh = bh->b_this_page) != head);
8215 +
8216 + /*
8217 + * FIXME: This is going to break if dioread_nolock
8218 +@@ -2319,14 +2357,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
8219 + lblk = ((ext4_lblk_t)page->index) <<
8220 + (PAGE_CACHE_SHIFT - blkbits);
8221 + head = page_buffers(page);
8222 +- if (!add_page_bufs_to_extent(mpd, head, head, lblk))
8223 ++ err = mpage_process_page_bufs(mpd, head, head, lblk);
8224 ++ if (err <= 0)
8225 + goto out;
8226 +- /* So far everything mapped? Submit the page for IO. */
8227 +- if (mpd->map.m_len == 0) {
8228 +- err = mpage_submit_page(mpd, page);
8229 +- if (err < 0)
8230 +- goto out;
8231 +- }
8232 ++ err = 0;
8233 +
8234 + /*
8235 + * Accumulated enough dirty pages? This doesn't apply
8236 +@@ -4566,7 +4600,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
8237 + ext4_journal_stop(handle);
8238 + }
8239 +
8240 +- if (attr->ia_valid & ATTR_SIZE) {
8241 ++ if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
8242 ++ handle_t *handle;
8243 ++ loff_t oldsize = inode->i_size;
8244 +
8245 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
8246 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
8247 +@@ -4574,73 +4610,60 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
8248 + if (attr->ia_size > sbi->s_bitmap_maxbytes)
8249 + return -EFBIG;
8250 + }
8251 +- }
8252 +-
8253 +- if (S_ISREG(inode->i_mode) &&
8254 +- attr->ia_valid & ATTR_SIZE &&
8255 +- (attr->ia_size < inode->i_size)) {
8256 +- handle_t *handle;
8257 +-
8258 +- handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
8259 +- if (IS_ERR(handle)) {
8260 +- error = PTR_ERR(handle);
8261 +- goto err_out;
8262 +- }
8263 +- if (ext4_handle_valid(handle)) {
8264 +- error = ext4_orphan_add(handle, inode);
8265 +- orphan = 1;
8266 +- }
8267 +- EXT4_I(inode)->i_disksize = attr->ia_size;
8268 +- rc = ext4_mark_inode_dirty(handle, inode);
8269 +- if (!error)
8270 +- error = rc;
8271 +- ext4_journal_stop(handle);
8272 +-
8273 +- if (ext4_should_order_data(inode)) {
8274 +- error = ext4_begin_ordered_truncate(inode,
8275 ++ if (S_ISREG(inode->i_mode) &&
8276 ++ (attr->ia_size < inode->i_size)) {
8277 ++ if (ext4_should_order_data(inode)) {
8278 ++ error = ext4_begin_ordered_truncate(inode,
8279 + attr->ia_size);
8280 +- if (error) {
8281 +- /* Do as much error cleanup as possible */
8282 +- handle = ext4_journal_start(inode,
8283 +- EXT4_HT_INODE, 3);
8284 +- if (IS_ERR(handle)) {
8285 +- ext4_orphan_del(NULL, inode);
8286 ++ if (error)
8287 + goto err_out;
8288 +- }
8289 +- ext4_orphan_del(handle, inode);
8290 +- orphan = 0;
8291 +- ext4_journal_stop(handle);
8292 ++ }
8293 ++ handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
8294 ++ if (IS_ERR(handle)) {
8295 ++ error = PTR_ERR(handle);
8296 ++ goto err_out;
8297 ++ }
8298 ++ if (ext4_handle_valid(handle)) {
8299 ++ error = ext4_orphan_add(handle, inode);
8300 ++ orphan = 1;
8301 ++ }
8302 ++ EXT4_I(inode)->i_disksize = attr->ia_size;
8303 ++ rc = ext4_mark_inode_dirty(handle, inode);
8304 ++ if (!error)
8305 ++ error = rc;
8306 ++ ext4_journal_stop(handle);
8307 ++ if (error) {
8308 ++ ext4_orphan_del(NULL, inode);
8309 + goto err_out;
8310 + }
8311 + }
8312 +- }
8313 +-
8314 +- if (attr->ia_valid & ATTR_SIZE) {
8315 +- if (attr->ia_size != inode->i_size) {
8316 +- loff_t oldsize = inode->i_size;
8317 +
8318 +- i_size_write(inode, attr->ia_size);
8319 +- /*
8320 +- * Blocks are going to be removed from the inode. Wait
8321 +- * for dio in flight. Temporarily disable
8322 +- * dioread_nolock to prevent livelock.
8323 +- */
8324 +- if (orphan) {
8325 +- if (!ext4_should_journal_data(inode)) {
8326 +- ext4_inode_block_unlocked_dio(inode);
8327 +- inode_dio_wait(inode);
8328 +- ext4_inode_resume_unlocked_dio(inode);
8329 +- } else
8330 +- ext4_wait_for_tail_page_commit(inode);
8331 +- }
8332 +- /*
8333 +- * Truncate pagecache after we've waited for commit
8334 +- * in data=journal mode to make pages freeable.
8335 +- */
8336 +- truncate_pagecache(inode, oldsize, inode->i_size);
8337 ++ i_size_write(inode, attr->ia_size);
8338 ++ /*
8339 ++ * Blocks are going to be removed from the inode. Wait
8340 ++ * for dio in flight. Temporarily disable
8341 ++ * dioread_nolock to prevent livelock.
8342 ++ */
8343 ++ if (orphan) {
8344 ++ if (!ext4_should_journal_data(inode)) {
8345 ++ ext4_inode_block_unlocked_dio(inode);
8346 ++ inode_dio_wait(inode);
8347 ++ ext4_inode_resume_unlocked_dio(inode);
8348 ++ } else
8349 ++ ext4_wait_for_tail_page_commit(inode);
8350 + }
8351 +- ext4_truncate(inode);
8352 ++ /*
8353 ++ * Truncate pagecache after we've waited for commit
8354 ++ * in data=journal mode to make pages freeable.
8355 ++ */
8356 ++ truncate_pagecache(inode, oldsize, inode->i_size);
8357 + }
8358 ++ /*
8359 ++ * We want to call ext4_truncate() even if attr->ia_size ==
8360 ++ * inode->i_size for cases like truncation of fallocated space
8361 ++ */
8362 ++ if (attr->ia_valid & ATTR_SIZE)
8363 ++ ext4_truncate(inode);
8364 +
8365 + if (!rc) {
8366 + setattr_copy(inode, attr);
8367 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
8368 +index 72a5d5b0..8fec28ff 100644
8369 +--- a/fs/fuse/dir.c
8370 ++++ b/fs/fuse/dir.c
8371 +@@ -1174,6 +1174,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
8372 + return -EIO;
8373 + if (reclen > nbytes)
8374 + break;
8375 ++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
8376 ++ return -EIO;
8377 +
8378 + if (!dir_emit(ctx, dirent->name, dirent->namelen,
8379 + dirent->ino, dirent->type))
8380 +@@ -1320,6 +1322,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
8381 + return -EIO;
8382 + if (reclen > nbytes)
8383 + break;
8384 ++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
8385 ++ return -EIO;
8386 +
8387 + if (!over) {
8388 + /* We fill entries into dstbuf only as much as
8389 +@@ -1590,6 +1594,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
8390 + struct file *file)
8391 + {
8392 + struct fuse_conn *fc = get_fuse_conn(inode);
8393 ++ struct fuse_inode *fi = get_fuse_inode(inode);
8394 + struct fuse_req *req;
8395 + struct fuse_setattr_in inarg;
8396 + struct fuse_attr_out outarg;
8397 +@@ -1617,8 +1622,10 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
8398 + if (IS_ERR(req))
8399 + return PTR_ERR(req);
8400 +
8401 +- if (is_truncate)
8402 ++ if (is_truncate) {
8403 + fuse_set_nowrite(inode);
8404 ++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
8405 ++ }
8406 +
8407 + memset(&inarg, 0, sizeof(inarg));
8408 + memset(&outarg, 0, sizeof(outarg));
8409 +@@ -1680,12 +1687,14 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
8410 + invalidate_inode_pages2(inode->i_mapping);
8411 + }
8412 +
8413 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
8414 + return 0;
8415 +
8416 + error:
8417 + if (is_truncate)
8418 + fuse_release_nowrite(inode);
8419 +
8420 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
8421 + return err;
8422 + }
8423 +
8424 +@@ -1749,6 +1758,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
8425 + fc->no_setxattr = 1;
8426 + err = -EOPNOTSUPP;
8427 + }
8428 ++ if (!err)
8429 ++ fuse_invalidate_attr(inode);
8430 + return err;
8431 + }
8432 +
8433 +@@ -1878,6 +1889,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
8434 + fc->no_removexattr = 1;
8435 + err = -EOPNOTSUPP;
8436 + }
8437 ++ if (!err)
8438 ++ fuse_invalidate_attr(inode);
8439 + return err;
8440 + }
8441 +
8442 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
8443 +index 5c121fe1..d409deaf 100644
8444 +--- a/fs/fuse/file.c
8445 ++++ b/fs/fuse/file.c
8446 +@@ -629,7 +629,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
8447 + struct fuse_inode *fi = get_fuse_inode(inode);
8448 +
8449 + spin_lock(&fc->lock);
8450 +- if (attr_ver == fi->attr_version && size < inode->i_size) {
8451 ++ if (attr_ver == fi->attr_version && size < inode->i_size &&
8452 ++ !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
8453 + fi->attr_version = ++fc->attr_version;
8454 + i_size_write(inode, size);
8455 + }
8456 +@@ -1032,12 +1033,16 @@ static ssize_t fuse_perform_write(struct file *file,
8457 + {
8458 + struct inode *inode = mapping->host;
8459 + struct fuse_conn *fc = get_fuse_conn(inode);
8460 ++ struct fuse_inode *fi = get_fuse_inode(inode);
8461 + int err = 0;
8462 + ssize_t res = 0;
8463 +
8464 + if (is_bad_inode(inode))
8465 + return -EIO;
8466 +
8467 ++ if (inode->i_size < pos + iov_iter_count(ii))
8468 ++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
8469 ++
8470 + do {
8471 + struct fuse_req *req;
8472 + ssize_t count;
8473 +@@ -1073,6 +1078,7 @@ static ssize_t fuse_perform_write(struct file *file,
8474 + if (res > 0)
8475 + fuse_write_update_size(inode, pos);
8476 +
8477 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
8478 + fuse_invalidate_attr(inode);
8479 +
8480 + return res > 0 ? res : err;
8481 +@@ -1529,7 +1535,6 @@ static int fuse_writepage_locked(struct page *page)
8482 +
8483 + inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
8484 + inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
8485 +- end_page_writeback(page);
8486 +
8487 + spin_lock(&fc->lock);
8488 + list_add(&req->writepages_entry, &fi->writepages);
8489 +@@ -1537,6 +1542,8 @@ static int fuse_writepage_locked(struct page *page)
8490 + fuse_flush_writepages(inode);
8491 + spin_unlock(&fc->lock);
8492 +
8493 ++ end_page_writeback(page);
8494 ++
8495 + return 0;
8496 +
8497 + err_free:
8498 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
8499 +index fde7249a..5ced199b 100644
8500 +--- a/fs/fuse/fuse_i.h
8501 ++++ b/fs/fuse/fuse_i.h
8502 +@@ -115,6 +115,8 @@ struct fuse_inode {
8503 + enum {
8504 + /** Advise readdirplus */
8505 + FUSE_I_ADVISE_RDPLUS,
8506 ++ /** An operation changing file size is in progress */
8507 ++ FUSE_I_SIZE_UNSTABLE,
8508 + };
8509 +
8510 + struct fuse_conn;
8511 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
8512 +index 0b578598..e0fe703e 100644
8513 +--- a/fs/fuse/inode.c
8514 ++++ b/fs/fuse/inode.c
8515 +@@ -201,7 +201,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
8516 + struct timespec old_mtime;
8517 +
8518 + spin_lock(&fc->lock);
8519 +- if (attr_version != 0 && fi->attr_version > attr_version) {
8520 ++ if ((attr_version != 0 && fi->attr_version > attr_version) ||
8521 ++ test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
8522 + spin_unlock(&fc->lock);
8523 + return;
8524 + }
8525 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
8526 +index c348d6d8..e5d408a7 100644
8527 +--- a/fs/isofs/inode.c
8528 ++++ b/fs/isofs/inode.c
8529 +@@ -117,8 +117,8 @@ static void destroy_inodecache(void)
8530 +
8531 + static int isofs_remount(struct super_block *sb, int *flags, char *data)
8532 + {
8533 +- /* we probably want a lot more here */
8534 +- *flags |= MS_RDONLY;
8535 ++ if (!(*flags & MS_RDONLY))
8536 ++ return -EROFS;
8537 + return 0;
8538 + }
8539 +
8540 +@@ -763,15 +763,6 @@ root_found:
8541 + */
8542 + s->s_maxbytes = 0x80000000000LL;
8543 +
8544 +- /*
8545 +- * The CDROM is read-only, has no nodes (devices) on it, and since
8546 +- * all of the files appear to be owned by root, we really do not want
8547 +- * to allow suid. (suid or devices will not show up unless we have
8548 +- * Rock Ridge extensions)
8549 +- */
8550 +-
8551 +- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
8552 +-
8553 + /* Set this for reference. Its not currently used except on write
8554 + which we don't have .. */
8555 +
8556 +@@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
8557 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
8558 + int flags, const char *dev_name, void *data)
8559 + {
8560 ++ /* We don't support read-write mounts */
8561 ++ if (!(flags & MS_RDONLY))
8562 ++ return ERR_PTR(-EACCES);
8563 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
8564 + }
8565 +
8566 +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
8567 +index 2487116d..84606472 100644
8568 +--- a/fs/ocfs2/extent_map.c
8569 ++++ b/fs/ocfs2/extent_map.c
8570 +@@ -781,7 +781,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8571 + cpos = map_start >> osb->s_clustersize_bits;
8572 + mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
8573 + map_start + map_len);
8574 +- mapping_end -= cpos;
8575 + is_last = 0;
8576 + while (cpos < mapping_end && !is_last) {
8577 + u32 fe_flags;
8578 +diff --git a/fs/proc/root.c b/fs/proc/root.c
8579 +index e0a790da..0e0e83c4 100644
8580 +--- a/fs/proc/root.c
8581 ++++ b/fs/proc/root.c
8582 +@@ -110,7 +110,8 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
8583 + ns = task_active_pid_ns(current);
8584 + options = data;
8585 +
8586 +- if (!current_user_ns()->may_mount_proc)
8587 ++ if (!current_user_ns()->may_mount_proc ||
8588 ++ !ns_capable(ns->user_ns, CAP_SYS_ADMIN))
8589 + return ERR_PTR(-EPERM);
8590 + }
8591 +
8592 +diff --git a/include/linux/compat.h b/include/linux/compat.h
8593 +index 7f0c1dd0..ec1aee4a 100644
8594 +--- a/include/linux/compat.h
8595 ++++ b/include/linux/compat.h
8596 +@@ -669,6 +669,13 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
8597 +
8598 + int compat_restore_altstack(const compat_stack_t __user *uss);
8599 + int __compat_save_altstack(compat_stack_t __user *, unsigned long);
8600 ++#define compat_save_altstack_ex(uss, sp) do { \
8601 ++ compat_stack_t __user *__uss = uss; \
8602 ++ struct task_struct *t = current; \
8603 ++ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
8604 ++ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
8605 ++ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
8606 ++} while (0);
8607 +
8608 + asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
8609 + struct compat_timespec __user *interval);
8610 +diff --git a/include/linux/hid.h b/include/linux/hid.h
8611 +index 0c48991b..ff545cc3 100644
8612 +--- a/include/linux/hid.h
8613 ++++ b/include/linux/hid.h
8614 +@@ -393,10 +393,12 @@ struct hid_report {
8615 + struct hid_device *device; /* associated device */
8616 + };
8617 +
8618 ++#define HID_MAX_IDS 256
8619 ++
8620 + struct hid_report_enum {
8621 + unsigned numbered;
8622 + struct list_head report_list;
8623 +- struct hid_report *report_id_hash[256];
8624 ++ struct hid_report *report_id_hash[HID_MAX_IDS];
8625 + };
8626 +
8627 + #define HID_REPORT_TYPES 3
8628 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
8629 +index 3bed2e89..d1fe5d00 100644
8630 +--- a/include/linux/pci_ids.h
8631 ++++ b/include/linux/pci_ids.h
8632 +@@ -518,6 +518,8 @@
8633 + #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
8634 + #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
8635 + #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
8636 ++#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
8637 ++#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
8638 + #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
8639 + #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
8640 + #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
8641 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
8642 +index f4b1001a..4106721c 100644
8643 +--- a/include/linux/rculist.h
8644 ++++ b/include/linux/rculist.h
8645 +@@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
8646 + */
8647 + #define list_first_or_null_rcu(ptr, type, member) \
8648 + ({struct list_head *__ptr = (ptr); \
8649 +- struct list_head __rcu *__next = list_next_rcu(__ptr); \
8650 +- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
8651 ++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
8652 ++ likely(__ptr != __next) ? \
8653 ++ list_entry_rcu(__next, type, member) : NULL; \
8654 + })
8655 +
8656 + /**
8657 +diff --git a/include/linux/signal.h b/include/linux/signal.h
8658 +index d8974847..2ac423bd 100644
8659 +--- a/include/linux/signal.h
8660 ++++ b/include/linux/signal.h
8661 +@@ -434,6 +434,14 @@ void signals_init(void);
8662 + int restore_altstack(const stack_t __user *);
8663 + int __save_altstack(stack_t __user *, unsigned long);
8664 +
8665 ++#define save_altstack_ex(uss, sp) do { \
8666 ++ stack_t __user *__uss = uss; \
8667 ++ struct task_struct *t = current; \
8668 ++ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
8669 ++ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
8670 ++ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
8671 ++} while (0);
8672 ++
8673 + #ifdef CONFIG_PROC_FS
8674 + struct seq_file;
8675 + extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
8676 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
8677 +index 1e88377e..3e541e63 100644
8678 +--- a/include/linux/usb/hcd.h
8679 ++++ b/include/linux/usb/hcd.h
8680 +@@ -411,7 +411,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
8681 + extern void usb_hcd_pci_remove(struct pci_dev *dev);
8682 + extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
8683 +
8684 +-#ifdef CONFIG_PM_SLEEP
8685 ++#ifdef CONFIG_PM
8686 + extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
8687 + #endif
8688 + #endif /* CONFIG_PCI */
8689 +diff --git a/ipc/msg.c b/ipc/msg.c
8690 +index 9f29d9e8..b65fdf1a 100644
8691 +--- a/ipc/msg.c
8692 ++++ b/ipc/msg.c
8693 +@@ -680,16 +680,18 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
8694 + goto out_unlock1;
8695 + }
8696 +
8697 ++ ipc_lock_object(&msq->q_perm);
8698 ++
8699 + for (;;) {
8700 + struct msg_sender s;
8701 +
8702 + err = -EACCES;
8703 + if (ipcperms(ns, &msq->q_perm, S_IWUGO))
8704 +- goto out_unlock1;
8705 ++ goto out_unlock0;
8706 +
8707 + err = security_msg_queue_msgsnd(msq, msg, msgflg);
8708 + if (err)
8709 +- goto out_unlock1;
8710 ++ goto out_unlock0;
8711 +
8712 + if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
8713 + 1 + msq->q_qnum <= msq->q_qbytes) {
8714 +@@ -699,10 +701,9 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
8715 + /* queue full, wait: */
8716 + if (msgflg & IPC_NOWAIT) {
8717 + err = -EAGAIN;
8718 +- goto out_unlock1;
8719 ++ goto out_unlock0;
8720 + }
8721 +
8722 +- ipc_lock_object(&msq->q_perm);
8723 + ss_add(msq, &s);
8724 +
8725 + if (!ipc_rcu_getref(msq)) {
8726 +@@ -730,10 +731,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
8727 + goto out_unlock0;
8728 + }
8729 +
8730 +- ipc_unlock_object(&msq->q_perm);
8731 + }
8732 +-
8733 +- ipc_lock_object(&msq->q_perm);
8734 + msq->q_lspid = task_tgid_vnr(current);
8735 + msq->q_stime = get_seconds();
8736 +
8737 +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
8738 +index f3569747..ad8e1bdc 100644
8739 +--- a/kernel/events/uprobes.c
8740 ++++ b/kernel/events/uprobes.c
8741 +@@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
8742 + tmp = ri;
8743 + ri = ri->next;
8744 + kfree(tmp);
8745 ++ utask->depth--;
8746 +
8747 + if (!chained)
8748 + break;
8749 +-
8750 +- utask->depth--;
8751 +-
8752 + BUG_ON(!ri);
8753 + }
8754 +
8755 +diff --git a/kernel/fork.c b/kernel/fork.c
8756 +index bf46287c..200a7a29 100644
8757 +--- a/kernel/fork.c
8758 ++++ b/kernel/fork.c
8759 +@@ -1173,10 +1173,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
8760 + return ERR_PTR(-EINVAL);
8761 +
8762 + /*
8763 +- * If the new process will be in a different pid namespace
8764 +- * don't allow the creation of threads.
8765 ++ * If the new process will be in a different pid namespace don't
8766 ++ * allow it to share a thread group or signal handlers with the
8767 ++ * forking task.
8768 + */
8769 +- if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
8770 ++ if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
8771 + (task_active_pid_ns(current) !=
8772 + current->nsproxy->pid_ns_for_children))
8773 + return ERR_PTR(-EINVAL);
8774 +diff --git a/kernel/pid.c b/kernel/pid.c
8775 +index 66505c1d..ebe5e80b 100644
8776 +--- a/kernel/pid.c
8777 ++++ b/kernel/pid.c
8778 +@@ -265,6 +265,7 @@ void free_pid(struct pid *pid)
8779 + struct pid_namespace *ns = upid->ns;
8780 + hlist_del_rcu(&upid->pid_chain);
8781 + switch(--ns->nr_hashed) {
8782 ++ case 2:
8783 + case 1:
8784 + /* When all that is left in the pid namespace
8785 + * is the reaper wake up the reaper. The reaper
8786 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
8787 +index a92012a7..f2820fbf 100644
8788 +--- a/mm/huge_memory.c
8789 ++++ b/mm/huge_memory.c
8790 +@@ -2296,6 +2296,8 @@ static void collapse_huge_page(struct mm_struct *mm,
8791 + goto out;
8792 +
8793 + vma = find_vma(mm, address);
8794 ++ if (!vma)
8795 ++ goto out;
8796 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
8797 + hend = vma->vm_end & HPAGE_PMD_MASK;
8798 + if (address < hstart || address + HPAGE_PMD_SIZE > hend)
8799 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
8800 +index 0878ff7c..aa44621e 100644
8801 +--- a/mm/memcontrol.c
8802 ++++ b/mm/memcontrol.c
8803 +@@ -5616,7 +5616,13 @@ static int compare_thresholds(const void *a, const void *b)
8804 + const struct mem_cgroup_threshold *_a = a;
8805 + const struct mem_cgroup_threshold *_b = b;
8806 +
8807 +- return _a->threshold - _b->threshold;
8808 ++ if (_a->threshold > _b->threshold)
8809 ++ return 1;
8810 ++
8811 ++ if (_a->threshold < _b->threshold)
8812 ++ return -1;
8813 ++
8814 ++ return 0;
8815 + }
8816 +
8817 + static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
8818 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
8819 +index dd47889a..dbc0a739 100644
8820 +--- a/net/ceph/osd_client.c
8821 ++++ b/net/ceph/osd_client.c
8822 +@@ -2129,6 +2129,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
8823 + dout("osdc_start_request failed map, "
8824 + " will retry %lld\n", req->r_tid);
8825 + rc = 0;
8826 ++ } else {
8827 ++ __unregister_request(osdc, req);
8828 + }
8829 + goto out_unlock;
8830 + }
8831 +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
8832 +index 603ddd92..dbd9a479 100644
8833 +--- a/net/ceph/osdmap.c
8834 ++++ b/net/ceph/osdmap.c
8835 +@@ -1129,7 +1129,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
8836 +
8837 + /* pg_temp? */
8838 + pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
8839 +- pool->pgp_num_mask);
8840 ++ pool->pg_num_mask);
8841 + pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
8842 + if (pg) {
8843 + *num = pg->len;
8844 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
8845 +index cc9e02d7..7a98d524 100644
8846 +--- a/net/mac80211/mlme.c
8847 ++++ b/net/mac80211/mlme.c
8848 +@@ -2851,14 +2851,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
8849 + ieee80211_rx_bss_put(local, bss);
8850 + sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
8851 + }
8852 +-
8853 +- if (!sdata->u.mgd.associated ||
8854 +- !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid))
8855 +- return;
8856 +-
8857 +- ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
8858 +- elems, true);
8859 +-
8860 + }
8861 +
8862 +
8863 +@@ -3147,6 +3139,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
8864 +
8865 + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
8866 +
8867 ++ ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
8868 ++ &elems, true);
8869 ++
8870 + if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
8871 + elems.wmm_param_len))
8872 + changed |= BSS_CHANGED_QOS;
8873 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
8874 +index 8860dd52..9552da23 100644
8875 +--- a/sound/pci/hda/hda_intel.c
8876 ++++ b/sound/pci/hda/hda_intel.c
8877 +@@ -3376,6 +3376,7 @@ static struct snd_pci_quirk msi_black_list[] = {
8878 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
8879 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
8880 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
8881 ++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
8882 + SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
8883 + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
8884 + {}
8885 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
8886 +index 9f358627..45850f67 100644
8887 +--- a/sound/pci/hda/patch_hdmi.c
8888 ++++ b/sound/pci/hda/patch_hdmi.c
8889 +@@ -67,6 +67,8 @@ struct hdmi_spec_per_pin {
8890 + struct delayed_work work;
8891 + struct snd_kcontrol *eld_ctl;
8892 + int repoll_count;
8893 ++ bool setup; /* the stream has been set up by prepare callback */
8894 ++ int channels; /* current number of channels */
8895 + bool non_pcm;
8896 + bool chmap_set; /* channel-map override by ALSA API? */
8897 + unsigned char chmap[8]; /* ALSA API channel-map */
8898 +@@ -551,6 +553,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
8899 + }
8900 + }
8901 +
8902 ++ if (!ca) {
8903 ++ /* if there was no match, select the regular ALSA channel
8904 ++ * allocation with the matching number of channels */
8905 ++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
8906 ++ if (channels == channel_allocations[i].channels) {
8907 ++ ca = channel_allocations[i].ca_index;
8908 ++ break;
8909 ++ }
8910 ++ }
8911 ++ }
8912 ++
8913 + snd_print_channel_allocation(eld->info.spk_alloc, buf, sizeof(buf));
8914 + snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
8915 + ca, channels, buf);
8916 +@@ -868,18 +881,19 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
8917 + return true;
8918 + }
8919 +
8920 +-static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
8921 +- bool non_pcm,
8922 +- struct snd_pcm_substream *substream)
8923 ++static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
8924 ++ struct hdmi_spec_per_pin *per_pin,
8925 ++ bool non_pcm)
8926 + {
8927 +- struct hdmi_spec *spec = codec->spec;
8928 +- struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
8929 + hda_nid_t pin_nid = per_pin->pin_nid;
8930 +- int channels = substream->runtime->channels;
8931 ++ int channels = per_pin->channels;
8932 + struct hdmi_eld *eld;
8933 + int ca;
8934 + union audio_infoframe ai;
8935 +
8936 ++ if (!channels)
8937 ++ return;
8938 ++
8939 + eld = &per_pin->sink_eld;
8940 + if (!eld->monitor_present)
8941 + return;
8942 +@@ -1329,6 +1343,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
8943 + eld_changed = true;
8944 + }
8945 + if (update_eld) {
8946 ++ bool old_eld_valid = pin_eld->eld_valid;
8947 + pin_eld->eld_valid = eld->eld_valid;
8948 + eld_changed = pin_eld->eld_size != eld->eld_size ||
8949 + memcmp(pin_eld->eld_buffer, eld->eld_buffer,
8950 +@@ -1338,6 +1353,18 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
8951 + eld->eld_size);
8952 + pin_eld->eld_size = eld->eld_size;
8953 + pin_eld->info = eld->info;
8954 ++
8955 ++ /* Haswell-specific workaround: re-setup when the transcoder is
8956 ++ * changed during the stream playback
8957 ++ */
8958 ++ if (codec->vendor_id == 0x80862807 &&
8959 ++ eld->eld_valid && !old_eld_valid && per_pin->setup) {
8960 ++ snd_hda_codec_write(codec, pin_nid, 0,
8961 ++ AC_VERB_SET_AMP_GAIN_MUTE,
8962 ++ AMP_OUT_UNMUTE);
8963 ++ hdmi_setup_audio_infoframe(codec, per_pin,
8964 ++ per_pin->non_pcm);
8965 ++ }
8966 + }
8967 + mutex_unlock(&pin_eld->lock);
8968 +
8969 +@@ -1510,14 +1537,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
8970 + hda_nid_t cvt_nid = hinfo->nid;
8971 + struct hdmi_spec *spec = codec->spec;
8972 + int pin_idx = hinfo_to_pin_index(spec, hinfo);
8973 +- hda_nid_t pin_nid = get_pin(spec, pin_idx)->pin_nid;
8974 ++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
8975 ++ hda_nid_t pin_nid = per_pin->pin_nid;
8976 + bool non_pcm;
8977 +
8978 + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
8979 ++ per_pin->channels = substream->runtime->channels;
8980 ++ per_pin->setup = true;
8981 +
8982 + hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
8983 +
8984 +- hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
8985 ++ hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
8986 +
8987 + return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
8988 + }
8989 +@@ -1557,6 +1587,9 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
8990 + snd_hda_spdif_ctls_unassign(codec, pin_idx);
8991 + per_pin->chmap_set = false;
8992 + memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
8993 ++
8994 ++ per_pin->setup = false;
8995 ++ per_pin->channels = 0;
8996 + }
8997 +
8998 + return 0;
8999 +@@ -1692,8 +1725,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
9000 + per_pin->chmap_set = true;
9001 + memcpy(per_pin->chmap, chmap, sizeof(chmap));
9002 + if (prepared)
9003 +- hdmi_setup_audio_infoframe(codec, pin_idx, per_pin->non_pcm,
9004 +- substream);
9005 ++ hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
9006 +
9007 + return 0;
9008 + }
9009 +diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
9010 +index 5402dfbb..8a8d9364 100644
9011 +--- a/sound/soc/codecs/mc13783.c
9012 ++++ b/sound/soc/codecs/mc13783.c
9013 +@@ -126,6 +126,10 @@ static int mc13783_write(struct snd_soc_codec *codec,
9014 +
9015 + ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
9016 +
9017 ++ /* include errata fix for spi audio problems */
9018 ++ if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
9019 ++ ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
9020 ++
9021 + mc13xxx_unlock(priv->mc13xxx);
9022 +
9023 + return ret;
9024 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
9025 +index 0a4ffdd1..5e5af898 100644
9026 +--- a/sound/soc/codecs/wm8960.c
9027 ++++ b/sound/soc/codecs/wm8960.c
9028 +@@ -857,9 +857,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
9029 + if (pll_div.k) {
9030 + reg |= 0x20;
9031 +
9032 +- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
9033 +- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
9034 +- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
9035 ++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
9036 ++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
9037 ++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
9038 + }
9039 + snd_soc_write(codec, WM8960_PLL1, reg);
9040 +
9041
9042 Modified: genpatches-2.6/trunk/3.4/0000_README
9043 ===================================================================
9044 --- genpatches-2.6/trunk/3.4/0000_README 2013-09-25 17:13:05 UTC (rev 2528)
9045 +++ genpatches-2.6/trunk/3.4/0000_README 2013-09-27 17:20:08 UTC (rev 2529)
9046 @@ -287,6 +287,10 @@
9047 From: http://www.kernel.org
9048 Desc: Linux 3.4.62
9049
9050 +Patch: 1062_linux-3.4.63.patch
9051 +From: http://www.kernel.org
9052 +Desc: Linux 3.4.63
9053 +
9054 Patch: 1500_XATTR_USER_PREFIX.patch
9055 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
9056 Desc: Support for namespace user.pax.* on tmpfs.
9057
9058 Added: genpatches-2.6/trunk/3.4/1062_linux-3.4.63.patch
9059 ===================================================================
9060 --- genpatches-2.6/trunk/3.4/1062_linux-3.4.63.patch (rev 0)
9061 +++ genpatches-2.6/trunk/3.4/1062_linux-3.4.63.patch 2013-09-27 17:20:08 UTC (rev 2529)
9062 @@ -0,0 +1,1055 @@
9063 +diff --git a/Makefile b/Makefile
9064 +index 3f23cb7e..94ce9416 100644
9065 +--- a/Makefile
9066 ++++ b/Makefile
9067 +@@ -1,6 +1,6 @@
9068 + VERSION = 3
9069 + PATCHLEVEL = 4
9070 +-SUBLEVEL = 62
9071 ++SUBLEVEL = 63
9072 + EXTRAVERSION =
9073 + NAME = Saber-toothed Squirrel
9074 +
9075 +diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
9076 +index d2268be8..709beb1d 100644
9077 +--- a/arch/arm/mach-versatile/pci.c
9078 ++++ b/arch/arm/mach-versatile/pci.c
9079 +@@ -42,9 +42,9 @@
9080 + #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
9081 + #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
9082 + #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
9083 +-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
9084 +-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
9085 +-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
9086 ++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
9087 ++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
9088 ++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
9089 + #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
9090 +
9091 + #define DEVICE_ID_OFFSET 0x00
9092 +diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
9093 +index 54d0eb4db..89276a2f 100644
9094 +--- a/arch/mips/ath79/clock.c
9095 ++++ b/arch/mips/ath79/clock.c
9096 +@@ -159,7 +159,7 @@ static void __init ar933x_clocks_init(void)
9097 + ath79_ahb_clk.rate = freq / t;
9098 + }
9099 +
9100 +- ath79_wdt_clk.rate = ath79_ref_clk.rate;
9101 ++ ath79_wdt_clk.rate = ath79_ahb_clk.rate;
9102 + ath79_uart_clk.rate = ath79_ref_clk.rate;
9103 + }
9104 +
9105 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
9106 +index ee5b690a..52e5758e 100644
9107 +--- a/arch/powerpc/kernel/align.c
9108 ++++ b/arch/powerpc/kernel/align.c
9109 +@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
9110 + nb = aligninfo[instr].len;
9111 + flags = aligninfo[instr].flags;
9112 +
9113 ++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
9114 ++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
9115 ++ nb = 8;
9116 ++ flags = LD+SW;
9117 ++ } else if (IS_XFORM(instruction) &&
9118 ++ ((instruction >> 1) & 0x3ff) == 660) {
9119 ++ nb = 8;
9120 ++ flags = ST+SW;
9121 ++ }
9122 ++
9123 + /* Byteswap little endian loads and stores */
9124 + swiz = 0;
9125 + if (regs->msr & MSR_LE) {
9126 +diff --git a/crypto/api.c b/crypto/api.c
9127 +index 033a7147..4f98dd5b 100644
9128 +--- a/crypto/api.c
9129 ++++ b/crypto/api.c
9130 +@@ -40,6 +40,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
9131 + return alg;
9132 + }
9133 +
9134 ++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
9135 ++
9136 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
9137 + {
9138 + return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
9139 +@@ -150,8 +152,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
9140 + }
9141 + up_write(&crypto_alg_sem);
9142 +
9143 +- if (alg != &larval->alg)
9144 ++ if (alg != &larval->alg) {
9145 + kfree(larval);
9146 ++ if (crypto_is_larval(alg))
9147 ++ alg = crypto_larval_wait(alg);
9148 ++ }
9149 +
9150 + return alg;
9151 + }
9152 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
9153 +index 8b77fd31..efe172f3 100644
9154 +--- a/drivers/gpu/drm/drm_edid.c
9155 ++++ b/drivers/gpu/drm/drm_edid.c
9156 +@@ -125,6 +125,9 @@ static struct edid_quirk {
9157 +
9158 + /* ViewSonic VA2026w */
9159 + { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
9160 ++
9161 ++ /* Medion MD 30217 PG */
9162 ++ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
9163 + };
9164 +
9165 + /*** DDC fetch and block validation ***/
9166 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
9167 +index ff73d60c..ab59fdf0 100644
9168 +--- a/drivers/hid/hid-core.c
9169 ++++ b/drivers/hid/hid-core.c
9170 +@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
9171 + struct hid_report_enum *report_enum = device->report_enum + type;
9172 + struct hid_report *report;
9173 +
9174 ++ if (id >= HID_MAX_IDS)
9175 ++ return NULL;
9176 + if (report_enum->report_id_hash[id])
9177 + return report_enum->report_id_hash[id];
9178 +
9179 +@@ -385,8 +387,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
9180 +
9181 + case HID_GLOBAL_ITEM_TAG_REPORT_ID:
9182 + parser->global.report_id = item_udata(item);
9183 +- if (parser->global.report_id == 0) {
9184 +- hid_err(parser->device, "report_id 0 is invalid\n");
9185 ++ if (parser->global.report_id == 0 ||
9186 ++ parser->global.report_id >= HID_MAX_IDS) {
9187 ++ hid_err(parser->device, "report_id %u is invalid\n",
9188 ++ parser->global.report_id);
9189 + return -1;
9190 + }
9191 + return 0;
9192 +@@ -557,7 +561,7 @@ static void hid_device_release(struct device *dev)
9193 + for (i = 0; i < HID_REPORT_TYPES; i++) {
9194 + struct hid_report_enum *report_enum = device->report_enum + i;
9195 +
9196 +- for (j = 0; j < 256; j++) {
9197 ++ for (j = 0; j < HID_MAX_IDS; j++) {
9198 + struct hid_report *report = report_enum->report_id_hash[j];
9199 + if (report)
9200 + hid_free_report(report);
9201 +@@ -995,7 +999,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
9202 +
9203 + int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
9204 + {
9205 +- unsigned size = field->report_size;
9206 ++ unsigned size;
9207 ++
9208 ++ if (!field)
9209 ++ return -1;
9210 ++
9211 ++ size = field->report_size;
9212 +
9213 + hid_dump_input(field->report->device, field->usage + offset, value);
9214 +
9215 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
9216 +index 14d22399..8cc08e23 100644
9217 +--- a/drivers/hid/hid-ids.h
9218 ++++ b/drivers/hid/hid-ids.h
9219 +@@ -595,6 +595,7 @@
9220 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
9221 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
9222 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
9223 ++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
9224 +
9225 + #define USB_VENDOR_ID_ONTRAK 0x0a07
9226 + #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
9227 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
9228 +index 21e473e7..f03c684e 100644
9229 +--- a/drivers/hid/hid-input.c
9230 ++++ b/drivers/hid/hid-input.c
9231 +@@ -314,7 +314,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
9232 + {
9233 + struct hid_device *dev = container_of(psy, struct hid_device, battery);
9234 + int ret = 0;
9235 +- __u8 buf[2] = {};
9236 ++ __u8 *buf;
9237 +
9238 + switch (prop) {
9239 + case POWER_SUPPLY_PROP_PRESENT:
9240 +@@ -323,13 +323,20 @@ static int hidinput_get_battery_property(struct power_supply *psy,
9241 + break;
9242 +
9243 + case POWER_SUPPLY_PROP_CAPACITY:
9244 ++
9245 ++ buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
9246 ++ if (!buf) {
9247 ++ ret = -ENOMEM;
9248 ++ break;
9249 ++ }
9250 + ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
9251 +- buf, sizeof(buf),
9252 ++ buf, 2,
9253 + dev->battery_report_type);
9254 +
9255 + if (ret != 2) {
9256 + if (ret >= 0)
9257 + ret = -EINVAL;
9258 ++ kfree(buf);
9259 + break;
9260 + }
9261 +
9262 +@@ -338,6 +345,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
9263 + buf[1] <= dev->battery_max)
9264 + val->intval = (100 * (buf[1] - dev->battery_min)) /
9265 + (dev->battery_max - dev->battery_min);
9266 ++ kfree(buf);
9267 + break;
9268 +
9269 + case POWER_SUPPLY_PROP_MODEL_NAME:
9270 +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
9271 +index 9fae2ebd..48cba857 100644
9272 +--- a/drivers/hid/hid-ntrig.c
9273 ++++ b/drivers/hid/hid-ntrig.c
9274 +@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
9275 + struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
9276 + report_id_hash[0x0d];
9277 +
9278 +- if (!report)
9279 ++ if (!report || report->maxfield < 1 ||
9280 ++ report->field[0]->report_count < 1)
9281 + return -EINVAL;
9282 +
9283 + usbhid_submit_report(hdev, report, USB_DIR_IN);
9284 +diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
9285 +index 47ed74c4..00cd2f8b 100644
9286 +--- a/drivers/hid/hid-pl.c
9287 ++++ b/drivers/hid/hid-pl.c
9288 +@@ -129,8 +129,14 @@ static int plff_init(struct hid_device *hid)
9289 + strong = &report->field[0]->value[2];
9290 + weak = &report->field[0]->value[3];
9291 + debug("detected single-field device");
9292 +- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
9293 +- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
9294 ++ } else if (report->field[0]->maxusage == 1 &&
9295 ++ report->field[0]->usage[0].hid ==
9296 ++ (HID_UP_LED | 0x43) &&
9297 ++ report->maxfield >= 4 &&
9298 ++ report->field[0]->report_count >= 1 &&
9299 ++ report->field[1]->report_count >= 1 &&
9300 ++ report->field[2]->report_count >= 1 &&
9301 ++ report->field[3]->report_count >= 1) {
9302 + report->field[0]->value[0] = 0x00;
9303 + report->field[1]->value[0] = 0x00;
9304 + strong = &report->field[2]->value[0];
9305 +diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
9306 +index 60201374..2b03c9ba 100644
9307 +--- a/drivers/hid/hid-speedlink.c
9308 ++++ b/drivers/hid/hid-speedlink.c
9309 +@@ -3,7 +3,7 @@
9310 + * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
9311 + * the HID descriptor.
9312 + *
9313 +- * Copyright (c) 2011 Stefan Kriwanek <mail@××××××××××××××.de>
9314 ++ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@××××××××××××××.de>
9315 + */
9316 +
9317 + /*
9318 +@@ -48,8 +48,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
9319 + struct hid_usage *usage, __s32 value)
9320 + {
9321 + /* No other conditions due to usage_table. */
9322 +- /* Fix "jumpy" cursor (invalid events sent by device). */
9323 +- if (value == 256)
9324 ++
9325 ++ /* This fixes the "jumpy" cursor occuring due to invalid events sent
9326 ++ * by the device. Some devices only send them with value==+256, others
9327 ++ * don't. However, catching abs(value)>=256 is restrictive enough not
9328 ++ * to interfere with devices that were bug-free (has been tested).
9329 ++ */
9330 ++ if (abs(value) >= 256)
9331 + return 1;
9332 + /* Drop useless distance 0 events (on button clicks etc.) as well */
9333 + if (value == 0)
9334 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
9335 +index 5c4112e6..d712294b 100644
9336 +--- a/drivers/hid/usbhid/hid-quirks.c
9337 ++++ b/drivers/hid/usbhid/hid-quirks.c
9338 +@@ -103,6 +103,8 @@ static const struct hid_blacklist {
9339 + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
9340 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
9341 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
9342 ++ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
9343 ++
9344 + { 0, 0 }
9345 + };
9346 +
9347 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
9348 +index 17119247..a60a54d8 100644
9349 +--- a/drivers/iommu/intel-iommu.c
9350 ++++ b/drivers/iommu/intel-iommu.c
9351 +@@ -886,56 +886,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
9352 + return order;
9353 + }
9354 +
9355 ++static void dma_pte_free_level(struct dmar_domain *domain, int level,
9356 ++ struct dma_pte *pte, unsigned long pfn,
9357 ++ unsigned long start_pfn, unsigned long last_pfn)
9358 ++{
9359 ++ pfn = max(start_pfn, pfn);
9360 ++ pte = &pte[pfn_level_offset(pfn, level)];
9361 ++
9362 ++ do {
9363 ++ unsigned long level_pfn;
9364 ++ struct dma_pte *level_pte;
9365 ++
9366 ++ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
9367 ++ goto next;
9368 ++
9369 ++ level_pfn = pfn & level_mask(level - 1);
9370 ++ level_pte = phys_to_virt(dma_pte_addr(pte));
9371 ++
9372 ++ if (level > 2)
9373 ++ dma_pte_free_level(domain, level - 1, level_pte,
9374 ++ level_pfn, start_pfn, last_pfn);
9375 ++
9376 ++ /* If range covers entire pagetable, free it */
9377 ++ if (!(start_pfn > level_pfn ||
9378 ++ last_pfn < level_pfn + level_size(level))) {
9379 ++ dma_clear_pte(pte);
9380 ++ domain_flush_cache(domain, pte, sizeof(*pte));
9381 ++ free_pgtable_page(level_pte);
9382 ++ }
9383 ++next:
9384 ++ pfn += level_size(level);
9385 ++ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
9386 ++}
9387 ++
9388 + /* free page table pages. last level pte should already be cleared */
9389 + static void dma_pte_free_pagetable(struct dmar_domain *domain,
9390 + unsigned long start_pfn,
9391 + unsigned long last_pfn)
9392 + {
9393 + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
9394 +- struct dma_pte *first_pte, *pte;
9395 +- int total = agaw_to_level(domain->agaw);
9396 +- int level;
9397 +- unsigned long tmp;
9398 +- int large_page = 2;
9399 +
9400 + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
9401 + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
9402 + BUG_ON(start_pfn > last_pfn);
9403 +
9404 + /* We don't need lock here; nobody else touches the iova range */
9405 +- level = 2;
9406 +- while (level <= total) {
9407 +- tmp = align_to_level(start_pfn, level);
9408 +-
9409 +- /* If we can't even clear one PTE at this level, we're done */
9410 +- if (tmp + level_size(level) - 1 > last_pfn)
9411 +- return;
9412 +-
9413 +- do {
9414 +- large_page = level;
9415 +- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
9416 +- if (large_page > level)
9417 +- level = large_page + 1;
9418 +- if (!pte) {
9419 +- tmp = align_to_level(tmp + 1, level + 1);
9420 +- continue;
9421 +- }
9422 +- do {
9423 +- if (dma_pte_present(pte)) {
9424 +- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
9425 +- dma_clear_pte(pte);
9426 +- }
9427 +- pte++;
9428 +- tmp += level_size(level);
9429 +- } while (!first_pte_in_page(pte) &&
9430 +- tmp + level_size(level) - 1 <= last_pfn);
9431 ++ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
9432 ++ domain->pgd, 0, start_pfn, last_pfn);
9433 +
9434 +- domain_flush_cache(domain, first_pte,
9435 +- (void *)pte - (void *)first_pte);
9436 +-
9437 +- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
9438 +- level++;
9439 +- }
9440 + /* free pgd */
9441 + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
9442 + free_pgtable_page(domain->pgd);
9443 +diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
9444 +index fff92860..491e9ecc 100644
9445 +--- a/drivers/mmc/host/tmio_mmc_dma.c
9446 ++++ b/drivers/mmc/host/tmio_mmc_dma.c
9447 +@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
9448 + pio:
9449 + if (!desc) {
9450 + /* DMA failed, fall back to PIO */
9451 ++ tmio_mmc_enable_dma(host, false);
9452 + if (ret >= 0)
9453 + ret = -EIO;
9454 + host->chan_rx = NULL;
9455 +@@ -116,7 +117,6 @@ pio:
9456 + }
9457 + dev_warn(&host->pdev->dev,
9458 + "DMA failed: %d, falling back to PIO\n", ret);
9459 +- tmio_mmc_enable_dma(host, false);
9460 + }
9461 +
9462 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
9463 +@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
9464 + pio:
9465 + if (!desc) {
9466 + /* DMA failed, fall back to PIO */
9467 ++ tmio_mmc_enable_dma(host, false);
9468 + if (ret >= 0)
9469 + ret = -EIO;
9470 + host->chan_tx = NULL;
9471 +@@ -197,7 +198,6 @@ pio:
9472 + }
9473 + dev_warn(&host->pdev->dev,
9474 + "DMA failed: %d, falling back to PIO\n", ret);
9475 +- tmio_mmc_enable_dma(host, false);
9476 + }
9477 +
9478 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
9479 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
9480 +index f86ee0c7..503ff9f6 100644
9481 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
9482 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
9483 +@@ -1030,6 +1030,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
9484 + * is_on == 0 means MRC CCK is OFF (more noise imm)
9485 + */
9486 + bool is_on = param ? 1 : 0;
9487 ++
9488 ++ if (ah->caps.rx_chainmask == 1)
9489 ++ break;
9490 ++
9491 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
9492 + AR_PHY_MRC_CCK_ENABLE, is_on);
9493 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
9494 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
9495 +index 4bfb44a0..e2ab182d 100644
9496 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
9497 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
9498 +@@ -78,10 +78,6 @@ struct ath_config {
9499 + sizeof(struct ath_buf_state)); \
9500 + } while (0)
9501 +
9502 +-#define ATH_RXBUF_RESET(_bf) do { \
9503 +- (_bf)->bf_stale = false; \
9504 +- } while (0)
9505 +-
9506 + /**
9507 + * enum buffer_type - Buffer type flags
9508 + *
9509 +@@ -314,6 +310,7 @@ struct ath_rx {
9510 + struct ath_buf *rx_bufptr;
9511 + struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
9512 +
9513 ++ struct ath_buf *buf_hold;
9514 + struct sk_buff *frag;
9515 + };
9516 +
9517 +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
9518 +index 039bac7e..2e6583d3 100644
9519 +--- a/drivers/net/wireless/ath/ath9k/recv.c
9520 ++++ b/drivers/net/wireless/ath/ath9k/recv.c
9521 +@@ -78,8 +78,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
9522 + struct ath_desc *ds;
9523 + struct sk_buff *skb;
9524 +
9525 +- ATH_RXBUF_RESET(bf);
9526 +-
9527 + ds = bf->bf_desc;
9528 + ds->ds_link = 0; /* link to null */
9529 + ds->ds_data = bf->bf_buf_addr;
9530 +@@ -106,6 +104,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
9531 + sc->rx.rxlink = &ds->ds_link;
9532 + }
9533 +
9534 ++static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
9535 ++{
9536 ++ if (sc->rx.buf_hold)
9537 ++ ath_rx_buf_link(sc, sc->rx.buf_hold);
9538 ++
9539 ++ sc->rx.buf_hold = bf;
9540 ++}
9541 ++
9542 + static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
9543 + {
9544 + /* XXX block beacon interrupts */
9545 +@@ -153,7 +159,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
9546 +
9547 + skb = bf->bf_mpdu;
9548 +
9549 +- ATH_RXBUF_RESET(bf);
9550 + memset(skb->data, 0, ah->caps.rx_status_len);
9551 + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
9552 + ah->caps.rx_status_len, DMA_TO_DEVICE);
9553 +@@ -485,6 +490,7 @@ int ath_startrecv(struct ath_softc *sc)
9554 + if (list_empty(&sc->rx.rxbuf))
9555 + goto start_recv;
9556 +
9557 ++ sc->rx.buf_hold = NULL;
9558 + sc->rx.rxlink = NULL;
9559 + list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
9560 + ath_rx_buf_link(sc, bf);
9561 +@@ -734,6 +740,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
9562 + }
9563 +
9564 + bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
9565 ++ if (bf == sc->rx.buf_hold)
9566 ++ return NULL;
9567 ++
9568 + ds = bf->bf_desc;
9569 +
9570 + /*
9571 +@@ -1974,7 +1983,7 @@ requeue:
9572 + if (edma) {
9573 + ath_rx_edma_buf_link(sc, qtype);
9574 + } else {
9575 +- ath_rx_buf_link(sc, bf);
9576 ++ ath_rx_buf_relink(sc, bf);
9577 + ath9k_hw_rxena(ah);
9578 + }
9579 + } while (1);
9580 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
9581 +index 12a42f2c..3d0aa472 100644
9582 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
9583 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
9584 +@@ -2479,6 +2479,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
9585 + for (acno = 0, ac = &an->ac[acno];
9586 + acno < WME_NUM_AC; acno++, ac++) {
9587 + ac->sched = false;
9588 ++ ac->clear_ps_filter = true;
9589 + ac->txq = sc->tx.txq_map[acno];
9590 + INIT_LIST_HEAD(&ac->tid_q);
9591 + }
9592 +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
9593 +index 11054ae9..9a184058 100644
9594 +--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
9595 ++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
9596 +@@ -1013,9 +1013,10 @@ static bool dma64_rxidle(struct dma_info *di)
9597 +
9598 + /*
9599 + * post receive buffers
9600 +- * return false is refill failed completely and ring is empty this will stall
9601 +- * the rx dma and user might want to call rxfill again asap. This unlikely
9602 +- * happens on memory-rich NIC, but often on memory-constrained dongle
9603 ++ * Return false if refill failed completely or dma mapping failed. The ring
9604 ++ * is empty, which will stall the rx dma and user might want to call rxfill
9605 ++ * again asap. This is unlikely to happen on a memory-rich NIC, but often on
9606 ++ * memory-constrained dongle.
9607 + */
9608 + bool dma_rxfill(struct dma_pub *pub)
9609 + {
9610 +@@ -1074,6 +1075,8 @@ bool dma_rxfill(struct dma_pub *pub)
9611 +
9612 + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
9613 + DMA_FROM_DEVICE);
9614 ++ if (dma_mapping_error(di->dmadev, pa))
9615 ++ return false;
9616 +
9617 + /* save the free packet pointer */
9618 + di->rxp[rxout] = p;
9619 +@@ -1294,7 +1297,11 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
9620 +
9621 + /* get physical address of buffer start */
9622 + pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
9623 +-
9624 ++ /* if mapping failed, free skb */
9625 ++ if (dma_mapping_error(di->dmadev, pa)) {
9626 ++ brcmu_pkt_buf_free_skb(p);
9627 ++ return;
9628 ++ }
9629 + /* With a DMA segment list, Descriptor table is filled
9630 + * using the segment list instead of looping over
9631 + * buffers in multi-chain DMA. Therefore, EOF for SGLIST
9632 +diff --git a/drivers/of/base.c b/drivers/of/base.c
9633 +index 58064498..1c207f23 100644
9634 +--- a/drivers/of/base.c
9635 ++++ b/drivers/of/base.c
9636 +@@ -1227,6 +1227,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
9637 + ap = dt_alloc(sizeof(*ap) + len + 1, 4);
9638 + if (!ap)
9639 + continue;
9640 ++ memset(ap, 0, sizeof(*ap) + len + 1);
9641 + ap->alias = start;
9642 + of_alias_add(ap, np, id, start, len);
9643 + }
9644 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
9645 +index 105fff2e..05973a49 100644
9646 +--- a/drivers/scsi/sd.c
9647 ++++ b/drivers/scsi/sd.c
9648 +@@ -2225,14 +2225,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
9649 + }
9650 + }
9651 +
9652 +- if (modepage == 0x3F) {
9653 +- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
9654 +- "present\n");
9655 +- goto defaults;
9656 +- } else if ((buffer[offset] & 0x3f) != modepage) {
9657 +- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
9658 +- goto defaults;
9659 +- }
9660 ++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
9661 ++ goto defaults;
9662 ++
9663 + Page_found:
9664 + if (modepage == 8) {
9665 + sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
9666 +diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
9667 +index 95ebc267..e3adb382 100644
9668 +--- a/drivers/staging/comedi/drivers/dt282x.c
9669 ++++ b/drivers/staging/comedi/drivers/dt282x.c
9670 +@@ -407,8 +407,9 @@ struct dt282x_private {
9671 + } \
9672 + udelay(5); \
9673 + } \
9674 +- if (_i) \
9675 ++ if (_i) { \
9676 + b \
9677 ++ } \
9678 + } while (0)
9679 +
9680 + static int dt282x_attach(struct comedi_device *dev,
9681 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
9682 +index 9dd51f7f..1434ee9e 100644
9683 +--- a/drivers/usb/class/cdc-wdm.c
9684 ++++ b/drivers/usb/class/cdc-wdm.c
9685 +@@ -233,6 +233,7 @@ skip_error:
9686 + static void wdm_int_callback(struct urb *urb)
9687 + {
9688 + int rv = 0;
9689 ++ int responding;
9690 + int status = urb->status;
9691 + struct wdm_device *desc;
9692 + struct usb_cdc_notification *dr;
9693 +@@ -286,8 +287,8 @@ static void wdm_int_callback(struct urb *urb)
9694 +
9695 + spin_lock(&desc->iuspin);
9696 + clear_bit(WDM_READ, &desc->flags);
9697 +- set_bit(WDM_RESPONDING, &desc->flags);
9698 +- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
9699 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
9700 ++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
9701 + && !test_bit(WDM_SUSPENDING, &desc->flags)) {
9702 + rv = usb_submit_urb(desc->response, GFP_ATOMIC);
9703 + dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
9704 +@@ -687,16 +688,20 @@ static void wdm_rxwork(struct work_struct *work)
9705 + {
9706 + struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
9707 + unsigned long flags;
9708 +- int rv;
9709 ++ int rv = 0;
9710 ++ int responding;
9711 +
9712 + spin_lock_irqsave(&desc->iuspin, flags);
9713 + if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
9714 + spin_unlock_irqrestore(&desc->iuspin, flags);
9715 + } else {
9716 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
9717 + spin_unlock_irqrestore(&desc->iuspin, flags);
9718 +- rv = usb_submit_urb(desc->response, GFP_KERNEL);
9719 ++ if (!responding)
9720 ++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
9721 + if (rv < 0 && rv != -EPERM) {
9722 + spin_lock_irqsave(&desc->iuspin, flags);
9723 ++ clear_bit(WDM_RESPONDING, &desc->flags);
9724 + if (!test_bit(WDM_DISCONNECTING, &desc->flags))
9725 + schedule_work(&desc->rxwork);
9726 + spin_unlock_irqrestore(&desc->iuspin, flags);
9727 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
9728 +index f4bdd0ce..78609d30 100644
9729 +--- a/drivers/usb/core/config.c
9730 ++++ b/drivers/usb/core/config.c
9731 +@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
9732 +
9733 + memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
9734 + if (config->desc.bDescriptorType != USB_DT_CONFIG ||
9735 +- config->desc.bLength < USB_DT_CONFIG_SIZE) {
9736 ++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
9737 ++ config->desc.bLength > size) {
9738 + dev_err(ddev, "invalid descriptor for config index %d: "
9739 + "type = 0x%X, length = %d\n", cfgidx,
9740 + config->desc.bDescriptorType, config->desc.bLength);
9741 +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
9742 +index a797d51e..77477ca5 100644
9743 +--- a/drivers/usb/host/ehci-mxc.c
9744 ++++ b/drivers/usb/host/ehci-mxc.c
9745 +@@ -298,7 +298,7 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
9746 + if (pdata && pdata->exit)
9747 + pdata->exit(pdev);
9748 +
9749 +- if (pdata->otg)
9750 ++ if (pdata && pdata->otg)
9751 + usb_phy_shutdown(pdata->otg);
9752 +
9753 + usb_remove_hcd(hcd);
9754 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
9755 +index 93ad67ec..6e70ce97 100644
9756 +--- a/drivers/usb/host/xhci-plat.c
9757 ++++ b/drivers/usb/host/xhci-plat.c
9758 +@@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
9759 + * here that the generic code does not try to make a pci_dev from our
9760 + * dev struct in order to setup MSI
9761 + */
9762 +- xhci->quirks |= XHCI_BROKEN_MSI;
9763 ++ xhci->quirks |= XHCI_PLAT;
9764 + }
9765 +
9766 + /* called during probe() after chip reset completes */
9767 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
9768 +index 8072a932..1504946c 100644
9769 +--- a/drivers/usb/host/xhci.c
9770 ++++ b/drivers/usb/host/xhci.c
9771 +@@ -342,9 +342,14 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
9772 + static int xhci_try_enable_msi(struct usb_hcd *hcd)
9773 + {
9774 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
9775 +- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
9776 ++ struct pci_dev *pdev;
9777 + int ret;
9778 +
9779 ++ /* The xhci platform device has set up IRQs through usb_add_hcd. */
9780 ++ if (xhci->quirks & XHCI_PLAT)
9781 ++ return 0;
9782 ++
9783 ++ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
9784 + /*
9785 + * Some Fresco Logic host controllers advertise MSI, but fail to
9786 + * generate interrupts. Don't even try to enable MSI.
9787 +@@ -3496,10 +3501,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
9788 + {
9789 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
9790 + struct xhci_virt_device *virt_dev;
9791 ++ struct device *dev = hcd->self.controller;
9792 + unsigned long flags;
9793 + u32 state;
9794 + int i, ret;
9795 +
9796 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
9797 ++ /*
9798 ++ * We called pm_runtime_get_noresume when the device was attached.
9799 ++ * Decrement the counter here to allow controller to runtime suspend
9800 ++ * if no devices remain.
9801 ++ */
9802 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
9803 ++ pm_runtime_put_noidle(dev);
9804 ++#endif
9805 ++
9806 + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
9807 + /* If the host is halted due to driver unload, we still need to free the
9808 + * device.
9809 +@@ -3571,6 +3587,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
9810 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
9811 + {
9812 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
9813 ++ struct device *dev = hcd->self.controller;
9814 + unsigned long flags;
9815 + int timeleft;
9816 + int ret;
9817 +@@ -3623,6 +3640,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
9818 + goto disable_slot;
9819 + }
9820 + udev->slot_id = xhci->slot_id;
9821 ++
9822 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
9823 ++ /*
9824 ++ * If resetting upon resume, we can't put the controller into runtime
9825 ++ * suspend if there is a device attached.
9826 ++ */
9827 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
9828 ++ pm_runtime_get_noresume(dev);
9829 ++#endif
9830 ++
9831 + /* Is this a LS or FS device under a HS hub? */
9832 + /* Hub or peripherial? */
9833 + return 1;
9834 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
9835 +index 15aaf58c..a54a408c 100644
9836 +--- a/drivers/usb/host/xhci.h
9837 ++++ b/drivers/usb/host/xhci.h
9838 +@@ -1508,6 +1508,7 @@ struct xhci_hcd {
9839 + #define XHCI_SPURIOUS_REBOOT (1 << 13)
9840 + #define XHCI_COMP_MODE_QUIRK (1 << 14)
9841 + #define XHCI_AVOID_BEI (1 << 15)
9842 ++#define XHCI_PLAT (1 << 16)
9843 + unsigned int num_active_eps;
9844 + unsigned int limit_active_eps;
9845 + /* There are two roothubs to keep track of bus suspend info for */
9846 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
9847 +index cdde45de..4491830b 100644
9848 +--- a/drivers/usb/serial/mos7720.c
9849 ++++ b/drivers/usb/serial/mos7720.c
9850 +@@ -383,7 +383,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
9851 + kfree(urbtrack);
9852 + return -ENOMEM;
9853 + }
9854 +- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
9855 ++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
9856 + if (!urbtrack->setup) {
9857 + usb_free_urb(urbtrack->urb);
9858 + kfree(urbtrack);
9859 +@@ -391,8 +391,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
9860 + }
9861 + urbtrack->setup->bRequestType = (__u8)0x40;
9862 + urbtrack->setup->bRequest = (__u8)0x0e;
9863 +- urbtrack->setup->wValue = get_reg_value(reg, dummy);
9864 +- urbtrack->setup->wIndex = get_reg_index(reg);
9865 ++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
9866 ++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
9867 + urbtrack->setup->wLength = 0;
9868 + usb_fill_control_urb(urbtrack->urb, usbdev,
9869 + usb_sndctrlpipe(usbdev, 0),
9870 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
9871 +index 7e34beed..3275bde6 100644
9872 +--- a/drivers/xen/grant-table.c
9873 ++++ b/drivers/xen/grant-table.c
9874 +@@ -641,9 +641,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
9875 + void (*fn)(void *), void *arg, u16 count)
9876 + {
9877 + unsigned long flags;
9878 ++ struct gnttab_free_callback *cb;
9879 ++
9880 + spin_lock_irqsave(&gnttab_list_lock, flags);
9881 +- if (callback->next)
9882 +- goto out;
9883 ++
9884 ++ /* Check if the callback is already on the list */
9885 ++ cb = gnttab_free_callback_list;
9886 ++ while (cb) {
9887 ++ if (cb == callback)
9888 ++ goto out;
9889 ++ cb = cb->next;
9890 ++ }
9891 ++
9892 + callback->fn = fn;
9893 + callback->arg = arg;
9894 + callback->count = count;
9895 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
9896 +index e7fe81d3..4ac06b08 100644
9897 +--- a/fs/cifs/connect.c
9898 ++++ b/fs/cifs/connect.c
9899 +@@ -362,6 +362,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
9900 + try_to_freeze();
9901 +
9902 + /* we should try only the port we connected to before */
9903 ++ mutex_lock(&server->srv_mutex);
9904 + rc = generic_ip_connect(server);
9905 + if (rc) {
9906 + cFYI(1, "reconnect error %d", rc);
9907 +@@ -373,6 +374,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
9908 + server->tcpStatus = CifsNeedNegotiate;
9909 + spin_unlock(&GlobalMid_Lock);
9910 + }
9911 ++ mutex_unlock(&server->srv_mutex);
9912 + } while (server->tcpStatus == CifsNeedReconnect);
9913 +
9914 + return rc;
9915 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
9916 +index d48478a8..373b2514 100644
9917 +--- a/fs/fuse/dir.c
9918 ++++ b/fs/fuse/dir.c
9919 +@@ -1503,6 +1503,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
9920 + fc->no_setxattr = 1;
9921 + err = -EOPNOTSUPP;
9922 + }
9923 ++ if (!err)
9924 ++ fuse_invalidate_attr(inode);
9925 + return err;
9926 + }
9927 +
9928 +@@ -1632,6 +1634,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
9929 + fc->no_removexattr = 1;
9930 + err = -EOPNOTSUPP;
9931 + }
9932 ++ if (!err)
9933 ++ fuse_invalidate_attr(inode);
9934 + return err;
9935 + }
9936 +
9937 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
9938 +index 8e6381a1..df25454e 100644
9939 +--- a/fs/fuse/file.c
9940 ++++ b/fs/fuse/file.c
9941 +@@ -1294,7 +1294,6 @@ static int fuse_writepage_locked(struct page *page)
9942 +
9943 + inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
9944 + inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
9945 +- end_page_writeback(page);
9946 +
9947 + spin_lock(&fc->lock);
9948 + list_add(&req->writepages_entry, &fi->writepages);
9949 +@@ -1302,6 +1301,8 @@ static int fuse_writepage_locked(struct page *page)
9950 + fuse_flush_writepages(inode);
9951 + spin_unlock(&fc->lock);
9952 +
9953 ++ end_page_writeback(page);
9954 ++
9955 + return 0;
9956 +
9957 + err_free:
9958 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
9959 +index 29037c36..e92a342f 100644
9960 +--- a/fs/isofs/inode.c
9961 ++++ b/fs/isofs/inode.c
9962 +@@ -119,8 +119,8 @@ static void destroy_inodecache(void)
9963 +
9964 + static int isofs_remount(struct super_block *sb, int *flags, char *data)
9965 + {
9966 +- /* we probably want a lot more here */
9967 +- *flags |= MS_RDONLY;
9968 ++ if (!(*flags & MS_RDONLY))
9969 ++ return -EROFS;
9970 + return 0;
9971 + }
9972 +
9973 +@@ -769,15 +769,6 @@ root_found:
9974 + */
9975 + s->s_maxbytes = 0x80000000000LL;
9976 +
9977 +- /*
9978 +- * The CDROM is read-only, has no nodes (devices) on it, and since
9979 +- * all of the files appear to be owned by root, we really do not want
9980 +- * to allow suid. (suid or devices will not show up unless we have
9981 +- * Rock Ridge extensions)
9982 +- */
9983 +-
9984 +- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
9985 +-
9986 + /* Set this for reference. Its not currently used except on write
9987 + which we don't have .. */
9988 +
9989 +@@ -1536,6 +1527,9 @@ struct inode *isofs_iget(struct super_block *sb,
9990 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
9991 + int flags, const char *dev_name, void *data)
9992 + {
9993 ++ /* We don't support read-write mounts */
9994 ++ if (!(flags & MS_RDONLY))
9995 ++ return ERR_PTR(-EACCES);
9996 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
9997 + }
9998 +
9999 +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
10000 +index 7eb1c0c7..cf228479 100644
10001 +--- a/fs/ocfs2/extent_map.c
10002 ++++ b/fs/ocfs2/extent_map.c
10003 +@@ -782,7 +782,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
10004 + cpos = map_start >> osb->s_clustersize_bits;
10005 + mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
10006 + map_start + map_len);
10007 +- mapping_end -= cpos;
10008 + is_last = 0;
10009 + while (cpos < mapping_end && !is_last) {
10010 + u32 fe_flags;
10011 +diff --git a/include/linux/hid.h b/include/linux/hid.h
10012 +index 3a95da60..8c933a86 100644
10013 +--- a/include/linux/hid.h
10014 ++++ b/include/linux/hid.h
10015 +@@ -420,10 +420,12 @@ struct hid_report {
10016 + struct hid_device *device; /* associated device */
10017 + };
10018 +
10019 ++#define HID_MAX_IDS 256
10020 ++
10021 + struct hid_report_enum {
10022 + unsigned numbered;
10023 + struct list_head report_list;
10024 +- struct hid_report *report_id_hash[256];
10025 ++ struct hid_report *report_id_hash[HID_MAX_IDS];
10026 + };
10027 +
10028 + #define HID_REPORT_TYPES 3
10029 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
10030 +index 6f95e241..38633526 100644
10031 +--- a/include/linux/rculist.h
10032 ++++ b/include/linux/rculist.h
10033 +@@ -254,8 +254,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
10034 + */
10035 + #define list_first_or_null_rcu(ptr, type, member) \
10036 + ({struct list_head *__ptr = (ptr); \
10037 +- struct list_head __rcu *__next = list_next_rcu(__ptr); \
10038 +- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
10039 ++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
10040 ++ likely(__ptr != __next) ? \
10041 ++ list_entry_rcu(__next, type, member) : NULL; \
10042 + })
10043 +
10044 + /**
10045 +diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
10046 +index 11e67562..ab45ea5b 100644
10047 +--- a/include/media/v4l2-ctrls.h
10048 ++++ b/include/media/v4l2-ctrls.h
10049 +@@ -22,6 +22,7 @@
10050 + #define _V4L2_CTRLS_H
10051 +
10052 + #include <linux/list.h>
10053 ++#include <linux/mutex.h>
10054 + #include <linux/videodev2.h>
10055 +
10056 + /* forward references */
10057 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
10058 +index ef99c15f..3da5c0bf 100644
10059 +--- a/mm/huge_memory.c
10060 ++++ b/mm/huge_memory.c
10061 +@@ -1894,6 +1894,8 @@ static void collapse_huge_page(struct mm_struct *mm,
10062 + goto out;
10063 +
10064 + vma = find_vma(mm, address);
10065 ++ if (!vma)
10066 ++ goto out;
10067 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
10068 + hend = vma->vm_end & HPAGE_PMD_MASK;
10069 + if (address < hstart || address + HPAGE_PMD_SIZE > hend)
10070 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
10071 +index 81c275b3..9c364428 100644
10072 +--- a/mm/memcontrol.c
10073 ++++ b/mm/memcontrol.c
10074 +@@ -4349,7 +4349,13 @@ static int compare_thresholds(const void *a, const void *b)
10075 + const struct mem_cgroup_threshold *_a = a;
10076 + const struct mem_cgroup_threshold *_b = b;
10077 +
10078 +- return _a->threshold - _b->threshold;
10079 ++ if (_a->threshold > _b->threshold)
10080 ++ return 1;
10081 ++
10082 ++ if (_a->threshold < _b->threshold)
10083 ++ return -1;
10084 ++
10085 ++ return 0;
10086 + }
10087 +
10088 + static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
10089 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
10090 +index 1f350522..e6083c14 100644
10091 +--- a/sound/pci/hda/hda_intel.c
10092 ++++ b/sound/pci/hda/hda_intel.c
10093 +@@ -2602,6 +2602,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
10094 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
10095 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
10096 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
10097 ++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
10098 + SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
10099 + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
10100 + {}
10101 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
10102 +index 840d7208..ddb0d904 100644
10103 +--- a/sound/soc/codecs/wm8960.c
10104 ++++ b/sound/soc/codecs/wm8960.c
10105 +@@ -790,9 +790,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
10106 + if (pll_div.k) {
10107 + reg |= 0x20;
10108 +
10109 +- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
10110 +- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
10111 +- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
10112 ++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
10113 ++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
10114 ++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
10115 + }
10116 + snd_soc_write(codec, WM8960_PLL1, reg);
10117 +