From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2718 - genpatches-2.6/trunk/3.13
Date: Mon, 31 Mar 2014 17:53:22
Message-Id: 20140331175318.9F3282005C@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2014-03-31 17:53:18 +0000 (Mon, 31 Mar 2014)
3 New Revision: 2718
4
5 Added:
6 genpatches-2.6/trunk/3.13/1007_linux-3.13.8.patch
7 Modified:
8 genpatches-2.6/trunk/3.13/0000_README
9 Log:
10 Linux patch 3.13.8
11
12 Modified: genpatches-2.6/trunk/3.13/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/3.13/0000_README 2014-03-31 12:04:13 UTC (rev 2717)
15 +++ genpatches-2.6/trunk/3.13/0000_README 2014-03-31 17:53:18 UTC (rev 2718)
16 @@ -70,6 +70,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 3.13.7
19
20 +Patch: 1007_linux-3.13.8.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 3.13.8
23 +
24 Patch: 1500_XATTR_USER_PREFIX.patch
25 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
26 Desc: Support for namespace user.pax.* on tmpfs.
27
28 Added: genpatches-2.6/trunk/3.13/1007_linux-3.13.8.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/3.13/1007_linux-3.13.8.patch (rev 0)
31 +++ genpatches-2.6/trunk/3.13/1007_linux-3.13.8.patch 2014-03-31 17:53:18 UTC (rev 2718)
32 @@ -0,0 +1,2669 @@
33 +diff --git a/Makefile b/Makefile
34 +index 9f214b4ed73c..4cab13b605c1 100644
35 +--- a/Makefile
36 ++++ b/Makefile
37 +@@ -1,6 +1,6 @@
38 + VERSION = 3
39 + PATCHLEVEL = 13
40 +-SUBLEVEL = 7
41 ++SUBLEVEL = 8
42 + EXTRAVERSION =
43 + NAME = One Giant Leap for Frogkind
44 +
45 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
46 +index 3e8f106ee5fe..ac1d883460c7 100644
47 +--- a/arch/arm/mm/init.c
48 ++++ b/arch/arm/mm/init.c
49 +@@ -345,10 +345,11 @@ void __init arm_memblock_init(struct meminfo *mi,
50 + #endif
51 + #ifdef CONFIG_BLK_DEV_INITRD
52 + /* FDT scan will populate initrd_start */
53 +- if (initrd_start) {
54 ++ if (initrd_start && !phys_initrd_size) {
55 + phys_initrd_start = __virt_to_phys(initrd_start);
56 + phys_initrd_size = initrd_end - initrd_start;
57 + }
58 ++ initrd_start = initrd_end = 0;
59 + if (phys_initrd_size &&
60 + !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
61 + pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
62 +diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
63 +index 81c89132c59d..33e8dbfc1b63 100644
64 +--- a/arch/mips/include/asm/syscall.h
65 ++++ b/arch/mips/include/asm/syscall.h
66 +@@ -29,7 +29,7 @@ static inline long syscall_get_nr(struct task_struct *task,
67 + static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
68 + struct task_struct *task, struct pt_regs *regs, unsigned int n)
69 + {
70 +- unsigned long usp = regs->regs[29];
71 ++ unsigned long usp __maybe_unused = regs->regs[29];
72 +
73 + switch (n) {
74 + case 0: case 1: case 2: case 3:
75 +diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
76 +index d3e5e9bc8f94..e37db7f2a5fa 100644
77 +--- a/arch/powerpc/include/asm/eeh.h
78 ++++ b/arch/powerpc/include/asm/eeh.h
79 +@@ -117,6 +117,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
80 + return edev ? edev->pdev : NULL;
81 + }
82 +
83 ++/* Return values from eeh_ops::next_error */
84 ++enum {
85 ++ EEH_NEXT_ERR_NONE = 0,
86 ++ EEH_NEXT_ERR_INF,
87 ++ EEH_NEXT_ERR_FROZEN_PE,
88 ++ EEH_NEXT_ERR_FENCED_PHB,
89 ++ EEH_NEXT_ERR_DEAD_PHB,
90 ++ EEH_NEXT_ERR_DEAD_IOC
91 ++};
92 ++
93 + /*
94 + * The struct is used to trace the registered EEH operation
95 + * callback functions. Actually, those operation callback
96 +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
97 +index 36bed5a12750..d3a132c9127c 100644
98 +--- a/arch/powerpc/kernel/eeh_driver.c
99 ++++ b/arch/powerpc/kernel/eeh_driver.c
100 +@@ -626,84 +626,90 @@ static void eeh_handle_special_event(void)
101 + {
102 + struct eeh_pe *pe, *phb_pe;
103 + struct pci_bus *bus;
104 +- struct pci_controller *hose, *tmp;
105 ++ struct pci_controller *hose;
106 + unsigned long flags;
107 +- int rc = 0;
108 ++ int rc;
109 +
110 +- /*
111 +- * The return value from next_error() has been classified as follows.
112 +- * It might be good to enumerate them. However, next_error() is only
113 +- * supported by PowerNV platform for now. So it would be fine to use
114 +- * integer directly:
115 +- *
116 +- * 4 - Dead IOC 3 - Dead PHB
117 +- * 2 - Fenced PHB 1 - Frozen PE
118 +- * 0 - No error found
119 +- *
120 +- */
121 +- rc = eeh_ops->next_error(&pe);
122 +- if (rc <= 0)
123 +- return;
124 +
125 +- switch (rc) {
126 +- case 4:
127 +- /* Mark all PHBs in dead state */
128 +- eeh_serialize_lock(&flags);
129 +- list_for_each_entry_safe(hose, tmp,
130 +- &hose_list, list_node) {
131 +- phb_pe = eeh_phb_pe_get(hose);
132 +- if (!phb_pe) continue;
133 +-
134 +- eeh_pe_state_mark(phb_pe,
135 +- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
136 ++ do {
137 ++ rc = eeh_ops->next_error(&pe);
138 ++
139 ++ switch (rc) {
140 ++ case EEH_NEXT_ERR_DEAD_IOC:
141 ++ /* Mark all PHBs in dead state */
142 ++ eeh_serialize_lock(&flags);
143 ++
144 ++ /* Purge all events */
145 ++ eeh_remove_event(NULL);
146 ++
147 ++ list_for_each_entry(hose, &hose_list, list_node) {
148 ++ phb_pe = eeh_phb_pe_get(hose);
149 ++ if (!phb_pe) continue;
150 ++
151 ++ eeh_pe_state_mark(phb_pe,
152 ++ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
153 ++ }
154 ++
155 ++ eeh_serialize_unlock(flags);
156 ++
157 ++ break;
158 ++ case EEH_NEXT_ERR_FROZEN_PE:
159 ++ case EEH_NEXT_ERR_FENCED_PHB:
160 ++ case EEH_NEXT_ERR_DEAD_PHB:
161 ++ /* Mark the PE in fenced state */
162 ++ eeh_serialize_lock(&flags);
163 ++
164 ++ /* Purge all events of the PHB */
165 ++ eeh_remove_event(pe);
166 ++
167 ++ if (rc == EEH_NEXT_ERR_DEAD_PHB)
168 ++ eeh_pe_state_mark(pe,
169 ++ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
170 ++ else
171 ++ eeh_pe_state_mark(pe,
172 ++ EEH_PE_ISOLATED | EEH_PE_RECOVERING);
173 ++
174 ++ eeh_serialize_unlock(flags);
175 ++
176 ++ break;
177 ++ case EEH_NEXT_ERR_NONE:
178 ++ return;
179 ++ default:
180 ++ pr_warn("%s: Invalid value %d from next_error()\n",
181 ++ __func__, rc);
182 ++ return;
183 + }
184 +- eeh_serialize_unlock(flags);
185 +-
186 +- /* Purge all events */
187 +- eeh_remove_event(NULL);
188 +- break;
189 +- case 3:
190 +- case 2:
191 +- case 1:
192 +- /* Mark the PE in fenced state */
193 +- eeh_serialize_lock(&flags);
194 +- if (rc == 3)
195 +- eeh_pe_state_mark(pe,
196 +- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
197 +- else
198 +- eeh_pe_state_mark(pe,
199 +- EEH_PE_ISOLATED | EEH_PE_RECOVERING);
200 +- eeh_serialize_unlock(flags);
201 +-
202 +- /* Purge all events of the PHB */
203 +- eeh_remove_event(pe);
204 +- break;
205 +- default:
206 +- pr_err("%s: Invalid value %d from next_error()\n",
207 +- __func__, rc);
208 +- return;
209 +- }
210 +
211 +- /*
212 +- * For fenced PHB and frozen PE, it's handled as normal
213 +- * event. We have to remove the affected PHBs for dead
214 +- * PHB and IOC
215 +- */
216 +- if (rc == 2 || rc == 1)
217 +- eeh_handle_normal_event(pe);
218 +- else {
219 +- list_for_each_entry_safe(hose, tmp,
220 +- &hose_list, list_node) {
221 +- phb_pe = eeh_phb_pe_get(hose);
222 +- if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
223 +- continue;
224 +-
225 +- bus = eeh_pe_bus_get(phb_pe);
226 +- /* Notify all devices that they're about to go down. */
227 +- eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
228 +- pcibios_remove_pci_devices(bus);
229 ++ /*
230 ++ * For fenced PHB and frozen PE, it's handled as normal
231 ++ * event. We have to remove the affected PHBs for dead
232 ++ * PHB and IOC
233 ++ */
234 ++ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
235 ++ rc == EEH_NEXT_ERR_FENCED_PHB) {
236 ++ eeh_handle_normal_event(pe);
237 ++ } else {
238 ++ list_for_each_entry(hose, &hose_list, list_node) {
239 ++ phb_pe = eeh_phb_pe_get(hose);
240 ++ if (!phb_pe ||
241 ++ !(phb_pe->state & EEH_PE_PHB_DEAD))
242 ++ continue;
243 ++
244 ++ /* Notify all devices to be down */
245 ++ bus = eeh_pe_bus_get(phb_pe);
246 ++ eeh_pe_dev_traverse(pe,
247 ++ eeh_report_failure, NULL);
248 ++ pcibios_remove_pci_devices(bus);
249 ++ }
250 + }
251 +- }
252 ++
253 ++ /*
254 ++ * If we have detected dead IOC, we needn't proceed
255 ++ * any more since all PHBs would have been removed
256 ++ */
257 ++ if (rc == EEH_NEXT_ERR_DEAD_IOC)
258 ++ break;
259 ++ } while (rc != EEH_NEXT_ERR_NONE);
260 + }
261 +
262 + /**
263 +diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
264 +index 9fd23ce4a04a..27e5f02a45a0 100644
265 +--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
266 ++++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
267 +@@ -114,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
268 + ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
269 + #endif /* CONFIG_DEBUG_FS */
270 +
271 ++
272 + /**
273 + * ioda_eeh_post_init - Chip dependent post initialization
274 + * @hose: PCI controller
275 +@@ -221,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
276 + return ret;
277 + }
278 +
279 ++static void ioda_eeh_phb_diag(struct pci_controller *hose)
280 ++{
281 ++ struct pnv_phb *phb = hose->private_data;
282 ++ long rc;
283 ++
284 ++ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
285 ++ PNV_PCI_DIAG_BUF_SIZE);
286 ++ if (rc != OPAL_SUCCESS) {
287 ++ pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
288 ++ __func__, hose->global_number, rc);
289 ++ return;
290 ++ }
291 ++
292 ++ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
293 ++}
294 ++
295 + /**
296 + * ioda_eeh_get_state - Retrieve the state of PE
297 + * @pe: EEH PE
298 +@@ -272,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
299 + result |= EEH_STATE_DMA_ACTIVE;
300 + result |= EEH_STATE_MMIO_ENABLED;
301 + result |= EEH_STATE_DMA_ENABLED;
302 ++ } else if (!(pe->state & EEH_PE_ISOLATED)) {
303 ++ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
304 ++ ioda_eeh_phb_diag(hose);
305 + }
306 +
307 + return result;
308 +@@ -315,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
309 + __func__, fstate, hose->global_number, pe_no);
310 + }
311 +
312 ++ /* Dump PHB diag-data for frozen PE */
313 ++ if (result != EEH_STATE_NOT_SUPPORT &&
314 ++ (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
315 ++ (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
316 ++ !(pe->state & EEH_PE_ISOLATED)) {
317 ++ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
318 ++ ioda_eeh_phb_diag(hose);
319 ++ }
320 ++
321 + return result;
322 + }
323 +
324 +@@ -530,45 +559,6 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
325 + }
326 +
327 + /**
328 +- * ioda_eeh_get_log - Retrieve error log
329 +- * @pe: EEH PE
330 +- * @severity: Severity level of the log
331 +- * @drv_log: buffer to store the log
332 +- * @len: space of the log buffer
333 +- *
334 +- * The function is used to retrieve error log from P7IOC.
335 +- */
336 +-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
337 +- char *drv_log, unsigned long len)
338 +-{
339 +- s64 ret;
340 +- unsigned long flags;
341 +- struct pci_controller *hose = pe->phb;
342 +- struct pnv_phb *phb = hose->private_data;
343 +-
344 +- spin_lock_irqsave(&phb->lock, flags);
345 +-
346 +- ret = opal_pci_get_phb_diag_data2(phb->opal_id,
347 +- phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
348 +- if (ret) {
349 +- spin_unlock_irqrestore(&phb->lock, flags);
350 +- pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
351 +- __func__, hose->global_number, pe->addr, ret);
352 +- return -EIO;
353 +- }
354 +-
355 +- /*
356 +- * FIXME: We probably need log the error in somewhere.
357 +- * Lets make it up in future.
358 +- */
359 +- /* pr_info("%s", phb->diag.blob); */
360 +-
361 +- spin_unlock_irqrestore(&phb->lock, flags);
362 +-
363 +- return 0;
364 +-}
365 +-
366 +-/**
367 + * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
368 + * @pe: EEH PE
369 + *
370 +@@ -649,167 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
371 + }
372 + }
373 +
374 +-static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
375 +- struct OpalIoPhbErrorCommon *common)
376 +-{
377 +- struct OpalIoP7IOCPhbErrorData *data;
378 +- int i;
379 +-
380 +- data = (struct OpalIoP7IOCPhbErrorData *)common;
381 +-
382 +- pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n",
383 +- hose->global_number, common->version);
384 +-
385 +- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
386 +-
387 +- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
388 +- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
389 +- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
390 +-
391 +- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
392 +- pr_info(" slotStatus: %08x\n", data->slotStatus);
393 +- pr_info(" linkStatus: %08x\n", data->linkStatus);
394 +- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
395 +- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
396 +-
397 +- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
398 +- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
399 +- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
400 +- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
401 +- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
402 +- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
403 +- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
404 +- pr_info(" sourceId: %08x\n", data->sourceId);
405 +-
406 +- pr_info(" errorClass: %016llx\n", data->errorClass);
407 +- pr_info(" correlator: %016llx\n", data->correlator);
408 +- pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
409 +- pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
410 +- pr_info(" lemFir: %016llx\n", data->lemFir);
411 +- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
412 +- pr_info(" lemWOF: %016llx\n", data->lemWOF);
413 +- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
414 +- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
415 +- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
416 +- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
417 +- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
418 +- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
419 +- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
420 +- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
421 +- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
422 +- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
423 +- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
424 +- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
425 +- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
426 +- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
427 +- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
428 +- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
429 +-
430 +- for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
431 +- if ((data->pestA[i] >> 63) == 0 &&
432 +- (data->pestB[i] >> 63) == 0)
433 +- continue;
434 +-
435 +- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
436 +- pr_info(" PESTB: %016llx\n", data->pestB[i]);
437 +- }
438 +-}
439 +-
440 +-static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
441 +- struct OpalIoPhbErrorCommon *common)
442 +-{
443 +- struct OpalIoPhb3ErrorData *data;
444 +- int i;
445 +-
446 +- data = (struct OpalIoPhb3ErrorData*)common;
447 +- pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
448 +- hose->global_number, common->version);
449 +-
450 +- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
451 +-
452 +- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
453 +- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
454 +- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
455 +-
456 +- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
457 +- pr_info(" slotStatus: %08x\n", data->slotStatus);
458 +- pr_info(" linkStatus: %08x\n", data->linkStatus);
459 +- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
460 +- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
461 +-
462 +- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
463 +- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
464 +- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
465 +- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
466 +- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
467 +- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
468 +- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
469 +- pr_info(" sourceId: %08x\n", data->sourceId);
470 +- pr_info(" errorClass: %016llx\n", data->errorClass);
471 +- pr_info(" correlator: %016llx\n", data->correlator);
472 +- pr_info(" nFir: %016llx\n", data->nFir);
473 +- pr_info(" nFirMask: %016llx\n", data->nFirMask);
474 +- pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
475 +- pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
476 +- pr_info(" PhbCsr: %016llx\n", data->phbCsr);
477 +- pr_info(" lemFir: %016llx\n", data->lemFir);
478 +- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
479 +- pr_info(" lemWOF: %016llx\n", data->lemWOF);
480 +- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
481 +- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
482 +- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
483 +- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
484 +- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
485 +- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
486 +- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
487 +- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
488 +- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
489 +- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
490 +- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
491 +- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
492 +- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
493 +- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
494 +- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
495 +- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
496 +-
497 +- for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
498 +- if ((data->pestA[i] >> 63) == 0 &&
499 +- (data->pestB[i] >> 63) == 0)
500 +- continue;
501 +-
502 +- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
503 +- pr_info(" PESTB: %016llx\n", data->pestB[i]);
504 +- }
505 +-}
506 +-
507 +-static void ioda_eeh_phb_diag(struct pci_controller *hose)
508 +-{
509 +- struct pnv_phb *phb = hose->private_data;
510 +- struct OpalIoPhbErrorCommon *common;
511 +- long rc;
512 +-
513 +- rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
514 +- PNV_PCI_DIAG_BUF_SIZE);
515 +- if (rc != OPAL_SUCCESS) {
516 +- pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
517 +- __func__, hose->global_number, rc);
518 +- return;
519 +- }
520 +-
521 +- common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
522 +- switch (common->ioType) {
523 +- case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
524 +- ioda_eeh_p7ioc_phb_diag(hose, common);
525 +- break;
526 +- case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
527 +- ioda_eeh_phb3_phb_diag(hose, common);
528 +- break;
529 +- default:
530 +- pr_warning("%s: Unrecognized I/O chip %d\n",
531 +- __func__, common->ioType);
532 +- }
533 +-}
534 +-
535 + static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
536 + struct eeh_pe **pe)
537 + {
538 +@@ -863,12 +692,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
539 + */
540 + static int ioda_eeh_next_error(struct eeh_pe **pe)
541 + {
542 +- struct pci_controller *hose, *tmp;
543 ++ struct pci_controller *hose;
544 + struct pnv_phb *phb;
545 + u64 frozen_pe_no;
546 + u16 err_type, severity;
547 + long rc;
548 +- int ret = 1;
549 ++ int ret = EEH_NEXT_ERR_NONE;
550 +
551 + /*
552 + * While running here, it's safe to purge the event queue.
553 +@@ -878,7 +707,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
554 + eeh_remove_event(NULL);
555 + opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
556 +
557 +- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
558 ++ list_for_each_entry(hose, &hose_list, list_node) {
559 + /*
560 + * If the subordinate PCI buses of the PHB has been
561 + * removed, we needn't take care of it any more.
562 +@@ -917,19 +746,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
563 + switch (err_type) {
564 + case OPAL_EEH_IOC_ERROR:
565 + if (severity == OPAL_EEH_SEV_IOC_DEAD) {
566 +- list_for_each_entry_safe(hose, tmp,
567 +- &hose_list, list_node) {
568 ++ list_for_each_entry(hose, &hose_list,
569 ++ list_node) {
570 + phb = hose->private_data;
571 + phb->eeh_state |= PNV_EEH_STATE_REMOVED;
572 + }
573 +
574 + pr_err("EEH: dead IOC detected\n");
575 +- ret = 4;
576 +- goto out;
577 ++ ret = EEH_NEXT_ERR_DEAD_IOC;
578 + } else if (severity == OPAL_EEH_SEV_INF) {
579 + pr_info("EEH: IOC informative error "
580 + "detected\n");
581 + ioda_eeh_hub_diag(hose);
582 ++ ret = EEH_NEXT_ERR_NONE;
583 + }
584 +
585 + break;
586 +@@ -941,21 +770,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
587 + pr_err("EEH: dead PHB#%x detected\n",
588 + hose->global_number);
589 + phb->eeh_state |= PNV_EEH_STATE_REMOVED;
590 +- ret = 3;
591 +- goto out;
592 ++ ret = EEH_NEXT_ERR_DEAD_PHB;
593 + } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
594 + if (ioda_eeh_get_phb_pe(hose, pe))
595 + break;
596 +
597 + pr_err("EEH: fenced PHB#%x detected\n",
598 + hose->global_number);
599 +- ret = 2;
600 +- goto out;
601 ++ ret = EEH_NEXT_ERR_FENCED_PHB;
602 + } else if (severity == OPAL_EEH_SEV_INF) {
603 + pr_info("EEH: PHB#%x informative error "
604 + "detected\n",
605 + hose->global_number);
606 + ioda_eeh_phb_diag(hose);
607 ++ ret = EEH_NEXT_ERR_NONE;
608 + }
609 +
610 + break;
611 +@@ -965,13 +793,37 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
612 +
613 + pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
614 + (*pe)->addr, (*pe)->phb->global_number);
615 +- ret = 1;
616 +- goto out;
617 ++ ret = EEH_NEXT_ERR_FROZEN_PE;
618 ++ break;
619 ++ default:
620 ++ pr_warn("%s: Unexpected error type %d\n",
621 ++ __func__, err_type);
622 ++ }
623 ++
624 ++ /*
625 ++ * EEH core will try recover from fenced PHB or
626 ++ * frozen PE. In the time for frozen PE, EEH core
627 ++ * enable IO path for that before collecting logs,
628 ++ * but it ruins the site. So we have to dump the
629 ++ * log in advance here.
630 ++ */
631 ++ if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
632 ++ ret == EEH_NEXT_ERR_FENCED_PHB) &&
633 ++ !((*pe)->state & EEH_PE_ISOLATED)) {
634 ++ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
635 ++ ioda_eeh_phb_diag(hose);
636 + }
637 ++
638 ++ /*
639 ++ * If we have no errors on the specific PHB or only
640 ++ * informative error there, we continue poking it.
641 ++ * Otherwise, we need actions to be taken by upper
642 ++ * layer.
643 ++ */
644 ++ if (ret > EEH_NEXT_ERR_INF)
645 ++ break;
646 + }
647 +
648 +- ret = 0;
649 +-out:
650 + return ret;
651 + }
652 +
653 +@@ -980,7 +832,6 @@ struct pnv_eeh_ops ioda_eeh_ops = {
654 + .set_option = ioda_eeh_set_option,
655 + .get_state = ioda_eeh_get_state,
656 + .reset = ioda_eeh_reset,
657 +- .get_log = ioda_eeh_get_log,
658 + .configure_bridge = ioda_eeh_configure_bridge,
659 + .next_error = ioda_eeh_next_error
660 + };
661 +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
662 +index 4eb33a9ed532..437bbda26d1d 100644
663 +--- a/arch/powerpc/platforms/powernv/pci.c
664 ++++ b/arch/powerpc/platforms/powernv/pci.c
665 +@@ -124,77 +124,187 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
666 + }
667 + #endif /* CONFIG_PCI_MSI */
668 +
669 +-static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
670 ++static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
671 ++ struct OpalIoPhbErrorCommon *common)
672 + {
673 +- struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
674 ++ struct OpalIoP7IOCPhbErrorData *data;
675 + int i;
676 +
677 +- pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);
678 +-
679 +- pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl);
680 +-
681 +- pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg);
682 +- pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus);
683 +- pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus);
684 +-
685 +- pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus);
686 +- pr_info(" slotStatus = 0x%08x\n", data->slotStatus);
687 +- pr_info(" linkStatus = 0x%08x\n", data->linkStatus);
688 +- pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus);
689 +- pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus);
690 +-
691 +- pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus);
692 +- pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus);
693 +- pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus);
694 +- pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1);
695 +- pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2);
696 +- pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3);
697 +- pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4);
698 +- pr_info(" sourceId = 0x%08x\n", data->sourceId);
699 +-
700 +- pr_info(" errorClass = 0x%016llx\n", data->errorClass);
701 +- pr_info(" correlator = 0x%016llx\n", data->correlator);
702 +-
703 +- pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr);
704 +- pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr);
705 +- pr_info(" lemFir = 0x%016llx\n", data->lemFir);
706 +- pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask);
707 +- pr_info(" lemWOF = 0x%016llx\n", data->lemWOF);
708 +- pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus);
709 +- pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus);
710 +- pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0);
711 +- pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1);
712 +- pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus);
713 +- pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
714 +- pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0);
715 +- pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1);
716 +- pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus);
717 +- pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
718 +- pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0);
719 +- pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1);
720 +- pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus);
721 +- pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
722 +- pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0);
723 +- pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1);
724 ++ data = (struct OpalIoP7IOCPhbErrorData *)common;
725 ++ pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
726 ++ hose->global_number, common->version);
727 ++
728 ++ if (data->brdgCtl)
729 ++ pr_info(" brdgCtl: %08x\n",
730 ++ data->brdgCtl);
731 ++ if (data->portStatusReg || data->rootCmplxStatus ||
732 ++ data->busAgentStatus)
733 ++ pr_info(" UtlSts: %08x %08x %08x\n",
734 ++ data->portStatusReg, data->rootCmplxStatus,
735 ++ data->busAgentStatus);
736 ++ if (data->deviceStatus || data->slotStatus ||
737 ++ data->linkStatus || data->devCmdStatus ||
738 ++ data->devSecStatus)
739 ++ pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
740 ++ data->deviceStatus, data->slotStatus,
741 ++ data->linkStatus, data->devCmdStatus,
742 ++ data->devSecStatus);
743 ++ if (data->rootErrorStatus || data->uncorrErrorStatus ||
744 ++ data->corrErrorStatus)
745 ++ pr_info(" RootErrSts: %08x %08x %08x\n",
746 ++ data->rootErrorStatus, data->uncorrErrorStatus,
747 ++ data->corrErrorStatus);
748 ++ if (data->tlpHdr1 || data->tlpHdr2 ||
749 ++ data->tlpHdr3 || data->tlpHdr4)
750 ++ pr_info(" RootErrLog: %08x %08x %08x %08x\n",
751 ++ data->tlpHdr1, data->tlpHdr2,
752 ++ data->tlpHdr3, data->tlpHdr4);
753 ++ if (data->sourceId || data->errorClass ||
754 ++ data->correlator)
755 ++ pr_info(" RootErrLog1: %08x %016llx %016llx\n",
756 ++ data->sourceId, data->errorClass,
757 ++ data->correlator);
758 ++ if (data->p7iocPlssr || data->p7iocCsr)
759 ++ pr_info(" PhbSts: %016llx %016llx\n",
760 ++ data->p7iocPlssr, data->p7iocCsr);
761 ++ if (data->lemFir || data->lemErrorMask ||
762 ++ data->lemWOF)
763 ++ pr_info(" Lem: %016llx %016llx %016llx\n",
764 ++ data->lemFir, data->lemErrorMask,
765 ++ data->lemWOF);
766 ++ if (data->phbErrorStatus || data->phbFirstErrorStatus ||
767 ++ data->phbErrorLog0 || data->phbErrorLog1)
768 ++ pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
769 ++ data->phbErrorStatus, data->phbFirstErrorStatus,
770 ++ data->phbErrorLog0, data->phbErrorLog1);
771 ++ if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
772 ++ data->mmioErrorLog0 || data->mmioErrorLog1)
773 ++ pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
774 ++ data->mmioErrorStatus, data->mmioFirstErrorStatus,
775 ++ data->mmioErrorLog0, data->mmioErrorLog1);
776 ++ if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
777 ++ data->dma0ErrorLog0 || data->dma0ErrorLog1)
778 ++ pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
779 ++ data->dma0ErrorStatus, data->dma0FirstErrorStatus,
780 ++ data->dma0ErrorLog0, data->dma0ErrorLog1);
781 ++ if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
782 ++ data->dma1ErrorLog0 || data->dma1ErrorLog1)
783 ++ pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
784 ++ data->dma1ErrorStatus, data->dma1FirstErrorStatus,
785 ++ data->dma1ErrorLog0, data->dma1ErrorLog1);
786 +
787 + for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
788 + if ((data->pestA[i] >> 63) == 0 &&
789 + (data->pestB[i] >> 63) == 0)
790 + continue;
791 +- pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]);
792 +- pr_info(" PESTB = 0x%016llx\n", data->pestB[i]);
793 ++
794 ++ pr_info(" PE[%3d] A/B: %016llx %016llx\n",
795 ++ i, data->pestA[i], data->pestB[i]);
796 + }
797 + }
798 +
799 +-static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
800 ++static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
801 ++ struct OpalIoPhbErrorCommon *common)
802 + {
803 +- switch(phb->model) {
804 +- case PNV_PHB_MODEL_P7IOC:
805 +- pnv_pci_dump_p7ioc_diag_data(phb);
806 ++ struct OpalIoPhb3ErrorData *data;
807 ++ int i;
808 ++
809 ++ data = (struct OpalIoPhb3ErrorData*)common;
810 ++ pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
811 ++ hose->global_number, common->version);
812 ++ if (data->brdgCtl)
813 ++ pr_info(" brdgCtl: %08x\n",
814 ++ data->brdgCtl);
815 ++ if (data->portStatusReg || data->rootCmplxStatus ||
816 ++ data->busAgentStatus)
817 ++ pr_info(" UtlSts: %08x %08x %08x\n",
818 ++ data->portStatusReg, data->rootCmplxStatus,
819 ++ data->busAgentStatus);
820 ++ if (data->deviceStatus || data->slotStatus ||
821 ++ data->linkStatus || data->devCmdStatus ||
822 ++ data->devSecStatus)
823 ++ pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
824 ++ data->deviceStatus, data->slotStatus,
825 ++ data->linkStatus, data->devCmdStatus,
826 ++ data->devSecStatus);
827 ++ if (data->rootErrorStatus || data->uncorrErrorStatus ||
828 ++ data->corrErrorStatus)
829 ++ pr_info(" RootErrSts: %08x %08x %08x\n",
830 ++ data->rootErrorStatus, data->uncorrErrorStatus,
831 ++ data->corrErrorStatus);
832 ++ if (data->tlpHdr1 || data->tlpHdr2 ||
833 ++ data->tlpHdr3 || data->tlpHdr4)
834 ++ pr_info(" RootErrLog: %08x %08x %08x %08x\n",
835 ++ data->tlpHdr1, data->tlpHdr2,
836 ++ data->tlpHdr3, data->tlpHdr4);
837 ++ if (data->sourceId || data->errorClass ||
838 ++ data->correlator)
839 ++ pr_info(" RootErrLog1: %08x %016llx %016llx\n",
840 ++ data->sourceId, data->errorClass,
841 ++ data->correlator);
842 ++ if (data->nFir || data->nFirMask ||
843 ++ data->nFirWOF)
844 ++ pr_info(" nFir: %016llx %016llx %016llx\n",
845 ++ data->nFir, data->nFirMask,
846 ++ data->nFirWOF);
847 ++ if (data->phbPlssr || data->phbCsr)
848 ++ pr_info(" PhbSts: %016llx %016llx\n",
849 ++ data->phbPlssr, data->phbCsr);
850 ++ if (data->lemFir || data->lemErrorMask ||
851 ++ data->lemWOF)
852 ++ pr_info(" Lem: %016llx %016llx %016llx\n",
853 ++ data->lemFir, data->lemErrorMask,
854 ++ data->lemWOF);
855 ++ if (data->phbErrorStatus || data->phbFirstErrorStatus ||
856 ++ data->phbErrorLog0 || data->phbErrorLog1)
857 ++ pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
858 ++ data->phbErrorStatus, data->phbFirstErrorStatus,
859 ++ data->phbErrorLog0, data->phbErrorLog1);
860 ++ if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
861 ++ data->mmioErrorLog0 || data->mmioErrorLog1)
862 ++ pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
863 ++ data->mmioErrorStatus, data->mmioFirstErrorStatus,
864 ++ data->mmioErrorLog0, data->mmioErrorLog1);
865 ++ if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
866 ++ data->dma0ErrorLog0 || data->dma0ErrorLog1)
867 ++ pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
868 ++ data->dma0ErrorStatus, data->dma0FirstErrorStatus,
869 ++ data->dma0ErrorLog0, data->dma0ErrorLog1);
870 ++ if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
871 ++ data->dma1ErrorLog0 || data->dma1ErrorLog1)
872 ++ pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
873 ++ data->dma1ErrorStatus, data->dma1FirstErrorStatus,
874 ++ data->dma1ErrorLog0, data->dma1ErrorLog1);
875 ++
876 ++ for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
877 ++ if ((data->pestA[i] >> 63) == 0 &&
878 ++ (data->pestB[i] >> 63) == 0)
879 ++ continue;
880 ++
881 ++ pr_info(" PE[%3d] A/B: %016llx %016llx\n",
882 ++ i, data->pestA[i], data->pestB[i]);
883 ++ }
884 ++}
885 ++
886 ++void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
887 ++ unsigned char *log_buff)
888 ++{
889 ++ struct OpalIoPhbErrorCommon *common;
890 ++
891 ++ if (!hose || !log_buff)
892 ++ return;
893 ++
894 ++ common = (struct OpalIoPhbErrorCommon *)log_buff;
895 ++ switch (common->ioType) {
896 ++ case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
897 ++ pnv_pci_dump_p7ioc_diag_data(hose, common);
898 ++ break;
899 ++ case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
900 ++ pnv_pci_dump_phb3_diag_data(hose, common);
901 + break;
902 + default:
903 +- pr_warning("PCI %d: Can't decode this PHB diag data\n",
904 +- phb->hose->global_number);
905 ++ pr_warn("%s: Unrecognized ioType %d\n",
906 ++ __func__, common->ioType);
907 + }
908 + }
909 +
910 +@@ -222,7 +332,7 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
911 + * with the normal errors generated when probing empty slots
912 + */
913 + if (has_diag)
914 +- pnv_pci_dump_phb_diag_data(phb);
915 ++ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
916 + else
917 + pr_warning("PCI %d: No diag data available\n",
918 + phb->hose->global_number);
919 +diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
920 +index 1ed8d5f40f5a..9365ace1b8a3 100644
921 +--- a/arch/powerpc/platforms/powernv/pci.h
922 ++++ b/arch/powerpc/platforms/powernv/pci.h
923 +@@ -177,6 +177,7 @@ struct pnv_phb {
924 + unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
925 + struct OpalIoP7IOCPhbErrorData p7ioc;
926 + struct OpalIoP7IOCErrorData hub_diag;
927 ++ struct OpalIoPhb3ErrorData phb3;
928 + } diag;
929 +
930 + };
931 +@@ -186,6 +187,8 @@ extern struct pci_ops pnv_pci_ops;
932 + extern struct pnv_eeh_ops ioda_eeh_ops;
933 + #endif
934 +
935 ++void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
936 ++ unsigned char *log_buff);
937 + int pnv_pci_cfg_read(struct device_node *dn,
938 + int where, int size, u32 *val);
939 + int pnv_pci_cfg_write(struct device_node *dn,
940 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
941 +index 40772ef0f2b1..e50425d0f5f7 100644
942 +--- a/arch/x86/kvm/mmu.c
943 ++++ b/arch/x86/kvm/mmu.c
944 +@@ -2659,6 +2659,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
945 + int emulate = 0;
946 + gfn_t pseudo_gfn;
947 +
948 ++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
949 ++ return 0;
950 ++
951 + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
952 + if (iterator.level == level) {
953 + mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
954 +@@ -2829,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
955 + bool ret = false;
956 + u64 spte = 0ull;
957 +
958 ++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
959 ++ return false;
960 ++
961 + if (!page_fault_can_be_fast(error_code))
962 + return false;
963 +
964 +@@ -3224,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
965 + struct kvm_shadow_walk_iterator iterator;
966 + u64 spte = 0ull;
967 +
968 ++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
969 ++ return spte;
970 ++
971 + walk_shadow_page_lockless_begin(vcpu);
972 + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
973 + if (!is_shadow_present_pte(spte))
974 +@@ -4510,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
975 + u64 spte;
976 + int nr_sptes = 0;
977 +
978 ++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
979 ++ return nr_sptes;
980 ++
981 + walk_shadow_page_lockless_begin(vcpu);
982 + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
983 + sptes[iterator.level-1] = spte;
984 +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
985 +index ad75d77999d0..cba218a2f08d 100644
986 +--- a/arch/x86/kvm/paging_tmpl.h
987 ++++ b/arch/x86/kvm/paging_tmpl.h
988 +@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
989 + if (FNAME(gpte_changed)(vcpu, gw, top_level))
990 + goto out_gpte_changed;
991 +
992 ++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
993 ++ goto out_gpte_changed;
994 ++
995 + for (shadow_walk_init(&it, vcpu, addr);
996 + shadow_walk_okay(&it) && it.level > gw->level;
997 + shadow_walk_next(&it)) {
998 +@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
999 + */
1000 + mmu_topup_memory_caches(vcpu);
1001 +
1002 ++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1003 ++ WARN_ON(1);
1004 ++ return;
1005 ++ }
1006 ++
1007 + spin_lock(&vcpu->kvm->mmu_lock);
1008 + for_each_shadow_entry(vcpu, gva, iterator) {
1009 + level = iterator.level;
1010 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1011 +index dcc4de3ee57a..31c3e8bf1b82 100644
1012 +--- a/arch/x86/kvm/vmx.c
1013 ++++ b/arch/x86/kvm/vmx.c
1014 +@@ -7332,8 +7332,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
1015 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1016 +
1017 + free_vpid(vmx);
1018 +- free_nested(vmx);
1019 + free_loaded_vmcs(vmx->loaded_vmcs);
1020 ++ free_nested(vmx);
1021 + kfree(vmx->guest_msrs);
1022 + kvm_vcpu_uninit(vcpu);
1023 + kmem_cache_free(kvm_vcpu_cache, vmx);
1024 +diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
1025 +index 877b9a1b2152..01495755701b 100644
1026 +--- a/arch/x86/net/bpf_jit.S
1027 ++++ b/arch/x86/net/bpf_jit.S
1028 +@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
1029 + push %r9; \
1030 + push SKBDATA; \
1031 + /* rsi already has offset */ \
1032 +- mov $SIZE,%ecx; /* size */ \
1033 ++ mov $SIZE,%edx; /* size */ \
1034 + call bpf_internal_load_pointer_neg_helper; \
1035 + test %rax,%rax; \
1036 + pop SKBDATA; \
1037 +diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
1038 +index 02821b06a39e..a918bc481c52 100644
1039 +--- a/drivers/clocksource/vf_pit_timer.c
1040 ++++ b/drivers/clocksource/vf_pit_timer.c
1041 +@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
1042 +
1043 + static u64 pit_read_sched_clock(void)
1044 + {
1045 +- return __raw_readl(clksrc_base + PITCVAL);
1046 ++ return ~__raw_readl(clksrc_base + PITCVAL);
1047 + }
1048 +
1049 + static int __init pit_clocksource_init(unsigned long rate)
1050 +diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
1051 +index 61c09101ed18..4a699d26ebf3 100644
1052 +--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
1053 ++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
1054 +@@ -201,6 +201,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
1055 + struct drm_i915_private *dev_priv = dev->dev_private;
1056 + int bios_reserved = 0;
1057 +
1058 ++#ifdef CONFIG_INTEL_IOMMU
1059 ++ if (intel_iommu_gfx_mapped) {
1060 ++ DRM_INFO("DMAR active, disabling use of stolen memory\n");
1061 ++ return 0;
1062 ++ }
1063 ++#endif
1064 ++
1065 + if (dev_priv->gtt.stolen_size == 0)
1066 + return 0;
1067 +
1068 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1069 +index a20917789c59..970270432303 100644
1070 +--- a/drivers/gpu/drm/i915/i915_irq.c
1071 ++++ b/drivers/gpu/drm/i915/i915_irq.c
1072 +@@ -2760,10 +2760,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
1073 + return;
1074 +
1075 + if (HAS_PCH_IBX(dev)) {
1076 +- mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
1077 +- SDE_TRANSA_FIFO_UNDER | SDE_POISON;
1078 ++ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
1079 + } else {
1080 +- mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
1081 ++ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
1082 +
1083 + I915_WRITE(SERR_INT, I915_READ(SERR_INT));
1084 + }
1085 +@@ -2823,20 +2822,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1086 + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1087 + DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
1088 + DE_PLANEB_FLIP_DONE_IVB |
1089 +- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
1090 +- DE_ERR_INT_IVB);
1091 ++ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
1092 + extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
1093 +- DE_PIPEA_VBLANK_IVB);
1094 ++ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
1095 +
1096 + I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
1097 + } else {
1098 + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1099 + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
1100 + DE_AUX_CHANNEL_A |
1101 +- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
1102 + DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
1103 + DE_POISON);
1104 +- extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
1105 ++ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
1106 ++ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
1107 + }
1108 +
1109 + dev_priv->irq_mask = ~display_mask;
1110 +@@ -2952,9 +2950,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
1111 + struct drm_device *dev = dev_priv->dev;
1112 + uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
1113 + GEN8_PIPE_CDCLK_CRC_DONE |
1114 +- GEN8_PIPE_FIFO_UNDERRUN |
1115 + GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
1116 +- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
1117 ++ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
1118 ++ GEN8_PIPE_FIFO_UNDERRUN;
1119 + int pipe;
1120 + dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
1121 + dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
1122 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1123 +index 3e1b348de244..6b50a14449a3 100644
1124 +--- a/drivers/gpu/drm/i915/intel_dp.c
1125 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1126 +@@ -1634,7 +1634,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1127 + val |= EDP_PSR_LINK_DISABLE;
1128 +
1129 + I915_WRITE(EDP_PSR_CTL(dev), val |
1130 +- IS_BROADWELL(dev) ? 0 : link_entry_time |
1131 ++ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
1132 + max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1133 + idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1134 + EDP_PSR_ENABLE);
1135 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
1136 +index 6a6dd5cd7833..d0a0034d1734 100644
1137 +--- a/drivers/hid/hidraw.c
1138 ++++ b/drivers/hid/hidraw.c
1139 +@@ -313,13 +313,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
1140 + hid_hw_close(hidraw->hid);
1141 + wake_up_interruptible(&hidraw->wait);
1142 + }
1143 ++ device_destroy(hidraw_class,
1144 ++ MKDEV(hidraw_major, hidraw->minor));
1145 + } else {
1146 + --hidraw->open;
1147 + }
1148 + if (!hidraw->open) {
1149 + if (!hidraw->exist) {
1150 +- device_destroy(hidraw_class,
1151 +- MKDEV(hidraw_major, hidraw->minor));
1152 + hidraw_table[hidraw->minor] = NULL;
1153 + kfree(hidraw);
1154 + } else {
1155 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1156 +index 597e9b8fc18d..ef1cf52f8bb9 100644
1157 +--- a/drivers/input/mouse/elantech.c
1158 ++++ b/drivers/input/mouse/elantech.c
1159 +@@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
1160 + unsigned char *packet = psmouse->packet;
1161 +
1162 + input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1163 ++ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1164 + input_mt_report_pointer_emulation(dev, true);
1165 + input_sync(dev);
1166 + }
1167 +@@ -984,6 +985,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1168 + }
1169 +
1170 + /*
1171 ++ * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
1172 ++ * fw_version for this is based on the following fw_version & caps table:
1173 ++ *
1174 ++ * Laptop-model: fw_version: caps: buttons:
1175 ++ * Acer S3 0x461f00 10, 13, 0e clickpad
1176 ++ * Acer S7-392 0x581f01 50, 17, 0d clickpad
1177 ++ * Acer V5-131 0x461f02 01, 16, 0c clickpad
1178 ++ * Acer V5-551 0x461f00 ? clickpad
1179 ++ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
1180 ++ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
1181 ++ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
1182 ++ * Asus UX31 0x361f00 20, 15, 0e clickpad
1183 ++ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1184 ++ * Avatar AVIU-145A2 0x361f00 ? clickpad
1185 ++ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
1186 ++ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
1187 ++ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
1188 ++ * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
1189 ++ * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
1190 ++ * Samsung NP900X3E-A02 0x575f03 ? clickpad
1191 ++ * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
1192 ++ * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
1193 ++ * Samsung RF710 0x450f00 ? 2 hw buttons
1194 ++ * System76 Pangolin 0x250f01 ? 2 hw buttons
1195 ++ * (*) + 3 trackpoint buttons
1196 ++ */
1197 ++static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
1198 ++{
1199 ++ struct input_dev *dev = psmouse->dev;
1200 ++ struct elantech_data *etd = psmouse->private;
1201 ++
1202 ++ if (etd->fw_version & 0x001000) {
1203 ++ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1204 ++ __clear_bit(BTN_RIGHT, dev->keybit);
1205 ++ }
1206 ++}
1207 ++
1208 ++/*
1209 + * Set the appropriate event bits for the input subsystem
1210 + */
1211 + static int elantech_set_input_params(struct psmouse *psmouse)
1212 +@@ -1026,6 +1065,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1213 + __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
1214 + /* fall through */
1215 + case 3:
1216 ++ if (etd->hw_version == 3)
1217 ++ elantech_set_buttonpad_prop(psmouse);
1218 + input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
1219 + input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
1220 + if (etd->reports_pressure) {
1221 +@@ -1047,9 +1088,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1222 + */
1223 + psmouse_warn(psmouse, "couldn't query resolution data.\n");
1224 + }
1225 +- /* v4 is clickpad, with only one button. */
1226 +- __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1227 +- __clear_bit(BTN_RIGHT, dev->keybit);
1228 ++ elantech_set_buttonpad_prop(psmouse);
1229 + __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
1230 + /* For X to recognize me as touchpad. */
1231 + input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
1232 +diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
1233 +index 867e7c33ac55..b16ebef5b911 100644
1234 +--- a/drivers/input/tablet/wacom_sys.c
1235 ++++ b/drivers/input/tablet/wacom_sys.c
1236 +@@ -304,7 +304,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
1237 + struct usb_device *dev = interface_to_usbdev(intf);
1238 + char limit = 0;
1239 + /* result has to be defined as int for some devices */
1240 +- int result = 0;
1241 ++ int result = 0, touch_max = 0;
1242 + int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
1243 + unsigned char *report;
1244 +
1245 +@@ -351,7 +351,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
1246 + if (usage == WCM_DESKTOP) {
1247 + if (finger) {
1248 + features->device_type = BTN_TOOL_FINGER;
1249 +-
1250 ++ /* touch device at least supports one touch point */
1251 ++ touch_max = 1;
1252 + switch (features->type) {
1253 + case TABLETPC2FG:
1254 + features->pktlen = WACOM_PKGLEN_TPC2FG;
1255 +@@ -504,6 +505,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
1256 + }
1257 +
1258 + out:
1259 ++ if (!features->touch_max && touch_max)
1260 ++ features->touch_max = touch_max;
1261 + result = 0;
1262 + kfree(report);
1263 + return result;
1264 +@@ -1194,12 +1197,15 @@ static void wacom_wireless_work(struct work_struct *work)
1265 + wacom_wac1->features.device_type = BTN_TOOL_PEN;
1266 + snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
1267 + wacom_wac1->features.name);
1268 ++ wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
1269 ++ wacom_wac1->shared->type = wacom_wac1->features.type;
1270 + error = wacom_register_input(wacom1);
1271 + if (error)
1272 + goto fail;
1273 +
1274 + /* Touch interface */
1275 +- if (wacom_wac1->features.touch_max) {
1276 ++ if (wacom_wac1->features.touch_max ||
1277 ++ wacom_wac1->features.type == INTUOSHT) {
1278 + wacom_wac2->features =
1279 + *((struct wacom_features *)id->driver_info);
1280 + wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
1281 +@@ -1214,6 +1220,10 @@ static void wacom_wireless_work(struct work_struct *work)
1282 + error = wacom_register_input(wacom2);
1283 + if (error)
1284 + goto fail;
1285 ++
1286 ++ if (wacom_wac1->features.type == INTUOSHT &&
1287 ++ wacom_wac1->features.touch_max)
1288 ++ wacom_wac->shared->touch_input = wacom_wac2->input;
1289 + }
1290 +
1291 + error = wacom_initialize_battery(wacom);
1292 +@@ -1322,7 +1332,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
1293 + * HID descriptor. If this is the touch interface (wMaxPacketSize
1294 + * of WACOM_PKGLEN_BBTOUCH3), override the table values.
1295 + */
1296 +- if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
1297 ++ if (features->type >= INTUOS5S && features->type <= INTUOSHT) {
1298 + if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
1299 + features->device_type = BTN_TOOL_FINGER;
1300 + features->pktlen = WACOM_PKGLEN_BBTOUCH3;
1301 +@@ -1393,6 +1403,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
1302 + }
1303 + }
1304 +
1305 ++ if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) {
1306 ++ if (wacom_wac->features.device_type == BTN_TOOL_FINGER)
1307 ++ wacom_wac->shared->touch_input = wacom_wac->input;
1308 ++ }
1309 ++
1310 + return 0;
1311 +
1312 + fail5: wacom_destroy_leds(wacom);
1313 +diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
1314 +index 782c2535f1d8..4cabeaa086cd 100644
1315 +--- a/drivers/input/tablet/wacom_wac.c
1316 ++++ b/drivers/input/tablet/wacom_wac.c
1317 +@@ -1176,10 +1176,16 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1318 + static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
1319 + {
1320 + struct input_dev *input = wacom->input;
1321 ++ struct wacom_features *features = &wacom->features;
1322 +
1323 +- input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
1324 ++ if (features->type == INTUOSHT) {
1325 ++ input_report_key(input, BTN_LEFT, (data[1] & 0x02) != 0);
1326 ++ input_report_key(input, BTN_BACK, (data[1] & 0x08) != 0);
1327 ++ } else {
1328 ++ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
1329 ++ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
1330 ++ }
1331 + input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
1332 +- input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
1333 + input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
1334 + }
1335 +
1336 +@@ -1213,13 +1219,23 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1337 +
1338 + static int wacom_bpt_pen(struct wacom_wac *wacom)
1339 + {
1340 ++ struct wacom_features *features = &wacom->features;
1341 + struct input_dev *input = wacom->input;
1342 + unsigned char *data = wacom->data;
1343 + int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
1344 +
1345 +- if (data[0] != 0x02)
1346 ++ if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_USB)
1347 + return 0;
1348 +
1349 ++ if (data[0] == WACOM_REPORT_USB) {
1350 ++ if (features->type == INTUOSHT && features->touch_max) {
1351 ++ input_report_switch(wacom->shared->touch_input,
1352 ++ SW_MUTE_DEVICE, data[8] & 0x40);
1353 ++ input_sync(wacom->shared->touch_input);
1354 ++ }
1355 ++ return 0;
1356 ++ }
1357 ++
1358 + prox = (data[1] & 0x20) == 0x20;
1359 +
1360 + /*
1361 +@@ -1252,8 +1268,8 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
1362 + * touching and applying pressure; do not report negative
1363 + * distance.
1364 + */
1365 +- if (data[8] <= wacom->features.distance_max)
1366 +- d = wacom->features.distance_max - data[8];
1367 ++ if (data[8] <= features->distance_max)
1368 ++ d = features->distance_max - data[8];
1369 +
1370 + pen = data[1] & 0x01;
1371 + btn1 = data[1] & 0x02;
1372 +@@ -1297,13 +1313,20 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
1373 + unsigned char *data = wacom->data;
1374 + int connected;
1375 +
1376 +- if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80)
1377 ++ if (len != WACOM_PKGLEN_WIRELESS || data[0] != WACOM_REPORT_WL)
1378 + return 0;
1379 +
1380 + connected = data[1] & 0x01;
1381 + if (connected) {
1382 + int pid, battery;
1383 +
1384 ++ if ((wacom->shared->type == INTUOSHT) &&
1385 ++ wacom->shared->touch_max) {
1386 ++ input_report_switch(wacom->shared->touch_input,
1387 ++ SW_MUTE_DEVICE, data[5] & 0x40);
1388 ++ input_sync(wacom->shared->touch_input);
1389 ++ }
1390 ++
1391 + pid = get_unaligned_be16(&data[6]);
1392 + battery = data[5] & 0x3f;
1393 + if (wacom->pid != pid) {
1394 +@@ -1391,6 +1414,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1395 + break;
1396 +
1397 + case BAMBOO_PT:
1398 ++ case INTUOSHT:
1399 + sync = wacom_bpt_irq(wacom_wac, len);
1400 + break;
1401 +
1402 +@@ -1459,7 +1483,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
1403 +
1404 + /* these device have multiple inputs */
1405 + if (features->type >= WIRELESS ||
1406 +- (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
1407 ++ (features->type >= INTUOS5S && features->type <= INTUOSHT) ||
1408 + (features->oVid && features->oPid))
1409 + features->quirks |= WACOM_QUIRK_MULTI_INPUT;
1410 +
1411 +@@ -1771,33 +1795,50 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1412 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1413 + break;
1414 +
1415 ++ case INTUOSHT:
1416 ++ if (features->touch_max &&
1417 ++ features->device_type == BTN_TOOL_FINGER) {
1418 ++ input_dev->evbit[0] |= BIT_MASK(EV_SW);
1419 ++ __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
1420 ++ }
1421 ++ /* fall through */
1422 ++
1423 + case BAMBOO_PT:
1424 + __clear_bit(ABS_MISC, input_dev->absbit);
1425 +
1426 +- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1427 +-
1428 + if (features->device_type == BTN_TOOL_FINGER) {
1429 +- unsigned int flags = INPUT_MT_POINTER;
1430 +
1431 + __set_bit(BTN_LEFT, input_dev->keybit);
1432 + __set_bit(BTN_FORWARD, input_dev->keybit);
1433 + __set_bit(BTN_BACK, input_dev->keybit);
1434 + __set_bit(BTN_RIGHT, input_dev->keybit);
1435 +
1436 +- if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1437 +- input_set_abs_params(input_dev,
1438 ++ if (features->touch_max) {
1439 ++ /* touch interface */
1440 ++ unsigned int flags = INPUT_MT_POINTER;
1441 ++
1442 ++ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1443 ++ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1444 ++ input_set_abs_params(input_dev,
1445 + ABS_MT_TOUCH_MAJOR,
1446 + 0, features->x_max, 0, 0);
1447 +- input_set_abs_params(input_dev,
1448 ++ input_set_abs_params(input_dev,
1449 + ABS_MT_TOUCH_MINOR,
1450 + 0, features->y_max, 0, 0);
1451 ++ } else {
1452 ++ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1453 ++ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1454 ++ flags = 0;
1455 ++ }
1456 ++ input_mt_init_slots(input_dev, features->touch_max, flags);
1457 + } else {
1458 +- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1459 +- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1460 +- flags = 0;
1461 ++ /* buttons/keys only interface */
1462 ++ __clear_bit(ABS_X, input_dev->absbit);
1463 ++ __clear_bit(ABS_Y, input_dev->absbit);
1464 ++ __clear_bit(BTN_TOUCH, input_dev->keybit);
1465 + }
1466 +- input_mt_init_slots(input_dev, features->touch_max, flags);
1467 + } else if (features->device_type == BTN_TOOL_PEN) {
1468 ++ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1469 + __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1470 + __set_bit(BTN_TOOL_PEN, input_dev->keybit);
1471 + __set_bit(BTN_STYLUS, input_dev->keybit);
1472 +@@ -2200,6 +2241,17 @@ static const struct wacom_features wacom_features_0x300 =
1473 + static const struct wacom_features wacom_features_0x301 =
1474 + { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
1475 + 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1476 ++static const struct wacom_features wacom_features_0x302 =
1477 ++ { "Wacom Intuos PT S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
1478 ++ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
1479 ++ .touch_max = 16 };
1480 ++static const struct wacom_features wacom_features_0x303 =
1481 ++ { "Wacom Intuos PT M", WACOM_PKGLEN_BBPEN, 21600, 13500, 1023,
1482 ++ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
1483 ++ .touch_max = 16 };
1484 ++static const struct wacom_features wacom_features_0x30E =
1485 ++ { "Wacom Intuos S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
1486 ++ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1487 + static const struct wacom_features wacom_features_0x6004 =
1488 + { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
1489 + 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1490 +@@ -2337,6 +2389,9 @@ const struct usb_device_id wacom_ids[] = {
1491 + { USB_DEVICE_WACOM(0x10F) },
1492 + { USB_DEVICE_WACOM(0x300) },
1493 + { USB_DEVICE_WACOM(0x301) },
1494 ++ { USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) },
1495 ++ { USB_DEVICE_DETAILED(0x303, USB_CLASS_HID, 0, 0) },
1496 ++ { USB_DEVICE_DETAILED(0x30E, USB_CLASS_HID, 0, 0) },
1497 + { USB_DEVICE_WACOM(0x304) },
1498 + { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
1499 + { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
1500 +diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
1501 +index fd23a3790605..3600cf705fb1 100644
1502 +--- a/drivers/input/tablet/wacom_wac.h
1503 ++++ b/drivers/input/tablet/wacom_wac.h
1504 +@@ -54,6 +54,8 @@
1505 + #define WACOM_REPORT_TPCST 16
1506 + #define WACOM_REPORT_TPC1FGE 18
1507 + #define WACOM_REPORT_24HDT 1
1508 ++#define WACOM_REPORT_WL 128
1509 ++#define WACOM_REPORT_USB 192
1510 +
1511 + /* device quirks */
1512 + #define WACOM_QUIRK_MULTI_INPUT 0x0001
1513 +@@ -81,6 +83,7 @@ enum {
1514 + INTUOSPS,
1515 + INTUOSPM,
1516 + INTUOSPL,
1517 ++ INTUOSHT,
1518 + WACOM_21UX2,
1519 + WACOM_22HD,
1520 + DTK,
1521 +@@ -129,6 +132,10 @@ struct wacom_features {
1522 + struct wacom_shared {
1523 + bool stylus_in_proximity;
1524 + bool touch_down;
1525 ++ /* for wireless device to access USB interfaces */
1526 ++ unsigned touch_max;
1527 ++ int type;
1528 ++ struct input_dev *touch_input;
1529 + };
1530 +
1531 + struct wacom_wac {
1532 +diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
1533 +index c1f8cc6f14b2..716bdc57fac6 100644
1534 +--- a/drivers/media/pci/cx18/cx18-driver.c
1535 ++++ b/drivers/media/pci/cx18/cx18-driver.c
1536 +@@ -327,13 +327,16 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
1537 + struct i2c_client *c;
1538 + u8 eedata[256];
1539 +
1540 ++ memset(tv, 0, sizeof(*tv));
1541 ++
1542 + c = kzalloc(sizeof(*c), GFP_KERNEL);
1543 ++ if (!c)
1544 ++ return;
1545 +
1546 + strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
1547 + c->adapter = &cx->i2c_adap[0];
1548 + c->addr = 0xa0 >> 1;
1549 +
1550 +- memset(tv, 0, sizeof(*tv));
1551 + if (tveeprom_read(c, eedata, sizeof(eedata)))
1552 + goto ret;
1553 +
1554 +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
1555 +index 20e345d9fe8f..a1c641e18362 100644
1556 +--- a/drivers/media/usb/dvb-usb/cxusb.c
1557 ++++ b/drivers/media/usb/dvb-usb/cxusb.c
1558 +@@ -149,6 +149,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1559 + int num)
1560 + {
1561 + struct dvb_usb_device *d = i2c_get_adapdata(adap);
1562 ++ int ret;
1563 + int i;
1564 +
1565 + if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
1566 +@@ -173,7 +174,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1567 + if (1 + msg[i].len > sizeof(ibuf)) {
1568 + warn("i2c rd: len=%d is too big!\n",
1569 + msg[i].len);
1570 +- return -EOPNOTSUPP;
1571 ++ ret = -EOPNOTSUPP;
1572 ++ goto unlock;
1573 + }
1574 + obuf[0] = 0;
1575 + obuf[1] = msg[i].len;
1576 +@@ -193,12 +195,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1577 + if (3 + msg[i].len > sizeof(obuf)) {
1578 + warn("i2c wr: len=%d is too big!\n",
1579 + msg[i].len);
1580 +- return -EOPNOTSUPP;
1581 ++ ret = -EOPNOTSUPP;
1582 ++ goto unlock;
1583 + }
1584 + if (1 + msg[i + 1].len > sizeof(ibuf)) {
1585 + warn("i2c rd: len=%d is too big!\n",
1586 + msg[i + 1].len);
1587 +- return -EOPNOTSUPP;
1588 ++ ret = -EOPNOTSUPP;
1589 ++ goto unlock;
1590 + }
1591 + obuf[0] = msg[i].len;
1592 + obuf[1] = msg[i+1].len;
1593 +@@ -223,7 +227,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1594 + if (2 + msg[i].len > sizeof(obuf)) {
1595 + warn("i2c wr: len=%d is too big!\n",
1596 + msg[i].len);
1597 +- return -EOPNOTSUPP;
1598 ++ ret = -EOPNOTSUPP;
1599 ++ goto unlock;
1600 + }
1601 + obuf[0] = msg[i].addr;
1602 + obuf[1] = msg[i].len;
1603 +@@ -237,8 +242,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1604 + }
1605 + }
1606 +
1607 ++ if (i == num)
1608 ++ ret = num;
1609 ++ else
1610 ++ ret = -EREMOTEIO;
1611 ++
1612 ++unlock:
1613 + mutex_unlock(&d->i2c_mutex);
1614 +- return i == num ? num : -EREMOTEIO;
1615 ++ return ret;
1616 + }
1617 +
1618 + static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
1619 +diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
1620 +index c1a63b2a6baa..f272ed86d467 100644
1621 +--- a/drivers/media/usb/dvb-usb/dw2102.c
1622 ++++ b/drivers/media/usb/dvb-usb/dw2102.c
1623 +@@ -301,6 +301,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
1624 + static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
1625 + {
1626 + struct dvb_usb_device *d = i2c_get_adapdata(adap);
1627 ++ int ret;
1628 +
1629 + if (!d)
1630 + return -ENODEV;
1631 +@@ -316,7 +317,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
1632 + if (2 + msg[1].len > sizeof(ibuf)) {
1633 + warn("i2c rd: len=%d is too big!\n",
1634 + msg[1].len);
1635 +- return -EOPNOTSUPP;
1636 ++ ret = -EOPNOTSUPP;
1637 ++ goto unlock;
1638 + }
1639 +
1640 + obuf[0] = msg[0].addr << 1;
1641 +@@ -340,7 +342,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
1642 + if (2 + msg[0].len > sizeof(obuf)) {
1643 + warn("i2c wr: len=%d is too big!\n",
1644 + msg[1].len);
1645 +- return -EOPNOTSUPP;
1646 ++ ret = -EOPNOTSUPP;
1647 ++ goto unlock;
1648 + }
1649 +
1650 + obuf[0] = msg[0].addr << 1;
1651 +@@ -357,7 +360,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
1652 + if (2 + msg[0].len > sizeof(obuf)) {
1653 + warn("i2c wr: len=%d is too big!\n",
1654 + msg[1].len);
1655 +- return -EOPNOTSUPP;
1656 ++ ret = -EOPNOTSUPP;
1657 ++ goto unlock;
1658 + }
1659 +
1660 + obuf[0] = msg[0].addr << 1;
1661 +@@ -386,15 +390,17 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
1662 +
1663 + break;
1664 + }
1665 ++ ret = num;
1666 +
1667 ++unlock:
1668 + mutex_unlock(&d->i2c_mutex);
1669 +- return num;
1670 ++ return ret;
1671 + }
1672 +
1673 + static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
1674 + {
1675 + struct dvb_usb_device *d = i2c_get_adapdata(adap);
1676 +- int len, i, j;
1677 ++ int len, i, j, ret;
1678 +
1679 + if (!d)
1680 + return -ENODEV;
1681 +@@ -430,7 +436,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
1682 + if (2 + msg[j].len > sizeof(ibuf)) {
1683 + warn("i2c rd: len=%d is too big!\n",
1684 + msg[j].len);
1685 +- return -EOPNOTSUPP;
1686 ++ ret = -EOPNOTSUPP;
1687 ++ goto unlock;
1688 + }
1689 +
1690 + dw210x_op_rw(d->udev, 0xc3,
1691 +@@ -466,7 +473,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
1692 + if (2 + msg[j].len > sizeof(obuf)) {
1693 + warn("i2c wr: len=%d is too big!\n",
1694 + msg[j].len);
1695 +- return -EOPNOTSUPP;
1696 ++ ret = -EOPNOTSUPP;
1697 ++ goto unlock;
1698 + }
1699 +
1700 + obuf[0] = msg[j].addr << 1;
1701 +@@ -481,15 +489,18 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
1702 + }
1703 +
1704 + }
1705 ++ ret = num;
1706 +
1707 ++unlock:
1708 + mutex_unlock(&d->i2c_mutex);
1709 +- return num;
1710 ++ return ret;
1711 + }
1712 +
1713 + static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1714 + int num)
1715 + {
1716 + struct dvb_usb_device *d = i2c_get_adapdata(adap);
1717 ++ int ret;
1718 + int i;
1719 +
1720 + if (!d)
1721 +@@ -506,7 +517,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1722 + if (2 + msg[1].len > sizeof(ibuf)) {
1723 + warn("i2c rd: len=%d is too big!\n",
1724 + msg[1].len);
1725 +- return -EOPNOTSUPP;
1726 ++ ret = -EOPNOTSUPP;
1727 ++ goto unlock;
1728 + }
1729 + obuf[0] = msg[0].addr << 1;
1730 + obuf[1] = msg[0].len;
1731 +@@ -530,7 +542,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1732 + if (2 + msg[0].len > sizeof(obuf)) {
1733 + warn("i2c wr: len=%d is too big!\n",
1734 + msg[0].len);
1735 +- return -EOPNOTSUPP;
1736 ++ ret = -EOPNOTSUPP;
1737 ++ goto unlock;
1738 + }
1739 + obuf[0] = msg[0].addr << 1;
1740 + obuf[1] = msg[0].len;
1741 +@@ -556,9 +569,11 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1742 + msg[i].flags == 0 ? ">>>" : "<<<");
1743 + debug_dump(msg[i].buf, msg[i].len, deb_xfer);
1744 + }
1745 ++ ret = num;
1746 +
1747 ++unlock:
1748 + mutex_unlock(&d->i2c_mutex);
1749 +- return num;
1750 ++ return ret;
1751 + }
1752 +
1753 + static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1754 +@@ -566,7 +581,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1755 + {
1756 + struct dvb_usb_device *d = i2c_get_adapdata(adap);
1757 + struct usb_device *udev;
1758 +- int len, i, j;
1759 ++ int len, i, j, ret;
1760 +
1761 + if (!d)
1762 + return -ENODEV;
1763 +@@ -618,7 +633,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1764 + if (msg[j].len > sizeof(ibuf)) {
1765 + warn("i2c rd: len=%d is too big!\n",
1766 + msg[j].len);
1767 +- return -EOPNOTSUPP;
1768 ++ ret = -EOPNOTSUPP;
1769 ++ goto unlock;
1770 + }
1771 +
1772 + dw210x_op_rw(d->udev, 0x91, 0, 0,
1773 +@@ -652,7 +668,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1774 + if (2 + msg[j].len > sizeof(obuf)) {
1775 + warn("i2c wr: len=%d is too big!\n",
1776 + msg[j].len);
1777 +- return -EOPNOTSUPP;
1778 ++ ret = -EOPNOTSUPP;
1779 ++ goto unlock;
1780 + }
1781 +
1782 + obuf[0] = msg[j + 1].len;
1783 +@@ -671,7 +688,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1784 + if (2 + msg[j].len > sizeof(obuf)) {
1785 + warn("i2c wr: len=%d is too big!\n",
1786 + msg[j].len);
1787 +- return -EOPNOTSUPP;
1788 ++ ret = -EOPNOTSUPP;
1789 ++ goto unlock;
1790 + }
1791 + obuf[0] = msg[j].len + 1;
1792 + obuf[1] = (msg[j].addr << 1);
1793 +@@ -685,9 +703,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1794 + }
1795 + }
1796 + }
1797 ++ ret = num;
1798 +
1799 ++unlock:
1800 + mutex_unlock(&d->i2c_mutex);
1801 +- return num;
1802 ++ return ret;
1803 + }
1804 +
1805 + static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1806 +diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
1807 +index 357bbc54fe4b..3e049c13429c 100644
1808 +--- a/drivers/mmc/card/queue.c
1809 ++++ b/drivers/mmc/card/queue.c
1810 +@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
1811 + struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
1812 +
1813 + if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
1814 +- limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
1815 ++ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
1816 +
1817 + mq->card = card;
1818 + mq->queue = blk_init_queue(mmc_request_fn, lock);
1819 +diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
1820 +index cbaba4442d4b..bf7a01ef9a57 100644
1821 +--- a/drivers/net/ethernet/intel/e100.c
1822 ++++ b/drivers/net/ethernet/intel/e100.c
1823 +@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
1824 + *enable_wake = false;
1825 + }
1826 +
1827 +- pci_disable_device(pdev);
1828 ++ pci_clear_master(pdev);
1829 + }
1830 +
1831 + static int __e100_power_off(struct pci_dev *pdev, bool wake)
1832 +diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
1833 +index b8235ee5d7d7..d9bf88b654ec 100644
1834 +--- a/drivers/net/ethernet/sfc/efx.h
1835 ++++ b/drivers/net/ethernet/sfc/efx.h
1836 +@@ -66,6 +66,9 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
1837 + #define EFX_RXQ_MIN_ENT 128U
1838 + #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
1839 +
1840 ++#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
1841 ++ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
1842 ++
1843 + /* Filters */
1844 +
1845 + /**
1846 +diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
1847 +index 1f529fa2edb1..fcfb44e1ba47 100644
1848 +--- a/drivers/net/ethernet/sfc/ethtool.c
1849 ++++ b/drivers/net/ethernet/sfc/ethtool.c
1850 +@@ -583,7 +583,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
1851 + struct efx_nic *efx = netdev_priv(net_dev);
1852 +
1853 + ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
1854 +- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
1855 ++ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
1856 + ring->rx_pending = efx->rxq_entries;
1857 + ring->tx_pending = efx->txq_entries;
1858 + }
1859 +@@ -596,7 +596,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
1860 +
1861 + if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
1862 + ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
1863 +- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
1864 ++ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
1865 + return -EINVAL;
1866 +
1867 + if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
1868 +diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
1869 +index cd9b164a0434..251430450005 100644
1870 +--- a/drivers/net/ethernet/ti/davinci_emac.c
1871 ++++ b/drivers/net/ethernet/ti/davinci_emac.c
1872 +@@ -1532,7 +1532,7 @@ static int emac_dev_open(struct net_device *ndev)
1873 + struct device *emac_dev = &ndev->dev;
1874 + u32 cnt;
1875 + struct resource *res;
1876 +- int ret;
1877 ++ int q, m, ret;
1878 + int i = 0;
1879 + int k = 0;
1880 + struct emac_priv *priv = netdev_priv(ndev);
1881 +@@ -1567,8 +1567,7 @@ static int emac_dev_open(struct net_device *ndev)
1882 +
1883 + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1884 + for (i = res->start; i <= res->end; i++) {
1885 +- if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
1886 +- 0, ndev->name, ndev))
1887 ++ if (request_irq(i, emac_irq, 0, ndev->name, ndev))
1888 + goto rollback;
1889 + }
1890 + k++;
1891 +@@ -1641,7 +1640,15 @@ static int emac_dev_open(struct net_device *ndev)
1892 +
1893 + rollback:
1894 +
1895 +- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
1896 ++ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
1897 ++
1898 ++ for (q = k; k >= 0; k--) {
1899 ++ for (m = i; m >= res->start; m--)
1900 ++ free_irq(m, ndev);
1901 ++ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
1902 ++ m = res->end;
1903 ++ }
1904 ++
1905 + ret = -EBUSY;
1906 + err:
1907 + pm_runtime_put(&priv->pdev->dev);
1908 +@@ -1659,6 +1666,9 @@ err:
1909 + */
1910 + static int emac_dev_stop(struct net_device *ndev)
1911 + {
1912 ++ struct resource *res;
1913 ++ int i = 0;
1914 ++ int irq_num;
1915 + struct emac_priv *priv = netdev_priv(ndev);
1916 + struct device *emac_dev = &ndev->dev;
1917 +
1918 +@@ -1674,6 +1684,13 @@ static int emac_dev_stop(struct net_device *ndev)
1919 + if (priv->phydev)
1920 + phy_disconnect(priv->phydev);
1921 +
1922 ++ /* Free IRQ */
1923 ++ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
1924 ++ for (irq_num = res->start; irq_num <= res->end; irq_num++)
1925 ++ free_irq(irq_num, priv->ndev);
1926 ++ i++;
1927 ++ }
1928 ++
1929 + if (netif_msg_drv(priv))
1930 + dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
1931 +
1932 +diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
1933 +index f95de0d16216..1de59b0f8fa8 100644
1934 +--- a/drivers/net/wireless/p54/txrx.c
1935 ++++ b/drivers/net/wireless/p54/txrx.c
1936 +@@ -587,7 +587,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
1937 + chan = priv->curchan;
1938 + if (chan) {
1939 + struct survey_info *survey = &priv->survey[chan->hw_value];
1940 +- survey->noise = clamp_t(s8, priv->noise, -128, 127);
1941 ++ survey->noise = clamp(priv->noise, -128, 127);
1942 + survey->channel_time = priv->survey_raw.active;
1943 + survey->channel_time_tx = priv->survey_raw.tx;
1944 + survey->channel_time_busy = priv->survey_raw.tx +
1945 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
1946 +index 75dffb79ad32..7271299823b7 100644
1947 +--- a/drivers/regulator/core.c
1948 ++++ b/drivers/regulator/core.c
1949 +@@ -1908,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
1950 +
1951 + trace_regulator_disable_complete(rdev_get_name(rdev));
1952 +
1953 +- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
1954 +- NULL);
1955 + return 0;
1956 + }
1957 +
1958 +@@ -1933,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
1959 + rdev_err(rdev, "failed to disable\n");
1960 + return ret;
1961 + }
1962 ++ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
1963 ++ NULL);
1964 + }
1965 +
1966 + rdev->use_count = 0;
1967 +@@ -1985,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
1968 + {
1969 + int ret = 0;
1970 +
1971 +- /* force disable */
1972 +- if (rdev->desc->ops->disable) {
1973 +- /* ah well, who wants to live forever... */
1974 +- ret = rdev->desc->ops->disable(rdev);
1975 +- if (ret < 0) {
1976 +- rdev_err(rdev, "failed to force disable\n");
1977 +- return ret;
1978 +- }
1979 +- /* notify other consumers that power has been forced off */
1980 +- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
1981 +- REGULATOR_EVENT_DISABLE, NULL);
1982 ++ ret = _regulator_do_disable(rdev);
1983 ++ if (ret < 0) {
1984 ++ rdev_err(rdev, "failed to force disable\n");
1985 ++ return ret;
1986 + }
1987 +
1988 +- return ret;
1989 ++ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
1990 ++ REGULATOR_EVENT_DISABLE, NULL);
1991 ++
1992 ++ return 0;
1993 + }
1994 +
1995 + /**
1996 +@@ -3631,8 +3627,6 @@ int regulator_suspend_finish(void)
1997 +
1998 + mutex_lock(&regulator_list_mutex);
1999 + list_for_each_entry(rdev, &regulator_list, list) {
2000 +- struct regulator_ops *ops = rdev->desc->ops;
2001 +-
2002 + mutex_lock(&rdev->mutex);
2003 + if (rdev->use_count > 0 || rdev->constraints->always_on) {
2004 + error = _regulator_do_enable(rdev);
2005 +@@ -3641,12 +3635,10 @@ int regulator_suspend_finish(void)
2006 + } else {
2007 + if (!have_full_constraints())
2008 + goto unlock;
2009 +- if (!ops->disable)
2010 +- goto unlock;
2011 + if (!_regulator_is_enabled(rdev))
2012 + goto unlock;
2013 +
2014 +- error = ops->disable(rdev);
2015 ++ error = _regulator_do_disable(rdev);
2016 + if (error)
2017 + ret = error;
2018 + }
2019 +@@ -3820,7 +3812,7 @@ static int __init regulator_init_complete(void)
2020 + ops = rdev->desc->ops;
2021 + c = rdev->constraints;
2022 +
2023 +- if (!ops->disable || (c && c->always_on))
2024 ++ if (c && c->always_on)
2025 + continue;
2026 +
2027 + mutex_lock(&rdev->mutex);
2028 +@@ -3841,7 +3833,7 @@ static int __init regulator_init_complete(void)
2029 + /* We log since this may kill the system if it
2030 + * goes wrong. */
2031 + rdev_info(rdev, "disabling\n");
2032 +- ret = ops->disable(rdev);
2033 ++ ret = _regulator_do_disable(rdev);
2034 + if (ret != 0) {
2035 + rdev_err(rdev, "couldn't disable: %d\n", ret);
2036 + }
2037 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2038 +index 7bd7f0d5f050..62ec84b42e31 100644
2039 +--- a/drivers/scsi/scsi_lib.c
2040 ++++ b/drivers/scsi/scsi_lib.c
2041 +@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
2042 +
2043 + host_dev = scsi_get_device(shost);
2044 + if (host_dev && host_dev->dma_mask)
2045 +- bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
2046 ++ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
2047 +
2048 + return bounce_limit;
2049 + }
2050 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2051 +index 73f5208714a4..1af67a214d33 100644
2052 +--- a/drivers/usb/host/xhci-pci.c
2053 ++++ b/drivers/usb/host/xhci-pci.c
2054 +@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2055 + "QUIRK: Resetting on resume");
2056 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
2057 + }
2058 ++ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
2059 ++ pdev->device == 0x0015 &&
2060 ++ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
2061 ++ pdev->subsystem_device == 0xc0cd)
2062 ++ xhci->quirks |= XHCI_RESET_ON_RESUME;
2063 + if (pdev->vendor == PCI_VENDOR_ID_VIA)
2064 + xhci->quirks |= XHCI_RESET_ON_RESUME;
2065 + }
2066 +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
2067 +index fafdddac8271..b46cf5a67329 100644
2068 +--- a/fs/nfs/internal.h
2069 ++++ b/fs/nfs/internal.h
2070 +@@ -176,7 +176,8 @@ extern struct nfs_server *nfs4_create_server(
2071 + extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
2072 + struct nfs_fh *);
2073 + extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
2074 +- struct sockaddr *sap, size_t salen);
2075 ++ struct sockaddr *sap, size_t salen,
2076 ++ struct net *net);
2077 + extern void nfs_free_server(struct nfs_server *server);
2078 + extern struct nfs_server *nfs_clone_server(struct nfs_server *,
2079 + struct nfs_fh *,
2080 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2081 +index 4c7a4f4a4da1..cb70bdd7f320 100644
2082 +--- a/fs/nfs/nfs4client.c
2083 ++++ b/fs/nfs/nfs4client.c
2084 +@@ -1133,6 +1133,7 @@ static int nfs_probe_destination(struct nfs_server *server)
2085 + * @hostname: new end-point's hostname
2086 + * @sap: new end-point's socket address
2087 + * @salen: size of "sap"
2088 ++ * @net: net namespace
2089 + *
2090 + * The nfs_server must be quiescent before this function is invoked.
2091 + * Either its session is drained (NFSv4.1+), or its transport is
2092 +@@ -1141,13 +1142,13 @@ static int nfs_probe_destination(struct nfs_server *server)
2093 + * Returns zero on success, or a negative errno value.
2094 + */
2095 + int nfs4_update_server(struct nfs_server *server, const char *hostname,
2096 +- struct sockaddr *sap, size_t salen)
2097 ++ struct sockaddr *sap, size_t salen, struct net *net)
2098 + {
2099 + struct nfs_client *clp = server->nfs_client;
2100 + struct rpc_clnt *clnt = server->client;
2101 + struct xprt_create xargs = {
2102 + .ident = clp->cl_proto,
2103 +- .net = &init_net,
2104 ++ .net = net,
2105 + .dstaddr = sap,
2106 + .addrlen = salen,
2107 + .servername = hostname,
2108 +@@ -1187,7 +1188,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
2109 + error = nfs4_set_client(server, hostname, sap, salen, buf,
2110 + clp->cl_rpcclient->cl_auth->au_flavor,
2111 + clp->cl_proto, clnt->cl_timeout,
2112 +- clp->cl_minorversion, clp->cl_net);
2113 ++ clp->cl_minorversion, net);
2114 + nfs_put_client(clp);
2115 + if (error != 0) {
2116 + nfs_server_insert_lists(server);
2117 +diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
2118 +index 4e7f05d3e9db..3d5dbf80d46a 100644
2119 +--- a/fs/nfs/nfs4namespace.c
2120 ++++ b/fs/nfs/nfs4namespace.c
2121 +@@ -121,9 +121,8 @@ static int nfs4_validate_fspath(struct dentry *dentry,
2122 + }
2123 +
2124 + static size_t nfs_parse_server_name(char *string, size_t len,
2125 +- struct sockaddr *sa, size_t salen, struct nfs_server *server)
2126 ++ struct sockaddr *sa, size_t salen, struct net *net)
2127 + {
2128 +- struct net *net = rpc_net_ns(server->client);
2129 + ssize_t ret;
2130 +
2131 + ret = rpc_pton(net, string, len, sa, salen);
2132 +@@ -223,6 +222,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
2133 + const struct nfs4_fs_location *location)
2134 + {
2135 + const size_t addr_bufsize = sizeof(struct sockaddr_storage);
2136 ++ struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
2137 + struct vfsmount *mnt = ERR_PTR(-ENOENT);
2138 + char *mnt_path;
2139 + unsigned int maxbuflen;
2140 +@@ -248,8 +248,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
2141 + continue;
2142 +
2143 + mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
2144 +- mountdata->addr, addr_bufsize,
2145 +- NFS_SB(mountdata->sb));
2146 ++ mountdata->addr, addr_bufsize, net);
2147 + if (mountdata->addrlen == 0)
2148 + continue;
2149 +
2150 +@@ -419,6 +418,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
2151 + const struct nfs4_fs_location *location)
2152 + {
2153 + const size_t addr_bufsize = sizeof(struct sockaddr_storage);
2154 ++ struct net *net = rpc_net_ns(server->client);
2155 + struct sockaddr *sap;
2156 + unsigned int s;
2157 + size_t salen;
2158 +@@ -440,7 +440,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
2159 + continue;
2160 +
2161 + salen = nfs_parse_server_name(buf->data, buf->len,
2162 +- sap, addr_bufsize, server);
2163 ++ sap, addr_bufsize, net);
2164 + if (salen == 0)
2165 + continue;
2166 + rpc_set_port(sap, NFS_PORT);
2167 +@@ -450,7 +450,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
2168 + if (hostname == NULL)
2169 + break;
2170 +
2171 +- error = nfs4_update_server(server, hostname, sap, salen);
2172 ++ error = nfs4_update_server(server, hostname, sap, salen, net);
2173 + kfree(hostname);
2174 + if (error == 0)
2175 + break;
2176 +diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
2177 +index 70779b2fc209..6b85db037b92 100644
2178 +--- a/fs/proc/proc_devtree.c
2179 ++++ b/fs/proc/proc_devtree.c
2180 +@@ -232,6 +232,7 @@ void __init proc_device_tree_init(void)
2181 + return;
2182 + root = of_find_node_by_path("/");
2183 + if (root == NULL) {
2184 ++ remove_proc_entry("device-tree", NULL);
2185 + pr_debug("/proc/device-tree: can't find root\n");
2186 + return;
2187 + }
2188 +diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
2189 +index 7c1420bb1dce..6ade97de7a85 100644
2190 +--- a/include/linux/ceph/messenger.h
2191 ++++ b/include/linux/ceph/messenger.h
2192 +@@ -157,7 +157,7 @@ struct ceph_msg {
2193 + bool front_is_vmalloc;
2194 + bool more_to_follow;
2195 + bool needs_out_seq;
2196 +- int front_max;
2197 ++ int front_alloc_len;
2198 + unsigned long ack_stamp; /* tx: when we were acked */
2199 +
2200 + struct ceph_msgpool *pool;
2201 +diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
2202 +index 8f47625a0661..4fb6a8938957 100644
2203 +--- a/include/linux/ceph/osd_client.h
2204 ++++ b/include/linux/ceph/osd_client.h
2205 +@@ -138,6 +138,7 @@ struct ceph_osd_request {
2206 + __le64 *r_request_pool;
2207 + void *r_request_pgid;
2208 + __le32 *r_request_attempts;
2209 ++ bool r_paused;
2210 + struct ceph_eversion *r_request_reassert_version;
2211 +
2212 + int r_result;
2213 +diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
2214 +index 8c9b7a1c4138..ac17872d6c1c 100644
2215 +--- a/include/linux/ftrace_event.h
2216 ++++ b/include/linux/ftrace_event.h
2217 +@@ -356,10 +356,6 @@ enum {
2218 + FILTER_TRACE_FN,
2219 + };
2220 +
2221 +-#define EVENT_STORAGE_SIZE 128
2222 +-extern struct mutex event_storage_mutex;
2223 +-extern char event_storage[EVENT_STORAGE_SIZE];
2224 +-
2225 + extern int trace_event_raw_init(struct ftrace_event_call *call);
2226 + extern int trace_define_field(struct ftrace_event_call *call, const char *type,
2227 + const char *name, int offset, int size,
2228 +diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
2229 +index 5c38606613d8..40701fa1b97f 100644
2230 +--- a/include/trace/ftrace.h
2231 ++++ b/include/trace/ftrace.h
2232 +@@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
2233 + #undef __array
2234 + #define __array(type, item, len) \
2235 + do { \
2236 +- mutex_lock(&event_storage_mutex); \
2237 ++ char *type_str = #type"["__stringify(len)"]"; \
2238 + BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
2239 +- snprintf(event_storage, sizeof(event_storage), \
2240 +- "%s[%d]", #type, len); \
2241 +- ret = trace_define_field(event_call, event_storage, #item, \
2242 ++ ret = trace_define_field(event_call, type_str, #item, \
2243 + offsetof(typeof(field), item), \
2244 + sizeof(field.item), \
2245 + is_signed_type(type), FILTER_OTHER); \
2246 +- mutex_unlock(&event_storage_mutex); \
2247 + if (ret) \
2248 + return ret; \
2249 + } while (0);
2250 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
2251 +index be7c86bae576..97fb83422c2f 100644
2252 +--- a/kernel/printk/printk.c
2253 ++++ b/kernel/printk/printk.c
2254 +@@ -1080,7 +1080,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
2255 + next_seq = log_next_seq;
2256 +
2257 + len = 0;
2258 +- prev = 0;
2259 + while (len >= 0 && seq < next_seq) {
2260 + struct printk_log *msg = log_from_idx(idx);
2261 + int textlen;
2262 +@@ -2789,7 +2788,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2263 + next_idx = idx;
2264 +
2265 + l = 0;
2266 +- prev = 0;
2267 + while (seq < dumper->next_seq) {
2268 + struct printk_log *msg = log_from_idx(idx);
2269 +
2270 +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
2271 +index 84571e09c907..01fbae5b97b7 100644
2272 +--- a/kernel/stop_machine.c
2273 ++++ b/kernel/stop_machine.c
2274 +@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
2275 + */
2276 + smp_call_function_single(min(cpu1, cpu2),
2277 + &irq_cpu_stop_queue_work,
2278 +- &call_args, 0);
2279 ++ &call_args, 1);
2280 + lg_local_unlock(&stop_cpus_lock);
2281 + preempt_enable();
2282 +
2283 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2284 +index 2e58196e41c7..ba983dc41418 100644
2285 +--- a/kernel/trace/trace_events.c
2286 ++++ b/kernel/trace/trace_events.c
2287 +@@ -27,12 +27,6 @@
2288 +
2289 + DEFINE_MUTEX(event_mutex);
2290 +
2291 +-DEFINE_MUTEX(event_storage_mutex);
2292 +-EXPORT_SYMBOL_GPL(event_storage_mutex);
2293 +-
2294 +-char event_storage[EVENT_STORAGE_SIZE];
2295 +-EXPORT_SYMBOL_GPL(event_storage);
2296 +-
2297 + LIST_HEAD(ftrace_events);
2298 + static LIST_HEAD(ftrace_common_fields);
2299 +
2300 +diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
2301 +index 7c3e3e72e2b6..ee0a5098ac43 100644
2302 +--- a/kernel/trace/trace_export.c
2303 ++++ b/kernel/trace/trace_export.c
2304 +@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
2305 + #undef __array
2306 + #define __array(type, item, len) \
2307 + do { \
2308 ++ char *type_str = #type"["__stringify(len)"]"; \
2309 + BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
2310 +- mutex_lock(&event_storage_mutex); \
2311 +- snprintf(event_storage, sizeof(event_storage), \
2312 +- "%s[%d]", #type, len); \
2313 +- ret = trace_define_field(event_call, event_storage, #item, \
2314 ++ ret = trace_define_field(event_call, type_str, #item, \
2315 + offsetof(typeof(field), item), \
2316 + sizeof(field.item), \
2317 + is_signed_type(type), filter_type); \
2318 +- mutex_unlock(&event_storage_mutex); \
2319 + if (ret) \
2320 + return ret; \
2321 + } while (0);
2322 +diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
2323 +index 4dc1b990aa23..34fd931b54b5 100644
2324 +--- a/lib/fonts/Kconfig
2325 ++++ b/lib/fonts/Kconfig
2326 +@@ -9,7 +9,7 @@ if FONT_SUPPORT
2327 +
2328 + config FONTS
2329 + bool "Select compiled-in fonts"
2330 +- depends on FRAMEBUFFER_CONSOLE
2331 ++ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
2332 + help
2333 + Say Y here if you would like to use fonts other than the default
2334 + your frame buffer console usually use.
2335 +@@ -22,7 +22,7 @@ config FONTS
2336 +
2337 + config FONT_8x8
2338 + bool "VGA 8x8 font" if FONTS
2339 +- depends on FRAMEBUFFER_CONSOLE
2340 ++ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
2341 + default y if !SPARC && !FONTS
2342 + help
2343 + This is the "high resolution" font for the VGA frame buffer (the one
2344 +@@ -45,7 +45,7 @@ config FONT_8x16
2345 +
2346 + config FONT_6x11
2347 + bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
2348 +- depends on FRAMEBUFFER_CONSOLE
2349 ++ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
2350 + default y if !SPARC && !FONTS && MAC
2351 + help
2352 + Small console font with Macintosh-style high-half glyphs. Some Mac
2353 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
2354 +index 4a5df7b1cc9f..464303f61730 100644
2355 +--- a/net/ceph/messenger.c
2356 ++++ b/net/ceph/messenger.c
2357 +@@ -3126,7 +3126,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2358 + INIT_LIST_HEAD(&m->data);
2359 +
2360 + /* front */
2361 +- m->front_max = front_len;
2362 + if (front_len) {
2363 + if (front_len > PAGE_CACHE_SIZE) {
2364 + m->front.iov_base = __vmalloc(front_len, flags,
2365 +@@ -3143,7 +3142,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2366 + } else {
2367 + m->front.iov_base = NULL;
2368 + }
2369 +- m->front.iov_len = front_len;
2370 ++ m->front_alloc_len = m->front.iov_len = front_len;
2371 +
2372 + dout("ceph_msg_new %p front %d\n", m, front_len);
2373 + return m;
2374 +@@ -3301,8 +3300,8 @@ EXPORT_SYMBOL(ceph_msg_last_put);
2375 +
2376 + void ceph_msg_dump(struct ceph_msg *msg)
2377 + {
2378 +- pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
2379 +- msg->front_max, msg->data_length);
2380 ++ pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
2381 ++ msg->front_alloc_len, msg->data_length);
2382 + print_hex_dump(KERN_DEBUG, "header: ",
2383 + DUMP_PREFIX_OFFSET, 16, 1,
2384 + &msg->hdr, sizeof(msg->hdr), true);
2385 +diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
2386 +index 1fe25cd29d0e..2ac9ef35110b 100644
2387 +--- a/net/ceph/mon_client.c
2388 ++++ b/net/ceph/mon_client.c
2389 +@@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc)
2390 + /* initiatiate authentication handshake */
2391 + ret = ceph_auth_build_hello(monc->auth,
2392 + monc->m_auth->front.iov_base,
2393 +- monc->m_auth->front_max);
2394 ++ monc->m_auth->front_alloc_len);
2395 + __send_prepared_auth_request(monc, ret);
2396 + } else {
2397 + dout("open_session mon%d already open\n", monc->cur_mon);
2398 +@@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
2399 + int num;
2400 +
2401 + p = msg->front.iov_base;
2402 +- end = p + msg->front_max;
2403 ++ end = p + msg->front_alloc_len;
2404 +
2405 + num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
2406 + ceph_encode_32(&p, num);
2407 +@@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
2408 + ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
2409 + msg->front.iov_len,
2410 + monc->m_auth->front.iov_base,
2411 +- monc->m_auth->front_max);
2412 ++ monc->m_auth->front_alloc_len);
2413 + if (ret < 0) {
2414 + monc->client->auth_err = ret;
2415 + wake_up_all(&monc->client->auth_wq);
2416 +@@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc)
2417 + return 0;
2418 +
2419 + ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
2420 +- monc->m_auth->front_max);
2421 ++ monc->m_auth->front_alloc_len);
2422 + if (ret <= 0)
2423 + return ret; /* either an error, or no need to authenticate */
2424 + __send_prepared_auth_request(monc, ret);
2425 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
2426 +index 2b4b32aaa893..e6b2db68b4fa 100644
2427 +--- a/net/ceph/osd_client.c
2428 ++++ b/net/ceph/osd_client.c
2429 +@@ -1232,6 +1232,22 @@ void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
2430 + EXPORT_SYMBOL(ceph_osdc_set_request_linger);
2431 +
2432 + /*
2433 ++ * Returns whether a request should be blocked from being sent
2434 ++ * based on the current osdmap and osd_client settings.
2435 ++ *
2436 ++ * Caller should hold map_sem for read.
2437 ++ */
2438 ++static bool __req_should_be_paused(struct ceph_osd_client *osdc,
2439 ++ struct ceph_osd_request *req)
2440 ++{
2441 ++ bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
2442 ++ bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
2443 ++ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
2444 ++ return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
2445 ++ (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
2446 ++}
2447 ++
2448 ++/*
2449 + * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
2450 + * (as needed), and set the request r_osd appropriately. If there is
2451 + * no up osd, set r_osd to NULL. Move the request to the appropriate list
2452 +@@ -1248,6 +1264,7 @@ static int __map_request(struct ceph_osd_client *osdc,
2453 + int acting[CEPH_PG_MAX_SIZE];
2454 + int o = -1, num = 0;
2455 + int err;
2456 ++ bool was_paused;
2457 +
2458 + dout("map_request %p tid %lld\n", req, req->r_tid);
2459 + err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
2460 +@@ -1264,12 +1281,18 @@ static int __map_request(struct ceph_osd_client *osdc,
2461 + num = err;
2462 + }
2463 +
2464 ++ was_paused = req->r_paused;
2465 ++ req->r_paused = __req_should_be_paused(osdc, req);
2466 ++ if (was_paused && !req->r_paused)
2467 ++ force_resend = 1;
2468 ++
2469 + if ((!force_resend &&
2470 + req->r_osd && req->r_osd->o_osd == o &&
2471 + req->r_sent >= req->r_osd->o_incarnation &&
2472 + req->r_num_pg_osds == num &&
2473 + memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
2474 +- (req->r_osd == NULL && o == -1))
2475 ++ (req->r_osd == NULL && o == -1) ||
2476 ++ req->r_paused)
2477 + return 0; /* no change */
2478 +
2479 + dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
2480 +@@ -1613,14 +1636,17 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
2481 + *
2482 + * Caller should hold map_sem for read.
2483 + */
2484 +-static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
2485 ++static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
2486 ++ bool force_resend_writes)
2487 + {
2488 + struct ceph_osd_request *req, *nreq;
2489 + struct rb_node *p;
2490 + int needmap = 0;
2491 + int err;
2492 ++ bool force_resend_req;
2493 +
2494 +- dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
2495 ++ dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
2496 ++ force_resend_writes ? " (force resend writes)" : "");
2497 + mutex_lock(&osdc->request_mutex);
2498 + for (p = rb_first(&osdc->requests); p; ) {
2499 + req = rb_entry(p, struct ceph_osd_request, r_node);
2500 +@@ -1645,7 +1671,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
2501 + continue;
2502 + }
2503 +
2504 +- err = __map_request(osdc, req, force_resend);
2505 ++ force_resend_req = force_resend ||
2506 ++ (force_resend_writes &&
2507 ++ req->r_flags & CEPH_OSD_FLAG_WRITE);
2508 ++ err = __map_request(osdc, req, force_resend_req);
2509 + if (err < 0)
2510 + continue; /* error */
2511 + if (req->r_osd == NULL) {
2512 +@@ -1665,7 +1694,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
2513 + r_linger_item) {
2514 + dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
2515 +
2516 +- err = __map_request(osdc, req, force_resend);
2517 ++ err = __map_request(osdc, req,
2518 ++ force_resend || force_resend_writes);
2519 + dout("__map_request returned %d\n", err);
2520 + if (err == 0)
2521 + continue; /* no change and no osd was specified */
2522 +@@ -1707,6 +1737,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2523 + struct ceph_osdmap *newmap = NULL, *oldmap;
2524 + int err;
2525 + struct ceph_fsid fsid;
2526 ++ bool was_full;
2527 +
2528 + dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
2529 + p = msg->front.iov_base;
2530 +@@ -1720,6 +1751,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2531 +
2532 + down_write(&osdc->map_sem);
2533 +
2534 ++ was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
2535 ++
2536 + /* incremental maps */
2537 + ceph_decode_32_safe(&p, end, nr_maps, bad);
2538 + dout(" %d inc maps\n", nr_maps);
2539 +@@ -1744,7 +1777,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2540 + ceph_osdmap_destroy(osdc->osdmap);
2541 + osdc->osdmap = newmap;
2542 + }
2543 +- kick_requests(osdc, 0);
2544 ++ was_full = was_full ||
2545 ++ ceph_osdmap_flag(osdc->osdmap,
2546 ++ CEPH_OSDMAP_FULL);
2547 ++ kick_requests(osdc, 0, was_full);
2548 + } else {
2549 + dout("ignoring incremental map %u len %d\n",
2550 + epoch, maplen);
2551 +@@ -1787,7 +1823,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2552 + skipped_map = 1;
2553 + ceph_osdmap_destroy(oldmap);
2554 + }
2555 +- kick_requests(osdc, skipped_map);
2556 ++ was_full = was_full ||
2557 ++ ceph_osdmap_flag(osdc->osdmap,
2558 ++ CEPH_OSDMAP_FULL);
2559 ++ kick_requests(osdc, skipped_map, was_full);
2560 + }
2561 + p += maplen;
2562 + nr_maps--;
2563 +@@ -1804,7 +1843,9 @@ done:
2564 + * we find out when we are no longer full and stop returning
2565 + * ENOSPC.
2566 + */
2567 +- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
2568 ++ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
2569 ++ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
2570 ++ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
2571 + ceph_monc_request_next_osdmap(&osdc->client->monc);
2572 +
2573 + mutex_lock(&osdc->request_mutex);
2574 +@@ -2454,7 +2495,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2575 + struct ceph_osd_client *osdc = osd->o_osdc;
2576 + struct ceph_msg *m;
2577 + struct ceph_osd_request *req;
2578 +- int front = le32_to_cpu(hdr->front_len);
2579 ++ int front_len = le32_to_cpu(hdr->front_len);
2580 + int data_len = le32_to_cpu(hdr->data_len);
2581 + u64 tid;
2582 +
2583 +@@ -2474,12 +2515,13 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2584 + req->r_reply, req->r_reply->con);
2585 + ceph_msg_revoke_incoming(req->r_reply);
2586 +
2587 +- if (front > req->r_reply->front.iov_len) {
2588 ++ if (front_len > req->r_reply->front_alloc_len) {
2589 + pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n",
2590 +- front, (int)req->r_reply->front.iov_len,
2591 ++ front_len, req->r_reply->front_alloc_len,
2592 + (unsigned int)con->peer_name.type,
2593 + le64_to_cpu(con->peer_name.num));
2594 +- m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
2595 ++ m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
2596 ++ false);
2597 + if (!m)
2598 + goto out;
2599 + ceph_msg_put(req->r_reply);
2600 +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
2601 +index a642fd616d3a..1eebf2251b01 100644
2602 +--- a/net/sunrpc/auth_gss/auth_gss.c
2603 ++++ b/net/sunrpc/auth_gss/auth_gss.c
2604 +@@ -500,10 +500,12 @@ gss_alloc_msg(struct gss_auth *gss_auth,
2605 + default:
2606 + err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
2607 + if (err)
2608 +- goto err_free_msg;
2609 ++ goto err_put_pipe_version;
2610 + };
2611 + kref_get(&gss_auth->kref);
2612 + return gss_msg;
2613 ++err_put_pipe_version:
2614 ++ put_pipe_version(gss_auth->net);
2615 + err_free_msg:
2616 + kfree(gss_msg);
2617 + err:
2618 +diff --git a/scripts/package/builddeb b/scripts/package/builddeb
2619 +index 90e521fde35f..c1bb9be00fa0 100644
2620 +--- a/scripts/package/builddeb
2621 ++++ b/scripts/package/builddeb
2622 +@@ -41,9 +41,9 @@ create_package() {
2623 + parisc*)
2624 + debarch=hppa ;;
2625 + mips*)
2626 +- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;;
2627 ++ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
2628 + arm*)
2629 +- debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;;
2630 ++ debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
2631 + *)
2632 + echo "" >&2
2633 + echo "** ** ** WARNING ** ** **" >&2
2634 +@@ -62,7 +62,7 @@ create_package() {
2635 + fi
2636 +
2637 + # Create the package
2638 +- dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
2639 ++ dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
2640 + dpkg --build "$pdir" ..
2641 + }
2642 +
2643 +@@ -288,15 +288,14 @@ mkdir -p "$destdir"
2644 + (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
2645 + ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
2646 + rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
2647 +-arch=$(dpkg --print-architecture)
2648 +
2649 + cat <<EOF >> debian/control
2650 +
2651 + Package: $kernel_headers_packagename
2652 + Provides: linux-headers, linux-headers-2.6
2653 +-Architecture: $arch
2654 +-Description: Linux kernel headers for $KERNELRELEASE on $arch
2655 +- This package provides kernel header files for $KERNELRELEASE on $arch
2656 ++Architecture: any
2657 ++Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
2658 ++ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
2659 + .
2660 + This is useful for people who need to build external modules
2661 + EOF
2662 +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
2663 +index 9d518ac73eea..85fde7e19dd2 100644
2664 +--- a/sound/core/compress_offload.c
2665 ++++ b/sound/core/compress_offload.c
2666 +@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
2667 + kfree(data);
2668 + }
2669 + snd_card_unref(compr->card);
2670 +- return 0;
2671 ++ return ret;
2672 + }
2673 +
2674 + static int snd_compr_free(struct inode *inode, struct file *f)
2675 +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
2676 +index 5f728808eed4..8bddf3f20a5e 100644
2677 +--- a/sound/soc/codecs/max98090.c
2678 ++++ b/sound/soc/codecs/max98090.c
2679 +@@ -336,6 +336,7 @@ static bool max98090_readable_register(struct device *dev, unsigned int reg)
2680 + case M98090_REG_RECORD_TDM_SLOT:
2681 + case M98090_REG_SAMPLE_RATE:
2682 + case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
2683 ++ case M98090_REG_REVISION_ID:
2684 + return true;
2685 + default:
2686 + return false;
2687 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
2688 +index b079304bd53d..1e2e5ac52248 100644
2689 +--- a/tools/perf/perf.h
2690 ++++ b/tools/perf/perf.h
2691 +@@ -100,8 +100,8 @@
2692 +
2693 + #ifdef __aarch64__
2694 + #define mb() asm volatile("dmb ish" ::: "memory")
2695 +-#define wmb() asm volatile("dmb ishld" ::: "memory")
2696 +-#define rmb() asm volatile("dmb ishst" ::: "memory")
2697 ++#define wmb() asm volatile("dmb ishst" ::: "memory")
2698 ++#define rmb() asm volatile("dmb ishld" ::: "memory")
2699 + #define cpu_relax() asm volatile("yield" ::: "memory")
2700 + #endif
2701 +