From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.17 commit in: /
Date: Mon, 09 Jul 2018 15:01:13
Message-Id: 1531148404.6ed4528b54ca6f6a9836bb1b132e41d96885579f.alicef@gentoo
commit: 6ed4528b54ca6f6a9836bb1b132e41d96885579f
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 9 15:00:04 2018 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Jul 9 15:00:04 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6ed4528b

linux kernel 4.17.5

0000_README | 4 +
1004_linux-4.17.5.patch | 1735 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1739 insertions(+)

diff --git a/0000_README b/0000_README
index 76ef096..33f7bd8 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.17.4.patch
From: http://www.kernel.org
Desc: Linux 4.17.4

+Patch: 1004_linux-4.17.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.17.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.17.5.patch b/1004_linux-4.17.5.patch
new file mode 100644
index 0000000..feb534b
--- /dev/null
+++ b/1004_linux-4.17.5.patch
@@ -0,0 +1,1735 @@
+diff --git a/Makefile b/Makefile
+index 1d740dbe676d..e4ddbad49636 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 17
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Merciless Moray
+
+diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
+index ae7b3f107893..5185300cc11f 100644
+--- a/arch/arm/boot/dts/imx6q.dtsi
++++ b/arch/arm/boot/dts/imx6q.dtsi
+@@ -96,7 +96,7 @@
+ clocks = <&clks IMX6Q_CLK_ECSPI5>,
+ <&clks IMX6Q_CLK_ECSPI5>;
+ clock-names = "ipg", "per";
+- dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
++ dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+index 0cfd701809de..a1b31013ab6e 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+@@ -189,3 +189,10 @@
+ &usb0 {
+ status = "okay";
+ };
++
++&usb2_phy0 {
++ /*
++ * HDMI_5V is also used as supply for the USB VBUS.
++ */
++ phy-supply = <&hdmi_5v>;
++};
+diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
+index 263c142a6a6c..f65e9e1cea4c 100644
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
+
+ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+ {
++ if (!pgtable_l5_enabled)
++ return;
++
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ free_page((unsigned long)p4d);
+ }
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 7ca41bf023c9..8df9abfa947b 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -45,6 +45,8 @@
+ #include <linux/uaccess.h>
+ #include <linux/io-64-nonatomic-lo-hi.h>
+
++#include "acpica/accommon.h"
++#include "acpica/acnamesp.h"
+ #include "internal.h"
+
+ #define _COMPONENT ACPI_OS_SERVICES
+@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
+ }
+ EXPORT_SYMBOL(acpi_check_region);
+
++static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
++ void *_res, void **return_value)
++{
++ struct acpi_mem_space_context **mem_ctx;
++ union acpi_operand_object *handler_obj;
++ union acpi_operand_object *region_obj2;
++ union acpi_operand_object *region_obj;
++ struct resource *res = _res;
++ acpi_status status;
++
++ region_obj = acpi_ns_get_attached_object(handle);
++ if (!region_obj)
++ return AE_OK;
++
++ handler_obj = region_obj->region.handler;
++ if (!handler_obj)
++ return AE_OK;
++
++ if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
++ return AE_OK;
++
++ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
++ return AE_OK;
++
++ region_obj2 = acpi_ns_get_secondary_object(region_obj);
++ if (!region_obj2)
++ return AE_OK;
++
++ mem_ctx = (void *)&region_obj2->extra.region_context;
++
++ if (!(mem_ctx[0]->address >= res->start &&
++ mem_ctx[0]->address < res->end))
++ return AE_OK;
++
++ status = handler_obj->address_space.setup(region_obj,
++ ACPI_REGION_DEACTIVATE,
++ NULL, (void **)mem_ctx);
++ if (ACPI_SUCCESS(status))
++ region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
++
++ return status;
++}
++
++/**
++ * acpi_release_memory - Release any mappings done to a memory region
++ * @handle: Handle to namespace node
++ * @res: Memory resource
++ * @level: A level that terminates the search
++ *
++ * Walks through @handle and unmaps all SystemMemory Operation Regions that
++ * overlap with @res and that have already been activated (mapped).
++ *
++ * This is a helper that allows drivers to place special requirements on memory
++ * region that may overlap with operation regions, primarily allowing them to
++ * safely map the region as non-cached memory.
++ *
++ * The unmapped Operation Regions will be automatically remapped next time they
++ * are called, so the drivers do not need to do anything else.
++ */
++acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
++ u32 level)
++{
++ if (!(res->flags & IORESOURCE_MEM))
++ return AE_TYPE;
++
++ return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
++ acpi_deactivate_mem_region, NULL, res, NULL);
++}
++EXPORT_SYMBOL_GPL(acpi_release_memory);
++
+ /*
+ * Let drivers know whether the resource checks are effective
+ */
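
The kernel-doc above explains what the new acpi_release_memory() helper is for; a minimal caller-side sketch may make it concrete. The function name, device, and BAR choice below are hypothetical and not part of this patch; only acpi_release_memory(), ACPI_HANDLE(), and ACPI_UINT32_MAX are real kernel symbols.

/*
 * Hypothetical driver snippet: ask ACPI to unmap any SystemMemory
 * OpRegions overlapping a BAR before the driver maps it non-cached.
 */
static int example_claim_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];
	acpi_status status;

	/* Walk the whole namespace depth below the device's handle. */
	status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res,
				     ACPI_UINT32_MAX);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* Now safe to ioremap_wc(res->start, resource_size(res)). */
	return 0;
}
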
181 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
182 +index 34af664b9f93..6fcc537d7779 100644
183 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
184 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
185 +@@ -2080,10 +2080,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
186 + switch (asic_type) {
187 + #if defined(CONFIG_DRM_AMD_DC)
188 + case CHIP_BONAIRE:
189 +- case CHIP_HAWAII:
190 + case CHIP_KAVERI:
191 + case CHIP_KABINI:
192 + case CHIP_MULLINS:
193 ++ /*
194 ++ * We have systems in the wild with these ASICs that require
195 ++ * LVDS and VGA support which is not supported with DC.
196 ++ *
197 ++ * Fallback to the non-DC driver here by default so as not to
198 ++ * cause regressions.
199 ++ */
200 ++ return amdgpu_dc > 0;
201 ++ case CHIP_HAWAII:
202 + case CHIP_CARRIZO:
203 + case CHIP_STONEY:
204 + case CHIP_POLARIS11:
205 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
206 +index 6d08cde8443c..b52f26e7db98 100644
207 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
208 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
209 +@@ -749,8 +749,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
210 + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
211 + if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
212 + adev->vram_pin_size += amdgpu_bo_size(bo);
213 +- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
214 +- adev->invisible_pin_size += amdgpu_bo_size(bo);
215 ++ adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
216 + } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
217 + adev->gart_pin_size += amdgpu_bo_size(bo);
218 + }
219 +@@ -777,25 +776,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
220 + bo->pin_count--;
221 + if (bo->pin_count)
222 + return 0;
223 +- for (i = 0; i < bo->placement.num_placement; i++) {
224 +- bo->placements[i].lpfn = 0;
225 +- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
226 +- }
227 +- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
228 +- if (unlikely(r)) {
229 +- dev_err(adev->dev, "%p validate failed for unpin\n", bo);
230 +- goto error;
231 +- }
232 +
233 + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
234 + adev->vram_pin_size -= amdgpu_bo_size(bo);
235 +- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
236 +- adev->invisible_pin_size -= amdgpu_bo_size(bo);
237 ++ adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
238 + } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
239 + adev->gart_pin_size -= amdgpu_bo_size(bo);
240 + }
241 +
242 +-error:
243 ++ for (i = 0; i < bo->placement.num_placement; i++) {
244 ++ bo->placements[i].lpfn = 0;
245 ++ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
246 ++ }
247 ++ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
248 ++ if (unlikely(r))
249 ++ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
250 ++
251 + return r;
252 + }
253 +
254 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
255 +index 6ea7de863041..379e9ff173f1 100644
256 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
257 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
258 +@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
259 + uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
260 + int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
261 +
262 ++u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
263 + uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
264 + uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
265 +
266 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
267 +index 58e495330b38..87e89cc12397 100644
268 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
269 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
270 +@@ -84,6 +84,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
271 + }
272 +
273 + hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
274 ++ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
275 + family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
276 + version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
277 + version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
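
For reference, the hunk above caches the raw 32-bit firmware word before the existing field extraction, which unpacks it with masks and shifts. A standalone illustration of that decode, using a made-up version word rather than any real firmware value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ucode_version = 0x01004500;	/* illustrative value only */
	uint32_t family_id = ucode_version & 0xff;		/* bits 7:0  -> 0x00 */
	uint32_t version_major = (ucode_version >> 24) & 0xff;	/* bits 31:24 -> 0x01 */
	uint32_t version_minor = (ucode_version >> 8) & 0xff;	/* bits 15:8 -> 0x45 */

	printf("VCN firmware %u.%u, family %u\n",
	       version_major, version_minor, family_id);
	return 0;
}
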
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index da55a78d7380..11aa36aa304b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1442,7 +1442,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ uint64_t count;
+
+ max_entries = min(max_entries, 16ull * 1024ull);
+- for (count = 1; count < max_entries; ++count) {
++ for (count = 1;
++ count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ ++count) {
+ uint64_t idx = pfn + count;
+
+ if (pages_addr[idx] !=
+@@ -1455,7 +1457,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ dma_addr = pages_addr;
+ } else {
+ addr = pages_addr[pfn];
+- max_entries = count;
++ max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ }
+
+ } else if (flags & AMDGPU_PTE_VALID) {
+@@ -1470,7 +1472,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- pfn += last - start + 1;
++ pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+@@ -2112,7 +2114,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ before->last = saddr - 1;
+ before->offset = tmp->offset;
+ before->flags = tmp->flags;
+- list_add(&before->list, &tmp->list);
++ before->bo_va = tmp->bo_va;
++ list_add(&before->list, &tmp->bo_va->invalids);
+ }
+
+ /* Remember mapping split at the end */
+@@ -2122,7 +2125,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ after->offset = tmp->offset;
+ after->offset += after->start - tmp->start;
+ after->flags = tmp->flags;
+- list_add(&after->list, &tmp->list);
++ after->bo_va = tmp->bo_va;
++ list_add(&after->list, &tmp->bo_va->invalids);
+ }
+
+ list_del(&tmp->list);
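
The PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE factor introduced above converts between CPU pages (entries of pages_addr[]) and GPU pages (PTEs). Worked arithmetic, assuming the common values of AMDGPU_GPU_PAGE_SIZE = 4096 and a 64K-page kernel (PAGE_SIZE = 65536):

	factor = 65536 / 4096 = 16 GPU pages per CPU page
	max_entries capped at 16384 GPU PTEs
	  -> scan at most 16384 / 16 = 1024 pages_addr[] slots
	a run of `count` contiguous CPU pages
	  -> max_entries = count * 16 PTEs
	pfn then advances by (PTEs written) / 16 CPU pages

With 4K CPU pages the factor is 1 and the loop degenerates to the old behaviour, which is why the bug only bit on large-page configurations.
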
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 9aca653bec07..b6333f92ba45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ adev->gmc.visible_vram_size : end) - start;
+ }
+
++/**
++ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ *
++ * @bo: &amdgpu_bo buffer object (must be in VRAM)
++ *
++ * Returns:
++ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ */
++u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ struct ttm_mem_reg *mem = &bo->tbo.mem;
++ struct drm_mm_node *nodes = mem->mm_node;
++ unsigned pages = mem->num_pages;
++ u64 usage = 0;
++
++ if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
++ return 0;
++
++ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
++ return amdgpu_bo_size(bo);
++
++ while (nodes && pages) {
++ usage += nodes->size << PAGE_SHIFT;
++ usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
++ pages -= nodes->size;
++ ++nodes;
++ }
++
++ return usage;
++}
++
+ /**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
+- nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
++ nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
++ GFP_KERNEL | __GFP_ZERO);
+ if (!nodes)
+ return -ENOMEM;
+
+@@ -190,7 +223,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+- kfree(nodes);
++ kvfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+ }
+
+@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ atomic64_sub(usage, &mgr->usage);
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
+- kfree(mem->mm_node);
++ kvfree(mem->mm_node);
+ mem->mm_node = NULL;
+ }
+
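
The kcalloc() -> kvmalloc_array() swap above matters because num_nodes can be very large for big buffer objects: kvmalloc falls back to vmalloc when a physically contiguous allocation would fail, and kvfree() releases either kind. The general pairing, as a sketch detached from this driver:

/* Possibly-huge zeroed array: try kmalloc, fall back to vmalloc. */
nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
		       GFP_KERNEL | __GFP_ZERO);
if (!nodes)
	return -ENOMEM;
/* ... use nodes[0 .. num_nodes - 1] ... */
kvfree(nodes);	/* correct for both kmalloc- and vmalloc-backed memory */
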
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 428d1928e44e..ac9617269a2f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vce_v3_0_override_vce_clock_gating(adev, true);
+- if (!(adev->flags & AMD_IS_APU))
+- amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
++
++ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].ready = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 126f1276d347..9ae350dad235 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -728,33 +728,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
+ return r;
+
+ tmp = RREG32_SMC(cntl_reg);
+- tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+- CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
++
++ if (adev->flags & AMD_IS_APU)
++ tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
++ else
++ tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
++ CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+ tmp |= dividers.post_divider;
+ WREG32_SMC(cntl_reg, tmp);
+
+ for (i = 0; i < 100; i++) {
+- if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+- break;
++ tmp = RREG32_SMC(status_reg);
++ if (adev->flags & AMD_IS_APU) {
++ if (tmp & 0x10000)
++ break;
++ } else {
++ if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
++ break;
++ }
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+-
+ return 0;
+ }
+
++#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
++#define ixGNB_CLK1_STATUS 0xD822010C
++#define ixGNB_CLK2_DFS_CNTL 0xD8220110
++#define ixGNB_CLK2_STATUS 0xD822012C
++#define ixGNB_CLK3_DFS_CNTL 0xD8220130
++#define ixGNB_CLK3_STATUS 0xD822014C
++
+ static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+ {
+ int r;
+
+- r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+- if (r)
+- return r;
++ if (adev->flags & AMD_IS_APU) {
++ r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
++ if (r)
++ return r;
+
+- r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+- if (r)
+- return r;
++ r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
++ if (r)
++ return r;
++ } else {
++ r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
++ if (r)
++ return r;
++
++ r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
++ if (r)
++ return r;
++ }
+
+ return 0;
+ }
+@@ -764,6 +790,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+ int r, i;
+ struct atom_clock_dividers dividers;
+ u32 tmp;
++ u32 reg_ctrl;
++ u32 reg_status;
++ u32 status_mask;
++ u32 reg_mask;
++
++ if (adev->flags & AMD_IS_APU) {
++ reg_ctrl = ixGNB_CLK3_DFS_CNTL;
++ reg_status = ixGNB_CLK3_STATUS;
++ status_mask = 0x00010000;
++ reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
++ } else {
++ reg_ctrl = ixCG_ECLK_CNTL;
++ reg_status = ixCG_ECLK_STATUS;
++ status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
++ reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
++ }
+
+ r = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+@@ -772,24 +814,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+ return r;
+
+ for (i = 0; i < 100; i++) {
+- if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
++ if (RREG32_SMC(reg_status) & status_mask)
+ break;
+ mdelay(10);
+ }
++
+ if (i == 100)
+ return -ETIMEDOUT;
+
+- tmp = RREG32_SMC(ixCG_ECLK_CNTL);
+- tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
+- CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
++ tmp = RREG32_SMC(reg_ctrl);
++ tmp &= ~reg_mask;
+ tmp |= dividers.post_divider;
+- WREG32_SMC(ixCG_ECLK_CNTL, tmp);
++ WREG32_SMC(reg_ctrl, tmp);
+
+ for (i = 0; i < 100; i++) {
+- if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
++ if (RREG32_SMC(reg_status) & status_mask)
+ break;
+ mdelay(10);
+ }
++
+ if (i == 100)
+ return -ETIMEDOUT;
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 27579443cdc5..79afffa00772 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -46,6 +46,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/version.h>
+ #include <linux/types.h>
++#include <linux/pm_runtime.h>
+
+ #include <drm/drmP.h>
+ #include <drm/drm_atomic.h>
+@@ -927,6 +928,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+ drm_mode_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ aconnector->dc_sink = NULL;
++ aconnector->edid = NULL;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+@@ -3965,10 +3967,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ surface_updates->flip_addr = &addr;
+
+-
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+@@ -3981,9 +3984,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+-
+-
+- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+
+ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+@@ -4149,6 +4149,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
++ int crtc_disable_count = 0;
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+@@ -4211,6 +4212,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
++ pm_runtime_get_noresume(dev->dev);
++
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+@@ -4348,6 +4351,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool modeset_needed;
+
++ if (old_crtc_state->active && !new_crtc_state->active)
++ crtc_disable_count++;
++
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ modeset_needed = modeset_required(
+@@ -4396,6 +4402,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
++
++ /* Finally, drop a runtime PM reference for each newly disabled CRTC,
++ * so we can put the GPU into runtime suspend if we're not driving any
++ * displays anymore
++ */
++ for (i = 0; i < crtc_disable_count; i++)
++ pm_runtime_put_autosuspend(dev->dev);
++ pm_runtime_mark_last_busy(dev->dev);
+ }
+
+
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+index e18800ed7cd1..7b8191eae68a 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+@@ -875,7 +875,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+ drm_object_attach_property(&plane->base.base,
+ props->alpha, 255);
+
+- if (desc->layout.xstride && desc->layout.pstride) {
++ if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
+ int ret;
+
+ ret = drm_plane_create_rotation_property(&plane->base,
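
The atmel-hlcdc fix works because xstride and pstride are arrays embedded in the descriptor: as expressions they decay to non-NULL pointers, so the old test was unconditionally true and the rotation property was attached even for layers with no stride registers. Indexing element [0] tests the actual values. A tiny self-contained demonstration (the struct shape is illustrative, not the driver's):

#include <stdio.h>

struct layout { int xstride[4]; int pstride[4]; };

int main(void)
{
	struct layout l = { { 0 }, { 0 } };	/* no stride registers defined */

	if (l.xstride)				/* always true: array decays to a pointer */
		puts("old check fires even with no registers");
	if (l.xstride[0] && l.pstride[0])	/* false here: first offsets are 0 */
		puts("new check fires");
	return 0;
}
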
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 633c18785c1e..b25cc5aa8fbe 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1862,9 +1862,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
++ *
++ * Toggle the enable bits to make sure we get an
++ * edge in the ISR pipe event bit if we don't clear
++ * all the enabled status bits. Otherwise the edge
++ * triggered IIR on i965/g4x wouldn't notice that
++ * an interrupt is still pending.
+ */
+- if (pipe_stats[pipe])
+- I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
++ if (pipe_stats[pipe]) {
++ I915_WRITE(reg, pipe_stats[pipe]);
++ I915_WRITE(reg, enable_mask);
++ }
+ }
+ spin_unlock(&dev_priv->irq_lock);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 8a69a9275e28..29dc0a57e466 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2565,12 +2565,17 @@ enum i915_power_well_id {
+ #define _3D_CHICKEN _MMIO(0x2084)
+ #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+ #define _3D_CHICKEN2 _MMIO(0x208c)
++
++#define FF_SLICE_CHICKEN _MMIO(0x2088)
++#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
++
+ /* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+ #define _3D_CHICKEN3 _MMIO(0x2090)
++#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
+ #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+ #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
+ #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index c0a8805b277f..d26827c44fb0 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
+ int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_clock;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+ {
++ struct drm_display_mode *adjusted_mode =
++ &pipe_config->base.adjusted_mode;
++
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ return true;
+ }
+
+@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+ {
++ struct drm_display_mode *adjusted_mode =
++ &pipe_config->base.adjusted_mode;
++
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ pipe_config->has_pch_encoder = true;
+
+ return true;
+@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
+ struct drm_connector_state *conn_state)
+ {
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++ struct drm_display_mode *adjusted_mode =
++ &pipe_config->base.adjusted_mode;
++
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
+
+ pipe_config->has_pch_encoder = true;
+
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index 8c2d778560f0..1d14ebc7480d 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -2205,7 +2205,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+
+ intel_ddi_init_dp_buf_reg(encoder);
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++ if (!is_mst)
++ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+ intel_dp_stop_link_train(intel_dp);
+@@ -2303,12 +2304,15 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_dp *intel_dp = &dig_port->dp;
++ bool is_mst = intel_crtc_has_type(old_crtc_state,
++ INTEL_OUTPUT_DP_MST);
+
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
++ if (!is_mst)
++ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+
+ intel_disable_ddi_buf(encoder);
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 56004ffbd8bb..84011e08adc3 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -14211,12 +14211,22 @@ static enum drm_mode_status
+ intel_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+ {
++ /*
++ * Can't reject DBLSCAN here because Xorg ddxen can add piles
++ * of DBLSCAN modes to the output's mode list when they detect
++ * the scaling mode property on the connector. And they don't
++ * ask the kernel to validate those modes in any way until
++ * modeset time at which point the client gets a protocol error.
++ * So in order to not upset those clients we silently ignore the
++ * DBLSCAN flag on such connectors. For other connectors we will
++ * reject modes with the DBLSCAN flag in encoder->compute_config().
++ * And we always reject DBLSCAN modes in connector->mode_valid()
++ * as we never want such modes on the connector's mode list.
++ */
++
+ if (mode->vscan > 1)
+ return MODE_NO_VSCAN;
+
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+- return MODE_NO_DBLESCAN;
+-
+ if (mode->flags & DRM_MODE_FLAG_HSKEW)
+ return MODE_H_ILLEGAL;
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index b7b4cfdeb974..cd6e87756509 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -423,6 +423,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+ int max_dotclk;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
+
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
+@@ -1760,7 +1763,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
+ conn_state->scaling_mode);
+ }
+
+- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
++ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+@@ -2759,16 +2765,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
+ static void g4x_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+-{
+- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+-
+- /* disable the port before the pipe on g4x */
+- intel_dp_link_down(encoder, old_crtc_state);
+-}
+-
+-static void ilk_disable_dp(struct intel_encoder *encoder,
+- const struct intel_crtc_state *old_crtc_state,
+- const struct drm_connector_state *old_conn_state)
+ {
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ }
+@@ -2784,13 +2780,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ }
+
+-static void ilk_post_disable_dp(struct intel_encoder *encoder,
++static void g4x_post_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+ {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = encoder->port;
+
++ /*
++ * Bspec does not list a specific disable sequence for g4x DP.
++ * Follow the ilk+ sequence (disable pipe before the port) for
++ * g4x DP as it does not suffer from underruns like the normal
++ * g4x modeset sequence (disable pipe after the port).
++ */
+ intel_dp_link_down(encoder, old_crtc_state);
+
+ /* Only ilk+ has port A */
+@@ -6327,7 +6329,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
++ if (!HAS_GMCH_DISPLAY(dev_priv))
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+@@ -6426,15 +6428,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
+ intel_encoder->post_disable = vlv_post_disable_dp;
+- } else if (INTEL_GEN(dev_priv) >= 5) {
+- intel_encoder->pre_enable = g4x_pre_enable_dp;
+- intel_encoder->enable = g4x_enable_dp;
+- intel_encoder->disable = ilk_disable_dp;
+- intel_encoder->post_disable = ilk_post_disable_dp;
+ } else {
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->disable = g4x_disable_dp;
++ intel_encoder->post_disable = g4x_post_disable_dp;
+ }
+
+ intel_dig_port->dp.output_reg = output_reg;
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index c3de0918ee13..5890500a3a8b 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_LIMITED_M_N);
+
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ pipe_config->has_pch_encoder = false;
+ bpp = 24;
+ if (intel_dp->compliance.test_data.bpc) {
+@@ -180,9 +183,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+ intel_dp->active_mst_links--;
+
+ intel_mst->connector = NULL;
+- if (intel_dp->active_mst_links == 0)
++ if (intel_dp->active_mst_links == 0) {
++ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ intel_dig_port->base.post_disable(&intel_dig_port->base,
+ old_crtc_state, NULL);
++ }
+
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ }
+@@ -223,7 +228,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+
++ if (intel_dp->active_mst_links == 0)
++ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
++
+ if (intel_dp->active_mst_links == 0)
+ intel_dig_port->base.pre_enable(&intel_dig_port->base,
+ pipe_config, NULL);
+@@ -360,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
+ if (!intel_dp)
+ return MODE_ERROR;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
+ max_lanes = intel_dp_max_lane_count(intel_dp);
+
+diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
+index 51a1d6868b1e..384b37e2da70 100644
+--- a/drivers/gpu/drm/i915/intel_dsi.c
++++ b/drivers/gpu/drm/i915/intel_dsi.c
+@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+ conn_state->scaling_mode);
+ }
+
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ /* DSI uses short packets for sync events, so clear mode flags for DSI */
+ adjusted_mode->flags = 0;
+
+@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
+
+ DRM_DEBUG_KMS("\n");
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index eb0c559b2715..6604806f89d5 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int target_clock = mode->clock;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ /* XXX: Validate clock range */
+
+ if (fixed_mode) {
+@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+ if (fixed_mode)
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 1baef4ac7ecb..383f9df4145e 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
+ bool force_dvi =
+ READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ clock = mode->clock;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ int desired_bpp;
+ bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
+
+ if (pipe_config->has_hdmi_sink)
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 8704f7f8d072..df5ba1de8aea 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1386,11 +1386,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+
++ *batch++ = MI_LOAD_REGISTER_IMM(3);
++
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+- *batch++ = MI_LOAD_REGISTER_IMM(1);
+ *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
+ *batch++ = _MASKED_BIT_DISABLE(
+ GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
++
++ /* BSpec: 11391 */
++ *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
++ *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
++
++ /* BSpec: 11299 */
++ *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
++ *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
++
+ *batch++ = MI_NOOP;
+
+ /* WaClearSlmSpaceAtContextSwitch:kbl */
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index e125d16a1aa7..34dd1e5233ac 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ pipe_config->has_pch_encoder = true;
+
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 96e213ec202d..d253e3a06e30 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+ adjusted_mode);
+ }
+
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
+ /*
+ * Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ if (intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 885fc3809f7f..b55b5c157e38 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
+ struct drm_connector_state *conn_state)
+ {
+ const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
++ struct drm_display_mode *adjusted_mode =
++ &pipe_config->base.adjusted_mode;
+
+ if (!tv_mode)
+ return false;
+
+- pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return false;
++
++ adjusted_mode->crtc_clock = tv_mode->clock;
+ DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ pipe_config->pipe_bpp = 8*3;
+
+ /* TV has it's own notion of sync and other mode flags, so clear them. */
+- pipe_config->base.adjusted_mode.flags = 0;
++ adjusted_mode->flags = 0;
+
+ /*
+ * FIXME: We don't check whether the input mode is actually what we want
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index ecb35ed0eac8..61e51516fec5 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -630,7 +630,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
+ struct qxl_cursor_cmd *cmd;
+ struct qxl_cursor *cursor;
+ struct drm_gem_object *obj;
+- struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
++ struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
+ int ret;
+ void *user_ptr;
+ int size = 64*64*4;
+@@ -684,7 +684,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
+ cursor_bo, 0);
+ cmd->type = QXL_CURSOR_SET;
+
+- qxl_bo_unref(&qcrtc->cursor_bo);
++ old_cursor_bo = qcrtc->cursor_bo;
+ qcrtc->cursor_bo = cursor_bo;
+ cursor_bo = NULL;
+ } else {
+@@ -704,6 +704,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_fence_buffer_objects(release);
+
++ if (old_cursor_bo)
++ qxl_bo_unref(&old_cursor_bo);
++
+ qxl_bo_unref(&cursor_bo);
+
+ return;
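
The qxl change above is a swap-then-release pattern for avoiding a use-after-free: the old cursor BO must stay referenced until the QXL_CURSOR_SET command that may still read it has been pushed and fenced, and only then be unreferenced. The shape of the idiom, with illustrative names rather than the driver's exact calls:

/* Sketch: never drop the old object before the device has the new one. */
old = crtc->cursor_bo;		/* take over the existing reference */
crtc->cursor_bo = new_bo;	/* publish the replacement */
push_and_fence(release);	/* device stops using `old` after this */
if (old)
	bo_unref(&old);		/* now the last reference can safely go */
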
+diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
+index cca4b3c9aeb5..1963cc1b1cc5 100644
+--- a/drivers/gpu/drm/sti/Kconfig
++++ b/drivers/gpu/drm/sti/Kconfig
+@@ -1,6 +1,6 @@
+ config DRM_STI
+ tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
+- depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
++ depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
+ select RESET_CONTROLLER
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+@@ -8,6 +8,5 @@ config DRM_STI
+ select DRM_PANEL
+ select FW_LOADER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+- select OF
+ help
+ Choose this option to enable DRM on STM stiH4xx chipset
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index c3d92d537240..8045871335b5 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -17,7 +17,6 @@
+ #include <drm/drm_encoder.h>
+ #include <drm/drm_modes.h>
+ #include <drm/drm_of.h>
+-#include <drm/drm_panel.h>
+
+ #include <uapi/drm/drm_mode.h>
+
+@@ -350,9 +349,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
+ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
+ {
+- struct drm_panel *panel = tcon->panel;
+- struct drm_connector *connector = panel->connector;
+- struct drm_display_info display_info = connector->display_info;
+ unsigned int bp, hsync, vsync;
+ u8 clk_delay;
+ u32 val = 0;
+@@ -410,27 +406,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+- /*
+- * On A20 and similar SoCs, the only way to achieve Positive Edge
+- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
+- * By default TCON works in Negative Edge(Falling Edge),
+- * this is why phase is set to 0 in that case.
+- * Unfortunately there's no way to logically invert dclk through
+- * IO_POL register.
+- * The only acceptable way to work, triple checked with scope,
+- * is using clock phase set to 0° for Negative Edge and set to 240°
+- * for Positive Edge.
+- * On A33 and similar SoCs there would be a 90° phase option,
+- * but it divides also dclk by 2.
+- * Following code is a way to avoid quirks all around TCON
+- * and DOTCLOCK drivers.
+- */
+- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+- clk_set_phase(tcon->dclk, 240);
+-
+- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+- clk_set_phase(tcon->dclk, 0);
+-
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+ val);
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
+index 7a2da7f9d4dc..5485b35fe553 100644
+--- a/drivers/iio/accel/mma8452.c
++++ b/drivers/iio/accel/mma8452.c
+@@ -1034,7 +1034,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
+ if (src < 0)
+ return IRQ_NONE;
+
+- if (!(src & data->chip_info->enabled_events))
++ if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
+ return IRQ_NONE;
+
+ if (src & MMA8452_INT_DRDY) {
+diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
+index 772dad65396e..f32c12439eee 100644
+--- a/drivers/staging/android/ion/ion_heap.c
++++ b/drivers/staging/android/ion/ion_heap.c
+@@ -29,7 +29,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct page **tmp = pages;
+
+ if (!pages)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
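
Returning ERR_PTR(-ENOMEM) instead of NULL matters because ion's callers test the result with IS_ERR(), and a bare NULL sails through that test only to be dereferenced later. A caller-side sketch of the convention this fix restores:

void *vaddr = ion_heap_map_kernel(heap, buffer);

if (IS_ERR(vaddr))		/* true for ERR_PTR(-ENOMEM), false for NULL */
	return PTR_ERR(vaddr);	/* propagates -ENOMEM to the caller */
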
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index cbe98bc2b998..431742201709 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -124,6 +124,8 @@ struct n_tty_data {
+ struct mutex output_lock;
+ };
+
++#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
++
+ static inline size_t read_cnt(struct n_tty_data *ldata)
+ {
+ return ldata->read_head - ldata->read_tail;
+@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
+
+ static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
+ {
++ smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
+ return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+ }
+
+@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+ static void reset_buffer_flags(struct n_tty_data *ldata)
+ {
+ ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
+- ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+ ldata->commit_head = 0;
+- ldata->echo_mark = 0;
+ ldata->line_start = 0;
+
+ ldata->erasing = 0;
+@@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty)
+ old_space = space = tty_write_room(tty);
+
+ tail = ldata->echo_tail;
+- while (ldata->echo_commit != tail) {
++ while (MASK(ldata->echo_commit) != MASK(tail)) {
+ c = echo_buf(ldata, tail);
+ if (c == ECHO_OP_START) {
+ unsigned char op;
+ int no_space_left = 0;
+
++ /*
++ * Since add_echo_byte() is called without holding
++ * output_lock, we might see only portion of multi-byte
++ * operation.
++ */
++ if (MASK(ldata->echo_commit) == MASK(tail + 1))
++ goto not_yet_stored;
+ /*
+ * If the buffer byte is the start of a multi-byte
+ * operation, get the next byte, which is either the
+@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
+ unsigned int num_chars, num_bs;
+
+ case ECHO_OP_ERASE_TAB:
++ if (MASK(ldata->echo_commit) == MASK(tail + 2))
++ goto not_yet_stored;
+ num_chars = echo_buf(ldata, tail + 2);
+
+ /*
+@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
+ /* If the echo buffer is nearly full (so that the possibility exists
+ * of echo overrun before the next commit), then discard enough
+ * data at the tail to prevent a subsequent overrun */
+- while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
++ while (ldata->echo_commit > tail &&
++ ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+ if (echo_buf(ldata, tail) == ECHO_OP_START) {
+ if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
+ tail += 3;
+@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
+ tail++;
+ }
+
++ not_yet_stored:
+ ldata->echo_tail = tail;
+ return old_space - space;
+ }
+@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
+ size_t nr, old, echoed;
+ size_t head;
+
++ mutex_lock(&ldata->output_lock);
+ head = ldata->echo_head;
+ ldata->echo_mark = head;
+ old = ldata->echo_commit - ldata->echo_tail;
+@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
+ * is over the threshold (and try again each time another
+ * block is accumulated) */
+ nr = head - ldata->echo_tail;
+- if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
++ if (nr < ECHO_COMMIT_WATERMARK ||
++ (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
++ mutex_unlock(&ldata->output_lock);
+ return;
++ }
+
+- mutex_lock(&ldata->output_lock);
+ ldata->echo_commit = head;
+ echoed = __process_echoes(tty);
+ mutex_unlock(&ldata->output_lock);
+@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
+
+ static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
+ {
+- *echo_buf_addr(ldata, ldata->echo_head++) = c;
++ *echo_buf_addr(ldata, ldata->echo_head) = c;
++ smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
++ ldata->echo_head++;
+ }
+
+ /**
+@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
+ }
+
+ seen_alnums = 0;
+- while (ldata->read_head != ldata->canon_head) {
++ while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
+ head = ldata->read_head;
+
+ /* erase a single possibly multibyte character */
+ do {
+ head--;
+ c = read_buf(ldata, head);
+- } while (is_continuation(c, tty) && head != ldata->canon_head);
++ } while (is_continuation(c, tty) &&
++ MASK(head) != MASK(ldata->canon_head));
+
+ /* do not partially erase */
+ if (is_continuation(c, tty))
+@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
+ * This info is used to go back the correct
+ * number of columns.
+ */
+- while (tail != ldata->canon_head) {
++ while (MASK(tail) != MASK(ldata->canon_head)) {
+ tail--;
+ c = read_buf(ldata, tail);
+ if (c == '\t') {
+@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+ finish_erasing(ldata);
+ echo_char(c, tty);
+ echo_char_raw('\n', ldata);
+- while (tail != ldata->read_head) {
++ while (MASK(tail) != MASK(ldata->read_head)) {
+ echo_char(read_buf(ldata, tail), tty);
+ tail++;
+ }
+@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
+ struct n_tty_data *ldata;
+
+ /* Currently a malloc failure here can panic */
+- ldata = vmalloc(sizeof(*ldata));
++ ldata = vzalloc(sizeof(*ldata));
+ if (!ldata)
+- goto err;
++ return -ENOMEM;
+
+ ldata->overrun_time = jiffies;
+ mutex_init(&ldata->atomic_read_lock);
+ mutex_init(&ldata->output_lock);
+
+ tty->disc_data = ldata;
+- reset_buffer_flags(tty->disc_data);
+- ldata->column = 0;
+- ldata->canon_column = 0;
+- ldata->num_overrun = 0;
+- ldata->no_room = 0;
+- ldata->lnext = 0;
+ tty->closing = 0;
+ /* indicate buffer work may resume */
+ clear_bit(TTY_LDISC_HALTED, &tty->flags);
+ n_tty_set_termios(tty, NULL);
+ tty_unthrottle(tty);
+-
+ return 0;
+-err:
+- return -ENOMEM;
+ }
+
+ static inline int input_available_p(struct tty_struct *tty, int poll)
+@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
+ tail = ldata->read_tail;
+ nr = head - tail;
+ /* Skip EOF-chars.. */
+- while (head != tail) {
++ while (MASK(head) != MASK(tail)) {
+ if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
+ read_buf(ldata, tail) == __DISABLED_CHAR)
+ nr--;
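
Two ideas run through the n_tty hunks above. First, the head/tail counters are free-running size_t values, so equality comparisons now go through MASK() to compare positions within the power-of-two buffer rather than raw counter values. Second, add_echo_byte() stores the byte, issues smp_wmb(), and only then advances echo_head, pairing with the smp_rmb() in echo_buf() so a concurrent reader never observes the new index before the new byte. A userspace model of the masked ring-buffer indexing (single-threaded, so the barriers are omitted here):

#include <stddef.h>
#include <stdio.h>

#define BUF_SIZE 4096			/* power of two, like N_TTY_BUF_SIZE */
#define MASK(x) ((x) & (BUF_SIZE - 1))

int main(void)
{
	unsigned char buf[BUF_SIZE];
	size_t head = 0, tail = 0;	/* free-running; masked only on use */

	buf[MASK(head++)] = 'a';	/* produce two bytes */
	buf[MASK(head++)] = 'b';

	while (MASK(tail) != MASK(head))	/* consume until caught up */
		putchar(buf[MASK(tail++)]);
	putchar('\n');
	return 0;
}
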
+diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
+index df93b727e984..9e59f4788589 100644
+--- a/drivers/tty/serdev/core.c
++++ b/drivers/tty/serdev/core.c
+@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
+ static void __exit serdev_exit(void)
+ {
+ bus_unregister(&serdev_bus_type);
++ ida_destroy(&ctrl_ida);
+ }
+ module_exit(serdev_exit);
+
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 3296a05cda2d..f80a300b5d68 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
+ /* multi-io cards handled by parport_serial */
+ { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
+ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+- { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
+ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+- { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
+
+ /* Moxa Smartio MUE boards handled by 8250_moxa */
+ { PCI_VDEVICE(MOXA, 0x1024), },
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index f97251f39c26..ec17c9fd6470 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
+ if (!*vc->vc_uni_pagedir_loc)
+ con_set_default_unimap(vc);
+
+- vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
++ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+ if (!vc->vc_screenbuf)
+ goto err_free;
+
+@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
+- newscreen = kmalloc(new_screen_size, GFP_USER);
++ newscreen = kzalloc(new_screen_size, GFP_USER);
+ if (!newscreen)
+ return -ENOMEM;
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 7b366a6c0b49..998b32d0167e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+ .driver_info = SINGLE_RX_URB,
+ },
++ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
++ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
++ },
+ { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
+index e34ad5e65350..6baa75da7907 100644
+--- a/drivers/usb/dwc2/hcd_queue.c
++++ b/drivers/usb/dwc2/hcd_queue.c
+@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+ /* Get the map and adjust if this is a multi_tt hub */
+ map = qh->dwc_tt->periodic_bitmaps;
+ if (qh->dwc_tt->usb_tt->multi)
+- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
++ map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
+
+ return map;
+ }
1520 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1521 +index e5ace8995b3b..99e7547f234f 100644
1522 +--- a/drivers/usb/host/xhci-mem.c
1523 ++++ b/drivers/usb/host/xhci-mem.c
1524 +@@ -878,12 +878,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
1525 +
1526 + dev = xhci->devs[slot_id];
1527 +
1528 +- trace_xhci_free_virt_device(dev);
1529 +-
1530 + xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
1531 + if (!dev)
1532 + return;
1533 +
1534 ++ trace_xhci_free_virt_device(dev);
1535 ++
1536 + if (dev->tt_info)
1537 + old_active_eps = dev->tt_info->active_eps;
1538 +
1539 +diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
1540 +index 410544ffe78f..88b427434bd8 100644
1541 +--- a/drivers/usb/host/xhci-trace.h
1542 ++++ b/drivers/usb/host/xhci-trace.h
1543 +@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
1544 + TP_ARGS(ring, trb)
1545 + );
1546 +
1547 ++DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
1548 ++ TP_PROTO(struct xhci_virt_device *vdev),
1549 ++ TP_ARGS(vdev),
1550 ++ TP_STRUCT__entry(
1551 ++ __field(void *, vdev)
1552 ++ __field(unsigned long long, out_ctx)
1553 ++ __field(unsigned long long, in_ctx)
1554 ++ __field(u8, fake_port)
1555 ++ __field(u8, real_port)
1556 ++ __field(u16, current_mel)
1557 ++
1558 ++ ),
1559 ++ TP_fast_assign(
1560 ++ __entry->vdev = vdev;
1561 ++ __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
1562 ++ __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
1563 ++ __entry->fake_port = (u8) vdev->fake_port;
1564 ++ __entry->real_port = (u8) vdev->real_port;
1565 ++ __entry->current_mel = (u16) vdev->current_mel;
1566 ++ ),
1567 ++ TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
1568 ++ __entry->vdev, __entry->in_ctx, __entry->out_ctx,
1569 ++ __entry->fake_port, __entry->real_port, __entry->current_mel
1570 ++ )
1571 ++);
1572 ++
1573 ++DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
1574 ++ TP_PROTO(struct xhci_virt_device *vdev),
1575 ++ TP_ARGS(vdev)
1576 ++);
1577 ++
1578 + DECLARE_EVENT_CLASS(xhci_log_virt_dev,
1579 + TP_PROTO(struct xhci_virt_device *vdev),
1580 + TP_ARGS(vdev),
1581 +@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
1582 + TP_ARGS(vdev)
1583 + );
1584 +
1585 +-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
1586 +- TP_PROTO(struct xhci_virt_device *vdev),
1587 +- TP_ARGS(vdev)
1588 +-);
1589 +-
1590 + DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
1591 + TP_PROTO(struct xhci_virt_device *vdev),
1592 + TP_ARGS(vdev)
1593 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1594 +index eb6c26cbe579..ee0cc1d90b51 100644
1595 +--- a/drivers/usb/serial/cp210x.c
1596 ++++ b/drivers/usb/serial/cp210x.c
1597 +@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
1598 + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
1599 + { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
1600 + { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
1601 ++ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
1602 ++ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
1603 ++ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
1604 + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
1605 + { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
1606 + { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
1607 +@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
1608 + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
1609 + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
1610 + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
1611 ++ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
1612 ++ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
1613 ++ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
1614 + { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
1615 + { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
1616 + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
1617 +@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
1618 + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
1619 + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
1620 + { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
1621 ++ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
1622 + { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
1623 ++ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
1624 + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
1625 + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
1626 + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1627 +@@ -134,17 +142,23 @@ static const struct usb_device_id id_table[] = {
1628 + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
1629 + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1630 + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1631 ++ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
1632 ++ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
1633 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
1634 + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
1635 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
1636 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
1637 ++ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
1638 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
1639 + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
1640 + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
1641 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1642 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
1643 ++ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
1644 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
1645 + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
1646 ++ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
1647 ++ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
1648 + { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
1649 + { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
1650 + { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
1651 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
1652 +index ded49e3bf2b0..9b29b67191bc 100644
1653 +--- a/drivers/usb/typec/tcpm.c
1654 ++++ b/drivers/usb/typec/tcpm.c
1655 +@@ -388,17 +388,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
1656 + u64 ts_nsec = local_clock();
1657 + unsigned long rem_nsec;
1658 +
1659 ++ mutex_lock(&port->logbuffer_lock);
1660 + if (!port->logbuffer[port->logbuffer_head]) {
1661 + port->logbuffer[port->logbuffer_head] =
1662 + kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
1663 +- if (!port->logbuffer[port->logbuffer_head])
1664 ++ if (!port->logbuffer[port->logbuffer_head]) {
1665 ++ mutex_unlock(&port->logbuffer_lock);
1666 + return;
1667 ++ }
1668 + }
1669 +
1670 + vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
1671 +
1672 +- mutex_lock(&port->logbuffer_lock);
1673 +-
1674 + if (tcpm_log_full(port)) {
1675 + port->logbuffer_head = max(port->logbuffer_head - 1, 0);
1676 + strcpy(tmpbuffer, "overflow");
1677 +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
1678 +index bd5cca5632b3..8d0a6fe748bd 100644
1679 +--- a/drivers/usb/typec/ucsi/ucsi.c
1680 ++++ b/drivers/usb/typec/ucsi/ucsi.c
1681 +@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
1682 + }
1683 +
1684 + if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
1685 ++ typec_set_pwr_role(con->port, con->status.pwr_dir);
1686 ++
1687 ++ switch (con->status.partner_type) {
1688 ++ case UCSI_CONSTAT_PARTNER_TYPE_UFP:
1689 ++ typec_set_data_role(con->port, TYPEC_HOST);
1690 ++ break;
1691 ++ case UCSI_CONSTAT_PARTNER_TYPE_DFP:
1692 ++ typec_set_data_role(con->port, TYPEC_DEVICE);
1693 ++ break;
1694 ++ default:
1695 ++ break;
1696 ++ }
1697 ++
1698 + if (con->status.connected)
1699 + ucsi_register_partner(con);
1700 + else
1701 +diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
1702 +index 44eb4e1ea817..a18112a83fae 100644
1703 +--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
1704 ++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
1705 +@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
1706 + return -ENODEV;
1707 + }
1708 +
1709 ++ /* This will make sure we can use ioremap_nocache() */
1710 ++ status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
1711 ++ if (ACPI_FAILURE(status))
1712 ++ return -ENOMEM;
1713 ++
1714 + /*
1715 + * NOTE: The memory region for the data structures is used also in an
1716 + * operation region, which means ACPI has already reserved it. Therefore
1717 +diff --git a/include/linux/acpi.h b/include/linux/acpi.h
1718 +index 15bfb15c2fa5..a6a7ae897b40 100644
1719 +--- a/include/linux/acpi.h
1720 ++++ b/include/linux/acpi.h
1721 +@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
1722 + int acpi_check_region(resource_size_t start, resource_size_t n,
1723 + const char *name);
1724 +
1725 ++acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
1726 ++ u32 level);
1727 ++
1728 + int acpi_resources_are_enforced(void);
1729 +
1730 + #ifdef CONFIG_HIBERNATION
1731 +diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
1732 +index d12f511929f5..0fe61ede77c6 100644
1733 +--- a/net/ipv6/netfilter/ip6t_rpfilter.c
1734 ++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
1735 +@@ -48,6 +48,8 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
1736 + }
1737 +
1738 + fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
1739 ++ if ((flags & XT_RPFILTER_LOOSE) == 0)
1740 ++ fl6.flowi6_oif = dev->ifindex;
1741 +
1742 + rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
1743 + if (rt->dst.error)
1744 +diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
1745 +index 40e744572283..32b7896929f3 100644
1746 +--- a/net/netfilter/nf_tables_core.c
1747 ++++ b/net/netfilter/nf_tables_core.c
1748 +@@ -208,7 +208,8 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
1749 +
1750 + switch (regs.verdict.code) {
1751 + case NFT_JUMP:
1752 +- BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
1753 ++ if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
1754 ++ return NF_DROP;
1755 + jumpstack[stackptr].chain = chain;
1756 + jumpstack[stackptr].rule = rule;
1757 + jumpstack[stackptr].rulenum = rulenum;
1758 +diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
1759 +index 94df000abb92..29c38aa7f726 100644
1760 +--- a/net/netfilter/xt_connmark.c
1761 ++++ b/net/netfilter/xt_connmark.c
1762 +@@ -211,7 +211,7 @@ static int __init connmark_mt_init(void)
1763 + static void __exit connmark_mt_exit(void)
1764 + {
1765 + xt_unregister_match(&connmark_mt_reg);
1766 +- xt_unregister_target(connmark_tg_reg);
1767 ++ xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg));
1768 + }
1769 +
1770 + module_init(connmark_mt_init);