Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 14 Nov 2018 14:00:59
Message-Id: 1542204040.d75e9d1e5bcb71ad47e945bd030cdc8fa07719ed.mpagano@gentoo
commit: d75e9d1e5bcb71ad47e945bd030cdc8fa07719ed
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 9 10:54:46 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 14 14:00:40 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d75e9d1e

Linux patch 4.14.62

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1061_linux-4.14.62.patch | 797 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 801 insertions(+)

diff --git a/0000_README b/0000_README
index 64029e1..b530931 100644
--- a/0000_README
+++ b/0000_README
@@ -287,6 +287,10 @@ Patch: 1060_linux-4.14.61.patch
From: http://www.kernel.org
Desc: Linux 4.14.61

+Patch: 1061_linux-4.14.62.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.62
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1061_linux-4.14.62.patch b/1061_linux-4.14.62.patch
new file mode 100644
index 0000000..a1d7ceb
--- /dev/null
+++ b/1061_linux-4.14.62.patch
@@ -0,0 +1,797 @@
+diff --git a/Makefile b/Makefile
+index 4bd65eabd298..d407ecfdee0b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index f96830ffd9f1..75c6b98585ba 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
+ goto err_desc;
+ }
+
++ reinit_completion(&dma->cmd_complete);
+ txdesc->callback = i2c_imx_dma_callback;
+ txdesc->callback_param = i2c_imx;
+ if (dma_submit_error(dmaengine_submit(txdesc))) {
+@@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
+ * The first byte must be transmitted by the CPU.
+ */
+ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
+- reinit_completion(&i2c_imx->dma->cmd_complete);
+ time_left = wait_for_completion_timeout(
+ &i2c_imx->dma->cmd_complete,
+ msecs_to_jiffies(DMA_TIMEOUT));
+@@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
+ if (result)
+ return result;
+
+- reinit_completion(&i2c_imx->dma->cmd_complete);
+ time_left = wait_for_completion_timeout(
+ &i2c_imx->dma->cmd_complete,
+ msecs_to_jiffies(DMA_TIMEOUT));
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index f0b06b14e782..16249b0953ff 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = {
+ };
+
+ #define ICPU(model, cpu) \
+- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
++ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
+
+ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
+ ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
+@@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void)
+ return -ENODEV;
+ }
+
++ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
++ pr_debug("Please enable MWAIT in BIOS SETUP\n");
++ return -ENODEV;
++ }
++
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f5643d107cc6..a67d03716510 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+ struct nvme_dev {
+- struct nvme_queue **queues;
++ struct nvme_queue *queues;
+ struct blk_mq_tag_set tagset;
+ struct blk_mq_tag_set admin_tagset;
+ u32 __iomem *dbs;
+@@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+ {
+ struct nvme_dev *dev = data;
+- struct nvme_queue *nvmeq = dev->queues[0];
++ struct nvme_queue *nvmeq = &dev->queues[0];
+
+ WARN_ON(hctx_idx != 0);
+ WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
+@@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+ {
+ struct nvme_dev *dev = data;
+- struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
++ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
+
+ if (!nvmeq->tags)
+ nvmeq->tags = &dev->tagset.tags[hctx_idx];
+@@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
+ struct nvme_dev *dev = set->driver_data;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
+- struct nvme_queue *nvmeq = dev->queues[queue_idx];
++ struct nvme_queue *nvmeq = &dev->queues[queue_idx];
+
+ BUG_ON(!nvmeq);
+ iod->nvmeq = nvmeq;
+@@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+ {
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+- struct nvme_queue *nvmeq = dev->queues[0];
++ struct nvme_queue *nvmeq = &dev->queues[0];
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+@@ -1146,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
+ if (nvmeq->sq_cmds)
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+- kfree(nvmeq);
+ }
+
+ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+@@ -1154,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
+- struct nvme_queue *nvmeq = dev->queues[i];
+ dev->ctrl.queue_count--;
+- dev->queues[i] = NULL;
+- nvme_free_queue(nvmeq);
++ nvme_free_queue(&dev->queues[i]);
+ }
+ }
+
+@@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
+
+ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
+ {
+- struct nvme_queue *nvmeq = dev->queues[0];
++ struct nvme_queue *nvmeq = &dev->queues[0];
+
+- if (!nvmeq)
+- return;
+ if (nvme_suspend_queue(nvmeq))
+ return;
+
+@@ -1246,13 +1241,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ return 0;
+ }
+
+-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+- int depth, int node)
++static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
++ int depth, int node)
+ {
+- struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
+- node);
+- if (!nvmeq)
+- return NULL;
++ struct nvme_queue *nvmeq = &dev->queues[qid];
++
++ if (dev->ctrl.queue_count > qid)
++ return 0;
+
+ nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
+@@ -1271,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ nvmeq->q_depth = depth;
+ nvmeq->qid = qid;
+ nvmeq->cq_vector = -1;
+- dev->queues[qid] = nvmeq;
+ dev->ctrl.queue_count++;
+
+- return nvmeq;
++ return 0;
+
+ free_cqdma:
+ dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
+ free_nvmeq:
+- kfree(nvmeq);
+- return NULL;
++ return -ENOMEM;
+ }
+
+ static int queue_request_irq(struct nvme_queue *nvmeq)
+@@ -1468,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
+ if (result < 0)
+ return result;
+
+- nvmeq = dev->queues[0];
+- if (!nvmeq) {
+- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+- dev_to_node(dev->dev));
+- if (!nvmeq)
+- return -ENOMEM;
+- }
++ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
++ dev_to_node(dev->dev));
++ if (result)
++ return result;
+
++ nvmeq = &dev->queues[0];
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+@@ -1505,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
+
+ for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
+ /* vector == qid - 1, match nvme_create_queue */
+- if (!nvme_alloc_queue(dev, i, dev->q_depth,
++ if (nvme_alloc_queue(dev, i, dev->q_depth,
+ pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
+ ret = -ENOMEM;
+ break;
+@@ -1514,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
+
+ max = min(dev->max_qid, dev->ctrl.queue_count - 1);
+ for (i = dev->online_queues; i <= max; i++) {
+- ret = nvme_create_queue(dev->queues[i], i);
++ ret = nvme_create_queue(&dev->queues[i], i);
+ if (ret)
+ break;
+ }
+@@ -1770,7 +1761,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
+
+ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ {
+- struct nvme_queue *adminq = dev->queues[0];
++ struct nvme_queue *adminq = &dev->queues[0];
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int result, nr_io_queues;
+ unsigned long size;
+@@ -1896,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+ retry:
+ timeout = ADMIN_TIMEOUT;
+ for (; i > 0; i--, sent++)
+- if (nvme_delete_queue(dev->queues[i], opcode))
++ if (nvme_delete_queue(&dev->queues[i], opcode))
+ break;
+
+ while (sent--) {
+@@ -2081,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+
+ queues = dev->online_queues - 1;
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+- nvme_suspend_queue(dev->queues[i]);
++ nvme_suspend_queue(&dev->queues[i]);
+
+ if (dead) {
+ /* A device might become IO incapable very soon during
+@@ -2089,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ * queue_count can be 0 here.
+ */
+ if (dev->ctrl.queue_count)
+- nvme_suspend_queue(dev->queues[0]);
++ nvme_suspend_queue(&dev->queues[0]);
+ } else {
+ nvme_disable_io_queues(dev, queues);
+ nvme_disable_admin_queue(dev, shutdown);
+@@ -2345,7 +2336,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
+ if (!dev)
+ return -ENOMEM;
+- dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
++
++ dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
+ GFP_KERNEL, node);
+ if (!dev->queues)
+ goto free;
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 8e21211b904b..b7a5d1065378 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
+ struct work_struct work;
+ } __aligned(sizeof(unsigned long long));
+
++/* desired maximum for a single sequence - if sg list allows it */
+ #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
+-#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
+
+ enum nvmet_fcp_datadir {
+ NVMET_FCP_NODATA,
+@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
+ struct nvme_fc_cmd_iu cmdiubuf;
+ struct nvme_fc_ersp_iu rspiubuf;
+ dma_addr_t rspdma;
++ struct scatterlist *next_sg;
+ struct scatterlist *data_sg;
+ int data_sg_cnt;
+ u32 total_length;
+@@ -1000,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ INIT_LIST_HEAD(&newrec->assoc_list);
+ kref_init(&newrec->ref);
+ ida_init(&newrec->assoc_cnt);
+- newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
+- template->max_sgl_segments);
++ newrec->max_sg_cnt = template->max_sgl_segments;
+
+ ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ if (ret) {
+@@ -1717,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ /* note: write from initiator perspective */
++ fod->next_sg = fod->data_sg;
+
+ return 0;
+
+@@ -1874,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, u8 op)
+ {
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
++ struct scatterlist *sg = fod->next_sg;
+ unsigned long flags;
+- u32 tlen;
++ u32 remaininglen = fod->total_length - fod->offset;
++ u32 tlen = 0;
+ int ret;
+
+ fcpreq->op = op;
+ fcpreq->offset = fod->offset;
+ fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+
+- tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
+- (fod->total_length - fod->offset));
++ /*
++ * for next sequence:
++ * break at a sg element boundary
++ * attempt to keep sequence length capped at
++ * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
++ * be longer if a single sg element is larger
++ * than that amount. This is done to avoid creating
++ * a new sg list to use for the tgtport api.
++ */
++ fcpreq->sg = sg;
++ fcpreq->sg_cnt = 0;
++ while (tlen < remaininglen &&
++ fcpreq->sg_cnt < tgtport->max_sg_cnt &&
++ tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
++ fcpreq->sg_cnt++;
++ tlen += sg_dma_len(sg);
++ sg = sg_next(sg);
++ }
++ if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
++ fcpreq->sg_cnt++;
++ tlen += min_t(u32, sg_dma_len(sg), remaininglen);
++ sg = sg_next(sg);
++ }
++ if (tlen < remaininglen)
++ fod->next_sg = sg;
++ else
++ fod->next_sg = NULL;
++
+ fcpreq->transfer_length = tlen;
+ fcpreq->transferred_length = 0;
+ fcpreq->fcp_error = 0;
+ fcpreq->rsplen = 0;
+
+- fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+- fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
+-
+ /*
+ * If the last READDATA request: check if LLDD supports
+ * combined xfr with response.
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a8da543b3814..4708eb9df71b 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
+ union acpi_object *obj;
+ struct pci_host_bridge *bridge;
+
+- if (acpi_pci_disabled || !bus->bridge)
++ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
+ return;
+
+ acpi_pci_slot_enumerate(bus);
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 9ce28c4f9812..b09d29931393 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2142,6 +2142,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
+ msleep(1000);
+
+ qla24xx_disable_vp(vha);
++ qla2x00_wait_for_sess_deletion(vha);
+
+ vha->flags.delete_progress = 1;
+
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index f852ca60c49f..89706341514e 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -200,6 +200,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+ int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+ int qla24xx_async_abort_cmd(srb_t *);
++void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
+
+ /*
+ * Global Functions in qla_mid.c source file.
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 59ecc4eda6cd..2a19ec0660cb 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -3368,6 +3368,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+ return rval;
+
+ done_free_sp:
++ spin_lock_irqsave(&vha->hw->vport_slock, flags);
++ list_del(&sp->elem);
++ spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
++
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index bcde6130f121..1d42d38f5a45 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1326,11 +1326,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+
+- rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+- QLA_SUCCESS : QLA_FUNCTION_FAILED;
++ rval = tm_iocb->u.tmf.data;
+
+- if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+- ql_dbg(ql_dbg_taskm, vha, 0x8030,
++ if (rval != QLA_SUCCESS) {
++ ql_log(ql_log_warn, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 9a2c86eacf44..3f5a0f0f8b62 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -221,6 +221,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+ sp->vha = qpair->vha;
++ INIT_LIST_HEAD(&sp->elem);
++
+ done:
+ if (!sp)
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index d77dde89118e..375a88e18afe 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -152,10 +152,15 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
+ {
+ unsigned long flags;
+ int ret;
++ fc_port_t *fcport;
+
+ ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
++ list_for_each_entry(fcport, &vha->vp_fcports, list)
++ fcport->logout_on_delete = 0;
++
++ qla2x00_mark_all_devices_lost(vha, 0);
+
+ /* Remove port id from vp target map */
+ spin_lock_irqsave(&vha->hw->vport_slock, flags);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 1be76695e692..7d7fb5bbb600 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1136,7 +1136,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
+ * qla2x00_wait_for_sess_deletion can only be called from remove_one.
+ * it has dependency on UNLOADING flag to stop device discovery
+ */
+-static void
++void
+ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
+ {
+ qla2x00_mark_all_devices_lost(vha, 0);
+@@ -5794,8 +5794,9 @@ qla2x00_do_dpc(void *data)
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ }
+
+- if (test_and_clear_bit(ISP_ABORT_NEEDED,
+- &base_vha->dpc_flags)) {
++ if (test_and_clear_bit
++ (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
++ !test_bit(UNLOADING, &base_vha->dpc_flags)) {
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 7fa50e12f18e..5b62e06567a3 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4280,6 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
+ struct extent_map *em;
+ u64 start = page_offset(page);
+ u64 end = start + PAGE_SIZE - 1;
++ struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+
+ if (gfpflags_allow_blocking(mask) &&
+ page->mapping->host->i_size > SZ_16M) {
+@@ -4302,6 +4303,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
+ extent_map_end(em) - 1,
+ EXTENT_LOCKED | EXTENT_WRITEBACK,
+ 0, NULL)) {
++ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
++ &btrfs_inode->runtime_flags);
+ remove_extent_mapping(map, em);
+ /* once for the rb tree */
+ free_extent_map(em);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6b0c1ea95196..f30d2bf40471 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2301,7 +2301,7 @@ static int ext4_check_descriptors(struct super_block *sb,
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+ ext4_fsblk_t last_block;
+- ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
++ ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
+ ext4_fsblk_t block_bitmap;
+ ext4_fsblk_t inode_bitmap;
+ ext4_fsblk_t inode_table;
+@@ -4038,13 +4038,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ goto failed_mount2;
+ }
+ }
++ sbi->s_gdb_count = db_count;
+ if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+ ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
+ ret = -EFSCORRUPTED;
+ goto failed_mount2;
+ }
+
+- sbi->s_gdb_count = db_count;
+ get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+ spin_lock_init(&sbi->s_next_gen_lock);
+
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index c60f3d32ee91..a6797986b625 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+ if (size > PSIZE) {
+ /*
+ * To keep the rest of the code simple. Allocate a
+- * contiguous buffer to work with
++ * contiguous buffer to work with. Make the buffer large
++ * enough to make use of the whole extent.
+ */
+- ea_buf->xattr = kmalloc(size, GFP_KERNEL);
++ ea_buf->max_size = (size + sb->s_blocksize - 1) &
++ ~(sb->s_blocksize - 1);
++
++ ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
+ if (ea_buf->xattr == NULL)
+ return -ENOMEM;
+
+ ea_buf->flag = EA_MALLOC;
+- ea_buf->max_size = (size + sb->s_blocksize - 1) &
+- ~(sb->s_blocksize - 1);
+
+ if (ea_size == 0)
+ return 0;
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index 5c16db86b38f..40e53a4fc0a6 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -785,9 +785,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+ ASSERT(blkno == 0);
+ error = xfs_attr3_leaf_create(args, blkno, &bp);
+ if (error) {
+- error = xfs_da_shrink_inode(args, 0, bp);
+- bp = NULL;
+- if (error)
++ /* xfs_attr3_leaf_create may not have instantiated a block */
++ if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
+ goto out;
+ xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
+ memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index 43005fbe8b1e..544b5211221c 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -305,6 +305,46 @@ xfs_reinit_inode(
+ return error;
+ }
+
++/*
++ * If we are allocating a new inode, then check what was returned is
++ * actually a free, empty inode. If we are not allocating an inode,
++ * then check we didn't find a free inode.
++ *
++ * Returns:
++ * 0 if the inode free state matches the lookup context
++ * -ENOENT if the inode is free and we are not allocating
++ * -EFSCORRUPTED if there is any state mismatch at all
++ */
++static int
++xfs_iget_check_free_state(
++ struct xfs_inode *ip,
++ int flags)
++{
++ if (flags & XFS_IGET_CREATE) {
++ /* should be a free inode */
++ if (VFS_I(ip)->i_mode != 0) {
++ xfs_warn(ip->i_mount,
++"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
++ ip->i_ino, VFS_I(ip)->i_mode);
++ return -EFSCORRUPTED;
++ }
++
++ if (ip->i_d.di_nblocks != 0) {
++ xfs_warn(ip->i_mount,
++"Corruption detected! Free inode 0x%llx has blocks allocated!",
++ ip->i_ino);
++ return -EFSCORRUPTED;
++ }
++ return 0;
++ }
++
++ /* should be an allocated inode */
++ if (VFS_I(ip)->i_mode == 0)
++ return -ENOENT;
++
++ return 0;
++}
++
+ /*
+ * Check the validity of the inode we just found it the cache
+ */
+@@ -354,12 +394,12 @@ xfs_iget_cache_hit(
+ }
+
+ /*
+- * If lookup is racing with unlink return an error immediately.
++ * Check the inode free state is valid. This also detects lookup
++ * racing with unlinks.
+ */
+- if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
+- error = -ENOENT;
++ error = xfs_iget_check_free_state(ip, flags);
++ if (error)
+ goto out_error;
+- }
+
+ /*
+ * If IRECLAIMABLE is set, we've torn down the VFS inode already.
+@@ -475,10 +515,14 @@ xfs_iget_cache_miss(
+
+ trace_xfs_iget_miss(ip);
+
+- if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
+- error = -ENOENT;
++
++ /*
++ * Check the inode free state is valid. This also detects lookup
++ * racing with unlinks.
++ */
++ error = xfs_iget_check_free_state(ip, flags);
++ if (error)
+ goto out_destroy;
+- }
+
+ /*
+ * Preload the radix tree so we can insert safely under the
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 289e4d54e3e0..5caa062a02b2 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -160,6 +160,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
+ void ring_buffer_record_off(struct ring_buffer *buffer);
+ void ring_buffer_record_on(struct ring_buffer *buffer);
+ int ring_buffer_record_is_on(struct ring_buffer *buffer);
++int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
+ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
+ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index b02caa442776..069311541577 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1030,6 +1030,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
+ if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+ return 0;
+
++ /*
++ * No further action required for interrupts which are requested as
++ * threaded interrupts already
++ */
++ if (new->handler == irq_default_primary_handler)
++ return 0;
++
+ new->flags |= IRQF_ONESHOT;
+
+ /*
+@@ -1037,7 +1044,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
+ * thread handler. We force thread them as well by creating a
+ * secondary action.
+ */
+- if (new->handler != irq_default_primary_handler && new->thread_fn) {
++ if (new->handler && new->thread_fn) {
+ /* Allocate the secondary action */
+ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!new->secondary)
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index e89c3b0cff6d..f40ac7191257 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -382,7 +382,7 @@ static inline void tick_irq_exit(void)
+
+ /* Make sure that timer wheel updates are propagated */
+ if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
+- if (!in_interrupt())
++ if (!in_irq())
+ tick_nohz_irq_exit();
+ }
+ #endif
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index bb2af74e6b62..ea3c062e7e1c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -676,7 +676,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+
+ static inline bool local_timer_softirq_pending(void)
+ {
+- return local_softirq_pending() & TIMER_SOFTIRQ;
++ return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+ }
+
+ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 36f018b15392..fd7809004297 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3109,6 +3109,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
+ return !atomic_read(&buffer->record_disabled);
+ }
+
++/**
++ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
++ * @buffer: The ring buffer to see if write is set enabled
++ *
++ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
++ * Note that this does NOT mean it is in a writable state.
++ *
++ * It may return true when the ring buffer has been disabled by
++ * ring_buffer_record_disable(), as that is a temporary disabling of
++ * the ring buffer.
++ */
++int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
++{
++ return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
++}
++
+ /**
+ * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
+ * @buffer: The ring buffer to stop writes to.
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index e268750bd4ad..20919489883f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1366,6 +1366,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+
+ arch_spin_lock(&tr->max_lock);
+
++ /* Inherit the recordable setting from trace_buffer */
++ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
++ ring_buffer_record_on(tr->max_buffer.buffer);
++ else
++ ring_buffer_record_off(tr->max_buffer.buffer);
++
+ buf = tr->trace_buffer.buffer;
+ tr->trace_buffer.buffer = tr->max_buffer.buffer;
+ tr->max_buffer.buffer = buf;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 68c9d1833b95..c67abda5d639 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -981,8 +981,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+
+ if (nlk->ngroups == 0)
+ groups = 0;
+- else
+- groups &= (1ULL << nlk->ngroups) - 1;
++ else if (nlk->ngroups < 8*sizeof(groups))
++ groups &= (1UL << nlk->ngroups) - 1;
+
+ bound = nlk->bound;
+ if (bound) {