From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 09 Jan 2021 12:56:18
Message-Id: 1610196964.e15331f8e11c18c10e0e46ad38741a0b33aebef6.mpagano@gentoo
commit: e15331f8e11c18c10e0e46ad38741a0b33aebef6
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 9 12:56:04 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 9 12:56:04 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e15331f8

Linux patch 4.14.214

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1213_linux-4.14.214.patch | 1567 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1571 insertions(+)

diff --git a/0000_README b/0000_README
index ff5e177..f54adb5 100644
--- a/0000_README
+++ b/0000_README
@@ -895,6 +895,10 @@ Patch: 1212_linux-4.14.213.patch
From: https://www.kernel.org
Desc: Linux 4.14.213

+Patch: 1213_linux-4.14.214.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.214
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1213_linux-4.14.214.patch b/1213_linux-4.14.214.patch
new file mode 100644
index 0000000..46dae79
--- /dev/null
+++ b/1213_linux-4.14.214.patch
@@ -0,0 +1,1567 @@
+diff --git a/Makefile b/Makefile
+index d059e257b976a..d36b8f4228a47 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 213
++SUBLEVEL = 214
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
+index b750ffef83c7d..0ec93d940d12c 100644
+--- a/arch/powerpc/include/asm/bitops.h
++++ b/arch/powerpc/include/asm/bitops.h
+@@ -220,15 +220,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+ */
+ static __inline__ int fls(unsigned int x)
+ {
+- return 32 - __builtin_clz(x);
++ int lz;
++
++ if (__builtin_constant_p(x))
++ return x ? 32 - __builtin_clz(x) : 0;
++ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
++ return 32 - lz;
+ }
+
+ #include <asm-generic/bitops/builtin-__fls.h>
+
++/*
++ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
++ * instruction; for 32-bit we use the generic version, which does two
++ * 32-bit fls calls.
++ */
++#ifdef CONFIG_PPC64
+ static __inline__ int fls64(__u64 x)
+ {
+- return 64 - __builtin_clzll(x);
++ int lz;
++
++ if (__builtin_constant_p(x))
++ return x ? 64 - __builtin_clzll(x) : 0;
++ asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
++ return 64 - lz;
+ }
++#else
++#include <asm-generic/bitops/fls64.h>
++#endif
+
+ #ifdef CONFIG_PPC64
+ unsigned int __arch_hweight8(unsigned int w);
+diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
+index 280e964e1aa88..497e86cfb12e0 100644
+--- a/arch/powerpc/sysdev/mpic_msgr.c
++++ b/arch/powerpc/sysdev/mpic_msgr.c
+@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
+
+ /* IO map the message register block. */
+ of_address_to_resource(np, 0, &rsrc);
+- msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
++ msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
+ if (!msgr_block_addr) {
+ dev_err(&dev->dev, "Failed to iomap MPIC message registers");
+ return -EFAULT;
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index f24974bddfc96..ac389ffb1822b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -55,7 +55,7 @@ END(native_usergs_sysret64)
+
+ .macro TRACE_IRQS_IRETQ
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- bt $9, EFLAGS(%rsp) /* interrupts off? */
++ btl $9, EFLAGS(%rsp) /* interrupts off? */
+ jnc 1f
+ TRACE_IRQS_ON
+ 1:
+diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
+index 01fcd715485c5..c56512297fb9e 100644
+--- a/drivers/iio/imu/bmi160/bmi160_core.c
++++ b/drivers/iio/imu/bmi160/bmi160_core.c
+@@ -110,6 +110,13 @@ enum bmi160_sensor_type {
+
+ struct bmi160_data {
+ struct regmap *regmap;
++ /*
++ * Ensure natural alignment for timestamp if present.
++ * Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts.
++ * If fewer channels are enabled, less space may be needed, as
++ * long as the timestamp is still aligned to 8 bytes.
++ */
++ __le16 buf[12] __aligned(8);
+ };
+
+ const struct regmap_config bmi160_regmap_config = {
+@@ -385,8 +392,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmi160_data *data = iio_priv(indio_dev);
+- __le16 buf[12];
+- /* 2 sens x 3 axis x __le16 + 2 x __le16 pad + 4 x __le16 tstamp */
+ int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
+ __le16 sample;
+
+@@ -396,10 +401,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
+ &sample, sizeof(sample));
+ if (ret < 0)
+ goto done;
+- buf[j++] = sample;
++ data->buf[j++] = sample;
+ }
+
+- iio_push_to_buffers_with_timestamp(indio_dev, buf,
++ iio_push_to_buffers_with_timestamp(indio_dev, data->buf,
+ iio_get_time_ns(indio_dev));
+ done:
+ iio_trigger_notify_done(indio_dev->trig);
+diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
+index dad8d57f7402b..974e141c0dc04 100644
+--- a/drivers/iio/magnetometer/mag3110.c
++++ b/drivers/iio/magnetometer/mag3110.c
+@@ -52,6 +52,12 @@ struct mag3110_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 ctrl_reg1;
++ /* Ensure natural alignment of timestamp */
++ struct {
++ __be16 channels[3];
++ u8 temperature;
++ s64 ts __aligned(8);
++ } scan;
+ };
+
+ static int mag3110_request(struct mag3110_data *data)
+@@ -262,10 +268,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mag3110_data *data = iio_priv(indio_dev);
+- u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */
+ int ret;
+
+- ret = mag3110_read(data, (__be16 *) buffer);
++ ret = mag3110_read(data, data->scan.channels);
+ if (ret < 0)
+ goto done;
+
+@@ -274,10 +279,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
+ MAG3110_DIE_TEMP);
+ if (ret < 0)
+ goto done;
+- buffer[6] = ret;
++ data->scan.temperature = ret;
+ }
+
+- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_get_time_ns(indio_dev));
+
+ done:
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index e705799976c2c..2dae30713eb3d 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -551,6 +551,15 @@ static int verity_verify_io(struct dm_verity_io *io)
+ return 0;
+ }
+
++/*
++ * Skip verity work in response to I/O error when system is shutting down.
++ */
++static inline bool verity_is_system_shutting_down(void)
++{
++ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
++ || system_state == SYSTEM_RESTART;
++}
++
+ /*
+ * End one "io" structure with a given error.
+ */
+@@ -578,7 +587,8 @@ static void verity_end_io(struct bio *bio)
+ {
+ struct dm_verity_io *io = bio->bi_private;
+
+- if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
++ if (bio->bi_status &&
++ (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
+ verity_finish_io(io, bio->bi_status);
+ return;
+ }
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index d08d77b9674ff..419ecdd914f4c 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1120,7 +1120,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ struct md_rdev *err_rdev = NULL;
+ gfp_t gfp = GFP_NOIO;
+
+- if (r10_bio->devs[slot].rdev) {
++ if (slot >= 0 && r10_bio->devs[slot].rdev) {
+ /*
+ * This is an error retry, but we cannot
+ * safely dereference the rdev in the r10_bio,
+@@ -1513,6 +1513,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector;
+ r10_bio->state = 0;
++ r10_bio->read_slot = -1;
+ memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
+
+ if (bio_data_dir(bio) == READ)
+diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
+index 37f062225ed21..aac677f6aaa4f 100644
+--- a/drivers/media/usb/dvb-usb/gp8psk.c
++++ b/drivers/media/usb/dvb-usb/gp8psk.c
+@@ -185,7 +185,7 @@ out_rel_fw:
+
+ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
+ {
+- u8 status, buf;
++ u8 status = 0, buf;
+ int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
+
+ if (onoff) {
+diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
+index bc089e634a751..26e20b091160a 100644
+--- a/drivers/misc/vmw_vmci/vmci_context.c
++++ b/drivers/misc/vmw_vmci/vmci_context.c
+@@ -751,7 +751,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+- dbells = kmalloc(data_size, GFP_ATOMIC);
++ dbells = kzalloc(data_size, GFP_ATOMIC);
+ if (!dbells)
+ return VMCI_ERROR_NO_MEM;
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
+index d87aeff70cefb..c2cb1e711c06e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/join.c
++++ b/drivers/net/wireless/marvell/mwifiex/join.c
+@@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
+
+ memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
+
++ if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
++ req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
+ memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
+
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
+diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
+index 8eb2b6dd36fea..1d0d9c8d0085d 100644
+--- a/drivers/rtc/rtc-sun6i.c
++++ b/drivers/rtc/rtc-sun6i.c
+@@ -230,7 +230,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
+ 300000000);
+ if (IS_ERR(rtc->int_osc)) {
+ pr_crit("Couldn't register the internal oscillator\n");
+- return;
++ goto err;
+ }
+
+ parents[0] = clk_hw_get_name(rtc->int_osc);
+@@ -246,7 +246,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
+ rtc->losc = clk_register(NULL, &rtc->hw);
+ if (IS_ERR(rtc->losc)) {
+ pr_crit("Couldn't register the LOSC clock\n");
+- return;
++ goto err_register;
+ }
+
+ of_property_read_string_index(node, "clock-output-names", 1,
+@@ -257,7 +257,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
+ &rtc->lock);
+ if (IS_ERR(rtc->ext_losc)) {
+ pr_crit("Couldn't register the LOSC external gate\n");
+- return;
++ goto err_register;
+ }
+
+ clk_data->num = 2;
+@@ -266,6 +266,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
+ of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ return;
+
++err_register:
++ clk_hw_unregister_fixed_rate(rtc->int_osc);
+ err:
+ kfree(clk_data);
+ }
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index 487b16ace0060..0f70cae1c01e3 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
+ spin_unlock_irqrestore(&lcu->lock, flags);
+
+ rc = dasd_sleep_on(cqr);
+- if (rc && !suborder_not_supported(cqr)) {
++ if (!rc)
++ goto out;
++
++ if (suborder_not_supported(cqr)) {
++ /* suborder not supported or device unusable for IO */
++ rc = -EOPNOTSUPP;
++ } else {
++ /* IO failed but should be retried */
+ spin_lock_irqsave(&lcu->lock, flags);
+ lcu->flags |= NEED_UAC_UPDATE;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ }
++out:
+ dasd_kfree_request(cqr, cqr->memdev);
+ return rc;
+ }
+diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
+index 2ce39af32cfa6..e494ffdc06bc9 100644
+--- a/drivers/usb/serial/digi_acceleport.c
++++ b/drivers/usb/serial/digi_acceleport.c
+@@ -23,7 +23,6 @@
+ #include <linux/tty_flip.h>
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
+-#include <linux/workqueue.h>
+ #include <linux/uaccess.h>
+ #include <linux/usb.h>
+ #include <linux/wait.h>
+@@ -202,14 +201,12 @@ struct digi_port {
+ int dp_throttle_restart;
+ wait_queue_head_t dp_flush_wait;
+ wait_queue_head_t dp_close_wait; /* wait queue for close */
+- struct work_struct dp_wakeup_work;
+ struct usb_serial_port *dp_port;
+ };
+
+
+ /* Local Function Declarations */
+
+-static void digi_wakeup_write_lock(struct work_struct *work);
+ static int digi_write_oob_command(struct usb_serial_port *port,
+ unsigned char *buf, int count, int interruptible);
+ static int digi_write_inb_command(struct usb_serial_port *port,
+@@ -360,26 +357,6 @@ __releases(lock)
+ return timeout;
+ }
+
+-
+-/*
+- * Digi Wakeup Write
+- *
+- * Wake up port, line discipline, and tty processes sleeping
+- * on writes.
+- */
+-
+-static void digi_wakeup_write_lock(struct work_struct *work)
+-{
+- struct digi_port *priv =
+- container_of(work, struct digi_port, dp_wakeup_work);
+- struct usb_serial_port *port = priv->dp_port;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&priv->dp_port_lock, flags);
+- tty_port_tty_wakeup(&port->port);
+- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
+-}
+-
+ /*
+ * Digi Write OOB Command
+ *
+@@ -990,6 +967,7 @@ static void digi_write_bulk_callback(struct urb *urb)
+ struct digi_serial *serial_priv;
+ int ret = 0;
+ int status = urb->status;
++ bool wakeup;
+
+ /* port and serial sanity check */
+ if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
+@@ -1016,6 +994,7 @@ static void digi_write_bulk_callback(struct urb *urb)
+ }
+
+ /* try to send any buffered data on this port */
++ wakeup = true;
+ spin_lock(&priv->dp_port_lock);
+ priv->dp_write_urb_in_use = 0;
+ if (priv->dp_out_buf_len > 0) {
+@@ -1031,19 +1010,18 @@ static void digi_write_bulk_callback(struct urb *urb)
+ if (ret == 0) {
+ priv->dp_write_urb_in_use = 1;
+ priv->dp_out_buf_len = 0;
++ wakeup = false;
+ }
+ }
+- /* wake up processes sleeping on writes immediately */
+- tty_port_tty_wakeup(&port->port);
+- /* also queue up a wakeup at scheduler time, in case we */
+- /* lost the race in write_chan(). */
+- schedule_work(&priv->dp_wakeup_work);
+-
+ spin_unlock(&priv->dp_port_lock);
++
+ if (ret && ret != -EPERM)
+ dev_err_console(port,
+ "%s: usb_submit_urb failed, ret=%d, port=%d\n",
+ __func__, ret, priv->dp_port_num);
++
++ if (wakeup)
++ tty_port_tty_wakeup(&port->port);
+ }
+
+ static int digi_write_room(struct tty_struct *tty)
+@@ -1243,7 +1221,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
+ init_waitqueue_head(&priv->dp_transmit_idle_wait);
+ init_waitqueue_head(&priv->dp_flush_wait);
+ init_waitqueue_head(&priv->dp_close_wait);
+- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+ priv->dp_port = port;
+
+ init_waitqueue_head(&port->write_wait);
+@@ -1510,13 +1487,14 @@ static int digi_read_oob_callback(struct urb *urb)
+ rts = C_CRTSCTS(tty);
+
+ if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
++ bool wakeup = false;
++
+ spin_lock(&priv->dp_port_lock);
+ /* convert from digi flags to termiox flags */
+ if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
+ priv->dp_modem_signals |= TIOCM_CTS;
+- /* port must be open to use tty struct */
+ if (rts)
+- tty_port_tty_wakeup(&port->port);
++ wakeup = true;
+ } else {
+ priv->dp_modem_signals &= ~TIOCM_CTS;
+ /* port must be open to use tty struct */
+@@ -1535,6 +1513,9 @@ static int digi_read_oob_callback(struct urb *urb)
+ priv->dp_modem_signals &= ~TIOCM_CD;
+
+ spin_unlock(&priv->dp_port_lock);
++
++ if (wakeup)
++ tty_port_tty_wakeup(&port->port);
+ } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
+ spin_lock(&priv->dp_port_lock);
+ priv->dp_transmit_idle = 1;
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 6fceefcab81db..dedc7edea5178 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -118,8 +118,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
+ int bar;
+ struct vfio_pci_dummy_resource *dummy_res;
+
+- INIT_LIST_HEAD(&vdev->dummy_resources_list);
+-
+ for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+ res = vdev->pdev->resource + bar;
+
+@@ -1524,6 +1522,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ mutex_init(&vdev->igate);
+ spin_lock_init(&vdev->irqlock);
++ INIT_LIST_HEAD(&vdev->dummy_resources_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ init_rwsem(&vdev->memory_lock);
+diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
+index bb3f59bcfcf5b..656f9ff63edda 100644
+--- a/fs/quota/quota_tree.c
++++ b/fs/quota/quota_tree.c
+@@ -61,7 +61,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+
+ memset(buf, 0, info->dqi_usable_bs);
+ return sb->s_op->quota_read(sb, info->dqi_type, buf,
+- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
++ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
+ }
+
+ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+@@ -70,7 +70,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+ ssize_t ret;
+
+ ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
+- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
++ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
+ if (ret != info->dqi_usable_bs) {
+ quota_error(sb, "dquota write failed");
+ if (ret >= 0)
+@@ -283,7 +283,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
+ blk);
+ goto out_buf;
+ }
+- dquot->dq_off = (blk << info->dqi_blocksize_bits) +
++ dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
+ sizeof(struct qt_disk_dqdbheader) +
+ i * info->dqi_entry_size;
+ kfree(buf);
+@@ -558,7 +558,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
+ ret = -EIO;
+ goto out_buf;
+ } else {
+- ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
++ ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
+ qt_disk_dqdbheader) + i * info->dqi_entry_size;
+ }
+ out_buf:
+diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
+index 2946713cb00d6..5229038852ca1 100644
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
+ "(second one): %h", ih);
+ return 0;
+ }
++ if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
++ reiserfs_warning(NULL, "reiserfs-5093",
++ "item entry count seems wrong %h",
++ ih);
++ return 0;
++ }
+ prev_location = ih_location(ih);
+ }
+
+diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
+index 85b5151911cfd..4856706fbfeb4 100644
+--- a/include/linux/kdev_t.h
++++ b/include/linux/kdev_t.h
+@@ -21,61 +21,61 @@
+ })
+
+ /* acceptable for old filesystems */
+-static inline bool old_valid_dev(dev_t dev)
++static __always_inline bool old_valid_dev(dev_t dev)
+ {
+ return MAJOR(dev) < 256 && MINOR(dev) < 256;
+ }
+
+-static inline u16 old_encode_dev(dev_t dev)
++static __always_inline u16 old_encode_dev(dev_t dev)
+ {
+ return (MAJOR(dev) << 8) | MINOR(dev);
+ }
+
+-static inline dev_t old_decode_dev(u16 val)
++static __always_inline dev_t old_decode_dev(u16 val)
+ {
+ return MKDEV((val >> 8) & 255, val & 255);
+ }
+
+-static inline u32 new_encode_dev(dev_t dev)
++static __always_inline u32 new_encode_dev(dev_t dev)
+ {
+ unsigned major = MAJOR(dev);
+ unsigned minor = MINOR(dev);
+ return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
+ }
+
+-static inline dev_t new_decode_dev(u32 dev)
++static __always_inline dev_t new_decode_dev(u32 dev)
+ {
+ unsigned major = (dev & 0xfff00) >> 8;
+ unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
+ return MKDEV(major, minor);
+ }
+
+-static inline u64 huge_encode_dev(dev_t dev)
++static __always_inline u64 huge_encode_dev(dev_t dev)
+ {
+ return new_encode_dev(dev);
+ }
+
+-static inline dev_t huge_decode_dev(u64 dev)
++static __always_inline dev_t huge_decode_dev(u64 dev)
+ {
+ return new_decode_dev(dev);
+ }
+
+-static inline int sysv_valid_dev(dev_t dev)
++static __always_inline int sysv_valid_dev(dev_t dev)
+ {
+ return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
+ }
+
+-static inline u32 sysv_encode_dev(dev_t dev)
++static __always_inline u32 sysv_encode_dev(dev_t dev)
+ {
+ return MINOR(dev) | (MAJOR(dev) << 18);
+ }
+
+-static inline unsigned sysv_major(u32 dev)
++static __always_inline unsigned sysv_major(u32 dev)
+ {
+ return (dev >> 18) & 0x3fff;
+ }
+
+-static inline unsigned sysv_minor(u32 dev)
++static __always_inline unsigned sysv_minor(u32 dev)
+ {
+ return dev & 0x3ffff;
+ }
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 69966c461d1c1..8820468635810 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -108,7 +108,10 @@ struct lruvec_stat {
+ */
+ struct mem_cgroup_per_node {
+ struct lruvec lruvec;
+- struct lruvec_stat __percpu *lruvec_stat;
++
++ struct lruvec_stat __percpu *lruvec_stat_cpu;
++ atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
++
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
+
+ struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+@@ -227,10 +230,10 @@ struct mem_cgroup {
+ spinlock_t move_lock;
+ struct task_struct *move_lock_task;
+ unsigned long move_lock_flags;
+- /*
+- * percpu counter.
+- */
+- struct mem_cgroup_stat_cpu __percpu *stat;
++
++ struct mem_cgroup_stat_cpu __percpu *stat_cpu;
++ atomic_long_t stat[MEMCG_NR_STAT];
++ atomic_long_t events[MEMCG_NR_EVENTS];
+
+ unsigned long socket_pressure;
+
+@@ -265,6 +268,12 @@ struct mem_cgroup {
+ /* WARNING: nodeinfo must be the last member here */
+ };
+
++/*
++ * size of first charge trial. "32" comes from vmscan.c's magic value.
++ * TODO: maybe necessary to use big numbers in big irons.
++ */
++#define MEMCG_CHARGE_BATCH 32U
++
+ extern struct mem_cgroup *root_mem_cgroup;
+
+ static inline bool mem_cgroup_disabled(void)
+@@ -272,13 +281,6 @@ static inline bool mem_cgroup_disabled(void)
+ return !cgroup_subsys_enabled(memory_cgrp_subsys);
+ }
+
+-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+- enum memcg_event_item event)
+-{
+- this_cpu_inc(memcg->stat->events[event]);
+- cgroup_file_notify(&memcg->events_file);
+-}
+-
+ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+
+ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
+@@ -492,32 +494,38 @@ void unlock_page_memcg(struct page *page);
+ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ int idx)
+ {
+- long val = 0;
+- int cpu;
+-
+- for_each_possible_cpu(cpu)
+- val += per_cpu(memcg->stat->count[idx], cpu);
+-
+- if (val < 0)
+- val = 0;
+-
+- return val;
++ long x = atomic_long_read(&memcg->stat[idx]);
++#ifdef CONFIG_SMP
++ if (x < 0)
++ x = 0;
++#endif
++ return x;
+ }
+
+ /* idx can be of type enum memcg_stat_item or node_stat_item */
+ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+ int idx, int val)
+ {
+- if (!mem_cgroup_disabled())
+- __this_cpu_add(memcg->stat->count[idx], val);
++ long x;
++
++ if (mem_cgroup_disabled())
++ return;
++
++ x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
++ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
++ atomic_long_add(x, &memcg->stat[idx]);
++ x = 0;
++ }
++ __this_cpu_write(memcg->stat_cpu->count[idx], x);
+ }
+
+ /* idx can be of type enum memcg_stat_item or node_stat_item */
+ static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ int idx, int val)
+ {
+- if (!mem_cgroup_disabled())
+- this_cpu_add(memcg->stat->count[idx], val);
++ preempt_disable();
++ __mod_memcg_state(memcg, idx, val);
++ preempt_enable();
+ }
+
+ /**
+@@ -555,87 +563,108 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+ {
+ struct mem_cgroup_per_node *pn;
+- long val = 0;
+- int cpu;
++ long x;
+
+ if (mem_cgroup_disabled())
+ return node_page_state(lruvec_pgdat(lruvec), idx);
+
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+- for_each_possible_cpu(cpu)
+- val += per_cpu(pn->lruvec_stat->count[idx], cpu);
+-
+- if (val < 0)
+- val = 0;
+-
+- return val;
++ x = atomic_long_read(&pn->lruvec_stat[idx]);
++#ifdef CONFIG_SMP
++ if (x < 0)
++ x = 0;
++#endif
++ return x;
+ }
+
+ static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+ {
+ struct mem_cgroup_per_node *pn;
++ long x;
+
++ /* Update node */
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
++
+ if (mem_cgroup_disabled())
+ return;
++
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
++
++ /* Update memcg */
+ __mod_memcg_state(pn->memcg, idx, val);
+- __this_cpu_add(pn->lruvec_stat->count[idx], val);
++
++ /* Update lruvec */
++ x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
++ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
++ atomic_long_add(x, &pn->lruvec_stat[idx]);
++ x = 0;
++ }
++ __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
+ }
+
+ static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+ {
+- struct mem_cgroup_per_node *pn;
+-
+- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+- if (mem_cgroup_disabled())
+- return;
+- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+- mod_memcg_state(pn->memcg, idx, val);
+- this_cpu_add(pn->lruvec_stat->count[idx], val);
++ preempt_disable();
++ __mod_lruvec_state(lruvec, idx, val);
++ preempt_enable();
+ }
+
+ static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+ {
+- struct mem_cgroup_per_node *pn;
++ pg_data_t *pgdat = page_pgdat(page);
++ struct lruvec *lruvec;
+
+- __mod_node_page_state(page_pgdat(page), idx, val);
+- if (mem_cgroup_disabled() || !page->mem_cgroup)
++ /* Untracked pages have no memcg, no lruvec. Update only the node */
++ if (!page->mem_cgroup) {
++ __mod_node_page_state(pgdat, idx, val);
+ return;
+- __mod_memcg_state(page->mem_cgroup, idx, val);
+- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
+- __this_cpu_add(pn->lruvec_stat->count[idx], val);
++ }
++
++ lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
++ __mod_lruvec_state(lruvec, idx, val);
+ }
+
+ static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+ {
+- struct mem_cgroup_per_node *pn;
+-
+- mod_node_page_state(page_pgdat(page), idx, val);
+- if (mem_cgroup_disabled() || !page->mem_cgroup)
+- return;
+- mod_memcg_state(page->mem_cgroup, idx, val);
+- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
+- this_cpu_add(pn->lruvec_stat->count[idx], val);
++ preempt_disable();
++ __mod_lruvec_page_state(page, idx, val);
++ preempt_enable();
+ }
+
+ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
+
++/* idx can be of type enum memcg_event_item or vm_event_item */
++static inline void __count_memcg_events(struct mem_cgroup *memcg,
++ int idx, unsigned long count)
++{
++ unsigned long x;
++
++ if (mem_cgroup_disabled())
++ return;
++
++ x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
++ if (unlikely(x > MEMCG_CHARGE_BATCH)) {
++ atomic_long_add(x, &memcg->events[idx]);
++ x = 0;
++ }
++ __this_cpu_write(memcg->stat_cpu->events[idx], x);
++}
++
+ static inline void count_memcg_events(struct mem_cgroup *memcg,
+- enum vm_event_item idx,
+- unsigned long count)
++ int idx, unsigned long count)
+ {
+- if (!mem_cgroup_disabled())
+- this_cpu_add(memcg->stat->events[idx], count);
++ preempt_disable();
++ __count_memcg_events(memcg, idx, count);
++ preempt_enable();
+ }
+
+-/* idx can be of type enum memcg_stat_item or node_stat_item */
++/* idx can be of type enum memcg_event_item or vm_event_item */
+ static inline void count_memcg_page_event(struct page *page,
+ int idx)
+ {
+@@ -654,12 +683,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (likely(memcg)) {
+- this_cpu_inc(memcg->stat->events[idx]);
++ count_memcg_events(memcg, idx, 1);
+ if (idx == OOM_KILL)
+ cgroup_file_notify(&memcg->events_file);
+ }
+ rcu_read_unlock();
+ }
++
++static inline void mem_cgroup_event(struct mem_cgroup *memcg,
++ enum memcg_event_item event)
++{
++ count_memcg_events(memcg, event, 1);
++ cgroup_file_notify(&memcg->events_file);
++}
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ void mem_cgroup_split_huge_fixup(struct page *head);
+ #endif
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 3c108f9be5e7c..af10856159226 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -1163,6 +1163,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
+ #define _OF_DECLARE(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __used __section(__##table##_of_table) \
++ __aligned(__alignof__(struct of_device_id)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+ #else
+diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
+index 92537757590aa..dab9f34383e5b 100644
+--- a/include/uapi/linux/const.h
++++ b/include/uapi/linux/const.h
+@@ -25,4 +25,9 @@
+ #define _BITUL(x) (_AC(1,UL) << (x))
+ #define _BITULL(x) (_AC(1,ULL) << (x))
+
++#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
++#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
++
++#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
++
+ #endif /* !(_LINUX_CONST_H) */
+diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
+index 9eae13eefc49e..1e3f1a43bf1d9 100644
+--- a/include/uapi/linux/ethtool.h
++++ b/include/uapi/linux/ethtool.h
+@@ -14,7 +14,7 @@
+ #ifndef _UAPI_LINUX_ETHTOOL_H
+ #define _UAPI_LINUX_ETHTOOL_H
+
+-#include <linux/kernel.h>
++#include <linux/const.h>
+ #include <linux/types.h>
+ #include <linux/if_ether.h>
+
+diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
+index 0ff8f7477847c..fadf2db71fe8a 100644
+--- a/include/uapi/linux/kernel.h
++++ b/include/uapi/linux/kernel.h
+@@ -3,13 +3,6 @@
+ #define _UAPI_LINUX_KERNEL_H
+
+ #include <linux/sysinfo.h>
+-
+-/*
+- * 'kernel.h' contains some often-used function prototypes etc
+- */
+-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+-
+-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
++#include <linux/const.h>
+
+ #endif /* _UAPI_LINUX_KERNEL_H */
+diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
+index 42d1a434af296..0d44ebba00932 100644
+--- a/include/uapi/linux/lightnvm.h
++++ b/include/uapi/linux/lightnvm.h
+@@ -21,7 +21,7 @@
+ #define _UAPI_LINUX_LIGHTNVM_H
+
+ #ifdef __KERNEL__
+-#include <linux/kernel.h>
++#include <linux/const.h>
+ #include <linux/ioctl.h>
+ #else /* __KERNEL__ */
+ #include <stdio.h>
+diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
+index 9999cc006390d..1617eb9949a5d 100644
+--- a/include/uapi/linux/mroute6.h
++++ b/include/uapi/linux/mroute6.h
+@@ -2,7 +2,7 @@
+ #ifndef _UAPI__LINUX_MROUTE6_H
+ #define _UAPI__LINUX_MROUTE6_H
+
+-#include <linux/kernel.h>
++#include <linux/const.h>
+ #include <linux/types.h>
+ #include <linux/sockios.h>
+ #include <linux/in6.h> /* For struct sockaddr_in6. */
+diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
+index a8283f7dbc519..b8c6bb233ac1c 100644
+--- a/include/uapi/linux/netfilter/x_tables.h
++++ b/include/uapi/linux/netfilter/x_tables.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ #ifndef _UAPI_X_TABLES_H
+ #define _UAPI_X_TABLES_H
+-#include <linux/kernel.h>
++#include <linux/const.h>
+ #include <linux/types.h>
+
+ #define XT_FUNCTION_MAXNAMELEN 30
+diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
+index 776bc92e91180..3481cde43a841 100644
+--- a/include/uapi/linux/netlink.h
++++ b/include/uapi/linux/netlink.h
+@@ -2,7 +2,7 @@
+ #ifndef _UAPI__LINUX_NETLINK_H
+ #define _UAPI__LINUX_NETLINK_H
+
+-#include <linux/kernel.h>
++#include <linux/const.h>
+ #include <linux/socket.h> /* for __kernel_sa_family_t */
+ #include <linux/types.h>
+
+diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
+index 0f272818a4d27..5fc0b7fd08470 100644
+--- a/include/uapi/linux/sysctl.h
++++ b/include/uapi/linux/sysctl.h
+@@ -23,7 +23,7 @@
+ #ifndef _UAPI_LINUX_SYSCTL_H
+ #define _UAPI_LINUX_SYSCTL_H
+
+-#include <linux/kernel.h>
++#include <linux/const.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 2806c9b6577c1..0b2654592d3a7 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1789,7 +1789,6 @@ static int mod_sysfs_init(struct module *mod)
+ if (err)
+ mod_kobject_put(mod);
+
+- /* delay uevent until full sysfs population */
+ out:
+ return err;
+ }
+@@ -1826,7 +1825,6 @@ static int mod_sysfs_setup(struct module *mod,
+ add_sect_attrs(mod, info);
+ add_notes_attrs(mod, info);
+
+- kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+ return 0;
+
+ out_unreg_modinfo_attrs:
+@@ -3481,6 +3479,9 @@ static noinline int do_init_module(struct module *mod)
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_LIVE, mod);
+
++ /* Delay uevent until module has finished its init routine */
++ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
++
+ /*
+ * We need to finish all async code before the module init sequence
+ * is done. This has potential to deadlock. For example, a newly
+@@ -3801,6 +3802,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ MODULE_STATE_GOING, mod);
+ klp_module_going(mod);
+ bug_cleanup:
++ mod->state = MODULE_STATE_GOING;
+ /* module_bug_cleanup needs module_mutex protection */
+ mutex_lock(&module_mutex);
+ module_bug_cleanup(mod);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 70707d44a6903..4e763cdccb335 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -542,39 +542,10 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
+ return mz;
+ }
+
+-/*
+- * Return page count for single (non recursive) @memcg.
+- *
+- * Implementation Note: reading percpu statistics for memcg.
+- *
+- * Both of vmstat[] and percpu_counter has threshold and do periodic
+- * synchronization to implement "quick" read. There are trade-off between
+- * reading cost and precision of value. Then, we may have a chance to implement
+- * a periodic synchronization of counter in memcg's counter.
+- *
+- * But this _read() function is used for user interface now. The user accounts
+- * memory usage by memory cgroup and he _always_ requires exact value because
+- * he accounts memory. Even if we provide quick-and-fuzzy read, we always
+- * have to visit all online cpus and make sum. So, for now, unnecessary
+- * synchronization is not implemented. (just implemented for cpu hotplug)
+- *
+- * If there are kernel internal actions which can make use of some not-exact
+- * value, and reading all cpu value can be performance bottleneck in some
+- * common workload, threshold and synchronization as vmstat[] should be
+- * implemented.
+- *
+- * The parameter idx can be of type enum memcg_event_item or vm_event_item.
+- */
+-
+ static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
+ int event)
+ {
+- unsigned long val = 0;
+- int cpu;
+-
+- for_each_possible_cpu(cpu)
+- val += per_cpu(memcg->stat->events[event], cpu);
+- return val;
++ return atomic_long_read(&memcg->events[event]);
+ }
+
+ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
+@@ -586,27 +557,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
+ * counted as CACHE even if it's on ANON LRU.
+ */
+ if (PageAnon(page))
+- __this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
++ __mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
+ else {
+- __this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
++ __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
+ if (PageSwapBacked(page))
+- __this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
++ __mod_memcg_state(memcg, NR_SHMEM, nr_pages);
+ }
+
+ if (compound) {
+ VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+- __this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
++ __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
+ }
+
+ /* pagein of a big page is an event. So, ignore page size */
+ if (nr_pages > 0)
+- __this_cpu_inc(memcg->stat->events[PGPGIN]);
++ __count_memcg_events(memcg, PGPGIN, 1);
+ else {
+- __this_cpu_inc(memcg->stat->events[PGPGOUT]);
++ __count_memcg_events(memcg, PGPGOUT, 1);
+ nr_pages = -nr_pages; /* for event */
+ }
+
+- __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
++ __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
+ }
+
+ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+@@ -642,8 +613,8 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+ {
+ unsigned long val, next;
+
+- val = __this_cpu_read(memcg->stat->nr_page_events);
+- next = __this_cpu_read(memcg->stat->targets[target]);
++ val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
++ next = __this_cpu_read(memcg->stat_cpu->targets[target]);
+ /* from time_after() in jiffies.h */
+ if ((long)(next - val) < 0) {
+ switch (target) {
+@@ -659,7 +630,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+ default:
+ break;
+ }
+- __this_cpu_write(memcg->stat->targets[target], next);
++ __this_cpu_write(memcg->stat_cpu->targets[target], next);
+ return true;
+ }
+ return false;
+@@ -1726,11 +1697,6 @@ void unlock_page_memcg(struct page *page)
+ }
+ EXPORT_SYMBOL(unlock_page_memcg);
+
+-/*
+- * size of first charge trial. "32" comes from vmscan.c's magic value.
+- * TODO: maybe necessary to use big numbers in big irons.
+- */
+-#define CHARGE_BATCH 32U
+ struct memcg_stock_pcp {
+ struct mem_cgroup *cached; /* this never be root cgroup */
+ unsigned int nr_pages;
+@@ -1758,7 +1724,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ unsigned long flags;
+ bool ret = false;
+
+- if (nr_pages > CHARGE_BATCH)
++ if (nr_pages > MEMCG_CHARGE_BATCH)
+ return ret;
+
+ local_irq_save(flags);
+@@ -1827,7 +1793,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ }
+ stock->nr_pages += nr_pages;
+
+- if (stock->nr_pages > CHARGE_BATCH)
++ if (stock->nr_pages > MEMCG_CHARGE_BATCH)
+ drain_stock(stock);
+
+ local_irq_restore(flags);
+@@ -1877,9 +1843,44 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+ static int memcg_hotplug_cpu_dead(unsigned int cpu)
+ {
+ struct memcg_stock_pcp *stock;
++ struct mem_cgroup *memcg;
+
+ stock = &per_cpu(memcg_stock, cpu);
+ drain_stock(stock);
++
++ for_each_mem_cgroup(memcg) {
++ int i;
++
++ for (i = 0; i < MEMCG_NR_STAT; i++) {
++ int nid;
++ long x;
++
++ x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
++ if (x)
++ atomic_long_add(x, &memcg->stat[i]);
++
++ if (i >= NR_VM_NODE_STAT_ITEMS)
++ continue;
++
++ for_each_node(nid) {
++ struct mem_cgroup_per_node *pn;
++
++ pn = mem_cgroup_nodeinfo(memcg, nid);
++ x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
++ if (x)
++ atomic_long_add(x, &pn->lruvec_stat[i]);
++ }
++ }
++
++ for (i = 0; i < MEMCG_NR_EVENTS; i++) {
++ long x;
++
++ x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
++ if (x)
++ atomic_long_add(x, &memcg->events[i]);
++ }
++ }
++
+ return 0;
+ }
+
+@@ -1900,7 +1901,7 @@ static void high_work_func(struct work_struct *work)
+ struct mem_cgroup *memcg;
+
+ memcg = container_of(work, struct mem_cgroup, high_work);
+- reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
++ reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
+ }
+
+ /*
+@@ -1924,7 +1925,7 @@ void mem_cgroup_handle_over_high(void)
+ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ unsigned int nr_pages)
+ {
+- unsigned int batch = max(CHARGE_BATCH, nr_pages);
++ unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
+ int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ struct mem_cgroup *mem_over_limit;
+ struct page_counter *counter;
+@@ -2444,18 +2445,11 @@ void mem_cgroup_split_huge_fixup(struct page *head)
+ for (i = 1; i < HPAGE_PMD_NR; i++)
+ head[i].mem_cgroup = head->mem_cgroup;
+
+- __this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
+- HPAGE_PMD_NR);
++ __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ #ifdef CONFIG_MEMCG_SWAP
+-static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
+- int nr_entries)
+-{
+- this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries);
+-}
+-
+ /**
+ * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
+ * @entry: swap entry to be moved
+@@ -2479,8 +2473,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
+ new_id = mem_cgroup_id(to);
+
+ if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
+- mem_cgroup_swap_statistics(from, -1);
+- mem_cgroup_swap_statistics(to, 1);
++ mod_memcg_state(from, MEMCG_SWAP, -1);
++ mod_memcg_state(to, MEMCG_SWAP, 1);
+ return 0;
+ }
+ return -EINVAL;
+@@ -4210,8 +4204,8 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
+ if (!pn)
+ return 1;
+
+- pn->lruvec_stat = alloc_percpu(struct lruvec_stat);
+- if (!pn->lruvec_stat) {
++ pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
++ if (!pn->lruvec_stat_cpu) {
+ kfree(pn);
+ return 1;
+ }
+@@ -4232,7 +4226,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
+ if (!pn)
+ return;
+
+- free_percpu(pn->lruvec_stat);
++ free_percpu(pn->lruvec_stat_cpu);
+ kfree(pn);
+ }
+
+@@ -4242,7 +4236,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
+
+ for_each_node(node)
+ free_mem_cgroup_per_node_info(memcg, node);
+- free_percpu(memcg->stat);
++ free_percpu(memcg->stat_cpu);
+ kfree(memcg);
+ }
+
+@@ -4271,8 +4265,8 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ if (memcg->id.id < 0)
+ goto fail;
+
+- memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+- if (!memcg->stat)
++ memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
++ if (!memcg->stat_cpu)
+ goto fail;
+
+ for_each_node(node)
+@@ -4632,8 +4626,8 @@ static int mem_cgroup_move_account(struct page *page,
+ spin_lock_irqsave(&from->move_lock, flags);
+
+ if (!anon && page_mapped(page)) {
+- __this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
+- __this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
++ __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
++ __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
+ }
+
+ /*
+@@ -4645,16 +4639,14 @@ static int mem_cgroup_move_account(struct page *page,
+ struct address_space *mapping = page_mapping(page);
+
+ if (mapping_cap_account_dirty(mapping)) {
+- __this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
+- nr_pages);
+- __this_cpu_add(to->stat->count[NR_FILE_DIRTY],
+- nr_pages);
++ __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
++ __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
+ }
+ }
+
+ if (PageWriteback(page)) {
+- __this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
+- __this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
++ __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
++ __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
+ }
+
+ /*
+@@ -5690,12 +5682,12 @@ static void uncharge_batch(const struct uncharge_gather *ug)
+ }
+
+ local_irq_save(flags);
+- __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
+- __this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
+- __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
+- __this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
+- __this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
+- __this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
++ __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
++ __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
++ __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
++ __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
++ __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
++ __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
+ memcg_check_events(ug->memcg, ug->dummy_page);
+ local_irq_restore(flags);
+
+@@ -5926,7 +5918,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (in_softirq())
+ gfp_mask = GFP_NOWAIT;
+
+- this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
++ mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
+
+ if (try_charge(memcg, gfp_mask, nr_pages) == 0)
+ return true;
+@@ -5947,7 +5939,7 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+ return;
+ }
+
+- this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
++ mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
+
+ refill_stock(memcg, nr_pages);
+ }
+@@ -6071,7 +6063,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
+ nr_entries);
+ VM_BUG_ON_PAGE(oldid, page);
+- mem_cgroup_swap_statistics(swap_memcg, nr_entries);
++ mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
+
+ page->mem_cgroup = NULL;
+
+@@ -6137,7 +6129,7 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+ mem_cgroup_id_get_many(memcg, nr_pages - 1);
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
+ VM_BUG_ON_PAGE(oldid, page);
+- mem_cgroup_swap_statistics(memcg, nr_pages);
++ mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
+
+ return 0;
+ }
+@@ -6165,7 +6157,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
+ else
+ page_counter_uncharge(&memcg->memsw, nr_pages);
+ }
+- mem_cgroup_swap_statistics(memcg, -nr_pages);
++ mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
+ mem_cgroup_id_put_many(memcg, nr_pages);
+ }
+ rcu_read_unlock();
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 071e09c3d8557..c78db361cbbaa 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -721,8 +721,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ runtime->boundary *= 2;
+
+ /* clear the buffer for avoiding possible kernel info leaks */
+- if (runtime->dma_area && !substream->ops->copy_user)
+- memset(runtime->dma_area, 0, runtime->dma_bytes);
++ if (runtime->dma_area && !substream->ops->copy_user) {
++ size_t size = runtime->dma_bytes;
++
++ if (runtime->info & SNDRV_PCM_INFO_MMAP)
++ size = PAGE_ALIGN(size);
++ memset(runtime->dma_area, 0, size);
++ }
+
+ snd_pcm_timer_resolution_change(substream);
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
+diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
+index 719093489a2c4..7909cf6040e3d 100644
+--- a/sound/core/seq/seq_queue.h
++++ b/sound/core/seq/seq_queue.h
+@@ -40,10 +40,10 @@ struct snd_seq_queue {
+
+ struct snd_seq_timer *timer; /* time keeper for this queue */
+ int owner; /* client that 'owns' the timer */
+- unsigned int locked:1, /* timer is only accesibble by owner if set */
+- klocked:1, /* kernel lock (after START) */
+- check_again:1,
+- check_blocked:1;
++ bool locked; /* timer is only accesibble by owner if set */
++ bool klocked; /* kernel lock (after START) */
++ bool check_again; /* concurrent access happened during check */
++ bool check_blocked; /* queue being checked */
+
+ unsigned int flags; /* status flags */
+ unsigned int info_flags; /* info for sync */
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 92f5f452bee2b..369f812d70722 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4443,11 +4443,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+ /* Delay enabling the HP amp, to let the mic-detection
+ * state machine run.
+ */
+- cancel_delayed_work(&spec->unsol_hp_work);
+- schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
+ tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+ if (tbl)
+ tbl->block_report = 1;
++ schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
+ }
+
+ static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+@@ -4625,12 +4624,25 @@ static void ca0132_free(struct hda_codec *codec)
+ kfree(codec->spec);
+ }
+
++#ifdef CONFIG_PM
++static int ca0132_suspend(struct hda_codec *codec)
++{
++ struct ca0132_spec *spec = codec->spec;
++
++ cancel_delayed_work_sync(&spec->unsol_hp_work);
++ return 0;
++}
++#endif
++
+ static const struct hda_codec_ops ca0132_patch_ops = {
+ .build_controls = ca0132_build_controls,
+ .build_pcms = ca0132_build_pcms,
+ .init = ca0132_init,
+ .free = ca0132_free,
+ .unsol_event = snd_hda_jack_unsol_event,
++#ifdef CONFIG_PM
++ .suspend = ca0132_suspend,
++#endif
+ };
+
+ static void ca0132_config(struct hda_codec *codec)
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 6caf94581a0e8..ecdbdb26164ea 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -324,6 +324,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ struct usb_host_interface *alts;
+ struct usb_interface *iface;
+ unsigned int ep;
++ unsigned int ifnum;
+
+ /* Implicit feedback sync EPs consumers are always playback EPs */
+ if (subs->direction != SNDRV_PCM_STREAM_PLAYBACK)
+@@ -334,44 +335,23 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
+ case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */
+ ep = 0x81;
+- iface = usb_ifnum_to_if(dev, 3);
+-
+- if (!iface || iface->num_altsetting == 0)
+- return -EINVAL;
+-
+- alts = &iface->altsetting[1];
+- goto add_sync_ep;
+- break;
++ ifnum = 3;
++ goto add_sync_ep_from_ifnum;
+ case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
+ case USB_ID(0x0763, 0x2081):
+ ep = 0x81;
+- iface = usb_ifnum_to_if(dev, 2);
+-
+- if (!iface || iface->num_altsetting == 0)
+- return -EINVAL;
+-
+- alts = &iface->altsetting[1];
+- goto add_sync_ep;
+- case USB_ID(0x2466, 0x8003):
++ ifnum = 2;
++ goto add_sync_ep_from_ifnum;
++ case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */
+ ep = 0x86;
+- iface = usb_ifnum_to_if(dev, 2);
+-
+- if (!iface || iface->num_altsetting == 0)
+- return -EINVAL;
+-
+- alts = &iface->altsetting[1];
+- goto add_sync_ep;
+- case USB_ID(0x1397, 0x0002):
++ ifnum = 2;
++ goto add_sync_ep_from_ifnum;
++ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
+ ep = 0x81;
+- iface = usb_ifnum_to_if(dev, 1);
+-
+- if (!iface || iface->num_altsetting == 0)
+- return -EINVAL;
+-
+- alts = &iface->altsetting[1];
+- goto add_sync_ep;
+-
++ ifnum = 1;
++ goto add_sync_ep_from_ifnum;
+ }
++
+ if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+ altsd->bInterfaceProtocol == 2 &&
+@@ -386,6 +366,14 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ /* No quirk */
+ return 0;
+
++add_sync_ep_from_ifnum:
++ iface = usb_ifnum_to_if(dev, ifnum);
++
++ if (!iface || iface->num_altsetting < 2)
++ return -EINVAL;
++
++ alts = &iface->altsetting[1];
++
+ add_sync_ep:
+ subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
+ alts, ep, !subs->direction,