
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:master commit in: /
Date: Tue, 23 Jun 2015 12:54:30
Message-Id: 1435064077.3b05debcd4dd78810c90dc8e2a07b7843275df2c.mpagano@gentoo
1 commit: 3b05debcd4dd78810c90dc8e2a07b7843275df2c
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jun 23 12:54:37 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jun 23 12:54:37 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3b05debc
7
8 Clean up master
9
10 1000_linux-4.0.1.patch | 479 --
11 1001_linux-4.0.2.patch | 8587 --------------------
12 1002_linux-4.0.3.patch | 2827 -------
13 1003_linux-4.0.4.patch | 2713 -------
14 1004_linux-4.0.5.patch | 4937 -----------
15 1500_XATTR_USER_PREFIX.patch | 54 -
16 ...ble-link-security-restrictions-by-default.patch | 22 -
17 2600_select-REGMAP_IRQ-for-rt5033.patch | 30 -
18 2700_ThinkPad-30-brightness-control-fix.patch | 67 -
19 2900_dev-root-proc-mount-fix.patch | 30 -
20 2905_2disk-resume-image-fix.patch | 24 -
21 2910_lz4-compression-fix.patch | 30 -
22 4200_fbcondecor-3.19.patch | 2119 -----
23 ...able-additional-cpu-optimizations-for-gcc.patch | 327 -
24 ...roups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch | 104 -
25 ...introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1 | 6966 ----------------
26 ...rly-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch | 1222 ---
27 ...-additional-cpu-optimizations-for-gcc-4.9.patch | 402 -
28 18 files changed, 30940 deletions(-)
29
30 diff --git a/1000_linux-4.0.1.patch b/1000_linux-4.0.1.patch
31 deleted file mode 100644
32 index ac58552..0000000
33 --- a/1000_linux-4.0.1.patch
34 +++ /dev/null
35 @@ -1,479 +0,0 @@
36 -diff --git a/Makefile b/Makefile
37 -index fbd43bfe4445..f499cd2f5738 100644
38 ---- a/Makefile
39 -+++ b/Makefile
40 -@@ -1,6 +1,6 @@
41 - VERSION = 4
42 - PATCHLEVEL = 0
43 --SUBLEVEL = 0
44 -+SUBLEVEL = 1
45 - EXTRAVERSION =
46 - NAME = Hurr durr I'ma sheep
47 -
48 -diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
49 -index 4085c4b31047..355d5fea5be9 100644
50 ---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
51 -+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
52 -@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
53 - struct napi_struct napi;
54 -
55 - #ifdef CONFIG_NET_RX_BUSY_POLL
56 -- unsigned int state;
57 --#define BNX2X_FP_STATE_IDLE 0
58 --#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
59 --#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
60 --#define BNX2X_FP_STATE_DISABLED (1 << 2)
61 --#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
62 --#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
63 --#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
64 --#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
65 --#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
66 --#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
67 -- /* protect state */
68 -- spinlock_t lock;
69 --#endif /* CONFIG_NET_RX_BUSY_POLL */
70 -+ unsigned long busy_poll_state;
71 -+#endif
72 -
73 - union host_hc_status_block status_blk;
74 - /* chip independent shortcuts into sb structure */
75 -@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
76 - #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
77 -
78 - #ifdef CONFIG_NET_RX_BUSY_POLL
79 --static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
80 -+
81 -+enum bnx2x_fp_state {
82 -+ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
83 -+
84 -+ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
85 -+ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
86 -+
87 -+ BNX2X_STATE_FP_POLL_BIT = 2,
88 -+ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
89 -+
90 -+ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
91 -+};
92 -+
93 -+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
94 - {
95 -- spin_lock_init(&fp->lock);
96 -- fp->state = BNX2X_FP_STATE_IDLE;
97 -+ WRITE_ONCE(fp->busy_poll_state, 0);
98 - }
99 -
100 - /* called from the device poll routine to get ownership of a FP */
101 - static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
102 - {
103 -- bool rc = true;
104 --
105 -- spin_lock_bh(&fp->lock);
106 -- if (fp->state & BNX2X_FP_LOCKED) {
107 -- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
108 -- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
109 -- rc = false;
110 -- } else {
111 -- /* we don't care if someone yielded */
112 -- fp->state = BNX2X_FP_STATE_NAPI;
113 -+ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
114 -+
115 -+ while (1) {
116 -+ switch (old) {
117 -+ case BNX2X_STATE_FP_POLL:
118 -+ /* make sure bnx2x_fp_lock_poll() wont starve us */
119 -+ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
120 -+ &fp->busy_poll_state);
121 -+ /* fallthrough */
122 -+ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
123 -+ return false;
124 -+ default:
125 -+ break;
126 -+ }
127 -+ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
128 -+ if (unlikely(prev != old)) {
129 -+ old = prev;
130 -+ continue;
131 -+ }
132 -+ return true;
133 - }
134 -- spin_unlock_bh(&fp->lock);
135 -- return rc;
136 - }
137 -
138 --/* returns true is someone tried to get the FP while napi had it */
139 --static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
140 -+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
141 - {
142 -- bool rc = false;
143 --
144 -- spin_lock_bh(&fp->lock);
145 -- WARN_ON(fp->state &
146 -- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
147 --
148 -- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
149 -- rc = true;
150 --
151 -- /* state ==> idle, unless currently disabled */
152 -- fp->state &= BNX2X_FP_STATE_DISABLED;
153 -- spin_unlock_bh(&fp->lock);
154 -- return rc;
155 -+ smp_wmb();
156 -+ fp->busy_poll_state = 0;
157 - }
158 -
159 - /* called from bnx2x_low_latency_poll() */
160 - static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
161 - {
162 -- bool rc = true;
163 --
164 -- spin_lock_bh(&fp->lock);
165 -- if ((fp->state & BNX2X_FP_LOCKED)) {
166 -- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
167 -- rc = false;
168 -- } else {
169 -- /* preserve yield marks */
170 -- fp->state |= BNX2X_FP_STATE_POLL;
171 -- }
172 -- spin_unlock_bh(&fp->lock);
173 -- return rc;
174 -+ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
175 - }
176 -
177 --/* returns true if someone tried to get the FP while it was locked */
178 --static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
179 -+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
180 - {
181 -- bool rc = false;
182 --
183 -- spin_lock_bh(&fp->lock);
184 -- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
185 --
186 -- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
187 -- rc = true;
188 --
189 -- /* state ==> idle, unless currently disabled */
190 -- fp->state &= BNX2X_FP_STATE_DISABLED;
191 -- spin_unlock_bh(&fp->lock);
192 -- return rc;
193 -+ smp_mb__before_atomic();
194 -+ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
195 - }
196 -
197 --/* true if a socket is polling, even if it did not get the lock */
198 -+/* true if a socket is polling */
199 - static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
200 - {
201 -- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
202 -- return fp->state & BNX2X_FP_USER_PEND;
203 -+ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
204 - }
205 -
206 - /* false if fp is currently owned */
207 - static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
208 - {
209 -- int rc = true;
210 --
211 -- spin_lock_bh(&fp->lock);
212 -- if (fp->state & BNX2X_FP_OWNED)
213 -- rc = false;
214 -- fp->state |= BNX2X_FP_STATE_DISABLED;
215 -- spin_unlock_bh(&fp->lock);
216 -+ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
217 -+ return !bnx2x_fp_ll_polling(fp);
218 -
219 -- return rc;
220 - }
221 - #else
222 --static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
223 -+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
224 - {
225 - }
226 -
227 -@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
228 - return true;
229 - }
230 -
231 --static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
232 -+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
233 - {
234 -- return false;
235 - }
236 -
237 - static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
238 -@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
239 - return false;
240 - }
241 -
242 --static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
243 -+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
244 - {
245 -- return false;
246 - }
247 -
248 - static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
249 -diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
250 -index 0a9faa134a9a..2f63467bce46 100644
251 ---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
252 -+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
253 -@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
254 - int i;
255 -
256 - for_each_rx_queue_cnic(bp, i) {
257 -- bnx2x_fp_init_lock(&bp->fp[i]);
258 -+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
259 - napi_enable(&bnx2x_fp(bp, i, napi));
260 - }
261 - }
262 -@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
263 - int i;
264 -
265 - for_each_eth_queue(bp, i) {
266 -- bnx2x_fp_init_lock(&bp->fp[i]);
267 -+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
268 - napi_enable(&bnx2x_fp(bp, i, napi));
269 - }
270 - }
271 -@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
272 - }
273 - }
274 -
275 -+ bnx2x_fp_unlock_napi(fp);
276 -+
277 - /* Fall out from the NAPI loop if needed */
278 -- if (!bnx2x_fp_unlock_napi(fp) &&
279 -- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
280 -+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
281 -
282 - /* No need to update SB for FCoE L2 ring as long as
283 - * it's connected to the default SB and the SB
284 -diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
285 -index f8528a4cf54f..fceb637efd6b 100644
286 ---- a/drivers/net/vxlan.c
287 -+++ b/drivers/net/vxlan.c
288 -@@ -1713,12 +1713,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
289 - }
290 - }
291 -
292 -- skb = iptunnel_handle_offloads(skb, udp_sum, type);
293 -- if (IS_ERR(skb)) {
294 -- err = -EINVAL;
295 -- goto err;
296 -- }
297 --
298 - skb_scrub_packet(skb, xnet);
299 -
300 - min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
301 -@@ -1738,6 +1732,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
302 - goto err;
303 - }
304 -
305 -+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
306 -+ if (IS_ERR(skb)) {
307 -+ err = -EINVAL;
308 -+ goto err;
309 -+ }
310 -+
311 - vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
312 - vxh->vx_flags = htonl(VXLAN_HF_VNI);
313 - vxh->vx_vni = md->vni;
314 -@@ -1798,10 +1798,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
315 - }
316 - }
317 -
318 -- skb = iptunnel_handle_offloads(skb, udp_sum, type);
319 -- if (IS_ERR(skb))
320 -- return PTR_ERR(skb);
321 --
322 - min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
323 - + VXLAN_HLEN + sizeof(struct iphdr)
324 - + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
325 -@@ -1817,6 +1813,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
326 - if (WARN_ON(!skb))
327 - return -ENOMEM;
328 -
329 -+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
330 -+ if (IS_ERR(skb))
331 -+ return PTR_ERR(skb);
332 -+
333 - vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
334 - vxh->vx_flags = htonl(VXLAN_HF_VNI);
335 - vxh->vx_vni = md->vni;
336 -diff --git a/fs/exec.c b/fs/exec.c
337 -index c7f9b733406d..00400cf522dc 100644
338 ---- a/fs/exec.c
339 -+++ b/fs/exec.c
340 -@@ -1265,6 +1265,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
341 - spin_unlock(&p->fs->lock);
342 - }
343 -
344 -+static void bprm_fill_uid(struct linux_binprm *bprm)
345 -+{
346 -+ struct inode *inode;
347 -+ unsigned int mode;
348 -+ kuid_t uid;
349 -+ kgid_t gid;
350 -+
351 -+ /* clear any previous set[ug]id data from a previous binary */
352 -+ bprm->cred->euid = current_euid();
353 -+ bprm->cred->egid = current_egid();
354 -+
355 -+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
356 -+ return;
357 -+
358 -+ if (task_no_new_privs(current))
359 -+ return;
360 -+
361 -+ inode = file_inode(bprm->file);
362 -+ mode = READ_ONCE(inode->i_mode);
363 -+ if (!(mode & (S_ISUID|S_ISGID)))
364 -+ return;
365 -+
366 -+ /* Be careful if suid/sgid is set */
367 -+ mutex_lock(&inode->i_mutex);
368 -+
369 -+ /* reload atomically mode/uid/gid now that lock held */
370 -+ mode = inode->i_mode;
371 -+ uid = inode->i_uid;
372 -+ gid = inode->i_gid;
373 -+ mutex_unlock(&inode->i_mutex);
374 -+
375 -+ /* We ignore suid/sgid if there are no mappings for them in the ns */
376 -+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
377 -+ !kgid_has_mapping(bprm->cred->user_ns, gid))
378 -+ return;
379 -+
380 -+ if (mode & S_ISUID) {
381 -+ bprm->per_clear |= PER_CLEAR_ON_SETID;
382 -+ bprm->cred->euid = uid;
383 -+ }
384 -+
385 -+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
386 -+ bprm->per_clear |= PER_CLEAR_ON_SETID;
387 -+ bprm->cred->egid = gid;
388 -+ }
389 -+}
390 -+
391 - /*
392 - * Fill the binprm structure from the inode.
393 - * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
394 -@@ -1273,36 +1320,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
395 - */
396 - int prepare_binprm(struct linux_binprm *bprm)
397 - {
398 -- struct inode *inode = file_inode(bprm->file);
399 -- umode_t mode = inode->i_mode;
400 - int retval;
401 -
402 --
403 -- /* clear any previous set[ug]id data from a previous binary */
404 -- bprm->cred->euid = current_euid();
405 -- bprm->cred->egid = current_egid();
406 --
407 -- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
408 -- !task_no_new_privs(current) &&
409 -- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
410 -- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
411 -- /* Set-uid? */
412 -- if (mode & S_ISUID) {
413 -- bprm->per_clear |= PER_CLEAR_ON_SETID;
414 -- bprm->cred->euid = inode->i_uid;
415 -- }
416 --
417 -- /* Set-gid? */
418 -- /*
419 -- * If setgid is set but no group execute bit then this
420 -- * is a candidate for mandatory locking, not a setgid
421 -- * executable.
422 -- */
423 -- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
424 -- bprm->per_clear |= PER_CLEAR_ON_SETID;
425 -- bprm->cred->egid = inode->i_gid;
426 -- }
427 -- }
428 -+ bprm_fill_uid(bprm);
429 -
430 - /* fill in binprm security blob */
431 - retval = security_bprm_set_creds(bprm);
432 -diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
433 -index a28e09c7825d..36508e69e92a 100644
434 ---- a/kernel/bpf/verifier.c
435 -+++ b/kernel/bpf/verifier.c
436 -@@ -1380,7 +1380,8 @@ peek_stack:
437 - /* tell verifier to check for equivalent states
438 - * after every call and jump
439 - */
440 -- env->explored_states[t + 1] = STATE_LIST_MARK;
441 -+ if (t + 1 < insn_cnt)
442 -+ env->explored_states[t + 1] = STATE_LIST_MARK;
443 - } else {
444 - /* conditional jump with two edges */
445 - ret = push_insn(t, t + 1, FALLTHROUGH, env);
446 -diff --git a/net/core/skbuff.c b/net/core/skbuff.c
447 -index 8e4ac97c8477..98d45fe72f51 100644
448 ---- a/net/core/skbuff.c
449 -+++ b/net/core/skbuff.c
450 -@@ -4169,19 +4169,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
451 - */
452 - void skb_scrub_packet(struct sk_buff *skb, bool xnet)
453 - {
454 -- if (xnet)
455 -- skb_orphan(skb);
456 - skb->tstamp.tv64 = 0;
457 - skb->pkt_type = PACKET_HOST;
458 - skb->skb_iif = 0;
459 - skb->ignore_df = 0;
460 - skb_dst_drop(skb);
461 -- skb->mark = 0;
462 - skb_sender_cpu_clear(skb);
463 -- skb_init_secmark(skb);
464 - secpath_reset(skb);
465 - nf_reset(skb);
466 - nf_reset_trace(skb);
467 -+
468 -+ if (!xnet)
469 -+ return;
470 -+
471 -+ skb_orphan(skb);
472 -+ skb->mark = 0;
473 - }
474 - EXPORT_SYMBOL_GPL(skb_scrub_packet);
475 -
476 -diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
477 -index 5a4828ba05ad..a566a2e4715b 100644
478 ---- a/net/ipv4/geneve.c
479 -+++ b/net/ipv4/geneve.c
480 -@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
481 - int min_headroom;
482 - int err;
483 -
484 -- skb = udp_tunnel_handle_offloads(skb, csum);
485 -- if (IS_ERR(skb))
486 -- return PTR_ERR(skb);
487 --
488 - min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
489 - + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
490 - + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
491 -@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
492 - if (unlikely(!skb))
493 - return -ENOMEM;
494 -
495 -+ skb = udp_tunnel_handle_offloads(skb, csum);
496 -+ if (IS_ERR(skb))
497 -+ return PTR_ERR(skb);
498 -+
499 - gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
500 - geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
501 -
502 -diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
503 -index 1db253e36045..d520492ba698 100644
504 ---- a/net/ipv4/tcp_output.c
505 -+++ b/net/ipv4/tcp_output.c
506 -@@ -2929,6 +2929,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
507 - }
508 - #endif
509 -
510 -+ /* Do not fool tcpdump (if any), clean our debris */
511 -+ skb->tstamp.tv64 = 0;
512 - return skb;
513 - }
514 - EXPORT_SYMBOL(tcp_make_synack);
515
516 diff --git a/1001_linux-4.0.2.patch b/1001_linux-4.0.2.patch
517 deleted file mode 100644
518 index 38a75b2..0000000
519 --- a/1001_linux-4.0.2.patch
520 +++ /dev/null
521 @@ -1,8587 +0,0 @@
522 -diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
523 -index 99ca40e..5c204df 100644
524 ---- a/Documentation/networking/scaling.txt
525 -+++ b/Documentation/networking/scaling.txt
526 -@@ -282,7 +282,7 @@ following is true:
527 -
528 - - The current CPU's queue head counter >= the recorded tail counter
529 - value in rps_dev_flow[i]
530 --- The current CPU is unset (equal to RPS_NO_CPU)
531 -+- The current CPU is unset (>= nr_cpu_ids)
532 - - The current CPU is offline
533 -
534 - After this check, the packet is sent to the (possibly updated) current
535 -diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
536 -index 4ceef53..d1ad9d5 100644
537 ---- a/Documentation/virtual/kvm/devices/s390_flic.txt
538 -+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
539 -@@ -27,6 +27,9 @@ Groups:
540 - Copies all floating interrupts into a buffer provided by userspace.
541 - When the buffer is too small it returns -ENOMEM, which is the indication
542 - for userspace to try again with a bigger buffer.
543 -+ -ENOBUFS is returned when the allocation of a kernelspace buffer has
544 -+ failed.
545 -+ -EFAULT is returned when copying data to userspace failed.
546 - All interrupts remain pending, i.e. are not deleted from the list of
547 - currently pending interrupts.
548 - attr->addr contains the userspace address of the buffer into which all
549 -diff --git a/Makefile b/Makefile
550 -index f499cd2..0649a60 100644
551 ---- a/Makefile
552 -+++ b/Makefile
553 -@@ -1,6 +1,6 @@
554 - VERSION = 4
555 - PATCHLEVEL = 0
556 --SUBLEVEL = 1
557 -+SUBLEVEL = 2
558 - EXTRAVERSION =
559 - NAME = Hurr durr I'ma sheep
560 -
561 -diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
562 -index fec1fca..6c4bc53 100644
563 ---- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
564 -+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
565 -@@ -167,7 +167,13 @@
566 -
567 - macb1: ethernet@f802c000 {
568 - phy-mode = "rmii";
569 -+ #address-cells = <1>;
570 -+ #size-cells = <0>;
571 - status = "okay";
572 -+
573 -+ ethernet-phy@1 {
574 -+ reg = <0x1>;
575 -+ };
576 - };
577 -
578 - dbgu: serial@ffffee00 {
579 -diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
580 -index a5441d5..3cc8b83 100644
581 ---- a/arch/arm/boot/dts/dove.dtsi
582 -+++ b/arch/arm/boot/dts/dove.dtsi
583 -@@ -154,7 +154,7 @@
584 -
585 - uart2: serial@12200 {
586 - compatible = "ns16550a";
587 -- reg = <0x12000 0x100>;
588 -+ reg = <0x12200 0x100>;
589 - reg-shift = <2>;
590 - interrupts = <9>;
591 - clocks = <&core_clk 0>;
592 -@@ -163,7 +163,7 @@
593 -
594 - uart3: serial@12300 {
595 - compatible = "ns16550a";
596 -- reg = <0x12100 0x100>;
597 -+ reg = <0x12300 0x100>;
598 - reg-shift = <2>;
599 - interrupts = <10>;
600 - clocks = <&core_clk 0>;
601 -diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
602 -index f027754..c41600e 100644
603 ---- a/arch/arm/boot/dts/exynos5250-spring.dts
604 -+++ b/arch/arm/boot/dts/exynos5250-spring.dts
605 -@@ -429,7 +429,6 @@
606 - &mmc_0 {
607 - status = "okay";
608 - num-slots = <1>;
609 -- supports-highspeed;
610 - broken-cd;
611 - card-detect-delay = <200>;
612 - samsung,dw-mshc-ciu-div = <3>;
613 -@@ -437,11 +436,8 @@
614 - samsung,dw-mshc-ddr-timing = <1 2>;
615 - pinctrl-names = "default";
616 - pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
617 --
618 -- slot@0 {
619 -- reg = <0>;
620 -- bus-width = <8>;
621 -- };
622 -+ bus-width = <8>;
623 -+ cap-mmc-highspeed;
624 - };
625 -
626 - /*
627 -@@ -451,7 +447,6 @@
628 - &mmc_1 {
629 - status = "okay";
630 - num-slots = <1>;
631 -- supports-highspeed;
632 - broken-cd;
633 - card-detect-delay = <200>;
634 - samsung,dw-mshc-ciu-div = <3>;
635 -@@ -459,11 +454,8 @@
636 - samsung,dw-mshc-ddr-timing = <1 2>;
637 - pinctrl-names = "default";
638 - pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
639 --
640 -- slot@0 {
641 -- reg = <0>;
642 -- bus-width = <4>;
643 -- };
644 -+ bus-width = <4>;
645 -+ cap-sd-highspeed;
646 - };
647 -
648 - &pinctrl_0 {
649 -diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
650 -index afb9caf..674d03f 100644
651 ---- a/arch/arm/include/asm/elf.h
652 -+++ b/arch/arm/include/asm/elf.h
653 -@@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
654 - the loader. We need to make sure that it is out of the way of the program
655 - that it will "exec", and that there is sufficient room for the brk. */
656 -
657 --#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
658 -+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
659 -
660 - /* When the program starts, a1 contains a pointer to a function to be
661 - registered with atexit, as per the SVR4 ABI. A value of 0 means we
662 -diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
663 -index 0db25bc..3a42ac6 100644
664 ---- a/arch/arm/include/uapi/asm/kvm.h
665 -+++ b/arch/arm/include/uapi/asm/kvm.h
666 -@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
667 - #define KVM_ARM_IRQ_CPU_IRQ 0
668 - #define KVM_ARM_IRQ_CPU_FIQ 1
669 -
670 --/* Highest supported SPI, from VGIC_NR_IRQS */
671 -+/*
672 -+ * This used to hold the highest supported SPI, but it is now obsolete
673 -+ * and only here to provide source code level compatibility with older
674 -+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
675 -+ */
676 -+#ifndef __KERNEL__
677 - #define KVM_ARM_IRQ_GIC_MAX 127
678 -+#endif
679 -
680 - /* PSCI interface */
681 - #define KVM_PSCI_FN_BASE 0x95c1ba5e
682 -diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
683 -index c4cc50e..cfb354f 100644
684 ---- a/arch/arm/kernel/hibernate.c
685 -+++ b/arch/arm/kernel/hibernate.c
686 -@@ -22,6 +22,7 @@
687 - #include <asm/suspend.h>
688 - #include <asm/memory.h>
689 - #include <asm/sections.h>
690 -+#include "reboot.h"
691 -
692 - int pfn_is_nosave(unsigned long pfn)
693 - {
694 -@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
695 -
696 - ret = swsusp_save();
697 - if (ret == 0)
698 -- soft_restart(virt_to_phys(cpu_resume));
699 -+ _soft_restart(virt_to_phys(cpu_resume), false);
700 - return ret;
701 - }
702 -
703 -@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
704 - for (pbe = restore_pblist; pbe; pbe = pbe->next)
705 - copy_page(pbe->orig_address, pbe->address);
706 -
707 -- soft_restart(virt_to_phys(cpu_resume));
708 -+ _soft_restart(virt_to_phys(cpu_resume), false);
709 - }
710 -
711 - static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
712 -diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
713 -index fdfa3a7..2bf1a16 100644
714 ---- a/arch/arm/kernel/process.c
715 -+++ b/arch/arm/kernel/process.c
716 -@@ -41,6 +41,7 @@
717 - #include <asm/system_misc.h>
718 - #include <asm/mach/time.h>
719 - #include <asm/tls.h>
720 -+#include "reboot.h"
721 -
722 - #ifdef CONFIG_CC_STACKPROTECTOR
723 - #include <linux/stackprotector.h>
724 -@@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
725 - BUG();
726 - }
727 -
728 --void soft_restart(unsigned long addr)
729 -+void _soft_restart(unsigned long addr, bool disable_l2)
730 - {
731 - u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
732 -
733 -@@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
734 - local_fiq_disable();
735 -
736 - /* Disable the L2 if we're the last man standing. */
737 -- if (num_online_cpus() == 1)
738 -+ if (disable_l2)
739 - outer_disable();
740 -
741 - /* Change to the new stack and continue with the reset. */
742 -@@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
743 - BUG();
744 - }
745 -
746 -+void soft_restart(unsigned long addr)
747 -+{
748 -+ _soft_restart(addr, num_online_cpus() == 1);
749 -+}
750 -+
751 - /*
752 - * Function pointers to optional machine specific functions
753 - */
754 -diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
755 -new file mode 100644
756 -index 0000000..c87f058
757 ---- /dev/null
758 -+++ b/arch/arm/kernel/reboot.h
759 -@@ -0,0 +1,6 @@
760 -+#ifndef REBOOT_H
761 -+#define REBOOT_H
762 -+
763 -+extern void _soft_restart(unsigned long addr, bool disable_l2);
764 -+
765 -+#endif
766 -diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
767 -index 5560f74..b652af5 100644
768 ---- a/arch/arm/kvm/arm.c
769 -+++ b/arch/arm/kvm/arm.c
770 -@@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
771 - if (!irqchip_in_kernel(kvm))
772 - return -ENXIO;
773 -
774 -- if (irq_num < VGIC_NR_PRIVATE_IRQS ||
775 -- irq_num > KVM_ARM_IRQ_GIC_MAX)
776 -+ if (irq_num < VGIC_NR_PRIVATE_IRQS)
777 - return -EINVAL;
778 -
779 - return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
780 -diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
781 -index 8b9f5e2..4f4e222 100644
782 ---- a/arch/arm/mach-mvebu/pmsu.c
783 -+++ b/arch/arm/mach-mvebu/pmsu.c
784 -@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
785 - void __iomem *mpsoc_base;
786 - u32 reg;
787 -
788 -+ pr_warn("CPU idle is currently broken on Armada 38x: disabling");
789 -+ return 0;
790 -+
791 - np = of_find_compatible_node(NULL, NULL,
792 - "marvell,armada-380-coherency-fabric");
793 - if (!np)
794 -@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
795 - return 0;
796 - of_node_put(np);
797 -
798 -+ /*
799 -+ * Currently the CPU idle support for Armada 38x is broken, as
800 -+ * the CPU hotplug uses some of the CPU idle functions it is
801 -+ * broken too, so let's disable it
802 -+ */
803 -+ if (of_machine_is_compatible("marvell,armada380")) {
804 -+ cpu_hotplug_disable();
805 -+ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
806 -+ }
807 -+
808 - if (of_machine_is_compatible("marvell,armadaxp"))
809 - ret = armada_xp_cpuidle_init();
810 - else if (of_machine_is_compatible("marvell,armada370"))
811 -@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
812 - return ret;
813 -
814 - mvebu_v7_pmsu_enable_l2_powerdown_onidle();
815 -- platform_device_register(&mvebu_v7_cpuidle_device);
816 -+ if (mvebu_v7_cpuidle_device.name)
817 -+ platform_device_register(&mvebu_v7_cpuidle_device);
818 - cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
819 -
820 - return 0;
821 -diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
822 -index 7bc6668..dcbe17f 100644
823 ---- a/arch/arm/mach-s3c64xx/crag6410.h
824 -+++ b/arch/arm/mach-s3c64xx/crag6410.h
825 -@@ -14,6 +14,7 @@
826 - #include <mach/gpio-samsung.h>
827 -
828 - #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
829 -+#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
830 -
831 - #define PCA935X_GPIO_BASE GPIO_BOARD_START
832 - #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
833 -diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
834 -index 10b913b..65c426b 100644
835 ---- a/arch/arm/mach-s3c64xx/mach-crag6410.c
836 -+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
837 -@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
838 -
839 - static struct wm831x_pdata crag_pmic_pdata = {
840 - .wm831x_num = 1,
841 -+ .irq_base = BANFF_PMIC_IRQ_BASE,
842 - .gpio_base = BANFF_PMIC_GPIO_BASE,
843 - .soft_shutdown = true,
844 -
845 -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
846 -index 1b8e973..a6186c2 100644
847 ---- a/arch/arm64/Kconfig
848 -+++ b/arch/arm64/Kconfig
849 -@@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075
850 -
851 - If unsure, say Y.
852 -
853 -+config ARM64_ERRATUM_845719
854 -+ bool "Cortex-A53: 845719: a load might read incorrect data"
855 -+ depends on COMPAT
856 -+ default y
857 -+ help
858 -+ This option adds an alternative code sequence to work around ARM
859 -+ erratum 845719 on Cortex-A53 parts up to r0p4.
860 -+
861 -+ When running a compat (AArch32) userspace on an affected Cortex-A53
862 -+ part, a load at EL0 from a virtual address that matches the bottom 32
863 -+ bits of the virtual address used by a recent load at (AArch64) EL1
864 -+ might return incorrect data.
865 -+
866 -+ The workaround is to write the contextidr_el1 register on exception
867 -+ return to a 32-bit task.
868 -+ Please note that this does not necessarily enable the workaround,
869 -+ as it depends on the alternative framework, which will only patch
870 -+ the kernel if an affected CPU is detected.
871 -+
872 -+ If unsure, say Y.
873 -+
874 - endmenu
875 -
876 -
877 -@@ -470,6 +491,10 @@ config HOTPLUG_CPU
878 -
879 - source kernel/Kconfig.preempt
880 -
881 -+config UP_LATE_INIT
882 -+ def_bool y
883 -+ depends on !SMP
884 -+
885 - config HZ
886 - int
887 - default 100
888 -diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
889 -index 69ceedc..4d2a925 100644
890 ---- a/arch/arm64/Makefile
891 -+++ b/arch/arm64/Makefile
892 -@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
893 - core-$(CONFIG_XEN) += arch/arm64/xen/
894 - core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
895 - libs-y := arch/arm64/lib/ $(libs-y)
896 --libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
897 -+core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
898 -
899 - # Default target when executing plain make
900 - KBUILD_IMAGE := Image.gz
901 -diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
902 -index b6c16d5..3f0c53c 100644
903 ---- a/arch/arm64/include/asm/cpufeature.h
904 -+++ b/arch/arm64/include/asm/cpufeature.h
905 -@@ -23,8 +23,9 @@
906 -
907 - #define ARM64_WORKAROUND_CLEAN_CACHE 0
908 - #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
909 -+#define ARM64_WORKAROUND_845719 2
910 -
911 --#define ARM64_NCAPS 2
912 -+#define ARM64_NCAPS 3
913 -
914 - #ifndef __ASSEMBLY__
915 -
916 -diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
917 -index 59e2823..8dcd61e 100644
918 ---- a/arch/arm64/include/asm/smp_plat.h
919 -+++ b/arch/arm64/include/asm/smp_plat.h
920 -@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
921 - extern u64 __cpu_logical_map[NR_CPUS];
922 - #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
923 -
924 -+void __init do_post_cpus_up_work(void);
925 -+
926 - #endif /* __ASM_SMP_PLAT_H */
927 -diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
928 -index 3ef77a4..bc49a18 100644
929 ---- a/arch/arm64/include/uapi/asm/kvm.h
930 -+++ b/arch/arm64/include/uapi/asm/kvm.h
931 -@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
932 - #define KVM_ARM_IRQ_CPU_IRQ 0
933 - #define KVM_ARM_IRQ_CPU_FIQ 1
934 -
935 --/* Highest supported SPI, from VGIC_NR_IRQS */
936 -+/*
937 -+ * This used to hold the highest supported SPI, but it is now obsolete
938 -+ * and only here to provide source code level compatibility with older
939 -+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
940 -+ */
941 -+#ifndef __KERNEL__
942 - #define KVM_ARM_IRQ_GIC_MAX 127
943 -+#endif
944 -
945 - /* PSCI interface */
946 - #define KVM_PSCI_FN_BASE 0x95c1ba5e
947 -diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
948 -index fa62637..ad6d523 100644
949 ---- a/arch/arm64/kernel/cpu_errata.c
950 -+++ b/arch/arm64/kernel/cpu_errata.c
951 -@@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
952 - /* Cortex-A57 r0p0 - r1p2 */
953 - .desc = "ARM erratum 832075",
954 - .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
955 -- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
956 -+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
957 -+ (1 << MIDR_VARIANT_SHIFT) | 2),
958 -+ },
959 -+#endif
960 -+#ifdef CONFIG_ARM64_ERRATUM_845719
961 -+ {
962 -+ /* Cortex-A53 r0p[01234] */
963 -+ .desc = "ARM erratum 845719",
964 -+ .capability = ARM64_WORKAROUND_845719,
965 -+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
966 - },
967 - #endif
968 - {
969 -diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
970 -index cf21bb3..959fe87 100644
971 ---- a/arch/arm64/kernel/entry.S
972 -+++ b/arch/arm64/kernel/entry.S
973 -@@ -21,8 +21,10 @@
974 - #include <linux/init.h>
975 - #include <linux/linkage.h>
976 -
977 -+#include <asm/alternative-asm.h>
978 - #include <asm/assembler.h>
979 - #include <asm/asm-offsets.h>
980 -+#include <asm/cpufeature.h>
981 - #include <asm/errno.h>
982 - #include <asm/esr.h>
983 - #include <asm/thread_info.h>
984 -@@ -120,6 +122,24 @@
985 - ct_user_enter
986 - ldr x23, [sp, #S_SP] // load return stack pointer
987 - msr sp_el0, x23
988 -+
989 -+#ifdef CONFIG_ARM64_ERRATUM_845719
990 -+ alternative_insn \
991 -+ "nop", \
992 -+ "tbz x22, #4, 1f", \
993 -+ ARM64_WORKAROUND_845719
994 -+#ifdef CONFIG_PID_IN_CONTEXTIDR
995 -+ alternative_insn \
996 -+ "nop; nop", \
997 -+ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
998 -+ ARM64_WORKAROUND_845719
999 -+#else
1000 -+ alternative_insn \
1001 -+ "nop", \
1002 -+ "msr contextidr_el1, xzr; 1:", \
1003 -+ ARM64_WORKAROUND_845719
1004 -+#endif
1005 -+#endif
1006 - .endif
1007 - msr elr_el1, x21 // set up the return data
1008 - msr spsr_el1, x22
1009 -diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
1010 -index 07f9305..c237ffb 100644
1011 ---- a/arch/arm64/kernel/head.S
1012 -+++ b/arch/arm64/kernel/head.S
1013 -@@ -426,6 +426,7 @@ __create_page_tables:
1014 - */
1015 - mov x0, x25
1016 - add x1, x26, #SWAPPER_DIR_SIZE
1017 -+ dmb sy
1018 - bl __inval_cache_range
1019 -
1020 - mov lr, x27
1021 -diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
1022 -index e8420f6..781f469 100644
1023 ---- a/arch/arm64/kernel/setup.c
1024 -+++ b/arch/arm64/kernel/setup.c
1025 -@@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void)
1026 - }
1027 - #endif
1028 -
1029 -+void __init do_post_cpus_up_work(void)
1030 -+{
1031 -+ apply_alternatives_all();
1032 -+}
1033 -+
1034 -+#ifdef CONFIG_UP_LATE_INIT
1035 -+void __init up_late_init(void)
1036 -+{
1037 -+ do_post_cpus_up_work();
1038 -+}
1039 -+#endif /* CONFIG_UP_LATE_INIT */
1040 -+
1041 - static void __init setup_processor(void)
1042 - {
1043 - struct cpu_info *cpu_info;
1044 -diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
1045 -index 328b8ce..4257369 100644
1046 ---- a/arch/arm64/kernel/smp.c
1047 -+++ b/arch/arm64/kernel/smp.c
1048 -@@ -309,7 +309,7 @@ void cpu_die(void)
1049 - void __init smp_cpus_done(unsigned int max_cpus)
1050 - {
1051 - pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
1052 -- apply_alternatives_all();
1053 -+ do_post_cpus_up_work();
1054 - }
1055 -
1056 - void __init smp_prepare_boot_cpu(void)
1057 -diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
1058 -index 356ee84..04845aa 100644
1059 ---- a/arch/c6x/kernel/time.c
1060 -+++ b/arch/c6x/kernel/time.c
1061 -@@ -49,7 +49,7 @@ u64 sched_clock(void)
1062 - return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
1063 - }
1064 -
1065 --void time_init(void)
1066 -+void __init time_init(void)
1067 - {
1068 - u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
1069 -
1070 -diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
1071 -index e41c56e..1e38f0e 100644
1072 ---- a/arch/mips/include/asm/asm-eva.h
1073 -+++ b/arch/mips/include/asm/asm-eva.h
1074 -@@ -11,6 +11,36 @@
1075 - #define __ASM_ASM_EVA_H
1076 -
1077 - #ifndef __ASSEMBLY__
1078 -+
1079 -+/* Kernel variants */
1080 -+
1081 -+#define kernel_cache(op, base) "cache " op ", " base "\n"
1082 -+#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
1083 -+#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
1084 -+#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
1085 -+#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
1086 -+#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
1087 -+#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
1088 -+#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
1089 -+#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
1090 -+#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
1091 -+#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
1092 -+#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
1093 -+#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
1094 -+#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
1095 -+
1096 -+#ifdef CONFIG_32BIT
1097 -+/*
1098 -+ * No 'sd' or 'ld' instructions in 32-bit but the code will
1099 -+ * do the correct thing
1100 -+ */
1101 -+#define kernel_sd(reg, addr) user_sw(reg, addr)
1102 -+#define kernel_ld(reg, addr) user_lw(reg, addr)
1103 -+#else
1104 -+#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
1105 -+#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
1106 -+#endif /* CONFIG_32BIT */
1107 -+
1108 - #ifdef CONFIG_EVA
1109 -
1110 - #define __BUILD_EVA_INSN(insn, reg, addr) \
1111 -@@ -41,37 +71,60 @@
1112 -
1113 - #else
1114 -
1115 --#define user_cache(op, base) "cache " op ", " base "\n"
1116 --#define user_ll(reg, addr) "ll " reg ", " addr "\n"
1117 --#define user_sc(reg, addr) "sc " reg ", " addr "\n"
1118 --#define user_lw(reg, addr) "lw " reg ", " addr "\n"
1119 --#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
1120 --#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
1121 --#define user_lh(reg, addr) "lh " reg ", " addr "\n"
1122 --#define user_lb(reg, addr) "lb " reg ", " addr "\n"
1123 --#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
1124 --#define user_sw(reg, addr) "sw " reg ", " addr "\n"
1125 --#define user_swl(reg, addr) "swl " reg ", " addr "\n"
1126 --#define user_swr(reg, addr) "swr " reg ", " addr "\n"
1127 --#define user_sh(reg, addr) "sh " reg ", " addr "\n"
1128 --#define user_sb(reg, addr) "sb " reg ", " addr "\n"
1129 -+#define user_cache(op, base) kernel_cache(op, base)
1130 -+#define user_ll(reg, addr) kernel_ll(reg, addr)
1131 -+#define user_sc(reg, addr) kernel_sc(reg, addr)
1132 -+#define user_lw(reg, addr) kernel_lw(reg, addr)
1133 -+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
1134 -+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
1135 -+#define user_lh(reg, addr) kernel_lh(reg, addr)
1136 -+#define user_lb(reg, addr) kernel_lb(reg, addr)
1137 -+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
1138 -+#define user_sw(reg, addr) kernel_sw(reg, addr)
1139 -+#define user_swl(reg, addr) kernel_swl(reg, addr)
1140 -+#define user_swr(reg, addr) kernel_swr(reg, addr)
1141 -+#define user_sh(reg, addr) kernel_sh(reg, addr)
1142 -+#define user_sb(reg, addr) kernel_sb(reg, addr)
1143 -
1144 - #ifdef CONFIG_32BIT
1145 --/*
1146 -- * No 'sd' or 'ld' instructions in 32-bit but the code will
1147 -- * do the correct thing
1148 -- */
1149 --#define user_sd(reg, addr) user_sw(reg, addr)
1150 --#define user_ld(reg, addr) user_lw(reg, addr)
1151 -+#define user_sd(reg, addr) kernel_sw(reg, addr)
1152 -+#define user_ld(reg, addr) kernel_lw(reg, addr)
1153 - #else
1154 --#define user_sd(reg, addr) "sd " reg", " addr "\n"
1155 --#define user_ld(reg, addr) "ld " reg", " addr "\n"
1156 -+#define user_sd(reg, addr) kernel_sd(reg, addr)
1157 -+#define user_ld(reg, addr) kernel_ld(reg, addr)
1158 - #endif /* CONFIG_32BIT */
1159 -
1160 - #endif /* CONFIG_EVA */
1161 -
1162 - #else /* __ASSEMBLY__ */
1163 -
1164 -+#define kernel_cache(op, base) cache op, base
1165 -+#define kernel_ll(reg, addr) ll reg, addr
1166 -+#define kernel_sc(reg, addr) sc reg, addr
1167 -+#define kernel_lw(reg, addr) lw reg, addr
1168 -+#define kernel_lwl(reg, addr) lwl reg, addr
1169 -+#define kernel_lwr(reg, addr) lwr reg, addr
1170 -+#define kernel_lh(reg, addr) lh reg, addr
1171 -+#define kernel_lb(reg, addr) lb reg, addr
1172 -+#define kernel_lbu(reg, addr) lbu reg, addr
1173 -+#define kernel_sw(reg, addr) sw reg, addr
1174 -+#define kernel_swl(reg, addr) swl reg, addr
1175 -+#define kernel_swr(reg, addr) swr reg, addr
1176 -+#define kernel_sh(reg, addr) sh reg, addr
1177 -+#define kernel_sb(reg, addr) sb reg, addr
1178 -+
1179 -+#ifdef CONFIG_32BIT
1180 -+/*
1181 -+ * No 'sd' or 'ld' instructions in 32-bit but the code will
1182 -+ * do the correct thing
1183 -+ */
1184 -+#define kernel_sd(reg, addr) user_sw(reg, addr)
1185 -+#define kernel_ld(reg, addr) user_lw(reg, addr)
1186 -+#else
1187 -+#define kernel_sd(reg, addr) sd reg, addr
1188 -+#define kernel_ld(reg, addr) ld reg, addr
1189 -+#endif /* CONFIG_32BIT */
1190 -+
1191 - #ifdef CONFIG_EVA
1192 -
1193 - #define __BUILD_EVA_INSN(insn, reg, addr) \
1194 -@@ -101,31 +154,27 @@
1195 - #define user_sd(reg, addr) user_sw(reg, addr)
1196 - #else
1197 -
1198 --#define user_cache(op, base) cache op, base
1199 --#define user_ll(reg, addr) ll reg, addr
1200 --#define user_sc(reg, addr) sc reg, addr
1201 --#define user_lw(reg, addr) lw reg, addr
1202 --#define user_lwl(reg, addr) lwl reg, addr
1203 --#define user_lwr(reg, addr) lwr reg, addr
1204 --#define user_lh(reg, addr) lh reg, addr
1205 --#define user_lb(reg, addr) lb reg, addr
1206 --#define user_lbu(reg, addr) lbu reg, addr
1207 --#define user_sw(reg, addr) sw reg, addr
1208 --#define user_swl(reg, addr) swl reg, addr
1209 --#define user_swr(reg, addr) swr reg, addr
1210 --#define user_sh(reg, addr) sh reg, addr
1211 --#define user_sb(reg, addr) sb reg, addr
1212 -+#define user_cache(op, base) kernel_cache(op, base)
1213 -+#define user_ll(reg, addr) kernel_ll(reg, addr)
1214 -+#define user_sc(reg, addr) kernel_sc(reg, addr)
1215 -+#define user_lw(reg, addr) kernel_lw(reg, addr)
1216 -+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
1217 -+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
1218 -+#define user_lh(reg, addr) kernel_lh(reg, addr)
1219 -+#define user_lb(reg, addr) kernel_lb(reg, addr)
1220 -+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
1221 -+#define user_sw(reg, addr) kernel_sw(reg, addr)
1222 -+#define user_swl(reg, addr) kernel_swl(reg, addr)
1223 -+#define user_swr(reg, addr) kernel_swr(reg, addr)
1224 -+#define user_sh(reg, addr) kernel_sh(reg, addr)
1225 -+#define user_sb(reg, addr) kernel_sb(reg, addr)
1226 -
1227 - #ifdef CONFIG_32BIT
1228 --/*
1229 -- * No 'sd' or 'ld' instructions in 32-bit but the code will
1230 -- * do the correct thing
1231 -- */
1232 --#define user_sd(reg, addr) user_sw(reg, addr)
1233 --#define user_ld(reg, addr) user_lw(reg, addr)
1234 -+#define user_sd(reg, addr) kernel_sw(reg, addr)
1235 -+#define user_ld(reg, addr) kernel_lw(reg, addr)
1236 - #else
1237 --#define user_sd(reg, addr) sd reg, addr
1238 --#define user_ld(reg, addr) ld reg, addr
1239 -+#define user_sd(reg, addr) kernel_sd(reg, addr)
1240 -+#define user_ld(reg, addr) kernel_sd(reg, addr)
1241 - #endif /* CONFIG_32BIT */
1242 -
1243 - #endif /* CONFIG_EVA */
1244 -diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
1245 -index dd083e9..9f26b07 100644
1246 ---- a/arch/mips/include/asm/fpu.h
1247 -+++ b/arch/mips/include/asm/fpu.h
1248 -@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
1249 - }
1250 - disable_msa();
1251 - clear_thread_flag(TIF_USEDMSA);
1252 -+ __disable_fpu();
1253 - } else if (is_fpu_owner()) {
1254 - if (save)
1255 - _save_fp(current);
1256 -diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
1257 -index ac4fc71..f722b05 100644
1258 ---- a/arch/mips/include/asm/kvm_host.h
1259 -+++ b/arch/mips/include/asm/kvm_host.h
1260 -@@ -322,6 +322,7 @@ enum mips_mmu_types {
1261 - #define T_TRAP 13 /* Trap instruction */
1262 - #define T_VCEI 14 /* Virtual coherency exception */
1263 - #define T_FPE 15 /* Floating point exception */
1264 -+#define T_MSADIS 21 /* MSA disabled exception */
1265 - #define T_WATCH 23 /* Watch address reference */
1266 - #define T_VCED 31 /* Virtual coherency data */
1267 -
1268 -@@ -578,6 +579,7 @@ struct kvm_mips_callbacks {
1269 - int (*handle_syscall)(struct kvm_vcpu *vcpu);
1270 - int (*handle_res_inst)(struct kvm_vcpu *vcpu);
1271 - int (*handle_break)(struct kvm_vcpu *vcpu);
1272 -+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
1273 - int (*vm_init)(struct kvm *kvm);
1274 - int (*vcpu_init)(struct kvm_vcpu *vcpu);
1275 - int (*vcpu_setup)(struct kvm_vcpu *vcpu);
1276 -diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
1277 -index bbb6969..7659da2 100644
1278 ---- a/arch/mips/kernel/unaligned.c
1279 -+++ b/arch/mips/kernel/unaligned.c
1280 -@@ -109,10 +109,11 @@ static u32 unaligned_action;
1281 - extern void show_registers(struct pt_regs *regs);
1282 -
1283 - #ifdef __BIG_ENDIAN
1284 --#define LoadHW(addr, value, res) \
1285 -+#define _LoadHW(addr, value, res, type) \
1286 -+do { \
1287 - __asm__ __volatile__ (".set\tnoat\n" \
1288 -- "1:\t"user_lb("%0", "0(%2)")"\n" \
1289 -- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
1290 -+ "1:\t"type##_lb("%0", "0(%2)")"\n" \
1291 -+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
1292 - "sll\t%0, 0x8\n\t" \
1293 - "or\t%0, $1\n\t" \
1294 - "li\t%1, 0\n" \
1295 -@@ -127,13 +128,15 @@ extern void show_registers(struct pt_regs *regs);
1296 - STR(PTR)"\t2b, 4b\n\t" \
1297 - ".previous" \
1298 - : "=&r" (value), "=r" (res) \
1299 -- : "r" (addr), "i" (-EFAULT));
1300 -+ : "r" (addr), "i" (-EFAULT)); \
1301 -+} while(0)
1302 -
1303 - #ifndef CONFIG_CPU_MIPSR6
1304 --#define LoadW(addr, value, res) \
1305 -+#define _LoadW(addr, value, res, type) \
1306 -+do { \
1307 - __asm__ __volatile__ ( \
1308 -- "1:\t"user_lwl("%0", "(%2)")"\n" \
1309 -- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
1310 -+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
1311 -+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
1312 - "li\t%1, 0\n" \
1313 - "3:\n\t" \
1314 - ".insn\n\t" \
1315 -@@ -146,21 +149,24 @@ extern void show_registers(struct pt_regs *regs);
1316 - STR(PTR)"\t2b, 4b\n\t" \
1317 - ".previous" \
1318 - : "=&r" (value), "=r" (res) \
1319 -- : "r" (addr), "i" (-EFAULT));
1320 -+ : "r" (addr), "i" (-EFAULT)); \
1321 -+} while(0)
1322 -+
1323 - #else
1324 - /* MIPSR6 has no lwl instruction */
1325 --#define LoadW(addr, value, res) \
1326 -+#define _LoadW(addr, value, res, type) \
1327 -+do { \
1328 - __asm__ __volatile__ ( \
1329 - ".set\tpush\n" \
1330 - ".set\tnoat\n\t" \
1331 -- "1:"user_lb("%0", "0(%2)")"\n\t" \
1332 -- "2:"user_lbu("$1", "1(%2)")"\n\t" \
1333 -+ "1:"type##_lb("%0", "0(%2)")"\n\t" \
1334 -+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
1335 - "sll\t%0, 0x8\n\t" \
1336 - "or\t%0, $1\n\t" \
1337 -- "3:"user_lbu("$1", "2(%2)")"\n\t" \
1338 -+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
1339 - "sll\t%0, 0x8\n\t" \
1340 - "or\t%0, $1\n\t" \
1341 -- "4:"user_lbu("$1", "3(%2)")"\n\t" \
1342 -+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
1343 - "sll\t%0, 0x8\n\t" \
1344 - "or\t%0, $1\n\t" \
1345 - "li\t%1, 0\n" \
1346 -@@ -178,14 +184,17 @@ extern void show_registers(struct pt_regs *regs);
1347 - STR(PTR)"\t4b, 11b\n\t" \
1348 - ".previous" \
1349 - : "=&r" (value), "=r" (res) \
1350 -- : "r" (addr), "i" (-EFAULT));
1351 -+ : "r" (addr), "i" (-EFAULT)); \
1352 -+} while(0)
1353 -+
1354 - #endif /* CONFIG_CPU_MIPSR6 */
1355 -
1356 --#define LoadHWU(addr, value, res) \
1357 -+#define _LoadHWU(addr, value, res, type) \
1358 -+do { \
1359 - __asm__ __volatile__ ( \
1360 - ".set\tnoat\n" \
1361 -- "1:\t"user_lbu("%0", "0(%2)")"\n" \
1362 -- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
1363 -+ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
1364 -+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
1365 - "sll\t%0, 0x8\n\t" \
1366 - "or\t%0, $1\n\t" \
1367 - "li\t%1, 0\n" \
1368 -@@ -201,13 +210,15 @@ extern void show_registers(struct pt_regs *regs);
1369 - STR(PTR)"\t2b, 4b\n\t" \
1370 - ".previous" \
1371 - : "=&r" (value), "=r" (res) \
1372 -- : "r" (addr), "i" (-EFAULT));
1373 -+ : "r" (addr), "i" (-EFAULT)); \
1374 -+} while(0)
1375 -
1376 - #ifndef CONFIG_CPU_MIPSR6
1377 --#define LoadWU(addr, value, res) \
1378 -+#define _LoadWU(addr, value, res, type) \
1379 -+do { \
1380 - __asm__ __volatile__ ( \
1381 -- "1:\t"user_lwl("%0", "(%2)")"\n" \
1382 -- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
1383 -+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
1384 -+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
1385 - "dsll\t%0, %0, 32\n\t" \
1386 - "dsrl\t%0, %0, 32\n\t" \
1387 - "li\t%1, 0\n" \
1388 -@@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs);
1389 - STR(PTR)"\t2b, 4b\n\t" \
1390 - ".previous" \
1391 - : "=&r" (value), "=r" (res) \
1392 -- : "r" (addr), "i" (-EFAULT));
1393 -+ : "r" (addr), "i" (-EFAULT)); \
1394 -+} while(0)
1395 -
1396 --#define LoadDW(addr, value, res) \
1397 -+#define _LoadDW(addr, value, res) \
1398 -+do { \
1399 - __asm__ __volatile__ ( \
1400 - "1:\tldl\t%0, (%2)\n" \
1401 - "2:\tldr\t%0, 7(%2)\n\t" \
1402 -@@ -240,21 +253,24 @@ extern void show_registers(struct pt_regs *regs);
1403 - STR(PTR)"\t2b, 4b\n\t" \
1404 - ".previous" \
1405 - : "=&r" (value), "=r" (res) \
1406 -- : "r" (addr), "i" (-EFAULT));
1407 -+ : "r" (addr), "i" (-EFAULT)); \
1408 -+} while(0)
1409 -+
1410 - #else
1411 - /* MIPSR6 has not lwl and ldl instructions */
1412 --#define LoadWU(addr, value, res) \
1413 -+#define _LoadWU(addr, value, res, type) \
1414 -+do { \
1415 - __asm__ __volatile__ ( \
1416 - ".set\tpush\n\t" \
1417 - ".set\tnoat\n\t" \
1418 -- "1:"user_lbu("%0", "0(%2)")"\n\t" \
1419 -- "2:"user_lbu("$1", "1(%2)")"\n\t" \
1420 -+ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
1421 -+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
1422 - "sll\t%0, 0x8\n\t" \
1423 - "or\t%0, $1\n\t" \
1424 -- "3:"user_lbu("$1", "2(%2)")"\n\t" \
1425 -+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
1426 - "sll\t%0, 0x8\n\t" \
1427 - "or\t%0, $1\n\t" \
1428 -- "4:"user_lbu("$1", "3(%2)")"\n\t" \
1429 -+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
1430 - "sll\t%0, 0x8\n\t" \
1431 - "or\t%0, $1\n\t" \
1432 - "li\t%1, 0\n" \
1433 -@@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs);
1434 - STR(PTR)"\t4b, 11b\n\t" \
1435 - ".previous" \
1436 - : "=&r" (value), "=r" (res) \
1437 -- : "r" (addr), "i" (-EFAULT));
1438 -+ : "r" (addr), "i" (-EFAULT)); \
1439 -+} while(0)
1440 -
1441 --#define LoadDW(addr, value, res) \
1442 -+#define _LoadDW(addr, value, res) \
1443 -+do { \
1444 - __asm__ __volatile__ ( \
1445 - ".set\tpush\n\t" \
1446 - ".set\tnoat\n\t" \
1447 -@@ -319,16 +337,19 @@ extern void show_registers(struct pt_regs *regs);
1448 - STR(PTR)"\t8b, 11b\n\t" \
1449 - ".previous" \
1450 - : "=&r" (value), "=r" (res) \
1451 -- : "r" (addr), "i" (-EFAULT));
1452 -+ : "r" (addr), "i" (-EFAULT)); \
1453 -+} while(0)
1454 -+
1455 - #endif /* CONFIG_CPU_MIPSR6 */
1456 -
1457 -
1458 --#define StoreHW(addr, value, res) \
1459 -+#define _StoreHW(addr, value, res, type) \
1460 -+do { \
1461 - __asm__ __volatile__ ( \
1462 - ".set\tnoat\n" \
1463 -- "1:\t"user_sb("%1", "1(%2)")"\n" \
1464 -+ "1:\t"type##_sb("%1", "1(%2)")"\n" \
1465 - "srl\t$1, %1, 0x8\n" \
1466 -- "2:\t"user_sb("$1", "0(%2)")"\n" \
1467 -+ "2:\t"type##_sb("$1", "0(%2)")"\n" \
1468 - ".set\tat\n\t" \
1469 - "li\t%0, 0\n" \
1470 - "3:\n\t" \
1471 -@@ -342,13 +363,15 @@ extern void show_registers(struct pt_regs *regs);
1472 - STR(PTR)"\t2b, 4b\n\t" \
1473 - ".previous" \
1474 - : "=r" (res) \
1475 -- : "r" (value), "r" (addr), "i" (-EFAULT));
1476 -+ : "r" (value), "r" (addr), "i" (-EFAULT));\
1477 -+} while(0)
1478 -
1479 - #ifndef CONFIG_CPU_MIPSR6
1480 --#define StoreW(addr, value, res) \
1481 -+#define _StoreW(addr, value, res, type) \
1482 -+do { \
1483 - __asm__ __volatile__ ( \
1484 -- "1:\t"user_swl("%1", "(%2)")"\n" \
1485 -- "2:\t"user_swr("%1", "3(%2)")"\n\t" \
1486 -+ "1:\t"type##_swl("%1", "(%2)")"\n" \
1487 -+ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
1488 - "li\t%0, 0\n" \
1489 - "3:\n\t" \
1490 - ".insn\n\t" \
1491 -@@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs);
1492 - STR(PTR)"\t2b, 4b\n\t" \
1493 - ".previous" \
1494 - : "=r" (res) \
1495 -- : "r" (value), "r" (addr), "i" (-EFAULT));
1496 -+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
1497 -+} while(0)
1498 -
1499 --#define StoreDW(addr, value, res) \
1500 -+#define _StoreDW(addr, value, res) \
1501 -+do { \
1502 - __asm__ __volatile__ ( \
1503 - "1:\tsdl\t%1,(%2)\n" \
1504 - "2:\tsdr\t%1, 7(%2)\n\t" \
1505 -@@ -379,20 +404,23 @@ extern void show_registers(struct pt_regs *regs);
1506 - STR(PTR)"\t2b, 4b\n\t" \
1507 - ".previous" \
1508 - : "=r" (res) \
1509 -- : "r" (value), "r" (addr), "i" (-EFAULT));
1510 -+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
1511 -+} while(0)
1512 -+
1513 - #else
1514 - /* MIPSR6 has no swl and sdl instructions */
1515 --#define StoreW(addr, value, res) \
1516 -+#define _StoreW(addr, value, res, type) \
1517 -+do { \
1518 - __asm__ __volatile__ ( \
1519 - ".set\tpush\n\t" \
1520 - ".set\tnoat\n\t" \
1521 -- "1:"user_sb("%1", "3(%2)")"\n\t" \
1522 -+ "1:"type##_sb("%1", "3(%2)")"\n\t" \
1523 - "srl\t$1, %1, 0x8\n\t" \
1524 -- "2:"user_sb("$1", "2(%2)")"\n\t" \
1525 -+ "2:"type##_sb("$1", "2(%2)")"\n\t" \
1526 - "srl\t$1, $1, 0x8\n\t" \
1527 -- "3:"user_sb("$1", "1(%2)")"\n\t" \
1528 -+ "3:"type##_sb("$1", "1(%2)")"\n\t" \
1529 - "srl\t$1, $1, 0x8\n\t" \
1530 -- "4:"user_sb("$1", "0(%2)")"\n\t" \
1531 -+ "4:"type##_sb("$1", "0(%2)")"\n\t" \
1532 - ".set\tpop\n\t" \
1533 - "li\t%0, 0\n" \
1534 - "10:\n\t" \
1535 -@@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs);
1536 - ".previous" \
1537 - : "=&r" (res) \
1538 - : "r" (value), "r" (addr), "i" (-EFAULT) \
1539 -- : "memory");
1540 -+ : "memory"); \
1541 -+} while(0)
1542 -
1543 - #define StoreDW(addr, value, res) \
1544 -+do { \
1545 - __asm__ __volatile__ ( \
1546 - ".set\tpush\n\t" \
1547 - ".set\tnoat\n\t" \
1548 -@@ -451,15 +481,18 @@ extern void show_registers(struct pt_regs *regs);
1549 - ".previous" \
1550 - : "=&r" (res) \
1551 - : "r" (value), "r" (addr), "i" (-EFAULT) \
1552 -- : "memory");
1553 -+ : "memory"); \
1554 -+} while(0)
1555 -+
1556 - #endif /* CONFIG_CPU_MIPSR6 */
1557 -
1558 - #else /* __BIG_ENDIAN */
1559 -
1560 --#define LoadHW(addr, value, res) \
1561 -+#define _LoadHW(addr, value, res, type) \
1562 -+do { \
1563 - __asm__ __volatile__ (".set\tnoat\n" \
1564 -- "1:\t"user_lb("%0", "1(%2)")"\n" \
1565 -- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
1566 -+ "1:\t"type##_lb("%0", "1(%2)")"\n" \
1567 -+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
1568 - "sll\t%0, 0x8\n\t" \
1569 - "or\t%0, $1\n\t" \
1570 - "li\t%1, 0\n" \
1571 -@@ -474,13 +507,15 @@ extern void show_registers(struct pt_regs *regs);
1572 - STR(PTR)"\t2b, 4b\n\t" \
1573 - ".previous" \
1574 - : "=&r" (value), "=r" (res) \
1575 -- : "r" (addr), "i" (-EFAULT));
1576 -+ : "r" (addr), "i" (-EFAULT)); \
1577 -+} while(0)
1578 -
1579 - #ifndef CONFIG_CPU_MIPSR6
1580 --#define LoadW(addr, value, res) \
1581 -+#define _LoadW(addr, value, res, type) \
1582 -+do { \
1583 - __asm__ __volatile__ ( \
1584 -- "1:\t"user_lwl("%0", "3(%2)")"\n" \
1585 -- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
1586 -+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
1587 -+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
1588 - "li\t%1, 0\n" \
1589 - "3:\n\t" \
1590 - ".insn\n\t" \
1591 -@@ -493,21 +528,24 @@ extern void show_registers(struct pt_regs *regs);
1592 - STR(PTR)"\t2b, 4b\n\t" \
1593 - ".previous" \
1594 - : "=&r" (value), "=r" (res) \
1595 -- : "r" (addr), "i" (-EFAULT));
1596 -+ : "r" (addr), "i" (-EFAULT)); \
1597 -+} while(0)
1598 -+
1599 - #else
1600 - /* MIPSR6 has no lwl instruction */
1601 --#define LoadW(addr, value, res) \
1602 -+#define _LoadW(addr, value, res, type) \
1603 -+do { \
1604 - __asm__ __volatile__ ( \
1605 - ".set\tpush\n" \
1606 - ".set\tnoat\n\t" \
1607 -- "1:"user_lb("%0", "3(%2)")"\n\t" \
1608 -- "2:"user_lbu("$1", "2(%2)")"\n\t" \
1609 -+ "1:"type##_lb("%0", "3(%2)")"\n\t" \
1610 -+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
1611 - "sll\t%0, 0x8\n\t" \
1612 - "or\t%0, $1\n\t" \
1613 -- "3:"user_lbu("$1", "1(%2)")"\n\t" \
1614 -+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
1615 - "sll\t%0, 0x8\n\t" \
1616 - "or\t%0, $1\n\t" \
1617 -- "4:"user_lbu("$1", "0(%2)")"\n\t" \
1618 -+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
1619 - "sll\t%0, 0x8\n\t" \
1620 - "or\t%0, $1\n\t" \
1621 - "li\t%1, 0\n" \
1622 -@@ -525,15 +563,18 @@ extern void show_registers(struct pt_regs *regs);
1623 - STR(PTR)"\t4b, 11b\n\t" \
1624 - ".previous" \
1625 - : "=&r" (value), "=r" (res) \
1626 -- : "r" (addr), "i" (-EFAULT));
1627 -+ : "r" (addr), "i" (-EFAULT)); \
1628 -+} while(0)
1629 -+
1630 - #endif /* CONFIG_CPU_MIPSR6 */
1631 -
1632 -
1633 --#define LoadHWU(addr, value, res) \
1634 -+#define _LoadHWU(addr, value, res, type) \
1635 -+do { \
1636 - __asm__ __volatile__ ( \
1637 - ".set\tnoat\n" \
1638 -- "1:\t"user_lbu("%0", "1(%2)")"\n" \
1639 -- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
1640 -+ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
1641 -+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
1642 - "sll\t%0, 0x8\n\t" \
1643 - "or\t%0, $1\n\t" \
1644 - "li\t%1, 0\n" \
1645 -@@ -549,13 +590,15 @@ extern void show_registers(struct pt_regs *regs);
1646 - STR(PTR)"\t2b, 4b\n\t" \
1647 - ".previous" \
1648 - : "=&r" (value), "=r" (res) \
1649 -- : "r" (addr), "i" (-EFAULT));
1650 -+ : "r" (addr), "i" (-EFAULT)); \
1651 -+} while(0)
1652 -
1653 - #ifndef CONFIG_CPU_MIPSR6
1654 --#define LoadWU(addr, value, res) \
1655 -+#define _LoadWU(addr, value, res, type) \
1656 -+do { \
1657 - __asm__ __volatile__ ( \
1658 -- "1:\t"user_lwl("%0", "3(%2)")"\n" \
1659 -- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
1660 -+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
1661 -+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
1662 - "dsll\t%0, %0, 32\n\t" \
1663 - "dsrl\t%0, %0, 32\n\t" \
1664 - "li\t%1, 0\n" \
1665 -@@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs);
1666 - STR(PTR)"\t2b, 4b\n\t" \
1667 - ".previous" \
1668 - : "=&r" (value), "=r" (res) \
1669 -- : "r" (addr), "i" (-EFAULT));
1670 -+ : "r" (addr), "i" (-EFAULT)); \
1671 -+} while(0)
1672 -
1673 --#define LoadDW(addr, value, res) \
1674 -+#define _LoadDW(addr, value, res) \
1675 -+do { \
1676 - __asm__ __volatile__ ( \
1677 - "1:\tldl\t%0, 7(%2)\n" \
1678 - "2:\tldr\t%0, (%2)\n\t" \
1679 -@@ -588,21 +633,24 @@ extern void show_registers(struct pt_regs *regs);
1680 - STR(PTR)"\t2b, 4b\n\t" \
1681 - ".previous" \
1682 - : "=&r" (value), "=r" (res) \
1683 -- : "r" (addr), "i" (-EFAULT));
1684 -+ : "r" (addr), "i" (-EFAULT)); \
1685 -+} while(0)
1686 -+
1687 - #else
1688 - /* MIPSR6 has no lwl and ldl instructions */
1689 --#define LoadWU(addr, value, res) \
1690 -+#define _LoadWU(addr, value, res, type) \
1691 -+do { \
1692 - __asm__ __volatile__ ( \
1693 - ".set\tpush\n\t" \
1694 - ".set\tnoat\n\t" \
1695 -- "1:"user_lbu("%0", "3(%2)")"\n\t" \
1696 -- "2:"user_lbu("$1", "2(%2)")"\n\t" \
1697 -+ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
1698 -+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
1699 - "sll\t%0, 0x8\n\t" \
1700 - "or\t%0, $1\n\t" \
1701 -- "3:"user_lbu("$1", "1(%2)")"\n\t" \
1702 -+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
1703 - "sll\t%0, 0x8\n\t" \
1704 - "or\t%0, $1\n\t" \
1705 -- "4:"user_lbu("$1", "0(%2)")"\n\t" \
1706 -+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
1707 - "sll\t%0, 0x8\n\t" \
1708 - "or\t%0, $1\n\t" \
1709 - "li\t%1, 0\n" \
1710 -@@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs);
1711 - STR(PTR)"\t4b, 11b\n\t" \
1712 - ".previous" \
1713 - : "=&r" (value), "=r" (res) \
1714 -- : "r" (addr), "i" (-EFAULT));
1715 -+ : "r" (addr), "i" (-EFAULT)); \
1716 -+} while(0)
1717 -
1718 --#define LoadDW(addr, value, res) \
1719 -+#define _LoadDW(addr, value, res) \
1720 -+do { \
1721 - __asm__ __volatile__ ( \
1722 - ".set\tpush\n\t" \
1723 - ".set\tnoat\n\t" \
1724 -@@ -667,15 +717,17 @@ extern void show_registers(struct pt_regs *regs);
1725 - STR(PTR)"\t8b, 11b\n\t" \
1726 - ".previous" \
1727 - : "=&r" (value), "=r" (res) \
1728 -- : "r" (addr), "i" (-EFAULT));
1729 -+ : "r" (addr), "i" (-EFAULT)); \
1730 -+} while(0)
1731 - #endif /* CONFIG_CPU_MIPSR6 */
1732 -
1733 --#define StoreHW(addr, value, res) \
1734 -+#define _StoreHW(addr, value, res, type) \
1735 -+do { \
1736 - __asm__ __volatile__ ( \
1737 - ".set\tnoat\n" \
1738 -- "1:\t"user_sb("%1", "0(%2)")"\n" \
1739 -+ "1:\t"type##_sb("%1", "0(%2)")"\n" \
1740 - "srl\t$1,%1, 0x8\n" \
1741 -- "2:\t"user_sb("$1", "1(%2)")"\n" \
1742 -+ "2:\t"type##_sb("$1", "1(%2)")"\n" \
1743 - ".set\tat\n\t" \
1744 - "li\t%0, 0\n" \
1745 - "3:\n\t" \
1746 -@@ -689,12 +741,15 @@ extern void show_registers(struct pt_regs *regs);
1747 - STR(PTR)"\t2b, 4b\n\t" \
1748 - ".previous" \
1749 - : "=r" (res) \
1750 -- : "r" (value), "r" (addr), "i" (-EFAULT));
1751 -+ : "r" (value), "r" (addr), "i" (-EFAULT));\
1752 -+} while(0)
1753 -+
1754 - #ifndef CONFIG_CPU_MIPSR6
1755 --#define StoreW(addr, value, res) \
1756 -+#define _StoreW(addr, value, res, type) \
1757 -+do { \
1758 - __asm__ __volatile__ ( \
1759 -- "1:\t"user_swl("%1", "3(%2)")"\n" \
1760 -- "2:\t"user_swr("%1", "(%2)")"\n\t" \
1761 -+ "1:\t"type##_swl("%1", "3(%2)")"\n" \
1762 -+ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
1763 - "li\t%0, 0\n" \
1764 - "3:\n\t" \
1765 - ".insn\n\t" \
1766 -@@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs);
1767 - STR(PTR)"\t2b, 4b\n\t" \
1768 - ".previous" \
1769 - : "=r" (res) \
1770 -- : "r" (value), "r" (addr), "i" (-EFAULT));
1771 -+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
1772 -+} while(0)
1773 -
1774 --#define StoreDW(addr, value, res) \
1775 -+#define _StoreDW(addr, value, res) \
1776 -+do { \
1777 - __asm__ __volatile__ ( \
1778 - "1:\tsdl\t%1, 7(%2)\n" \
1779 - "2:\tsdr\t%1, (%2)\n\t" \
1780 -@@ -725,20 +782,23 @@ extern void show_registers(struct pt_regs *regs);
1781 - STR(PTR)"\t2b, 4b\n\t" \
1782 - ".previous" \
1783 - : "=r" (res) \
1784 -- : "r" (value), "r" (addr), "i" (-EFAULT));
1785 -+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
1786 -+} while(0)
1787 -+
1788 - #else
1789 - /* MIPSR6 has no swl and sdl instructions */
1790 --#define StoreW(addr, value, res) \
1791 -+#define _StoreW(addr, value, res, type) \
1792 -+do { \
1793 - __asm__ __volatile__ ( \
1794 - ".set\tpush\n\t" \
1795 - ".set\tnoat\n\t" \
1796 -- "1:"user_sb("%1", "0(%2)")"\n\t" \
1797 -+ "1:"type##_sb("%1", "0(%2)")"\n\t" \
1798 - "srl\t$1, %1, 0x8\n\t" \
1799 -- "2:"user_sb("$1", "1(%2)")"\n\t" \
1800 -+ "2:"type##_sb("$1", "1(%2)")"\n\t" \
1801 - "srl\t$1, $1, 0x8\n\t" \
1802 -- "3:"user_sb("$1", "2(%2)")"\n\t" \
1803 -+ "3:"type##_sb("$1", "2(%2)")"\n\t" \
1804 - "srl\t$1, $1, 0x8\n\t" \
1805 -- "4:"user_sb("$1", "3(%2)")"\n\t" \
1806 -+ "4:"type##_sb("$1", "3(%2)")"\n\t" \
1807 - ".set\tpop\n\t" \
1808 - "li\t%0, 0\n" \
1809 - "10:\n\t" \
1810 -@@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs);
1811 - ".previous" \
1812 - : "=&r" (res) \
1813 - : "r" (value), "r" (addr), "i" (-EFAULT) \
1814 -- : "memory");
1815 -+ : "memory"); \
1816 -+} while(0)
1817 -
1818 --#define StoreDW(addr, value, res) \
1819 -+#define _StoreDW(addr, value, res) \
1820 -+do { \
1821 - __asm__ __volatile__ ( \
1822 - ".set\tpush\n\t" \
1823 - ".set\tnoat\n\t" \
1824 -@@ -797,10 +859,28 @@ extern void show_registers(struct pt_regs *regs);
1825 - ".previous" \
1826 - : "=&r" (res) \
1827 - : "r" (value), "r" (addr), "i" (-EFAULT) \
1828 -- : "memory");
1829 -+ : "memory"); \
1830 -+} while(0)
1831 -+
1832 - #endif /* CONFIG_CPU_MIPSR6 */
1833 - #endif
1834 -
1835 -+#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
1836 -+#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
1837 -+#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
1838 -+#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
1839 -+#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
1840 -+#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
1841 -+#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
1842 -+#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
1843 -+#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
1844 -+
1845 -+#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
1846 -+#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
1847 -+#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
1848 -+#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
1849 -+#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
1850 -+
1851 - static void emulate_load_store_insn(struct pt_regs *regs,
1852 - void __user *addr, unsigned int __user *pc)
1853 - {
1854 -@@ -872,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1855 - set_fs(seg);
1856 - goto sigbus;
1857 - }
1858 -- LoadHW(addr, value, res);
1859 -+ LoadHWE(addr, value, res);
1860 - if (res) {
1861 - set_fs(seg);
1862 - goto fault;
1863 -@@ -885,7 +965,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1864 - set_fs(seg);
1865 - goto sigbus;
1866 - }
1867 -- LoadW(addr, value, res);
1868 -+ LoadWE(addr, value, res);
1869 - if (res) {
1870 - set_fs(seg);
1871 - goto fault;
1872 -@@ -898,7 +978,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1873 - set_fs(seg);
1874 - goto sigbus;
1875 - }
1876 -- LoadHWU(addr, value, res);
1877 -+ LoadHWUE(addr, value, res);
1878 - if (res) {
1879 - set_fs(seg);
1880 - goto fault;
1881 -@@ -913,7 +993,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1882 - }
1883 - compute_return_epc(regs);
1884 - value = regs->regs[insn.spec3_format.rt];
1885 -- StoreHW(addr, value, res);
1886 -+ StoreHWE(addr, value, res);
1887 - if (res) {
1888 - set_fs(seg);
1889 - goto fault;
1890 -@@ -926,7 +1006,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1891 - }
1892 - compute_return_epc(regs);
1893 - value = regs->regs[insn.spec3_format.rt];
1894 -- StoreW(addr, value, res);
1895 -+ StoreWE(addr, value, res);
1896 - if (res) {
1897 - set_fs(seg);
1898 - goto fault;
1899 -@@ -943,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1900 - if (!access_ok(VERIFY_READ, addr, 2))
1901 - goto sigbus;
1902 -
1903 -- LoadHW(addr, value, res);
1904 -+ if (config_enabled(CONFIG_EVA)) {
1905 -+ if (segment_eq(get_fs(), get_ds()))
1906 -+ LoadHW(addr, value, res);
1907 -+ else
1908 -+ LoadHWE(addr, value, res);
1909 -+ } else {
1910 -+ LoadHW(addr, value, res);
1911 -+ }
1912 -+
1913 - if (res)
1914 - goto fault;
1915 - compute_return_epc(regs);
1916 -@@ -954,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1917 - if (!access_ok(VERIFY_READ, addr, 4))
1918 - goto sigbus;
1919 -
1920 -- LoadW(addr, value, res);
1921 -+ if (config_enabled(CONFIG_EVA)) {
1922 -+ if (segment_eq(get_fs(), get_ds()))
1923 -+ LoadW(addr, value, res);
1924 -+ else
1925 -+ LoadWE(addr, value, res);
1926 -+ } else {
1927 -+ LoadW(addr, value, res);
1928 -+ }
1929 -+
1930 - if (res)
1931 - goto fault;
1932 - compute_return_epc(regs);
1933 -@@ -965,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1934 - if (!access_ok(VERIFY_READ, addr, 2))
1935 - goto sigbus;
1936 -
1937 -- LoadHWU(addr, value, res);
1938 -+ if (config_enabled(CONFIG_EVA)) {
1939 -+ if (segment_eq(get_fs(), get_ds()))
1940 -+ LoadHWU(addr, value, res);
1941 -+ else
1942 -+ LoadHWUE(addr, value, res);
1943 -+ } else {
1944 -+ LoadHWU(addr, value, res);
1945 -+ }
1946 -+
1947 - if (res)
1948 - goto fault;
1949 - compute_return_epc(regs);
1950 -@@ -1024,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1951 -
1952 - compute_return_epc(regs);
1953 - value = regs->regs[insn.i_format.rt];
1954 -- StoreHW(addr, value, res);
1955 -+
1956 -+ if (config_enabled(CONFIG_EVA)) {
1957 -+ if (segment_eq(get_fs(), get_ds()))
1958 -+ StoreHW(addr, value, res);
1959 -+ else
1960 -+ StoreHWE(addr, value, res);
1961 -+ } else {
1962 -+ StoreHW(addr, value, res);
1963 -+ }
1964 -+
1965 - if (res)
1966 - goto fault;
1967 - break;
1968 -@@ -1035,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
1969 -
1970 - compute_return_epc(regs);
1971 - value = regs->regs[insn.i_format.rt];
1972 -- StoreW(addr, value, res);
1973 -+
1974 -+ if (config_enabled(CONFIG_EVA)) {
1975 -+ if (segment_eq(get_fs(), get_ds()))
1976 -+ StoreW(addr, value, res);
1977 -+ else
1978 -+ StoreWE(addr, value, res);
1979 -+ } else {
1980 -+ StoreW(addr, value, res);
1981 -+ }
1982 -+
1983 - if (res)
1984 - goto fault;
1985 - break;
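
The unaligned-access rework above follows one template: each accessor becomes a _Load*/_Store* macro taking a `type` token (`kernel` or `user`) that is token-pasted onto the instruction wrapper (type##_lb expands to kernel_lb or user_lb), the body gains a do { ... } while (0) wrapper so the multi-statement macros expand safely inside unbraced if/else, and the call sites pick the user (EVA) variant when get_fs() is a user segment. A minimal sketch of the token-pasting half, with hypothetical kernel_op()/user_op() helpers standing in for the instruction wrappers:

/* Sketch only: kernel_op()/user_op() are invented stand-ins for the
 * kernel_lb()/user_lb() style instruction wrappers in the patch. */
#define kernel_op(x) ((x) + 1)
#define user_op(x)   ((x) + 2)

#define _DO_OP(val, res, type)                  \
do {                                            \
	(res) = type##_op(val);                 \
} while (0)

#define DoOp(val, res)  _DO_OP(val, res, kernel)  /* kernel address space  */
#define DoOpE(val, res) _DO_OP(val, res, user)    /* EVA user-space access */
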
1986 -diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
1987 -index fb3e8df..838d3a6 100644
1988 ---- a/arch/mips/kvm/emulate.c
1989 -+++ b/arch/mips/kvm/emulate.c
1990 -@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
1991 - case T_SYSCALL:
1992 - case T_BREAK:
1993 - case T_RES_INST:
1994 -+ case T_MSADIS:
1995 - break;
1996 -
1997 - case T_COP_UNUSABLE:
1998 -diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
1999 -index c9eccf5..f5e7dda 100644
2000 ---- a/arch/mips/kvm/mips.c
2001 -+++ b/arch/mips/kvm/mips.c
2002 -@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
2003 - ret = kvm_mips_callbacks->handle_break(vcpu);
2004 - break;
2005 -
2006 -+ case T_MSADIS:
2007 -+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
2008 -+ break;
2009 -+
2010 - default:
2011 - kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
2012 - exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
2013 -diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
2014 -index fd7257b..4372cc8 100644
2015 ---- a/arch/mips/kvm/trap_emul.c
2016 -+++ b/arch/mips/kvm/trap_emul.c
2017 -@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
2018 - return ret;
2019 - }
2020 -
2021 -+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
2022 -+{
2023 -+ struct kvm_run *run = vcpu->run;
2024 -+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
2025 -+ unsigned long cause = vcpu->arch.host_cp0_cause;
2026 -+ enum emulation_result er = EMULATE_DONE;
2027 -+ int ret = RESUME_GUEST;
2028 -+
2029 -+ /* No MSA supported in guest: emulate a reserved instruction exception */
2030 -+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2031 -+
2032 -+ switch (er) {
2033 -+ case EMULATE_DONE:
2034 -+ ret = RESUME_GUEST;
2035 -+ break;
2036 -+
2037 -+ case EMULATE_FAIL:
2038 -+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2039 -+ ret = RESUME_HOST;
2040 -+ break;
2041 -+
2042 -+ default:
2043 -+ BUG();
2044 -+ }
2045 -+ return ret;
2046 -+}
2047 -+
2048 - static int kvm_trap_emul_vm_init(struct kvm *kvm)
2049 - {
2050 - return 0;
2051 -@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
2052 - .handle_syscall = kvm_trap_emul_handle_syscall,
2053 - .handle_res_inst = kvm_trap_emul_handle_res_inst,
2054 - .handle_break = kvm_trap_emul_handle_break,
2055 -+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
2056 -
2057 - .vm_init = kvm_trap_emul_vm_init,
2058 - .vcpu_init = kvm_trap_emul_vcpu_init,
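
The three KVM hunks above wire one new exit cause through the existing indirection: kvm_mips_check_privilege() lets T_MSADIS through, kvm_mips_handle_exit() routes it to a new handle_msa_disabled callback, and the trap-and-emulate backend implements it by emulating a reserved-instruction exception, since the guest is not offered MSA. The dispatch shape, sketched with illustrative names rather than the real KVM structures:

/* Illustrative only; not the kvm_mips_callbacks layout. */
struct vcpu;
typedef int (*exit_handler_t)(struct vcpu *vcpu);

struct exit_callbacks {
	exit_handler_t handle_res_inst;
	exit_handler_t handle_msa_disabled;	/* the hook this patch adds */
};

static int handle_exit(const struct exit_callbacks *cb, struct vcpu *vcpu,
		       int exccode)
{
	switch (exccode) {
	case 1:	/* reserved instruction */
		return cb->handle_res_inst(vcpu);
	case 2:	/* MSA disabled */
		return cb->handle_msa_disabled(vcpu);
	default:
		return -1;	/* unhandled exit */
	}
}
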
2059 -diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
2060 -index 21221ed..0f75b6b 100644
2061 ---- a/arch/mips/loongson/loongson-3/irq.c
2062 -+++ b/arch/mips/loongson/loongson-3/irq.c
2063 -@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
2064 -
2065 - static struct irqaction cascade_irqaction = {
2066 - .handler = no_action,
2067 -+ .flags = IRQF_NO_SUSPEND,
2068 - .name = "cascade",
2069 - };
2070 -
2071 -diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
2072 -index 8fddd2cd..efe366d 100644
2073 ---- a/arch/mips/mti-malta/malta-memory.c
2074 -+++ b/arch/mips/mti-malta/malta-memory.c
2075 -@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
2076 - pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
2077 - physical_memsize = 0x02000000;
2078 - } else {
2079 -+ if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
2080 -+ pr_warn("Unsupported memsize value (0x%lx) detected! "
2081 -+ "Using 0x10000000 (256M) instead\n",
2082 -+ memsize);
2083 -+ memsize = 256 << 20;
2084 -+ }
2085 - /* If ememsize is set, then set physical_memsize to that */
2086 - physical_memsize = ememsize ? : memsize;
2087 - }
2088 -diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
2089 -index 32a7c82..e7567c8 100644
2090 ---- a/arch/mips/power/hibernate.S
2091 -+++ b/arch/mips/power/hibernate.S
2092 -@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
2093 - END(swsusp_arch_suspend)
2094 -
2095 - LEAF(swsusp_arch_resume)
2096 -+ /* Avoid TLB mismatch during and after kernel resume */
2097 -+ jal local_flush_tlb_all
2098 - PTR_L t0, restore_pblist
2099 - 0:
2100 - PTR_L t1, PBE_ADDRESS(t0) /* source */
2101 -@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
2102 - bne t1, t3, 1b
2103 - PTR_L t0, PBE_NEXT(t0)
2104 - bnez t0, 0b
2105 -- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
2106 - PTR_LA t0, saved_regs
2107 - PTR_L ra, PT_R31(t0)
2108 - PTR_L sp, PT_R29(t0)
2109 -diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
2110 -index ae77b7e..c641983 100644
2111 ---- a/arch/powerpc/kernel/cacheinfo.c
2112 -+++ b/arch/powerpc/kernel/cacheinfo.c
2113 -@@ -61,12 +61,22 @@ struct cache_type_info {
2114 - };
2115 -
2116 - /* These are used to index the cache_type_info array. */
2117 --#define CACHE_TYPE_UNIFIED 0
2118 --#define CACHE_TYPE_INSTRUCTION 1
2119 --#define CACHE_TYPE_DATA 2
2120 -+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
2121 -+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
2122 -+#define CACHE_TYPE_INSTRUCTION 2
2123 -+#define CACHE_TYPE_DATA 3
2124 -
2125 - static const struct cache_type_info cache_type_info[] = {
2126 - {
2127 -+ /* Embedded systems that use cache-size, cache-block-size,
2128 -+ * etc. for the Unified (typically L2) cache. */
2129 -+ .name = "Unified",
2130 -+ .size_prop = "cache-size",
2131 -+ .line_size_props = { "cache-line-size",
2132 -+ "cache-block-size", },
2133 -+ .nr_sets_prop = "cache-sets",
2134 -+ },
2135 -+ {
2136 - /* PowerPC Processor binding says the [di]-cache-*
2137 - * must be equal on unified caches, so just use
2138 - * d-cache properties. */
2139 -@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
2140 - {
2141 - struct cache *iter;
2142 -
2143 -- if (cache->type == CACHE_TYPE_UNIFIED)
2144 -+ if (cache->type == CACHE_TYPE_UNIFIED ||
2145 -+ cache->type == CACHE_TYPE_UNIFIED_D)
2146 - return cache;
2147 -
2148 - list_for_each_entry(iter, &cache_list, list)
2149 -@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
2150 - return of_get_property(np, "cache-unified", NULL);
2151 - }
2152 -
2153 --static struct cache *cache_do_one_devnode_unified(struct device_node *node,
2154 -- int level)
2155 -+/*
2156 -+ * Unified caches can carry two different sets of properties. Most embedded
2157 -+ * systems use cache-size, etc. for the unified cache size, but Open Firmware
2158 -+ * systems use d-cache-size, etc. Check on initialization which type we have
2159 -+ * and return the appropriate structure type. Assume it's embedded if it isn't
2160 -+ * Open Firmware. If a third type turns up, there will be missing entries
2161 -+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
2162 -+ * to be extended further.
2163 -+ */
2164 -+static int cache_is_unified_d(const struct device_node *np)
2165 - {
2166 -- struct cache *cache;
2167 -+ return of_get_property(np,
2168 -+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
2169 -+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
2170 -+}
2171 -
2172 -+/*
2173 -+ */
2174 -+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
2175 -+{
2176 - pr_debug("creating L%d ucache for %s\n", level, node->full_name);
2177 -
2178 -- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
2179 --
2180 -- return cache;
2181 -+ return new_cache(cache_is_unified_d(node), level, node);
2182 - }
2183 -
2184 - static struct cache *cache_do_one_devnode_split(struct device_node *node,
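
The cacheinfo change above reduces to a probe-and-select: at node creation time, check which device-tree spelling the firmware used for the unified cache size and index the matching cache_type_info entry. A self-contained sketch of that idiom (the node representation and has_prop() helper below are invented for illustration; the kernel uses of_get_property()):

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a device-tree node and its property list. */
struct node { const char *props[4]; };

static int has_prop(const struct node *np, const char *name)
{
	int i;

	for (i = 0; np->props[i]; i++)
		if (!strcmp(np->props[i], name))
			return 1;
	return 0;
}

enum { CACHE_TYPE_UNIFIED, CACHE_TYPE_UNIFIED_D };

/* Open Firmware trees carry d-cache-size for unified caches;
 * embedded trees carry plain cache-size. */
static int unified_cache_type(const struct node *np)
{
	return has_prop(np, "d-cache-size") ? CACHE_TYPE_UNIFIED_D
					    : CACHE_TYPE_UNIFIED;
}

int main(void)
{
	struct node embedded = { { "cache-size", NULL } };
	struct node ofw      = { { "d-cache-size", NULL } };

	printf("%d %d\n", unified_cache_type(&embedded),
	       unified_cache_type(&ofw));	/* prints: 0 1 */
	return 0;
}
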
2185 -diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
2186 -index 7e408bf..cecbe00 100644
2187 ---- a/arch/powerpc/mm/hugetlbpage.c
2188 -+++ b/arch/powerpc/mm/hugetlbpage.c
2189 -@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
2190 - pmd = pmd_offset(pud, start);
2191 - pud_clear(pud);
2192 - pmd_free_tlb(tlb, pmd, start);
2193 -+ mm_dec_nr_pmds(tlb->mm);
2194 - }
2195 -
2196 - static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
2197 -diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
2198 -index 2396dda..ead5535 100644
2199 ---- a/arch/powerpc/perf/callchain.c
2200 -+++ b/arch/powerpc/perf/callchain.c
2201 -@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
2202 - sp = regs->gpr[1];
2203 - perf_callchain_store(entry, next_ip);
2204 -
2205 -- for (;;) {
2206 -+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
2207 - fp = (unsigned long __user *) sp;
2208 - if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
2209 - return;
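
The perf fix above swaps an unbounded for (;;) frame walk for one capped at PERF_MAX_STACK_DEPTH, so a corrupt or self-referencing user stack can no longer keep the sampler spinning. The shape, with invented read_frame()/record() helpers (read_frame() returns nonzero on failure):

/* Sketch; read_frame() and record() are illustrative stubs. */
enum { MAX_STACK_DEPTH = 127 };

extern int read_frame(unsigned long sp, unsigned long *next_sp,
		      unsigned long *next_ip);
extern void record(unsigned long ip);

static void walk_user_stack(unsigned long sp)
{
	unsigned long next_sp, next_ip;
	unsigned int nr;

	for (nr = 0; nr < MAX_STACK_DEPTH; nr++) {	/* hard upper bound */
		if (read_frame(sp, &next_sp, &next_ip))
			return;		/* unreadable frame: stop cleanly */
		record(next_ip);
		sp = next_sp;		/* a frame loop now terminates too */
	}
}
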
2210 -diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
2211 -index 4c11421..3af8324 100644
2212 ---- a/arch/powerpc/platforms/cell/interrupt.c
2213 -+++ b/arch/powerpc/platforms/cell/interrupt.c
2214 -@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
2215 -
2216 - void iic_setup_cpu(void)
2217 - {
2218 -- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
2219 -+ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
2220 - }
2221 -
2222 - u8 iic_get_target_id(int cpu)
2223 -diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
2224 -index c7c8720..63db1b0 100644
2225 ---- a/arch/powerpc/platforms/cell/iommu.c
2226 -+++ b/arch/powerpc/platforms/cell/iommu.c
2227 -@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
2228 -
2229 - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
2230 -
2231 -- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
2232 -+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
2233 - io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
2234 -
2235 - mb();
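
The one-line Cell iommu fix above is the classic shift-versus-size slip: it_page_shift is the log2 of the page size, so the per-page address increment must be 1 << shift bytes, not shift bytes. In miniature:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* 4 KiB pages */
	unsigned long uaddr = 0x1000;

	printf("buggy: 0x%lx\n", uaddr + page_shift);		/* 0x100c */
	printf("fixed: 0x%lx\n", uaddr + (1UL << page_shift));	/* 0x2000 */
	return 0;
}
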
2236 -diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
2237 -index 6c9ff2b..1d9369e 100644
2238 ---- a/arch/powerpc/platforms/powernv/pci-ioda.c
2239 -+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
2240 -@@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
2241 - region.start += phb->ioda.io_segsize;
2242 - index++;
2243 - }
2244 -- } else if (res->flags & IORESOURCE_MEM) {
2245 -+ } else if ((res->flags & IORESOURCE_MEM) &&
2246 -+ !pnv_pci_is_mem_pref_64(res->flags)) {
2247 - region.start = res->start -
2248 - hose->mem_offset[0] -
2249 - phb->ioda.m32_pci_base;
2250 -diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
2251 -index 1c4c5ac..d3236c9 100644
2252 ---- a/arch/s390/kernel/suspend.c
2253 -+++ b/arch/s390/kernel/suspend.c
2254 -@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
2255 - {
2256 - unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
2257 - unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
2258 -+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
2259 -+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
2260 -
2261 - /* Always save lowcore pages (LC protection might be enabled). */
2262 - if (pfn <= LC_PAGES)
2263 -@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
2264 - if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
2265 - return 1;
2266 - /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
2267 -+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
2268 -+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
2269 - if (tprot(PFN_PHYS(pfn)))
2270 - return 1;
2271 - return 0;
2272 -diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
2273 -index 073b5f3..e7bc2fd 100644
2274 ---- a/arch/s390/kvm/interrupt.c
2275 -+++ b/arch/s390/kvm/interrupt.c
2276 -@@ -17,6 +17,7 @@
2277 - #include <linux/signal.h>
2278 - #include <linux/slab.h>
2279 - #include <linux/bitmap.h>
2280 -+#include <linux/vmalloc.h>
2281 - #include <asm/asm-offsets.h>
2282 - #include <asm/uaccess.h>
2283 - #include <asm/sclp.h>
2284 -@@ -1332,10 +1333,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
2285 - return rc;
2286 - }
2287 -
2288 --void kvm_s390_reinject_io_int(struct kvm *kvm,
2289 -+int kvm_s390_reinject_io_int(struct kvm *kvm,
2290 - struct kvm_s390_interrupt_info *inti)
2291 - {
2292 -- __inject_vm(kvm, inti);
2293 -+ return __inject_vm(kvm, inti);
2294 - }
2295 -
2296 - int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
2297 -@@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
2298 - spin_unlock(&fi->lock);
2299 - }
2300 -
2301 --static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
2302 -- u8 *addr)
2303 -+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2304 -+ struct kvm_s390_irq *irq)
2305 - {
2306 -- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2307 -- struct kvm_s390_irq irq = {0};
2308 --
2309 -- irq.type = inti->type;
2310 -+ irq->type = inti->type;
2311 - switch (inti->type) {
2312 - case KVM_S390_INT_PFAULT_INIT:
2313 - case KVM_S390_INT_PFAULT_DONE:
2314 - case KVM_S390_INT_VIRTIO:
2315 - case KVM_S390_INT_SERVICE:
2316 -- irq.u.ext = inti->ext;
2317 -+ irq->u.ext = inti->ext;
2318 - break;
2319 - case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2320 -- irq.u.io = inti->io;
2321 -+ irq->u.io = inti->io;
2322 - break;
2323 - case KVM_S390_MCHK:
2324 -- irq.u.mchk = inti->mchk;
2325 -+ irq->u.mchk = inti->mchk;
2326 - break;
2327 -- default:
2328 -- return -EINVAL;
2329 - }
2330 --
2331 -- if (copy_to_user(uptr, &irq, sizeof(irq)))
2332 -- return -EFAULT;
2333 --
2334 -- return 0;
2335 - }
2336 -
2337 --static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
2338 -+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2339 - {
2340 - struct kvm_s390_interrupt_info *inti;
2341 - struct kvm_s390_float_interrupt *fi;
2342 -+ struct kvm_s390_irq *buf;
2343 -+ int max_irqs;
2344 - int ret = 0;
2345 - int n = 0;
2346 -
2347 -+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2348 -+ return -EINVAL;
2349 -+
2350 -+ /*
2351 -+ * We are already using -ENOMEM to signal
2352 -+ * userspace it may retry with a bigger buffer,
2353 -+ * so we need to use something else for this case
2354 -+ */
2355 -+ buf = vzalloc(len);
2356 -+ if (!buf)
2357 -+ return -ENOBUFS;
2358 -+
2359 -+ max_irqs = len / sizeof(struct kvm_s390_irq);
2360 -+
2361 - fi = &kvm->arch.float_int;
2362 - spin_lock(&fi->lock);
2363 --
2364 - list_for_each_entry(inti, &fi->list, list) {
2365 -- if (len < sizeof(struct kvm_s390_irq)) {
2366 -+ if (n == max_irqs) {
2367 - /* signal userspace to try again */
2368 - ret = -ENOMEM;
2369 - break;
2370 - }
2371 -- ret = copy_irq_to_user(inti, buf);
2372 -- if (ret)
2373 -- break;
2374 -- buf += sizeof(struct kvm_s390_irq);
2375 -- len -= sizeof(struct kvm_s390_irq);
2376 -+ inti_to_irq(inti, &buf[n]);
2377 - n++;
2378 - }
2379 --
2380 - spin_unlock(&fi->lock);
2381 -+ if (!ret && n > 0) {
2382 -+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2383 -+ ret = -EFAULT;
2384 -+ }
2385 -+ vfree(buf);
2386 -
2387 - return ret < 0 ? ret : n;
2388 - }
2389 -@@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2390 -
2391 - switch (attr->group) {
2392 - case KVM_DEV_FLIC_GET_ALL_IRQS:
2393 -- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
2394 -+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2395 - attr->attr);
2396 - break;
2397 - default:
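
The get_all_floating_irqs() rewrite above follows a common pattern: never copy_to_user() under a spinlock; instead snapshot into a kernel buffer under the lock and copy out afterwards. Because -ENOMEM already means "retry with a bigger buffer" to userspace, allocation failure is reported as -ENOBUFS. A hedged, userspace-runnable sketch of the same shape (calloc/memcpy stand in for vzalloc/copy_to_user, and the lock is elided):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MAX_BUFFER (1024 * 1024)

/* Returns the number of items copied, -ENOMEM if the list did not fit
 * (caller should retry with a bigger buffer), or -ENOBUFS if the bounce
 * buffer could not be allocated. */
static int snapshot_items(const unsigned long long *list, int list_len,
			  void *dst, size_t len)
{
	unsigned long long *buf;
	size_t max_items = len / sizeof(*buf);
	int n = 0, ret = 0;

	if (len == 0 || len > MAX_BUFFER)
		return -EINVAL;

	buf = calloc(max_items, sizeof(*buf));	/* stands in for vzalloc() */
	if (!buf)
		return -ENOBUFS;	/* -ENOMEM is reserved for "retry" */

	/* ... the lock would be taken here ... */
	for (n = 0; n < list_len; n++) {
		if ((size_t)n == max_items) {
			ret = -ENOMEM;	/* buffer too small: signal retry */
			break;
		}
		buf[n] = list[n];
	}
	/* ... and dropped here, before the possibly-faulting copy out ... */

	if (ret == 0 && n > 0)
		memcpy(dst, buf, n * sizeof(*buf)); /* copy_to_user() stand-in */
	free(buf);
	return ret < 0 ? ret : n;
}
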
2398 -diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
2399 -index c34109a..6995a30 100644
2400 ---- a/arch/s390/kvm/kvm-s390.h
2401 -+++ b/arch/s390/kvm/kvm-s390.h
2402 -@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
2403 - int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
2404 - struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
2405 - u64 cr6, u64 schid);
2406 --void kvm_s390_reinject_io_int(struct kvm *kvm,
2407 -- struct kvm_s390_interrupt_info *inti);
2408 -+int kvm_s390_reinject_io_int(struct kvm *kvm,
2409 -+ struct kvm_s390_interrupt_info *inti);
2410 - int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
2411 -
2412 - /* implemented in intercept.c */
2413 -diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
2414 -index 3511169..b982fbc 100644
2415 ---- a/arch/s390/kvm/priv.c
2416 -+++ b/arch/s390/kvm/priv.c
2417 -@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
2418 - struct kvm_s390_interrupt_info *inti;
2419 - unsigned long len;
2420 - u32 tpi_data[3];
2421 -- int cc, rc;
2422 -+ int rc;
2423 - u64 addr;
2424 -
2425 -- rc = 0;
2426 - addr = kvm_s390_get_base_disp_s(vcpu);
2427 - if (addr & 3)
2428 - return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
2429 -- cc = 0;
2430 -+
2431 - inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
2432 -- if (!inti)
2433 -- goto no_interrupt;
2434 -- cc = 1;
2435 -+ if (!inti) {
2436 -+ kvm_s390_set_psw_cc(vcpu, 0);
2437 -+ return 0;
2438 -+ }
2439 -+
2440 - tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
2441 - tpi_data[1] = inti->io.io_int_parm;
2442 - tpi_data[2] = inti->io.io_int_word;
2443 -@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
2444 - */
2445 - len = sizeof(tpi_data) - 4;
2446 - rc = write_guest(vcpu, addr, &tpi_data, len);
2447 -- if (rc)
2448 -- return kvm_s390_inject_prog_cond(vcpu, rc);
2449 -+ if (rc) {
2450 -+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
2451 -+ goto reinject_interrupt;
2452 -+ }
2453 - } else {
2454 - /*
2455 - * Store the three-word I/O interruption code into
2456 - * the appropriate lowcore area.
2457 - */
2458 - len = sizeof(tpi_data);
2459 -- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
2460 -+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
2461 -+ /* failed writes to the low core are not recoverable */
2462 - rc = -EFAULT;
2463 -+ goto reinject_interrupt;
2464 -+ }
2465 - }
2466 -+
2467 -+ /* irq was successfully handed to the guest */
2468 -+ kfree(inti);
2469 -+ kvm_s390_set_psw_cc(vcpu, 1);
2470 -+ return 0;
2471 -+reinject_interrupt:
2472 - /*
2473 - * If we encounter a problem storing the interruption code, the
2474 - * instruction is suppressed from the guest's view: reinject the
2475 - * interrupt.
2476 - */
2477 -- if (!rc)
2478 -+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
2479 - kfree(inti);
2480 -- else
2481 -- kvm_s390_reinject_io_int(vcpu->kvm, inti);
2482 --no_interrupt:
2483 -- /* Set condition code and we're done. */
2484 -- if (!rc)
2485 -- kvm_s390_set_psw_cc(vcpu, cc);
2486 -+ rc = -EFAULT;
2487 -+ }
2488 -+ /* don't set the cc, a pgm irq was injected or we drop to user space */
2489 - return rc ? -EFAULT : 0;
2490 - }
2491 -
2492 -@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
2493 - for (n = mem->count - 1; n > 0 ; n--)
2494 - memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
2495 -
2496 -+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
2497 - mem->vm[0].cpus_total = cpus;
2498 - mem->vm[0].cpus_configured = cpus;
2499 - mem->vm[0].cpus_standby = 0;
2500 -diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
2501 -index 47f29b1..e7814b7 100644
2502 ---- a/arch/x86/include/asm/insn.h
2503 -+++ b/arch/x86/include/asm/insn.h
2504 -@@ -69,7 +69,7 @@ struct insn {
2505 - const insn_byte_t *next_byte;
2506 - };
2507 -
2508 --#define MAX_INSN_SIZE 16
2509 -+#define MAX_INSN_SIZE 15
2510 -
2511 - #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
2512 - #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
2513 -diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
2514 -index a1410db..653dfa7 100644
2515 ---- a/arch/x86/include/asm/mwait.h
2516 -+++ b/arch/x86/include/asm/mwait.h
2517 -@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
2518 - :: "a" (eax), "c" (ecx));
2519 - }
2520 -
2521 -+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
2522 -+{
2523 -+ trace_hardirqs_on();
2524 -+ /* "mwait %eax, %ecx;" */
2525 -+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
2526 -+ :: "a" (eax), "c" (ecx));
2527 -+}
2528 -+
2529 - /*
2530 - * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
2531 - * which can obviate IPI to trigger checking of need_resched.
2532 -diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
2533 -index d6b078e..25b1cc0 100644
2534 ---- a/arch/x86/include/asm/pvclock.h
2535 -+++ b/arch/x86/include/asm/pvclock.h
2536 -@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
2537 -
2538 - struct pvclock_vsyscall_time_info {
2539 - struct pvclock_vcpu_time_info pvti;
2540 -+ u32 migrate_count;
2541 - } __attribute__((__aligned__(SMP_CACHE_BYTES)));
2542 -
2543 - #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
2544 -diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
2545 -index 0739833..666bcf1 100644
2546 ---- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
2547 -+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
2548 -@@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
2549 - INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
2550 - INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
2551 - INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
2552 -+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
2553 -+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
2554 - EVENT_CONSTRAINT_END
2555 - };
2556 -
2557 -@@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
2558 - INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
2559 - INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
2560 - INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
2561 -+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
2562 -+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
2563 - EVENT_CONSTRAINT_END
2564 - };
2565 -
2566 -@@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
2567 - INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
2568 - INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
2569 - INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
2570 -+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
2571 -+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
2572 - EVENT_CONSTRAINT_END
2573 - };
2574 -
2575 -@@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
2576 - INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
2577 - INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
2578 - INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
2579 -+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
2580 -+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
2581 - EVENT_CONSTRAINT_END
2582 - };
2583 -
2584 -diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
2585 -index 046e2d6..a388bb8 100644
2586 ---- a/arch/x86/kernel/process.c
2587 -+++ b/arch/x86/kernel/process.c
2588 -@@ -24,6 +24,7 @@
2589 - #include <asm/syscalls.h>
2590 - #include <asm/idle.h>
2591 - #include <asm/uaccess.h>
2592 -+#include <asm/mwait.h>
2593 - #include <asm/i387.h>
2594 - #include <asm/fpu-internal.h>
2595 - #include <asm/debugreg.h>
2596 -@@ -399,6 +400,53 @@ static void amd_e400_idle(void)
2597 - default_idle();
2598 - }
2599 -
2600 -+/*
2601 -+ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
2602 -+ * We can't rely on cpuidle installing MWAIT, because it will not load
2603 -+ * on systems that support only C1 -- so the boot default must be MWAIT.
2604 -+ *
2605 -+ * Some AMD machines are the opposite: they depend on using HALT.
2606 -+ *
2607 -+ * So for default C1, which is used during boot until cpuidle loads,
2608 -+ * use MWAIT-C1 on Intel HW that has it, else use HALT.
2609 -+ */
2610 -+static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
2611 -+{
2612 -+ if (c->x86_vendor != X86_VENDOR_INTEL)
2613 -+ return 0;
2614 -+
2615 -+ if (!cpu_has(c, X86_FEATURE_MWAIT))
2616 -+ return 0;
2617 -+
2618 -+ return 1;
2619 -+}
2620 -+
2621 -+/*
2622 -+ * MONITOR/MWAIT with no hints, used for the default C1 state.
2623 -+ * This invokes MWAIT with interrupts enabled and no flags,
2624 -+ * which is backwards compatible with the original MWAIT implementation.
2625 -+ */
2626 -+
2627 -+static void mwait_idle(void)
2628 -+{
2629 -+ if (!current_set_polling_and_test()) {
2630 -+ if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
2631 -+ smp_mb(); /* quirk */
2632 -+ clflush((void *)&current_thread_info()->flags);
2633 -+ smp_mb(); /* quirk */
2634 -+ }
2635 -+
2636 -+ __monitor((void *)&current_thread_info()->flags, 0, 0);
2637 -+ if (!need_resched())
2638 -+ __sti_mwait(0, 0);
2639 -+ else
2640 -+ local_irq_enable();
2641 -+ } else {
2642 -+ local_irq_enable();
2643 -+ }
2644 -+ __current_clr_polling();
2645 -+}
2646 -+
2647 - void select_idle_routine(const struct cpuinfo_x86 *c)
2648 - {
2649 - #ifdef CONFIG_SMP
2650 -@@ -412,6 +460,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
2651 - /* E400: APIC timer interrupt does not wake up CPU from C1e */
2652 - pr_info("using AMD E400 aware idle routine\n");
2653 - x86_idle = amd_e400_idle;
2654 -+ } else if (prefer_mwait_c1_over_halt(c)) {
2655 -+ pr_info("using mwait in idle threads\n");
2656 -+ x86_idle = mwait_idle;
2657 - } else
2658 - x86_idle = default_idle;
2659 - }
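
mwait_idle() above is the canonical MONITOR/MWAIT sequence: arm the monitor on the thread-flags cache line, re-check need_resched() to close the arm-vs-wakeup race, then MWAIT; the __sti_mwait() helper added earlier in this patch leans on STI's one-instruction interrupt shadow, so no interrupt can land between re-enabling IRQs and entering MWAIT. The protocol in outline, with invented stubs:

/* Sketch; monitor_arm(), mwait_sleep(), irq_enable() and work_pending()
 * are illustrative stubs, not the kernel helpers. */
extern void monitor_arm(const volatile unsigned long *addr);
extern void mwait_sleep(void);		/* think: "sti; mwait" */
extern void irq_enable(void);
extern int work_pending(const volatile unsigned long *flags);

static void idle_step(const volatile unsigned long *flags)
{
	monitor_arm(flags);		/* 1. watch the flags cache line */
	if (!work_pending(flags))	/* 2. re-check after arming      */
		mwait_sleep();		/* 3. sleep; a write to the line */
	else				/*    or an interrupt wakes us   */
		irq_enable();		/* work already queued: just run */
}
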
2660 -diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
2661 -index 2f355d2..e5ecd20 100644
2662 ---- a/arch/x86/kernel/pvclock.c
2663 -+++ b/arch/x86/kernel/pvclock.c
2664 -@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
2665 - set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
2666 - }
2667 -
2668 -+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
2669 -+
2670 -+static struct pvclock_vsyscall_time_info *
2671 -+pvclock_get_vsyscall_user_time_info(int cpu)
2672 -+{
2673 -+ if (!pvclock_vdso_info) {
2674 -+ BUG();
2675 -+ return NULL;
2676 -+ }
2677 -+
2678 -+ return &pvclock_vdso_info[cpu];
2679 -+}
2680 -+
2681 -+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
2682 -+{
2683 -+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
2684 -+}
2685 -+
2686 - #ifdef CONFIG_X86_64
2687 -+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
2688 -+ void *v)
2689 -+{
2690 -+ struct task_migration_notifier *mn = v;
2691 -+ struct pvclock_vsyscall_time_info *pvti;
2692 -+
2693 -+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
2694 -+
2695 -+ /* this is NULL when pvclock vsyscall is not initialized */
2696 -+ if (unlikely(pvti == NULL))
2697 -+ return NOTIFY_DONE;
2698 -+
2699 -+ pvti->migrate_count++;
2700 -+
2701 -+ return NOTIFY_DONE;
2702 -+}
2703 -+
2704 -+static struct notifier_block pvclock_migrate = {
2705 -+ .notifier_call = pvclock_task_migrate,
2706 -+};
2707 -+
2708 - /*
2709 - * Initialize the generic pvclock vsyscall state. This will allocate
2710 - * a/some page(s) for the per-vcpu pvclock information, set up a
2711 -@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
2712 -
2713 - WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
2714 -
2715 -+ pvclock_vdso_info = i;
2716 -+
2717 - for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
2718 - __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
2719 - __pa(i) + (idx*PAGE_SIZE),
2720 - PAGE_KERNEL_VVAR);
2721 - }
2722 -
2723 -+
2724 -+ register_task_migration_notifier(&pvclock_migrate);
2725 -+
2726 - return 0;
2727 - }
2728 - #endif
2729 -diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2730 -index ae4f6d3..a60bd3a 100644
2731 ---- a/arch/x86/kvm/vmx.c
2732 -+++ b/arch/x86/kvm/vmx.c
2733 -@@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
2734 -
2735 - static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2736 - {
2737 -- unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
2738 -- KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
2739 -+ /*
2740 -+ * Pass through host's Machine Check Enable value to hw_cr4, which
2741 -+ * is in force while we are in guest mode. Do not let guests control
2742 -+ * this bit, even if host CR4.MCE == 0.
2743 -+ */
2744 -+ unsigned long hw_cr4 =
2745 -+ (cr4_read_shadow() & X86_CR4_MCE) |
2746 -+ (cr4 & ~X86_CR4_MCE) |
2747 -+ (to_vmx(vcpu)->rmode.vm86_active ?
2748 -+ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
2749 -
2750 - if (cr4 & X86_CR4_VMXE) {
2751 - /*
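
The vmx_set_cr4() change above is a one-bit merge: CR4.MCE is taken from the host's CR4 shadow and every other bit from the guest value, so a guest can never switch off machine-check reporting on the real CPU. The merge, runnable on its own:

#include <stdio.h>

#define X86_CR4_MCE (1UL << 6)		/* machine-check enable */

int main(void)
{
	unsigned long host_cr4  = X86_CR4_MCE;	/* host has MCE on   */
	unsigned long guest_cr4 = 0;		/* guest asks for off */
	unsigned long hw_cr4 = (host_cr4 & X86_CR4_MCE) |
			       (guest_cr4 & ~X86_CR4_MCE);

	printf("MCE stays set: %lu\n", (hw_cr4 >> 6) & 1);	/* 1 */
	return 0;
}
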
2752 -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2753 -index 32bf19e..e222ba5 100644
2754 ---- a/arch/x86/kvm/x86.c
2755 -+++ b/arch/x86/kvm/x86.c
2756 -@@ -5775,7 +5775,6 @@ int kvm_arch_init(void *opaque)
2757 - kvm_set_mmio_spte_mask();
2758 -
2759 - kvm_x86_ops = ops;
2760 -- kvm_init_msr_list();
2761 -
2762 - kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2763 - PT_DIRTY_MASK, PT64_NX_MASK, 0);
2764 -@@ -7209,7 +7208,14 @@ void kvm_arch_hardware_disable(void)
2765 -
2766 - int kvm_arch_hardware_setup(void)
2767 - {
2768 -- return kvm_x86_ops->hardware_setup();
2769 -+ int r;
2770 -+
2771 -+ r = kvm_x86_ops->hardware_setup();
2772 -+ if (r != 0)
2773 -+ return r;
2774 -+
2775 -+ kvm_init_msr_list();
2776 -+ return 0;
2777 - }
2778 -
2779 - void kvm_arch_hardware_unsetup(void)
2780 -diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
2781 -index 1313ae6..85994f5 100644
2782 ---- a/arch/x86/lib/insn.c
2783 -+++ b/arch/x86/lib/insn.c
2784 -@@ -52,6 +52,13 @@
2785 - */
2786 - void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
2787 - {
2788 -+ /*
2789 -+ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
2790 -+ * even if the input buffer is long enough to hold them.
2791 -+ */
2792 -+ if (buf_len > MAX_INSN_SIZE)
2793 -+ buf_len = MAX_INSN_SIZE;
2794 -+
2795 - memset(insn, 0, sizeof(*insn));
2796 - insn->kaddr = kaddr;
2797 - insn->end_kaddr = kaddr + buf_len;
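
Paired with the MAX_INSN_SIZE correction earlier in this patch (16 down to 15), the clamp above encodes the architectural rule at the decoder's front door: an x86 instruction longer than 15 bytes faults regardless of how much buffer is available, so insn_init() never exposes more than 15 bytes as decodable. The guard in isolation, as a sketch:

#define MAX_INSN_SIZE 15	/* architectural x86 instruction-length limit */

static inline int clamp_insn_buf_len(int buf_len)
{
	/* Bytes past the limit can never be part of a valid instruction. */
	return buf_len > MAX_INSN_SIZE ? MAX_INSN_SIZE : buf_len;
}
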
2798 -diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
2799 -index 1f33b3d..0a42327 100644
2800 ---- a/arch/x86/lib/usercopy_64.c
2801 -+++ b/arch/x86/lib/usercopy_64.c
2802 -@@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
2803 - clac();
2804 -
2805 - /* If the destination is a kernel buffer, we always clear the end */
2806 -- if ((unsigned long)to >= TASK_SIZE_MAX)
2807 -+ if (!__addr_ok(to))
2808 - memset(to, 0, len);
2809 - return len;
2810 - }
2811 -diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
2812 -index 9793322..40d2473 100644
2813 ---- a/arch/x86/vdso/vclock_gettime.c
2814 -+++ b/arch/x86/vdso/vclock_gettime.c
2815 -@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
2816 - cycle_t ret;
2817 - u64 last;
2818 - u32 version;
2819 -+ u32 migrate_count;
2820 - u8 flags;
2821 - unsigned cpu, cpu1;
2822 -
2823 -
2824 - /*
2825 -- * Note: hypervisor must guarantee that:
2826 -- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
2827 -- * 2. that per-CPU pvclock time info is updated if the
2828 -- * underlying CPU changes.
2829 -- * 3. that version is increased whenever underlying CPU
2830 -- * changes.
2831 -- *
2832 -+ * When looping to get a consistent (time-info, tsc) pair, we
2833 -+ * also need to deal with the possibility that we switch vcpus,
2834 -+ * so make sure we always re-fetch time-info for the current vcpu.
2835 - */
2836 - do {
2837 - cpu = __getcpu() & VGETCPU_CPU_MASK;
2838 -@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
2839 - * __getcpu() calls (Gleb).
2840 - */
2841 -
2842 -- pvti = get_pvti(cpu);
2843 -+ /* Make sure migrate_count will change if we leave the VCPU. */
2844 -+ do {
2845 -+ pvti = get_pvti(cpu);
2846 -+ migrate_count = pvti->migrate_count;
2847 -+
2848 -+ cpu1 = cpu;
2849 -+ cpu = __getcpu() & VGETCPU_CPU_MASK;
2850 -+ } while (unlikely(cpu != cpu1));
2851 -
2852 - version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
2853 -
2854 - /*
2855 - * Test we're still on the cpu as well as the version.
2856 -- * We could have been migrated just after the first
2857 -- * vgetcpu but before fetching the version, so we
2858 -- * wouldn't notice a version change.
2859 -+ * - We must read TSC of pvti's VCPU.
2860 -+ * - KVM doesn't follow the versioning protocol, so data could
2861 -+ * change before version if we left the VCPU.
2862 - */
2863 -- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
2864 -- } while (unlikely(cpu != cpu1 ||
2865 -- (pvti->pvti.version & 1) ||
2866 -- pvti->pvti.version != version));
2867 -+ smp_rmb();
2868 -+ } while (unlikely((pvti->pvti.version & 1) ||
2869 -+ pvti->pvti.version != version ||
2870 -+ pvti->migrate_count != migrate_count));
2871 -
2872 - if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
2873 - *mode = VCLOCK_NONE;
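
The vdso loop above layers a second generation counter onto the usual even/odd version protocol: the reader snapshots migrate_count (bumped by the task-migration notifier this patch registers on the host side) and retries if either counter moved, because KVM at the time did not bump version when a task changed vCPUs. A simplified single-threaded sketch of the double-generation read (a real implementation needs read barriers where noted):

struct time_info {
	unsigned int version;		/* odd while the writer updates */
	unsigned int migrate_count;	/* bumped on vCPU migration     */
	unsigned long long tsc;
};

static unsigned long long stable_read(const volatile struct time_info *ti)
{
	unsigned int v, m;
	unsigned long long val;

	do {
		v   = ti->version;
		m   = ti->migrate_count;
		val = ti->tsc;
		/* a real reader issues smp_rmb() before the re-check */
	} while ((v & 1) || v != ti->version || m != ti->migrate_count);

	return val;
}
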
2874 -diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
2875 -index e31d494..87be10e 100644
2876 ---- a/arch/xtensa/Kconfig
2877 -+++ b/arch/xtensa/Kconfig
2878 -@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE
2879 -
2880 - If unsure, leave the default value here.
2881 -
2882 -+config XTFPGA_LCD
2883 -+ bool "Enable XTFPGA LCD driver"
2884 -+ depends on XTENSA_PLATFORM_XTFPGA
2885 -+ default n
2886 -+ help
2887 -+ There's a 2x16 LCD on most of XTFPGA boards, kernel may output
2888 -+ progress messages there during bootup/shutdown. It may be useful
2889 -+ during board bringup.
2890 -+
2891 -+ If unsure, say N.
2892 -+
2893 -+config XTFPGA_LCD_BASE_ADDR
2894 -+ hex "XTFPGA LCD base address"
2895 -+ depends on XTFPGA_LCD
2896 -+ default "0x0d0c0000"
2897 -+ help
2898 -+	  Base address of the LCD controller inside the KIO region.
2899 -+	  Different boards in the XTFPGA family have the LCD controller at
2900 -+	  different addresses. Please consult the prototyping user guide for
2901 -+	  your board for the correct address. A wrong address may lock up the hardware.
2902 -+
2903 -+config XTFPGA_LCD_8BIT_ACCESS
2904 -+ bool "Use 8-bit access to XTFPGA LCD"
2905 -+ depends on XTFPGA_LCD
2906 -+ default n
2907 -+ help
2908 -+	  The LCD may be connected with a 4- or 8-bit interface; 8-bit access
2909 -+	  may only be used with the 8-bit interface. Please consult the
2910 -+	  prototyping user guide for your board for the correct interface width.
2911 -+
2912 - endmenu
2913 -
2914 - menu "Executable file formats"
2915 -diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
2916 -index db5bb72..62d8465 100644
2917 ---- a/arch/xtensa/include/uapi/asm/unistd.h
2918 -+++ b/arch/xtensa/include/uapi/asm/unistd.h
2919 -@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
2920 - __SYSCALL(324, sys_name_to_handle_at, 5)
2921 - #define __NR_open_by_handle_at 325
2922 - __SYSCALL(325, sys_open_by_handle_at, 3)
2923 --#define __NR_sync_file_range 326
2924 -+#define __NR_sync_file_range2 326
2925 - __SYSCALL(326, sys_sync_file_range2, 6)
2926 - #define __NR_perf_event_open 327
2927 - __SYSCALL(327, sys_perf_event_open, 5)
2928 -diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
2929 -index d05f8fe..17b1ef3 100644
2930 ---- a/arch/xtensa/platforms/iss/network.c
2931 -+++ b/arch/xtensa/platforms/iss/network.c
2932 -@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
2933 - {
2934 - struct iss_net_private *lp = (struct iss_net_private *)priv;
2935 -
2936 -- spin_lock(&lp->lock);
2937 - iss_net_poll();
2938 -+ spin_lock(&lp->lock);
2939 - mod_timer(&lp->timer, jiffies + lp->timer_val);
2940 - spin_unlock(&lp->lock);
2941 - }
2942 -@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
2943 - struct iss_net_private *lp = netdev_priv(dev);
2944 - int err;
2945 -
2946 -- spin_lock(&lp->lock);
2947 -+ spin_lock_bh(&lp->lock);
2948 -
2949 - err = lp->tp.open(lp);
2950 - if (err < 0)
2951 -@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
2952 - while ((err = iss_net_rx(dev)) > 0)
2953 - ;
2954 -
2955 -- spin_lock(&opened_lock);
2956 -+ spin_unlock_bh(&lp->lock);
2957 -+ spin_lock_bh(&opened_lock);
2958 - list_add(&lp->opened_list, &opened);
2959 -- spin_unlock(&opened_lock);
2960 -+ spin_unlock_bh(&opened_lock);
2961 -+ spin_lock_bh(&lp->lock);
2962 -
2963 - init_timer(&lp->timer);
2964 - lp->timer_val = ISS_NET_TIMER_VALUE;
2965 -@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
2966 - mod_timer(&lp->timer, jiffies + lp->timer_val);
2967 -
2968 - out:
2969 -- spin_unlock(&lp->lock);
2970 -+ spin_unlock_bh(&lp->lock);
2971 - return err;
2972 - }
2973 -
2974 -@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
2975 - {
2976 - struct iss_net_private *lp = netdev_priv(dev);
2977 - netif_stop_queue(dev);
2978 -- spin_lock(&lp->lock);
2979 -+ spin_lock_bh(&lp->lock);
2980 -
2981 - spin_lock(&opened_lock);
2982 - list_del(&opened);
2983 -@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
2984 -
2985 - lp->tp.close(lp);
2986 -
2987 -- spin_unlock(&lp->lock);
2988 -+ spin_unlock_bh(&lp->lock);
2989 - return 0;
2990 - }
2991 -
2992 - static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
2993 - {
2994 - struct iss_net_private *lp = netdev_priv(dev);
2995 -- unsigned long flags;
2996 - int len;
2997 -
2998 - netif_stop_queue(dev);
2999 -- spin_lock_irqsave(&lp->lock, flags);
3000 -+ spin_lock_bh(&lp->lock);
3001 -
3002 - len = lp->tp.write(lp, &skb);
3003 -
3004 -@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
3005 - pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
3006 - }
3007 -
3008 -- spin_unlock_irqrestore(&lp->lock, flags);
3009 -+ spin_unlock_bh(&lp->lock);
3010 -
3011 - dev_kfree_skb(skb);
3012 - return NETDEV_TX_OK;
3013 -@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
3014 -
3015 - if (!is_valid_ether_addr(hwaddr->sa_data))
3016 - return -EADDRNOTAVAIL;
3017 -- spin_lock(&lp->lock);
3018 -+ spin_lock_bh(&lp->lock);
3019 - memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
3020 -- spin_unlock(&lp->lock);
3021 -+ spin_unlock_bh(&lp->lock);
3022 - return 0;
3023 - }
3024 -
3025 -@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
3026 - *lp = (struct iss_net_private) {
3027 - .device_list = LIST_HEAD_INIT(lp->device_list),
3028 - .opened_list = LIST_HEAD_INIT(lp->opened_list),
3029 -- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
3030 - .dev = dev,
3031 - .index = index,
3032 -- };
3033 -+ };
3034 -
3035 -+ spin_lock_init(&lp->lock);
3036 - /*
3037 - * If this name ends up conflicting with an existing registered
3038 - * netdevice, that is OK, register_netdev{,ice}() will notice this
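
Every locking conversion in the iss network hunks serves one rule: a lock that a timer callback (softirq context) also takes must be acquired with BHs disabled from process context, or the timer can fire on the same CPU while the lock is held and deadlock; the unlock/lock dance around opened_lock likewise keeps the two locks from nesting across contexts. Shape only, in kernel idiom (not a standalone program):

/* Shape only: lp->lock is shared between process context and a timer. */
static void example_timer(unsigned long priv)
{
	struct iss_net_private *lp = (struct iss_net_private *)priv;

	spin_lock(&lp->lock);		/* timer already runs in BH context */
	/* rearm or poll here */
	spin_unlock(&lp->lock);
}

static int example_open(struct iss_net_private *lp)
{
	spin_lock_bh(&lp->lock);	/* process context: mask BHs too,
					 * or the timer can deadlock us   */
	/* state changes the timer must not race with */
	spin_unlock_bh(&lp->lock);
	return 0;
}
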
3039 -diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
3040 -index b9ae206..7839d38 100644
3041 ---- a/arch/xtensa/platforms/xtfpga/Makefile
3042 -+++ b/arch/xtensa/platforms/xtfpga/Makefile
3043 -@@ -6,4 +6,5 @@
3044 - #
3045 - # Note 2! The CFLAGS definitions are in the main makefile...
3046 -
3047 --obj-y = setup.o lcd.o
3048 -+obj-y += setup.o
3049 -+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
3050 -diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
3051 -index 6edd20b..4e0af26 100644
3052 ---- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
3053 -+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
3054 -@@ -40,9 +40,6 @@
3055 -
3056 - /* UART */
3057 - #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
3058 --/* LCD instruction and data addresses. */
3059 --#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
3060 --#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
3061 -
3062 - /* Misc. */
3063 - #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
3064 -diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
3065 -index 0e43564..4c8541e 100644
3066 ---- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
3067 -+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
3068 -@@ -11,10 +11,25 @@
3069 - #ifndef __XTENSA_XTAVNET_LCD_H
3070 - #define __XTENSA_XTAVNET_LCD_H
3071 -
3072 -+#ifdef CONFIG_XTFPGA_LCD
3073 - /* Display string STR at position POS on the LCD. */
3074 - void lcd_disp_at_pos(char *str, unsigned char pos);
3075 -
3076 - /* Shift the contents of the LCD display left or right. */
3077 - void lcd_shiftleft(void);
3078 - void lcd_shiftright(void);
3079 -+#else
3080 -+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
3081 -+{
3082 -+}
3083 -+
3084 -+static inline void lcd_shiftleft(void)
3085 -+{
3086 -+}
3087 -+
3088 -+static inline void lcd_shiftright(void)
3089 -+{
3090 -+}
3091 -+#endif
3092 -+
3093 - #endif
3094 -diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
3095 -index 2872301..4dc0c1b 100644
3096 ---- a/arch/xtensa/platforms/xtfpga/lcd.c
3097 -+++ b/arch/xtensa/platforms/xtfpga/lcd.c
3098 -@@ -1,50 +1,63 @@
3099 - /*
3100 -- * Driver for the LCD display on the Tensilica LX60 Board.
3101 -+ * Driver for the LCD display on the Tensilica XTFPGA board family.
3102 -+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
3103 - *
3104 - * This file is subject to the terms and conditions of the GNU General Public
3105 - * License. See the file "COPYING" in the main directory of this archive
3106 - * for more details.
3107 - *
3108 - * Copyright (C) 2001, 2006 Tensilica Inc.
3109 -+ * Copyright (C) 2015 Cadence Design Systems Inc.
3110 - */
3111 -
3112 --/*
3113 -- *
3114 -- * FIXME: this code is from the examples from the LX60 user guide.
3115 -- *
3116 -- * The lcd_pause function does busy waiting, which is probably not
3117 -- * great. Maybe the code could be changed to use kernel timers, or
3118 -- * change the hardware to not need to wait.
3119 -- */
3120 --
3121 -+#include <linux/delay.h>
3122 - #include <linux/init.h>
3123 - #include <linux/io.h>
3124 -
3125 - #include <platform/hardware.h>
3126 - #include <platform/lcd.h>
3127 --#include <linux/delay.h>
3128 -
3129 --#define LCD_PAUSE_ITERATIONS 4000
3130 -+/* LCD instruction and data addresses. */
3131 -+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
3132 -+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
3133 -+
3134 - #define LCD_CLEAR 0x1
3135 - #define LCD_DISPLAY_ON 0xc
3136 -
3137 - /* 8bit and 2 lines display */
3138 - #define LCD_DISPLAY_MODE8BIT 0x38
3139 -+#define LCD_DISPLAY_MODE4BIT 0x28
3140 - #define LCD_DISPLAY_POS 0x80
3141 - #define LCD_SHIFT_LEFT 0x18
3142 - #define LCD_SHIFT_RIGHT 0x1c
3143 -
3144 -+static void lcd_put_byte(u8 *addr, u8 data)
3145 -+{
3146 -+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
3147 -+ ACCESS_ONCE(*addr) = data;
3148 -+#else
3149 -+ ACCESS_ONCE(*addr) = data & 0xf0;
3150 -+ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
3151 -+#endif
3152 -+}
3153 -+
3154 - static int __init lcd_init(void)
3155 - {
3156 -- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
3157 -+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
3158 - mdelay(5);
3159 -- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
3160 -+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
3161 - udelay(200);
3162 -- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
3163 -+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
3164 -+ udelay(50);
3165 -+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
3166 -+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
3167 -+ udelay(50);
3168 -+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
3169 - udelay(50);
3170 -- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
3171 -+#endif
3172 -+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
3173 - udelay(50);
3174 -- *LCD_INSTR_ADDR = LCD_CLEAR;
3175 -+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
3176 - mdelay(10);
3177 - lcd_disp_at_pos("XTENSA LINUX", 0);
3178 - return 0;
3179 -@@ -52,10 +65,10 @@ static int __init lcd_init(void)
3180 -
3181 - void lcd_disp_at_pos(char *str, unsigned char pos)
3182 - {
3183 -- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
3184 -+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
3185 - udelay(100);
3186 - while (*str != 0) {
3187 -- *LCD_DATA_ADDR = *str;
3188 -+ lcd_put_byte(LCD_DATA_ADDR, *str);
3189 - udelay(200);
3190 - str++;
3191 - }
3192 -@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
3193 -
3194 - void lcd_shiftleft(void)
3195 - {
3196 -- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
3197 -+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
3198 - udelay(50);
3199 - }
3200 -
3201 - void lcd_shiftright(void)
3202 - {
3203 -- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
3204 -+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
3205 - udelay(50);
3206 - }
3207 -
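The lcd.c hunk above routes every register access through lcd_put_byte(), which on a 4-bit bus splits each byte into a high-nibble write followed by the low nibble shifted up. A minimal user-space sketch of that split, with a plain array standing in for the MMIO register (all names here are illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Record of "register" writes; stands in for the LCD MMIO address. */
static uint8_t writes[2];
static int nwrites;

static void reg_write(uint8_t v) { writes[nwrites++] = v; }

/* 4-bit access: high nibble first, then the low nibble shifted up. */
static void lcd_put_byte_4bit(uint8_t data)
{
    reg_write(data & 0xf0);
    reg_write((uint8_t)(data << 4) & 0xf0);
}

int main(void)
{
    lcd_put_byte_4bit(0x38);    /* LCD_DISPLAY_MODE8BIT command */
    assert(writes[0] == 0x30 && writes[1] == 0x80);
    printf("nibbles: %02x %02x\n", writes[0], writes[1]);
    return 0;
}

Running it prints the two nibble writes 0x30 0x80 for the one-byte command 0x38.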
3208 -diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
3209 -index 5ed064e..ccf7932 100644
3210 ---- a/drivers/acpi/acpica/evgpe.c
3211 -+++ b/drivers/acpi/acpica/evgpe.c
3212 -@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
3213 - ACPI_SET_BIT(gpe_register_info->enable_for_run,
3214 - (u8)register_bit);
3215 - }
3216 -+ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
3217 -
3218 - return_ACPI_STATUS(AE_OK);
3219 - }
3220 -@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
3221 -
3222 - /* Enable the requested GPE */
3223 -
3224 -- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
3225 -+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
3226 - return_ACPI_STATUS(status);
3227 - }
3228 -
3229 -@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
3230 - if (ACPI_SUCCESS(status)) {
3231 - status =
3232 - acpi_hw_low_set_gpe(gpe_event_info,
3233 -- ACPI_GPE_DISABLE_SAVE);
3234 -+ ACPI_GPE_DISABLE);
3235 - }
3236 -
3237 - if (ACPI_FAILURE(status)) {
3238 -diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
3239 -index 84bc550..af6514e 100644
3240 ---- a/drivers/acpi/acpica/hwgpe.c
3241 -+++ b/drivers/acpi/acpica/hwgpe.c
3242 -@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
3243 - * RETURN: Status
3244 - *
3245 - * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
3246 -+ * The enable_mask field of the involved GPE register must be
3247 -+ * updated by the caller if necessary.
3248 - *
3249 - ******************************************************************************/
3250 -
3251 -@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
3252 - /* Set or clear just the bit that corresponds to this GPE */
3253 -
3254 - register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
3255 -- switch (action & ~ACPI_GPE_SAVE_MASK) {
3256 -+ switch (action) {
3257 - case ACPI_GPE_CONDITIONAL_ENABLE:
3258 -
3259 - /* Only enable if the corresponding enable_mask bit is set */
3260 -@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
3261 - /* Write the updated enable mask */
3262 -
3263 - status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
3264 -- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
3265 -- gpe_register_info->enable_mask = (u8)enable_mask;
3266 -- }
3267 - return (status);
3268 - }
3269 -
3270 -@@ -286,10 +285,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
3271 - {
3272 - acpi_status status;
3273 -
3274 -+ gpe_register_info->enable_mask = enable_mask;
3275 - status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
3276 -- if (ACPI_SUCCESS(status)) {
3277 -- gpe_register_info->enable_mask = enable_mask;
3278 -- }
3279 - return (status);
3280 - }
3281 -
3282 -diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
3283 -index 9bad45e..7fbc2b9 100644
3284 ---- a/drivers/acpi/acpica/tbinstal.c
3285 -+++ b/drivers/acpi/acpica/tbinstal.c
3286 -@@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
3287 - */
3288 - acpi_tb_uninstall_table(&new_table_desc);
3289 - *table_index = i;
3290 -- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
3291 - return_ACPI_STATUS(AE_OK);
3292 - }
3293 - }
3294 -diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
3295 -index bbca783..349f4fd 100644
3296 ---- a/drivers/acpi/scan.c
3297 -+++ b/drivers/acpi/scan.c
3298 -@@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
3299 - struct acpi_device_physical_node *pn;
3300 - bool offline = true;
3301 -
3302 -- mutex_lock(&adev->physical_node_lock);
3303 -+ /*
3304 -+ * acpi_container_offline() calls this for all of the container's
3305 -+ * children under the container's physical_node_lock lock.
3306 -+ */
3307 -+ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
3308 -
3309 - list_for_each_entry(pn, &adev->physical_node_list, node)
3310 - if (device_supports_offline(pn->dev) && !pn->dev->offline) {
3311 -diff --git a/drivers/base/bus.c b/drivers/base/bus.c
3312 -index 876bae5..79bc203 100644
3313 ---- a/drivers/base/bus.c
3314 -+++ b/drivers/base/bus.c
3315 -@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
3316 - goto out_put;
3317 - error = device_add_groups(dev, bus->dev_groups);
3318 - if (error)
3319 -- goto out_groups;
3320 -+ goto out_id;
3321 - error = sysfs_create_link(&bus->p->devices_kset->kobj,
3322 - &dev->kobj, dev_name(dev));
3323 - if (error)
3324 -- goto out_id;
3325 -+ goto out_groups;
3326 - error = sysfs_create_link(&dev->kobj,
3327 - &dev->bus->p->subsys.kobj, "subsystem");
3328 - if (error)
3329 -diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
3330 -index 6e64563..9c2ba1c 100644
3331 ---- a/drivers/base/cacheinfo.c
3332 -+++ b/drivers/base/cacheinfo.c
3333 -@@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu)
3334 - return -ENOENT;
3335 - }
3336 -
3337 -- while (np && index < cache_leaves(cpu)) {
3338 -+ while (index < cache_leaves(cpu)) {
3339 - this_leaf = this_cpu_ci->info_list + index;
3340 - if (this_leaf->level != 1)
3341 - np = of_find_next_cache_node(np);
3342 - else
3343 - np = of_node_get(np);/* cpu node itself */
3344 -+ if (!np)
3345 -+ break;
3346 - this_leaf->of_node = np;
3347 - index++;
3348 - }
3349 -+
3350 -+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
3351 -+ return -ENOENT;
3352 -+
3353 - return 0;
3354 - }
3355 -
3356 -@@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu)
3357 - * will be set up here only if they are not populated already
3358 - */
3359 - ret = cache_shared_cpu_map_setup(cpu);
3360 -- if (ret)
3361 -+ if (ret) {
3362 -+ pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
3363 -+ cpu);
3364 - goto free_ci;
3365 -+ }
3366 - return 0;
3367 -
3368 - free_ci:
3369 -diff --git a/drivers/base/platform.c b/drivers/base/platform.c
3370 -index 9421fed..e68ab79 100644
3371 ---- a/drivers/base/platform.c
3372 -+++ b/drivers/base/platform.c
3373 -@@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
3374 - }
3375 -
3376 - r = platform_get_resource(dev, IORESOURCE_IRQ, num);
3377 -+ /*
3378 -+ * The resources may pass trigger flags to the irqs that need
3379 -+ * to be set up. It so happens that the trigger flags for
3380 -+ * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
3381 -+ * settings.
3382 -+ */
3383 -+ if (r && r->flags & IORESOURCE_BITS)
3384 -+ irqd_set_trigger_type(irq_get_irq_data(r->start),
3385 -+ r->flags & IORESOURCE_BITS);
3386 -
3387 - return r ? r->start : -ENXIO;
3388 - #endif
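The platform.c hunk leans on the 1-to-1 correspondence its comment describes: the low IORESOURCE_BITS of an IRQ resource carry IRQF_TRIGGER* flags directly, so masking the resource flags yields a value the irq core can consume unchanged. A user-space sketch of that extraction (the constants match the kernel headers; set_trigger() is a hypothetical stand-in for irqd_set_trigger_type()):

#include <stdio.h>

#define IORESOURCE_BITS     0x000000ff
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002

static void set_trigger(unsigned int type)
{
    printf("trigger type 0x%x\n", type); /* would be irqd_set_trigger_type() */
}

int main(void)
{
    /* upper bits: unrelated resource state; low bits: trigger flags */
    unsigned long res_flags = 0x400 | IRQF_TRIGGER_FALLING;

    if (res_flags & IORESOURCE_BITS)
        set_trigger(res_flags & IORESOURCE_BITS);
    return 0;
}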
3389 -diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
3390 -index de4c849..288547a 100644
3391 ---- a/drivers/bluetooth/ath3k.c
3392 -+++ b/drivers/bluetooth/ath3k.c
3393 -@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
3394 - /* Atheros AR3011 with sflash firmware*/
3395 - { USB_DEVICE(0x0489, 0xE027) },
3396 - { USB_DEVICE(0x0489, 0xE03D) },
3397 -+ { USB_DEVICE(0x04F2, 0xAFF1) },
3398 - { USB_DEVICE(0x0930, 0x0215) },
3399 - { USB_DEVICE(0x0CF3, 0x3002) },
3400 - { USB_DEVICE(0x0CF3, 0xE019) },
3401 -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
3402 -index 8bfc4c2..2c527da 100644
3403 ---- a/drivers/bluetooth/btusb.c
3404 -+++ b/drivers/bluetooth/btusb.c
3405 -@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
3406 - /* Atheros 3011 with sflash firmware */
3407 - { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
3408 - { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
3409 -+ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
3410 - { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
3411 - { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
3412 - { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
3413 -diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
3414 -index e096e9c..283f00a 100644
3415 ---- a/drivers/char/tpm/tpm-chip.c
3416 -+++ b/drivers/char/tpm/tpm-chip.c
3417 -@@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
3418 - device_unregister(&chip->dev);
3419 - }
3420 -
3421 -+static int tpm1_chip_register(struct tpm_chip *chip)
3422 -+{
3423 -+ int rc;
3424 -+
3425 -+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
3426 -+ return 0;
3427 -+
3428 -+ rc = tpm_sysfs_add_device(chip);
3429 -+ if (rc)
3430 -+ return rc;
3431 -+
3432 -+ rc = tpm_add_ppi(chip);
3433 -+ if (rc) {
3434 -+ tpm_sysfs_del_device(chip);
3435 -+ return rc;
3436 -+ }
3437 -+
3438 -+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
3439 -+
3440 -+ return 0;
3441 -+}
3442 -+
3443 -+static void tpm1_chip_unregister(struct tpm_chip *chip)
3444 -+{
3445 -+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
3446 -+ return;
3447 -+
3448 -+ if (chip->bios_dir)
3449 -+ tpm_bios_log_teardown(chip->bios_dir);
3450 -+
3451 -+ tpm_remove_ppi(chip);
3452 -+
3453 -+ tpm_sysfs_del_device(chip);
3454 -+}
3455 -+
3456 - /*
3457 - * tpm_chip_register() - create a character device for the TPM chip
3458 - * @chip: TPM chip to use.
3459 -@@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip)
3460 - {
3461 - int rc;
3462 -
3463 -- /* Populate sysfs for TPM1 devices. */
3464 -- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
3465 -- rc = tpm_sysfs_add_device(chip);
3466 -- if (rc)
3467 -- goto del_misc;
3468 --
3469 -- rc = tpm_add_ppi(chip);
3470 -- if (rc)
3471 -- goto del_sysfs;
3472 --
3473 -- chip->bios_dir = tpm_bios_log_setup(chip->devname);
3474 -- }
3475 -+ rc = tpm1_chip_register(chip);
3476 -+ if (rc)
3477 -+ return rc;
3478 -
3479 - rc = tpm_dev_add_device(chip);
3480 - if (rc)
3481 -- return rc;
3482 -+ goto out_err;
3483 -
3484 - /* Make the chip available. */
3485 - spin_lock(&driver_lock);
3486 -@@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip)
3487 - chip->flags |= TPM_CHIP_FLAG_REGISTERED;
3488 -
3489 - return 0;
3490 --del_sysfs:
3491 -- tpm_sysfs_del_device(chip);
3492 --del_misc:
3493 -- tpm_dev_del_device(chip);
3494 -+out_err:
3495 -+ tpm1_chip_unregister(chip);
3496 - return rc;
3497 - }
3498 - EXPORT_SYMBOL_GPL(tpm_chip_register);
3499 -@@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
3500 - spin_unlock(&driver_lock);
3501 - synchronize_rcu();
3502 -
3503 -- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
3504 -- if (chip->bios_dir)
3505 -- tpm_bios_log_teardown(chip->bios_dir);
3506 -- tpm_remove_ppi(chip);
3507 -- tpm_sysfs_del_device(chip);
3508 -- }
3509 --
3510 -+ tpm1_chip_unregister(chip);
3511 - tpm_dev_del_device(chip);
3512 - }
3513 - EXPORT_SYMBOL_GPL(tpm_chip_unregister);
3514 -diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
3515 -index a23ac0c..0b7c3e8 100644
3516 ---- a/drivers/clk/at91/clk-usb.c
3517 -+++ b/drivers/clk/at91/clk-usb.c
3518 -@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
3519 - return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
3520 - }
3521 -
3522 --static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
3523 -- unsigned long *parent_rate)
3524 -+static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
3525 -+ unsigned long rate,
3526 -+ unsigned long min_rate,
3527 -+ unsigned long max_rate,
3528 -+ unsigned long *best_parent_rate,
3529 -+ struct clk_hw **best_parent_hw)
3530 - {
3531 -- unsigned long div;
3532 -+ struct clk *parent = NULL;
3533 -+ long best_rate = -EINVAL;
3534 -+ unsigned long tmp_rate;
3535 -+ int best_diff = -1;
3536 -+ int tmp_diff;
3537 -+ int i;
3538 -
3539 -- if (!rate)
3540 -- return -EINVAL;
3541 -+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
3542 -+ int div;
3543 -
3544 -- if (rate >= *parent_rate)
3545 -- return *parent_rate;
3546 -+ parent = clk_get_parent_by_index(hw->clk, i);
3547 -+ if (!parent)
3548 -+ continue;
3549 -+
3550 -+ for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
3551 -+ unsigned long tmp_parent_rate;
3552 -+
3553 -+ tmp_parent_rate = rate * div;
3554 -+ tmp_parent_rate = __clk_round_rate(parent,
3555 -+ tmp_parent_rate);
3556 -+ tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
3557 -+ if (tmp_rate < rate)
3558 -+ tmp_diff = rate - tmp_rate;
3559 -+ else
3560 -+ tmp_diff = tmp_rate - rate;
3561 -+
3562 -+ if (best_diff < 0 || best_diff > tmp_diff) {
3563 -+ best_rate = tmp_rate;
3564 -+ best_diff = tmp_diff;
3565 -+ *best_parent_rate = tmp_parent_rate;
3566 -+ *best_parent_hw = __clk_get_hw(parent);
3567 -+ }
3568 -+
3569 -+ if (!best_diff || tmp_rate < rate)
3570 -+ break;
3571 -+ }
3572 -
3573 -- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
3574 -- if (div > SAM9X5_USB_MAX_DIV + 1)
3575 -- div = SAM9X5_USB_MAX_DIV + 1;
3576 -+ if (!best_diff)
3577 -+ break;
3578 -+ }
3579 -
3580 -- return DIV_ROUND_CLOSEST(*parent_rate, div);
3581 -+ return best_rate;
3582 - }
3583 -
3584 - static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
3585 -@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
3586 -
3587 - static const struct clk_ops at91sam9x5_usb_ops = {
3588 - .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
3589 -- .round_rate = at91sam9x5_clk_usb_round_rate,
3590 -+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
3591 - .get_parent = at91sam9x5_clk_usb_get_parent,
3592 - .set_parent = at91sam9x5_clk_usb_set_parent,
3593 - .set_rate = at91sam9x5_clk_usb_set_rate,
3594 -@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
3595 - .disable = at91sam9n12_clk_usb_disable,
3596 - .is_enabled = at91sam9n12_clk_usb_is_enabled,
3597 - .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
3598 -- .round_rate = at91sam9x5_clk_usb_round_rate,
3599 -+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
3600 - .set_rate = at91sam9x5_clk_usb_set_rate,
3601 - };
3602 -
3603 -@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
3604 - init.ops = &at91sam9x5_usb_ops;
3605 - init.parent_names = parent_names;
3606 - init.num_parents = num_parents;
3607 -- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
3608 -+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
3609 -+ CLK_SET_RATE_PARENT;
3610 -
3611 - usb->hw.init = &init;
3612 - usb->pmc = pmc;
3613 -@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
3614 - init.ops = &at91sam9n12_usb_ops;
3615 - init.parent_names = &parent_name;
3616 - init.num_parents = 1;
3617 -- init.flags = CLK_SET_RATE_GATE;
3618 -+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
3619 -
3620 - usb->hw.init = &init;
3621 - usb->pmc = pmc;
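The at91 clk-usb.c hunk above replaces round_rate with a determine_rate that searches every (parent, divider) pair for the smallest rate error, breaking early once an exact match is found or further dividers can only undershoot. A self-contained sketch of that search over fixed example parent rates (the kernel version additionally rounds each implied parent rate through __clk_round_rate):

#include <stdio.h>

#define MAX_DIV 15

/* Pick the parent/divider pair whose output is closest to `rate`. */
static long best_rate(unsigned long rate, const unsigned long *parents,
                      int nparents, int *best_parent, int *best_div)
{
    long best = -1;
    unsigned long best_diff = ~0UL;

    for (int i = 0; i < nparents; i++) {
        for (int div = 1; div <= MAX_DIV + 1; div++) {
            unsigned long tmp = (parents[i] + div / 2) / div;
            unsigned long diff = tmp > rate ? tmp - rate : rate - tmp;

            if (diff < best_diff) {
                best_diff = diff;
                best = (long)tmp;
                *best_parent = i;
                *best_div = div;
            }
            if (!best_diff || tmp < rate)
                break;  /* exact hit, or further dividers only undershoot */
        }
        if (!best_diff)
            break;
    }
    return best;
}

int main(void)
{
    const unsigned long parents[] = { 480000000, 12000000 };
    int p, d;
    long r = best_rate(48000000, parents, 2, &p, &d);

    printf("best %ld via parent %d, div %d\n", r, p, d);
    return 0;
}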
3622 -diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
3623 -index 0039bd7..466f30c 100644
3624 ---- a/drivers/clk/qcom/clk-rcg.c
3625 -+++ b/drivers/clk/qcom/clk-rcg.c
3626 -@@ -495,6 +495,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
3627 - return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
3628 - }
3629 -
3630 -+/*
3631 -+ * This type of clock has a glitch-free mux that switches between the output of
3632 -+ * the M/N counter and an always on clock source (XO). When clk_set_rate() is
3633 -+ * called we need to make sure that we don't switch to the M/N counter if it
3634 -+ * isn't clocking because the mux will get stuck and the clock will stop
3635 -+ * outputting a clock. This can happen if the framework isn't aware that this
3636 -+ * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
3637 -+ * this we switch the mux in the enable/disable ops and reprogram the M/N
3638 -+ * counter in the set_rate op. We also make sure to switch away from the M/N
3639 -+ * counter in set_rate if software thinks the clock is off.
3640 -+ */
3641 -+static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
3642 -+ unsigned long parent_rate)
3643 -+{
3644 -+ struct clk_rcg *rcg = to_clk_rcg(hw);
3645 -+ const struct freq_tbl *f;
3646 -+ int ret;
3647 -+ u32 gfm = BIT(10);
3648 -+
3649 -+ f = qcom_find_freq(rcg->freq_tbl, rate);
3650 -+ if (!f)
3651 -+ return -EINVAL;
3652 -+
3653 -+ /* Switch to XO to avoid glitches */
3654 -+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
3655 -+ ret = __clk_rcg_set_rate(rcg, f);
3656 -+ /* Switch back to M/N if it's clocking */
3657 -+ if (__clk_is_enabled(hw->clk))
3658 -+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
3659 -+
3660 -+ return ret;
3661 -+}
3662 -+
3663 -+static int clk_rcg_lcc_enable(struct clk_hw *hw)
3664 -+{
3665 -+ struct clk_rcg *rcg = to_clk_rcg(hw);
3666 -+ u32 gfm = BIT(10);
3667 -+
3668 -+ /* Use M/N */
3669 -+ return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
3670 -+}
3671 -+
3672 -+static void clk_rcg_lcc_disable(struct clk_hw *hw)
3673 -+{
3674 -+ struct clk_rcg *rcg = to_clk_rcg(hw);
3675 -+ u32 gfm = BIT(10);
3676 -+
3677 -+ /* Use XO */
3678 -+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
3679 -+}
3680 -+
3681 - static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
3682 - {
3683 - struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
3684 -@@ -543,6 +594,17 @@ const struct clk_ops clk_rcg_bypass_ops = {
3685 - };
3686 - EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
3687 -
3688 -+const struct clk_ops clk_rcg_lcc_ops = {
3689 -+ .enable = clk_rcg_lcc_enable,
3690 -+ .disable = clk_rcg_lcc_disable,
3691 -+ .get_parent = clk_rcg_get_parent,
3692 -+ .set_parent = clk_rcg_set_parent,
3693 -+ .recalc_rate = clk_rcg_recalc_rate,
3694 -+ .determine_rate = clk_rcg_determine_rate,
3695 -+ .set_rate = clk_rcg_lcc_set_rate,
3696 -+};
3697 -+EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
3698 -+
3699 - const struct clk_ops clk_dyn_rcg_ops = {
3700 - .enable = clk_enable_regmap,
3701 - .is_enabled = clk_is_enabled_regmap,
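The comment opening the clk-rcg.c hunk describes the sequence the new ops implement: park the glitch-free mux on the always-running XO before touching the M/N counter, and switch back only when the counter is known to be clocking. A minimal simulation of that sequence (register and names are stand-ins for the regmap calls):

#include <stdbool.h>
#include <stdio.h>

#define GFM (1u << 10)      /* glitch-free mux select bit */

static unsigned int ns_reg;
static bool enabled;

static void update_bits(unsigned int mask, unsigned int val)
{
    ns_reg = (ns_reg & ~mask) | (val & mask);
}

static void lcc_set_rate(void)
{
    update_bits(GFM, 0);    /* park on XO so the mux never sees a dead M/N */
    /* ... reprogram the M/N counter here ... */
    if (enabled)
        update_bits(GFM, GFM);  /* back to M/N only if it is clocking */
}

int main(void)
{
    enabled = true;
    lcc_set_rate();
    printf("mux on M/N: %s\n", (ns_reg & GFM) ? "yes" : "no");
    return 0;
}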
3702 -diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
3703 -index 687e41f..d09d06b 100644
3704 ---- a/drivers/clk/qcom/clk-rcg.h
3705 -+++ b/drivers/clk/qcom/clk-rcg.h
3706 -@@ -96,6 +96,7 @@ struct clk_rcg {
3707 -
3708 - extern const struct clk_ops clk_rcg_ops;
3709 - extern const struct clk_ops clk_rcg_bypass_ops;
3710 -+extern const struct clk_ops clk_rcg_lcc_ops;
3711 -
3712 - #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
3713 -
3714 -diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
3715 -index 742acfa..381f274 100644
3716 ---- a/drivers/clk/qcom/clk-rcg2.c
3717 -+++ b/drivers/clk/qcom/clk-rcg2.c
3718 -@@ -243,7 +243,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
3719 - mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
3720 - cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
3721 - cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
3722 -- if (rcg->mnd_width && f->n)
3723 -+ if (rcg->mnd_width && f->n && (f->m != f->n))
3724 - cfg |= CFG_MODE_DUAL_EDGE;
3725 - ret = regmap_update_bits(rcg->clkr.regmap,
3726 - rcg->cmd_rcgr + CFG_REG, mask, cfg);
3727 -diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
3728 -index cbdc31d..a015bb0 100644
3729 ---- a/drivers/clk/qcom/gcc-ipq806x.c
3730 -+++ b/drivers/clk/qcom/gcc-ipq806x.c
3731 -@@ -525,8 +525,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
3732 - { 10800000, P_PXO, 1, 2, 5 },
3733 - { 15060000, P_PLL8, 1, 2, 51 },
3734 - { 24000000, P_PLL8, 4, 1, 4 },
3735 -+ { 25000000, P_PXO, 1, 0, 0 },
3736 - { 25600000, P_PLL8, 1, 1, 15 },
3737 -- { 27000000, P_PXO, 1, 0, 0 },
3738 - { 48000000, P_PLL8, 4, 1, 2 },
3739 - { 51200000, P_PLL8, 1, 2, 15 },
3740 - { }
3741 -diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
3742 -index c9ff27b..a6d3a67 100644
3743 ---- a/drivers/clk/qcom/lcc-ipq806x.c
3744 -+++ b/drivers/clk/qcom/lcc-ipq806x.c
3745 -@@ -294,14 +294,14 @@ static struct clk_regmap_mux pcm_clk = {
3746 - };
3747 -
3748 - static struct freq_tbl clk_tbl_aif_osr[] = {
3749 -- { 22050, P_PLL4, 1, 147, 20480 },
3750 -- { 32000, P_PLL4, 1, 1, 96 },
3751 -- { 44100, P_PLL4, 1, 147, 10240 },
3752 -- { 48000, P_PLL4, 1, 1, 64 },
3753 -- { 88200, P_PLL4, 1, 147, 5120 },
3754 -- { 96000, P_PLL4, 1, 1, 32 },
3755 -- { 176400, P_PLL4, 1, 147, 2560 },
3756 -- { 192000, P_PLL4, 1, 1, 16 },
3757 -+ { 2822400, P_PLL4, 1, 147, 20480 },
3758 -+ { 4096000, P_PLL4, 1, 1, 96 },
3759 -+ { 5644800, P_PLL4, 1, 147, 10240 },
3760 -+ { 6144000, P_PLL4, 1, 1, 64 },
3761 -+ { 11289600, P_PLL4, 1, 147, 5120 },
3762 -+ { 12288000, P_PLL4, 1, 1, 32 },
3763 -+ { 22579200, P_PLL4, 1, 147, 2560 },
3764 -+ { 24576000, P_PLL4, 1, 1, 16 },
3765 - { },
3766 - };
3767 -
3768 -@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
3769 - };
3770 -
3771 - static struct freq_tbl clk_tbl_ahbix[] = {
3772 -- { 131072, P_PLL4, 1, 1, 3 },
3773 -+ { 131072000, P_PLL4, 1, 1, 3 },
3774 - { },
3775 - };
3776 -
3777 -@@ -386,13 +386,12 @@ static struct clk_rcg ahbix_clk = {
3778 - .freq_tbl = clk_tbl_ahbix,
3779 - .clkr = {
3780 - .enable_reg = 0x38,
3781 -- .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
3782 -+ .enable_mask = BIT(11),
3783 - .hw.init = &(struct clk_init_data){
3784 - .name = "ahbix",
3785 - .parent_names = lcc_pxo_pll4,
3786 - .num_parents = 2,
3787 -- .ops = &clk_rcg_ops,
3788 -- .flags = CLK_SET_RATE_GATE,
3789 -+ .ops = &clk_rcg_lcc_ops,
3790 - },
3791 - },
3792 - };
3793 -diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
3794 -index 51462e8..714d6ba 100644
3795 ---- a/drivers/clk/samsung/clk-exynos4.c
3796 -+++ b/drivers/clk/samsung/clk-exynos4.c
3797 -@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
3798 - VPLL_LOCK, VPLL_CON0, NULL),
3799 - };
3800 -
3801 --static void __init exynos4_core_down_clock(enum exynos4_soc soc)
3802 -+static void __init exynos4x12_core_down_clock(void)
3803 - {
3804 - unsigned int tmp;
3805 -
3806 -@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
3807 - __raw_writel(tmp, reg_base + PWR_CTRL1);
3808 -
3809 - /*
3810 -- * Disable the clock up feature on Exynos4x12, in case it was
3811 -- * enabled by bootloader.
3812 -+ * Disable the clock up feature in case it was enabled by bootloader.
3813 - */
3814 -- if (exynos4_soc == EXYNOS4X12)
3815 -- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
3816 -+ __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
3817 - }
3818 -
3819 - /* register exynos4 clocks */
3820 -@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
3821 - samsung_clk_register_alias(ctx, exynos4_aliases,
3822 - ARRAY_SIZE(exynos4_aliases));
3823 -
3824 -- exynos4_core_down_clock(soc);
3825 -+ if (soc == EXYNOS4X12)
3826 -+ exynos4x12_core_down_clock();
3827 - exynos4_clk_sleep_init();
3828 -
3829 - samsung_clk_of_add_provider(np, ctx);
3830 -diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
3831 -index 9a893f2..23ce0af 100644
3832 ---- a/drivers/clk/tegra/clk-tegra124.c
3833 -+++ b/drivers/clk/tegra/clk-tegra124.c
3834 -@@ -1110,16 +1110,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
3835 - 1, 2);
3836 - clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
3837 -
3838 -- clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
3839 -+ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
3840 - clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
3841 -- clks[TEGRA124_CLK_PLLD_DSI] = clk;
3842 -+ clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;
3843 -
3844 -- clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
3845 -- 0, 48, periph_clk_enb_refcnt);
3846 -+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0,
3847 -+ clk_base, 0, 48,
3848 -+ periph_clk_enb_refcnt);
3849 - clks[TEGRA124_CLK_DSIA] = clk;
3850 -
3851 -- clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
3852 -- 0, 82, periph_clk_enb_refcnt);
3853 -+ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0,
3854 -+ clk_base, 0, 82,
3855 -+ periph_clk_enb_refcnt);
3856 - clks[TEGRA124_CLK_DSIB] = clk;
3857 -
3858 - /* emc mux */
3859 -diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
3860 -index 9ddb754..7a1df61 100644
3861 ---- a/drivers/clk/tegra/clk.c
3862 -+++ b/drivers/clk/tegra/clk.c
3863 -@@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
3864 - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
3865 -
3866 - rst_ctlr.of_node = np;
3867 -- rst_ctlr.nr_resets = clk_num * 32;
3868 -+ rst_ctlr.nr_resets = periph_banks * 32;
3869 - reset_controller_register(&rst_ctlr);
3870 - }
3871 -
3872 -diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
3873 -index 42f95a4..9a28b7e 100644
3874 ---- a/drivers/crypto/omap-aes.c
3875 -+++ b/drivers/crypto/omap-aes.c
3876 -@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
3877 - return err;
3878 - }
3879 -
3880 --static int omap_aes_check_aligned(struct scatterlist *sg)
3881 -+static int omap_aes_check_aligned(struct scatterlist *sg, int total)
3882 - {
3883 -+ int len = 0;
3884 -+
3885 - while (sg) {
3886 - if (!IS_ALIGNED(sg->offset, 4))
3887 - return -1;
3888 - if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
3889 - return -1;
3890 -+
3891 -+ len += sg->length;
3892 - sg = sg_next(sg);
3893 - }
3894 -+
3895 -+ if (len != total)
3896 -+ return -1;
3897 -+
3898 - return 0;
3899 - }
3900 -
3901 -@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
3902 - dd->in_sg = req->src;
3903 - dd->out_sg = req->dst;
3904 -
3905 -- if (omap_aes_check_aligned(dd->in_sg) ||
3906 -- omap_aes_check_aligned(dd->out_sg)) {
3907 -+ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
3908 -+ omap_aes_check_aligned(dd->out_sg, dd->total)) {
3909 - if (omap_aes_copy_sgs(dd))
3910 - pr_err("Failed to copy SGs for unaligned cases\n");
3911 - dd->sgs_copied = 1;
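The omap-aes.c hunk extends the alignment walk to also sum the scatterlist and reject it when the covered length differs from the request, since an aligned-but-short list would otherwise be handed to DMA untouched. A user-space sketch with an array standing in for the scatterlist:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

struct seg { unsigned int offset, length; };

static int check_aligned(const struct seg *sg, int n, unsigned int total)
{
    unsigned int len = 0;

    for (int i = 0; i < n; i++) {
        if (sg[i].offset % 4 || sg[i].length % AES_BLOCK_SIZE)
            return -1;
        len += sg[i].length;
    }
    return len == total ? 0 : -1;   /* short/long lists take the copy path */
}

int main(void)
{
    struct seg ok[] = { { 0, 32 }, { 4, 32 } };

    printf("%d\n", check_aligned(ok, 2, 64));   /* 0: aligned, covers total */
    printf("%d\n", check_aligned(ok, 2, 96));   /* -1: does not cover total */
    return 0;
}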
3912 -diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
3913 -index d0bc123..1a54205 100644
3914 ---- a/drivers/gpio/gpio-mvebu.c
3915 -+++ b/drivers/gpio/gpio-mvebu.c
3916 -@@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
3917 - {
3918 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3919 - struct mvebu_gpio_chip *mvchip = gc->private;
3920 -+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
3921 - u32 mask = 1 << (d->irq - gc->irq_base);
3922 -
3923 - irq_gc_lock(gc);
3924 -- gc->mask_cache &= ~mask;
3925 -- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
3926 -+ ct->mask_cache_priv &= ~mask;
3927 -+
3928 -+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
3929 - irq_gc_unlock(gc);
3930 - }
3931 -
3932 -@@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
3933 - {
3934 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3935 - struct mvebu_gpio_chip *mvchip = gc->private;
3936 -+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
3937 -+
3938 - u32 mask = 1 << (d->irq - gc->irq_base);
3939 -
3940 - irq_gc_lock(gc);
3941 -- gc->mask_cache |= mask;
3942 -- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
3943 -+ ct->mask_cache_priv |= mask;
3944 -+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
3945 - irq_gc_unlock(gc);
3946 - }
3947 -
3948 -@@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
3949 - {
3950 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3951 - struct mvebu_gpio_chip *mvchip = gc->private;
3952 -+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
3953 -+
3954 - u32 mask = 1 << (d->irq - gc->irq_base);
3955 -
3956 - irq_gc_lock(gc);
3957 -- gc->mask_cache &= ~mask;
3958 -- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
3959 -+ ct->mask_cache_priv &= ~mask;
3960 -+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
3961 - irq_gc_unlock(gc);
3962 - }
3963 -
3964 -@@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
3965 - {
3966 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3967 - struct mvebu_gpio_chip *mvchip = gc->private;
3968 -+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
3969 -+
3970 - u32 mask = 1 << (d->irq - gc->irq_base);
3971 -
3972 - irq_gc_lock(gc);
3973 -- gc->mask_cache |= mask;
3974 -- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
3975 -+ ct->mask_cache_priv |= mask;
3976 -+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
3977 - irq_gc_unlock(gc);
3978 - }
3979 -
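The gpio-mvebu.c hunks replace the generic chip's single gc->mask_cache with the per-irq_chip_type mask_cache_priv: edge and level interrupts live in different mask registers, so one shared cache lets a write for one register clobber the cached bits of the other. A sketch of the per-register cache discipline (names illustrative):

#include <stdio.h>

struct masked_reg {
    unsigned int cache; /* per-register cache: the mask_cache_priv idea */
    unsigned int hw;
};

static void set_mask(struct masked_reg *r, unsigned int bit, int on)
{
    if (on)
        r->cache |= bit;
    else
        r->cache &= ~bit;
    r->hw = r->cache;   /* writel_relaxed(cache, reg) */
}

int main(void)
{
    struct masked_reg edge = { 0, 0 }, level = { 0, 0 };

    set_mask(&level, 1u << 3, 1);   /* unmask level irq 3 */
    set_mask(&edge, 1u << 5, 1);    /* does not disturb the level register */
    printf("edge=%x level=%x\n", edge.hw, level.hw);
    return 0;
}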
3980 -diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
3981 -index bf17a60..1dbfba5 100644
3982 ---- a/drivers/gpu/drm/exynos/exynos_dp_core.c
3983 -+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
3984 -@@ -32,10 +32,16 @@
3985 - #include <drm/bridge/ptn3460.h>
3986 -
3987 - #include "exynos_dp_core.h"
3988 -+#include "exynos_drm_fimd.h"
3989 -
3990 - #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
3991 - connector)
3992 -
3993 -+static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
3994 -+{
3995 -+ return to_exynos_crtc(dp->encoder->crtc);
3996 -+}
3997 -+
3998 - static inline struct exynos_dp_device *
3999 - display_to_dp(struct exynos_drm_display *d)
4000 - {
4001 -@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
4002 - }
4003 - }
4004 -
4005 -+ fimd_dp_clock_enable(dp_to_crtc(dp), true);
4006 -+
4007 - clk_prepare_enable(dp->clock);
4008 - exynos_dp_phy_init(dp);
4009 - exynos_dp_init_dp(dp);
4010 -@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
4011 - exynos_dp_phy_exit(dp);
4012 - clk_disable_unprepare(dp->clock);
4013 -
4014 -+ fimd_dp_clock_enable(dp_to_crtc(dp), false);
4015 -+
4016 - if (dp->panel) {
4017 - if (drm_panel_unprepare(dp->panel))
4018 - DRM_ERROR("failed to turnoff the panel\n");
4019 -diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
4020 -index 33a10ce..5d58f6c 100644
4021 ---- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
4022 -+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
4023 -@@ -32,6 +32,7 @@
4024 - #include "exynos_drm_fbdev.h"
4025 - #include "exynos_drm_crtc.h"
4026 - #include "exynos_drm_iommu.h"
4027 -+#include "exynos_drm_fimd.h"
4028 -
4029 - /*
4030 - * FIMD stands for Fully Interactive Mobile Display and
4031 -@@ -1233,6 +1234,24 @@ static int fimd_remove(struct platform_device *pdev)
4032 - return 0;
4033 - }
4034 -
4035 -+void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
4036 -+{
4037 -+ struct fimd_context *ctx = crtc->ctx;
4038 -+ u32 val;
4039 -+
4040 -+ /*
4041 -+ * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
4042 -+ * clock. On these SoCs the bootloader may enable it, but any
4043 -+ * power domain off/on will reset it to the disabled state.
4044 -+ */
4045 -+ if (ctx->driver_data != &exynos5_fimd_driver_data)
4046 -+ return;
4047 -+
4048 -+ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
4049 -+ writel(val, ctx->regs + DP_MIE_CLKCON);
4050 -+}
4051 -+EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
4052 -+
4053 - struct platform_driver fimd_driver = {
4054 - .probe = fimd_probe,
4055 - .remove = fimd_remove,
4056 -diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
4057 -new file mode 100644
4058 -index 0000000..b4fcaa5
4059 ---- /dev/null
4060 -+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
4061 -@@ -0,0 +1,15 @@
4062 -+/*
4063 -+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
4064 -+ *
4065 -+ * This program is free software; you can redistribute it and/or modify it
4066 -+ * under the terms of the GNU General Public License as published by the
4067 -+ * Free Software Foundation; either version 2 of the License, or (at your
4068 -+ * option) any later version.
4069 -+ */
4070 -+
4071 -+#ifndef _EXYNOS_DRM_FIMD_H_
4072 -+#define _EXYNOS_DRM_FIMD_H_
4073 -+
4074 -+extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
4075 -+
4076 -+#endif /* _EXYNOS_DRM_FIMD_H_ */
4077 -diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
4078 -index fa140e0..60ab1f7 100644
4079 ---- a/drivers/gpu/drm/i2c/adv7511.c
4080 -+++ b/drivers/gpu/drm/i2c/adv7511.c
4081 -@@ -33,6 +33,7 @@ struct adv7511 {
4082 -
4083 - unsigned int current_edid_segment;
4084 - uint8_t edid_buf[256];
4085 -+ bool edid_read;
4086 -
4087 - wait_queue_head_t wq;
4088 - struct drm_encoder *encoder;
4089 -@@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
4090 - return false;
4091 - }
4092 -
4093 --static irqreturn_t adv7511_irq_handler(int irq, void *devid)
4094 --{
4095 -- struct adv7511 *adv7511 = devid;
4096 --
4097 -- if (adv7511_hpd(adv7511))
4098 -- drm_helper_hpd_irq_event(adv7511->encoder->dev);
4099 --
4100 -- wake_up_all(&adv7511->wq);
4101 --
4102 -- return IRQ_HANDLED;
4103 --}
4104 --
4105 --static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
4106 -- unsigned int irq)
4107 -+static int adv7511_irq_process(struct adv7511 *adv7511)
4108 - {
4109 - unsigned int irq0, irq1;
4110 -- unsigned int pending;
4111 - int ret;
4112 -
4113 - ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
4114 - if (ret < 0)
4115 -- return 0;
4116 -+ return ret;
4117 -+
4118 - ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
4119 - if (ret < 0)
4120 -- return 0;
4121 -+ return ret;
4122 -+
4123 -+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
4124 -+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
4125 -+
4126 -+ if (irq0 & ADV7511_INT0_HDP)
4127 -+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
4128 -+
4129 -+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
4130 -+ adv7511->edid_read = true;
4131 -+
4132 -+ if (adv7511->i2c_main->irq)
4133 -+ wake_up_all(&adv7511->wq);
4134 -+ }
4135 -+
4136 -+ return 0;
4137 -+}
4138 -
4139 -- pending = (irq1 << 8) | irq0;
4140 -+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
4141 -+{
4142 -+ struct adv7511 *adv7511 = devid;
4143 -+ int ret;
4144 -
4145 -- return pending & irq;
4146 -+ ret = adv7511_irq_process(adv7511);
4147 -+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
4148 - }
4149 -
4150 --static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
4151 -- int timeout)
4152 -+/* -----------------------------------------------------------------------------
4153 -+ * EDID retrieval
4154 -+ */
4155 -+
4156 -+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
4157 - {
4158 -- unsigned int pending;
4159 - int ret;
4160 -
4161 - if (adv7511->i2c_main->irq) {
4162 - ret = wait_event_interruptible_timeout(adv7511->wq,
4163 -- adv7511_is_interrupt_pending(adv7511, irq),
4164 -- msecs_to_jiffies(timeout));
4165 -- if (ret <= 0)
4166 -- return 0;
4167 -- pending = adv7511_is_interrupt_pending(adv7511, irq);
4168 -+ adv7511->edid_read, msecs_to_jiffies(timeout));
4169 - } else {
4170 -- if (timeout < 25)
4171 -- timeout = 25;
4172 -- do {
4173 -- pending = adv7511_is_interrupt_pending(adv7511, irq);
4174 -- if (pending)
4175 -+ for (; timeout > 0; timeout -= 25) {
4176 -+ ret = adv7511_irq_process(adv7511);
4177 -+ if (ret < 0)
4178 - break;
4179 -+
4180 -+ if (adv7511->edid_read)
4181 -+ break;
4182 -+
4183 - msleep(25);
4184 -- timeout -= 25;
4185 -- } while (timeout >= 25);
4186 -+ }
4187 - }
4188 -
4189 -- return pending;
4190 -+ return adv7511->edid_read ? 0 : -EIO;
4191 - }
4192 -
4193 --/* -----------------------------------------------------------------------------
4194 -- * EDID retrieval
4195 -- */
4196 --
4197 - static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
4198 - size_t len)
4199 - {
4200 -@@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
4201 - return ret;
4202 -
4203 - if (status != 2) {
4204 -+ adv7511->edid_read = false;
4205 - regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
4206 - block);
4207 -- ret = adv7511_wait_for_interrupt(adv7511,
4208 -- ADV7511_INT0_EDID_READY |
4209 -- ADV7511_INT1_DDC_ERROR, 200);
4210 --
4211 -- if (!(ret & ADV7511_INT0_EDID_READY))
4212 -- return -EIO;
4213 -+ ret = adv7511_wait_for_edid(adv7511, 200);
4214 -+ if (ret < 0)
4215 -+ return ret;
4216 - }
4217 -
4218 -- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
4219 -- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
4220 --
4221 - /* Break this apart, hopefully more I2C controllers will
4222 - * support 64 byte transfers than 256 byte transfers
4223 - */
4224 -@@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
4225 - /* Reading the EDID only works if the device is powered */
4226 - if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
4227 - regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
4228 -- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
4229 -+ ADV7511_INT0_EDID_READY);
4230 -+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
4231 -+ ADV7511_INT1_DDC_ERROR);
4232 - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
4233 - ADV7511_POWER_POWER_DOWN, 0);
4234 - adv7511->current_edid_segment = -1;
4235 -@@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
4236 - adv7511->current_edid_segment = -1;
4237 -
4238 - regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
4239 -- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
4240 -+ ADV7511_INT0_EDID_READY);
4241 -+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
4242 -+ ADV7511_INT1_DDC_ERROR);
4243 - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
4244 - ADV7511_POWER_POWER_DOWN, 0);
4245 - /*
4246 -diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
4247 -index 5c66b56..ec4d932 100644
4248 ---- a/drivers/gpu/drm/i915/i915_drv.c
4249 -+++ b/drivers/gpu/drm/i915/i915_drv.c
4250 -@@ -1042,7 +1042,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
4251 - s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
4252 -
4253 - s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
4254 -- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
4255 -+ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
4256 -
4257 - s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
4258 - s->ecochk = I915_READ(GAM_ECOCHK);
4259 -@@ -1124,7 +1124,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
4260 - I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
4261 -
4262 - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
4263 -- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
4264 -+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
4265 -
4266 - I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
4267 - I915_WRITE(GAM_ECOCHK, s->ecochk);
4268 -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
4269 -index ede5bbb..07320cb 100644
4270 ---- a/drivers/gpu/drm/i915/i915_irq.c
4271 -+++ b/drivers/gpu/drm/i915/i915_irq.c
4272 -@@ -3718,14 +3718,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
4273 - ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4274 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4275 - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4276 -- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4277 -- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4278 -+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4279 - I915_WRITE16(IMR, dev_priv->irq_mask);
4280 -
4281 - I915_WRITE16(IER,
4282 - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4283 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4284 -- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4285 - I915_USER_INTERRUPT);
4286 - POSTING_READ16(IER);
4287 -
4288 -@@ -3887,14 +3885,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
4289 - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4290 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4291 - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4292 -- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4293 -- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4294 -+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4295 -
4296 - enable_mask =
4297 - I915_ASLE_INTERRUPT |
4298 - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4299 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4300 -- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4301 - I915_USER_INTERRUPT;
4302 -
4303 - if (I915_HAS_HOTPLUG(dev)) {
4304 -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
4305 -index 33b3d0a2..f536ff2 100644
4306 ---- a/drivers/gpu/drm/i915/i915_reg.h
4307 -+++ b/drivers/gpu/drm/i915/i915_reg.h
4308 -@@ -1740,6 +1740,7 @@ enum punit_power_well {
4309 - #define GMBUS_CYCLE_INDEX (2<<25)
4310 - #define GMBUS_CYCLE_STOP (4<<25)
4311 - #define GMBUS_BYTE_COUNT_SHIFT 16
4312 -+#define GMBUS_BYTE_COUNT_MAX 256U
4313 - #define GMBUS_SLAVE_INDEX_SHIFT 8
4314 - #define GMBUS_SLAVE_ADDR_SHIFT 1
4315 - #define GMBUS_SLAVE_READ (1<<0)
4316 -diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
4317 -index b31088a..56e437e 100644
4318 ---- a/drivers/gpu/drm/i915/intel_i2c.c
4319 -+++ b/drivers/gpu/drm/i915/intel_i2c.c
4320 -@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
4321 - }
4322 -
4323 - static int
4324 --gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
4325 -- u32 gmbus1_index)
4326 -+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
4327 -+ unsigned short addr, u8 *buf, unsigned int len,
4328 -+ u32 gmbus1_index)
4329 - {
4330 - int reg_offset = dev_priv->gpio_mmio_base;
4331 -- u16 len = msg->len;
4332 -- u8 *buf = msg->buf;
4333 -
4334 - I915_WRITE(GMBUS1 + reg_offset,
4335 - gmbus1_index |
4336 - GMBUS_CYCLE_WAIT |
4337 - (len << GMBUS_BYTE_COUNT_SHIFT) |
4338 -- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
4339 -+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
4340 - GMBUS_SLAVE_READ | GMBUS_SW_RDY);
4341 - while (len) {
4342 - int ret;
4343 -@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
4344 - }
4345 -
4346 - static int
4347 --gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
4348 -+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
4349 -+ u32 gmbus1_index)
4350 - {
4351 -- int reg_offset = dev_priv->gpio_mmio_base;
4352 -- u16 len = msg->len;
4353 - u8 *buf = msg->buf;
4354 -+ unsigned int rx_size = msg->len;
4355 -+ unsigned int len;
4356 -+ int ret;
4357 -+
4358 -+ do {
4359 -+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
4360 -+
4361 -+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
4362 -+ buf, len, gmbus1_index);
4363 -+ if (ret)
4364 -+ return ret;
4365 -+
4366 -+ rx_size -= len;
4367 -+ buf += len;
4368 -+ } while (rx_size != 0);
4369 -+
4370 -+ return 0;
4371 -+}
4372 -+
4373 -+static int
4374 -+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
4375 -+ unsigned short addr, u8 *buf, unsigned int len)
4376 -+{
4377 -+ int reg_offset = dev_priv->gpio_mmio_base;
4378 -+ unsigned int chunk_size = len;
4379 - u32 val, loop;
4380 -
4381 - val = loop = 0;
4382 -@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
4383 - I915_WRITE(GMBUS3 + reg_offset, val);
4384 - I915_WRITE(GMBUS1 + reg_offset,
4385 - GMBUS_CYCLE_WAIT |
4386 -- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
4387 -- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
4388 -+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
4389 -+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
4390 - GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
4391 - while (len) {
4392 - int ret;
4393 -@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
4394 - if (ret)
4395 - return ret;
4396 - }
4397 -+
4398 -+ return 0;
4399 -+}
4400 -+
4401 -+static int
4402 -+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
4403 -+{
4404 -+ u8 *buf = msg->buf;
4405 -+ unsigned int tx_size = msg->len;
4406 -+ unsigned int len;
4407 -+ int ret;
4408 -+
4409 -+ do {
4410 -+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
4411 -+
4412 -+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
4413 -+ if (ret)
4414 -+ return ret;
4415 -+
4416 -+ buf += len;
4417 -+ tx_size -= len;
4418 -+ } while (tx_size != 0);
4419 -+
4420 - return 0;
4421 - }
4422 -
4423 -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
4424 -index 86807ee..9bd5611 100644
4425 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c
4426 -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
4427 -@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
4428 - misc |= ATOM_COMPOSITESYNC;
4429 - if (mode->flags & DRM_MODE_FLAG_INTERLACE)
4430 - misc |= ATOM_INTERLACE;
4431 -- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
4432 -+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
4433 - misc |= ATOM_DOUBLE_CLOCK_MODE;
4434 -+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
4435 -+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
4436 -
4437 - args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
4438 - args.ucCRTC = radeon_crtc->crtc_id;
4439 -@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
4440 - misc |= ATOM_COMPOSITESYNC;
4441 - if (mode->flags & DRM_MODE_FLAG_INTERLACE)
4442 - misc |= ATOM_INTERLACE;
4443 -- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
4444 -+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
4445 - misc |= ATOM_DOUBLE_CLOCK_MODE;
4446 -+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
4447 -+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
4448 -
4449 - args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
4450 - args.ucCRTC = radeon_crtc->crtc_id;
4451 -diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
4452 -index 9c47867..7fe5590 100644
4453 ---- a/drivers/hid/hid-ids.h
4454 -+++ b/drivers/hid/hid-ids.h
4455 -@@ -459,6 +459,10 @@
4456 - #define USB_DEVICE_ID_UGCI_FLYING 0x0020
4457 - #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
4458 -
4459 -+#define USB_VENDOR_ID_HP 0x03f0
4460 -+#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE 0x0a4a
4461 -+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
4462 -+
4463 - #define USB_VENDOR_ID_HUION 0x256c
4464 - #define USB_DEVICE_ID_HUION_TABLET 0x006e
4465 -
4466 -diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
4467 -index a821277..4e3ae9f 100644
4468 ---- a/drivers/hid/usbhid/hid-quirks.c
4469 -+++ b/drivers/hid/usbhid/hid-quirks.c
4470 -@@ -78,6 +78,8 @@ static const struct hid_blacklist {
4471 - { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
4472 - { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
4473 - { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
4474 -+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
4475 -+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
4476 - { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
4477 - { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
4478 - { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
4479 -diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
4480 -index 2978f5e..00bc30e 100644
4481 ---- a/drivers/hv/channel.c
4482 -+++ b/drivers/hv/channel.c
4483 -@@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
4484 - GFP_KERNEL);
4485 - if (!open_info) {
4486 - err = -ENOMEM;
4487 -- goto error0;
4488 -+ goto error_gpadl;
4489 - }
4490 -
4491 - init_completion(&open_info->waitevent);
4492 -@@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
4493 -
4494 - if (userdatalen > MAX_USER_DEFINED_BYTES) {
4495 - err = -EINVAL;
4496 -- goto error0;
4497 -+ goto error_gpadl;
4498 - }
4499 -
4500 - if (userdatalen)
4501 -@@ -195,6 +195,9 @@ error1:
4502 - list_del(&open_info->msglistentry);
4503 - spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
4504 -
4505 -+error_gpadl:
4506 -+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
4507 -+
4508 - error0:
4509 - free_pages((unsigned long)out,
4510 - get_order(send_ringbuffer_size + recv_ringbuffer_size));
4511 -diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
4512 -index 5f96b1b..019d542 100644
4513 ---- a/drivers/i2c/busses/i2c-rk3x.c
4514 -+++ b/drivers/i2c/busses/i2c-rk3x.c
4515 -@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
4516 - clk_disable(i2c->clk);
4517 - spin_unlock_irqrestore(&i2c->lock, flags);
4518 -
4519 -- return ret;
4520 -+ return ret < 0 ? ret : num;
4521 - }
4522 -
4523 - static u32 rk3x_i2c_func(struct i2c_adapter *adap)
4524 -diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
4525 -index edf274c..8143162 100644
4526 ---- a/drivers/i2c/i2c-core.c
4527 -+++ b/drivers/i2c/i2c-core.c
4528 -@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
4529 - adap->bus_recovery_info->set_scl(adap, 1);
4530 - return i2c_generic_recovery(adap);
4531 - }
4532 -+EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
4533 -
4534 - int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
4535 - {
4536 -@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
4537 -
4538 - return ret;
4539 - }
4540 -+EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
4541 -
4542 - int i2c_recover_bus(struct i2c_adapter *adap)
4543 - {
4544 -@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
4545 - dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
4546 - return adap->bus_recovery_info->recover_bus(adap);
4547 - }
4548 -+EXPORT_SYMBOL_GPL(i2c_recover_bus);
4549 -
4550 - static int i2c_device_probe(struct device *dev)
4551 - {
4552 -@@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
4553 -
4554 - dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
4555 -
4556 -+ pm_runtime_no_callbacks(&adap->dev);
4557 -+
4558 - #ifdef CONFIG_I2C_COMPAT
4559 - res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
4560 - adap->dev.parent);
4561 -diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
4562 -index 593f7ca..06cc1ff 100644
4563 ---- a/drivers/i2c/i2c-mux.c
4564 -+++ b/drivers/i2c/i2c-mux.c
4565 -@@ -32,8 +32,9 @@ struct i2c_mux_priv {
4566 - struct i2c_algorithm algo;
4567 -
4568 - struct i2c_adapter *parent;
4569 -- void *mux_priv; /* the mux chip/device */
4570 -- u32 chan_id; /* the channel id */
4571 -+ struct device *mux_dev;
4572 -+ void *mux_priv;
4573 -+ u32 chan_id;
4574 -
4575 - int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
4576 - int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
4577 -@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
4578 -
4579 - /* Set up private adapter data */
4580 - priv->parent = parent;
4581 -+ priv->mux_dev = mux_dev;
4582 - priv->mux_priv = mux_priv;
4583 - priv->chan_id = chan_id;
4584 - priv->select = select;
4585 -@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
4586 - char symlink_name[20];
4587 -
4588 - snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
4589 -- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
4590 -+ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
4591 -
4592 - sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
4593 - i2c_del_adapter(adap);
4594 -diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
4595 -index b0e5852..44d1d79 100644
4596 ---- a/drivers/idle/intel_idle.c
4597 -+++ b/drivers/idle/intel_idle.c
4598 -@@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = {
4599 - .enter = &intel_idle,
4600 - .enter_freeze = intel_idle_freeze, },
4601 - {
4602 -- .name = "C1E-BYT",
4603 -- .desc = "MWAIT 0x01",
4604 -- .flags = MWAIT2flg(0x01),
4605 -- .exit_latency = 15,
4606 -- .target_residency = 30,
4607 -- .enter = &intel_idle,
4608 -- .enter_freeze = intel_idle_freeze, },
4609 -- {
4610 - .name = "C6N-BYT",
4611 - .desc = "MWAIT 0x58",
4612 - .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
4613 -- .exit_latency = 40,
4614 -+ .exit_latency = 300,
4615 - .target_residency = 275,
4616 - .enter = &intel_idle,
4617 - .enter_freeze = intel_idle_freeze, },
4618 -@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
4619 - .name = "C6S-BYT",
4620 - .desc = "MWAIT 0x52",
4621 - .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
4622 -- .exit_latency = 140,
4623 -+ .exit_latency = 500,
4624 - .target_residency = 560,
4625 - .enter = &intel_idle,
4626 - .enter_freeze = intel_idle_freeze, },
4627 -@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
4628 - .desc = "MWAIT 0x60",
4629 - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
4630 - .exit_latency = 1200,
4631 -- .target_residency = 1500,
4632 -+ .target_residency = 4000,
4633 - .enter = &intel_idle,
4634 - .enter_freeze = intel_idle_freeze, },
4635 - {
4636 -diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
4637 -index 8c014b5..38acb3c 100644
4638 ---- a/drivers/infiniband/core/umem.c
4639 -+++ b/drivers/infiniband/core/umem.c
4640 -@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
4641 - if (dmasync)
4642 - dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
4643 -
4644 -+ if (!size)
4645 -+ return ERR_PTR(-EINVAL);
4646 -+
4647 - /*
4648 - * If the combination of the addr and size requested for this memory
4649 - * region causes an integer overflow, return error.
4650 - */
4651 -- if ((PAGE_ALIGN(addr + size) <= size) ||
4652 -- (PAGE_ALIGN(addr + size) <= addr))
4653 -+ if (((addr + size) < addr) ||
4654 -+ PAGE_ALIGN(addr + size) < (addr + size))
4655 - return ERR_PTR(-EINVAL);
4656 -
4657 - if (!can_do_mlock())
4658 -diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
4659 -index ed2bd67..fbde33a 100644
4660 ---- a/drivers/infiniband/hw/mlx4/qp.c
4661 -+++ b/drivers/infiniband/hw/mlx4/qp.c
4662 -@@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
4663 -
4664 - memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
4665 -
4666 -- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
4667 -- wr->wr.ud.hlen);
4668 -+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
4669 - *lso_seg_len = halign;
4670 - return 0;
4671 - }
4672 -diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
4673 -index 20e859a..76eb57b 100644
4674 ---- a/drivers/infiniband/ulp/iser/iser_initiator.c
4675 -+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
4676 -@@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
4677 - if (scsi_prot_sg_count(sc)) {
4678 - prot_buf->buf = scsi_prot_sglist(sc);
4679 - prot_buf->size = scsi_prot_sg_count(sc);
4680 -- prot_buf->data_len = data_buf->data_len >>
4681 -- ilog2(sc->device->sector_size) * 8;
4682 -+ prot_buf->data_len = (data_buf->data_len >>
4683 -+ ilog2(sc->device->sector_size)) * 8;
4684 - }
4685 -
4686 - if (hdr->flags & ISCSI_FLAG_CMD_READ) {
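
The iser fix is pure C operator precedence: '*' binds tighter than '>>', so the old expression shifted by ilog2(sector_size) * 8 instead of multiplying the shifted value by 8. A tiny self-contained illustration (shift and multiplier values are arbitrary):

    #include <stdio.h>

    int main(void)
    {
        unsigned int v = 1024;
        int s = 2;

        /* '*' binds tighter than '>>', so this is v >> (s * 4) */
        printf("%u\n", v >> s * 4);      /* prints 4    */
        /* the intended grouping needs explicit parentheses */
        printf("%u\n", (v >> s) * 4);    /* prints 1024 */
        return 0;
    }
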
4687 -diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
4688 -index 075b19c..147029a 100644
4689 ---- a/drivers/infiniband/ulp/isert/ib_isert.c
4690 -+++ b/drivers/infiniband/ulp/isert/ib_isert.c
4691 -@@ -222,7 +222,7 @@ fail:
4692 - static void
4693 - isert_free_rx_descriptors(struct isert_conn *isert_conn)
4694 - {
4695 -- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
4696 -+ struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
4697 - struct iser_rx_desc *rx_desc;
4698 - int i;
4699 -
4700 -@@ -719,8 +719,8 @@ out:
4701 - static void
4702 - isert_connect_release(struct isert_conn *isert_conn)
4703 - {
4704 -- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
4705 - struct isert_device *device = isert_conn->conn_device;
4706 -+ struct ib_device *ib_dev = device->ib_device;
4707 -
4708 - isert_dbg("conn %p\n", isert_conn);
4709 -
4710 -@@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn)
4711 - isert_conn_free_fastreg_pool(isert_conn);
4712 -
4713 - isert_free_rx_descriptors(isert_conn);
4714 -- rdma_destroy_id(isert_conn->conn_cm_id);
4715 -+ if (isert_conn->conn_cm_id)
4716 -+ rdma_destroy_id(isert_conn->conn_cm_id);
4717 -
4718 - if (isert_conn->conn_qp) {
4719 - struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
4720 -@@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
4721 - return 0;
4722 - }
4723 -
4724 --static void
4725 -+static int
4726 - isert_connect_error(struct rdma_cm_id *cma_id)
4727 - {
4728 - struct isert_conn *isert_conn = cma_id->qp->qp_context;
4729 -
4730 -+ isert_conn->conn_cm_id = NULL;
4731 - isert_put_conn(isert_conn);
4732 -+
4733 -+ return -1;
4734 - }
4735 -
4736 - static int
4737 -@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
4738 - case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
4739 - case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
4740 - case RDMA_CM_EVENT_CONNECT_ERROR:
4741 -- isert_connect_error(cma_id);
4742 -+ ret = isert_connect_error(cma_id);
4743 - break;
4744 - default:
4745 - isert_err("Unhandled RDMA CMA event: %d\n", event->event);
4746 -@@ -1861,11 +1865,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
4747 - cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
4748 - spin_unlock_bh(&cmd->istate_lock);
4749 -
4750 -- if (ret)
4751 -+ if (ret) {
4752 -+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
4753 - transport_send_check_condition_and_sense(se_cmd,
4754 - se_cmd->pi_err, 0);
4755 -- else
4756 -+ } else {
4757 - target_execute_cmd(se_cmd);
4758 -+ }
4759 - }
4760 -
4761 - static void
4762 -diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
4763 -index 27bcdbc..ea6cb64 100644
4764 ---- a/drivers/input/mouse/alps.c
4765 -+++ b/drivers/input/mouse/alps.c
4766 -@@ -1159,13 +1159,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
4767 - bool report_buttons)
4768 - {
4769 - struct alps_data *priv = psmouse->private;
4770 -- struct input_dev *dev;
4771 -+ struct input_dev *dev, *dev2 = NULL;
4772 -
4773 - /* Figure out which device to use to report the bare packet */
4774 - if (priv->proto_version == ALPS_PROTO_V2 &&
4775 - (priv->flags & ALPS_DUALPOINT)) {
4776 - /* On V2 devices the DualPoint Stick reports bare packets */
4777 - dev = priv->dev2;
4778 -+ dev2 = psmouse->dev;
4779 - } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
4780 - /* Register dev3 mouse if we received PS/2 packet first time */
4781 - if (!IS_ERR(priv->dev3))
4782 -@@ -1177,7 +1178,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
4783 - }
4784 -
4785 - if (report_buttons)
4786 -- alps_report_buttons(dev, NULL,
4787 -+ alps_report_buttons(dev, dev2,
4788 - packet[0] & 1, packet[0] & 2, packet[0] & 4);
4789 -
4790 - input_report_rel(dev, REL_X,
4791 -diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
4792 -index 6e22682..991dc6b 100644
4793 ---- a/drivers/input/mouse/elantech.c
4794 -+++ b/drivers/input/mouse/elantech.c
4795 -@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
4796 - }
4797 -
4798 - /*
4799 -+ * This writes the reg_07 value again to the hardware at the end of every
4800 -+ * set_rate call because the register loses its value. reg_07 allows setting
4801 -+ * absolute mode on v4 hardware
4802 -+ */
4803 -+static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
4804 -+ unsigned int rate)
4805 -+{
4806 -+ struct elantech_data *etd = psmouse->private;
4807 -+
4808 -+ etd->original_set_rate(psmouse, rate);
4809 -+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
4810 -+ psmouse_err(psmouse, "restoring reg_07 failed\n");
4811 -+}
4812 -+
4813 -+/*
4814 - * Put the touchpad into absolute mode
4815 - */
4816 - static int elantech_set_absolute_mode(struct psmouse *psmouse)
4817 -@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
4818 - * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
4819 - * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
4820 - * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
4821 -+ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
4822 -+ * Asus X750JN 0x381f17 10, 14, 0e clickpad
4823 - * Asus UX31 0x361f00 20, 15, 0e clickpad
4824 - * Asus UX32VD 0x361f02 00, 15, 0e clickpad
4825 - * Avatar AVIU-145A2 0x361f00 ? clickpad
4826 -@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
4827 - goto init_fail;
4828 - }
4829 -
4830 -+ if (etd->fw_version == 0x381f17) {
4831 -+ etd->original_set_rate = psmouse->set_rate;
4832 -+ psmouse->set_rate = elantech_set_rate_restore_reg_07;
4833 -+ }
4834 -+
4835 - if (elantech_set_input_params(psmouse)) {
4836 - psmouse_err(psmouse, "failed to query touchpad range.\n");
4837 - goto init_fail;
4838 -diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
4839 -index 6f3afec..f965d15 100644
4840 ---- a/drivers/input/mouse/elantech.h
4841 -+++ b/drivers/input/mouse/elantech.h
4842 -@@ -142,6 +142,7 @@ struct elantech_data {
4843 - struct finger_pos mt[ETP_MAX_FINGERS];
4844 - unsigned char parity[256];
4845 - int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
4846 -+ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
4847 - };
4848 -
4849 - #ifdef CONFIG_MOUSE_PS2_ELANTECH
4850 -diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
4851 -index 713a962..41473929 100644
4852 ---- a/drivers/md/dm-crypt.c
4853 -+++ b/drivers/md/dm-crypt.c
4854 -@@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc,
4855 -
4856 - switch (r) {
4857 - /* async */
4858 -+ case -EINPROGRESS:
4859 - case -EBUSY:
4860 - wait_for_completion(&ctx->restart);
4861 - reinit_completion(&ctx->restart);
4862 -- /* fall through*/
4863 -- case -EINPROGRESS:
4864 - ctx->req = NULL;
4865 - ctx->cc_sector++;
4866 - continue;
4867 -@@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
4868 - struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
4869 - struct crypt_config *cc = io->cc;
4870 -
4871 -- if (error == -EINPROGRESS) {
4872 -- complete(&ctx->restart);
4873 -+ if (error == -EINPROGRESS)
4874 - return;
4875 -- }
4876 -
4877 - if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
4878 - error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
4879 -@@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
4880 - crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
4881 -
4882 - if (!atomic_dec_and_test(&ctx->cc_pending))
4883 -- return;
4884 -+ goto done;
4885 -
4886 - if (bio_data_dir(io->base_bio) == READ)
4887 - kcryptd_crypt_read_done(io);
4888 - else
4889 - kcryptd_crypt_write_io_submit(io, 1);
4890 -+done:
4891 -+ if (!completion_done(&ctx->restart))
4892 -+ complete(&ctx->restart);
4893 - }
4894 -
4895 - static void kcryptd_crypt(struct work_struct *work)
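
The dm-crypt reordering makes -EBUSY and -EINPROGRESS take the same wait path, and the async callback now signals ctx->restart on every completion (guarded by completion_done() to avoid double-completes), so the submitter can no longer sleep forever on a missed event. A user-space analogue of that submit/wait/callback handshake, with a POSIX semaphore standing in for struct completion (build with -pthread):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t restart;

    static void *async_callback(void *arg)
    {
        (void)arg;
        /* ... finish the async request ... */
        sem_post(&restart);          /* complete(&ctx->restart) analogue */
        return NULL;
    }

    int main(void)
    {
        pthread_t cb;

        sem_init(&restart, 0, 0);
        pthread_create(&cb, NULL, async_callback, NULL);
        sem_wait(&restart);          /* wait_for_completion() analogue */
        pthread_join(cb, NULL);
        puts("request completed");
        return 0;
    }
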
4896 -diff --git a/drivers/md/md.c b/drivers/md/md.c
4897 -index 717daad..e617878 100644
4898 ---- a/drivers/md/md.c
4899 -+++ b/drivers/md/md.c
4900 -@@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
4901 - const int rw = bio_data_dir(bio);
4902 - struct mddev *mddev = q->queuedata;
4903 - unsigned int sectors;
4904 -+ int cpu;
4905 -
4906 - if (mddev == NULL || mddev->pers == NULL
4907 - || !mddev->ready) {
4908 -@@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
4909 - sectors = bio_sectors(bio);
4910 - mddev->pers->make_request(mddev, bio);
4911 -
4912 -- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
4913 -+ cpu = part_stat_lock();
4914 -+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
4915 -+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
4916 -+ part_stat_unlock();
4917 -
4918 - if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
4919 - wake_up(&mddev->sb_wait);
4920 -diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
4921 -index 3ed9f42..3b5d7f7 100644
4922 ---- a/drivers/md/raid0.c
4923 -+++ b/drivers/md/raid0.c
4924 -@@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
4925 -
4926 - /*
4927 - * remaps the bio to the target device. we separate two flows.
4928 -- * power 2 flow and a general flow for the sake of perfromance
4929 -+ * power 2 flow and a general flow for the sake of performance
4930 - */
4931 - static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
4932 - sector_t sector, sector_t *sector_offset)
4933 -@@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
4934 - split = bio;
4935 - }
4936 -
4937 -+ sector = bio->bi_iter.bi_sector;
4938 - zone = find_zone(mddev->private, &sector);
4939 - tmp_dev = map_sector(mddev, zone, sector, &sector);
4940 - split->bi_bdev = tmp_dev->bdev;
4941 -diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
4942 -index 77c78de..7020659 100644
4943 ---- a/drivers/media/rc/img-ir/img-ir-core.c
4944 -+++ b/drivers/media/rc/img-ir/img-ir-core.c
4945 -@@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
4946 - {
4947 - struct img_ir_priv *priv = platform_get_drvdata(pdev);
4948 -
4949 -- free_irq(priv->irq, img_ir_isr);
4950 -+ free_irq(priv->irq, priv);
4951 - img_ir_remove_hw(priv);
4952 - img_ir_remove_raw(priv);
4953 -
4954 -diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
4955 -index 65a326c..749ad56 100644
4956 ---- a/drivers/media/usb/stk1160/stk1160-v4l.c
4957 -+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
4958 -@@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
4959 - if (mutex_lock_interruptible(&dev->v4l_lock))
4960 - return -ERESTARTSYS;
4961 -
4962 -+ /*
4963 -+ * Once URBs are cancelled, the URB complete handler
4964 -+ * won't be running. This is required to safely release the
4965 -+ * current buffer (dev->isoc_ctl.buf).
4966 -+ */
4967 - stk1160_cancel_isoc(dev);
4968 -
4969 - /*
4970 -@@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
4971 - stk1160_info("buffer [%p/%d] aborted\n",
4972 - buf, buf->vb.v4l2_buf.index);
4973 - }
4974 -- /* It's important to clear current buffer */
4975 -- dev->isoc_ctl.buf = NULL;
4976 -+
4977 -+ /* It's important to release the current buffer */
4978 -+ if (dev->isoc_ctl.buf) {
4979 -+ buf = dev->isoc_ctl.buf;
4980 -+ dev->isoc_ctl.buf = NULL;
4981 -+
4982 -+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
4983 -+ stk1160_info("buffer [%p/%d] aborted\n",
4984 -+ buf, buf->vb.v4l2_buf.index);
4985 -+ }
4986 - spin_unlock_irqrestore(&dev->buf_lock, flags);
4987 - }
4988 -
4989 -diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
4990 -index fc145d2..922a750 100644
4991 ---- a/drivers/memstick/core/mspro_block.c
4992 -+++ b/drivers/memstick/core/mspro_block.c
4993 -@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
4994 -
4995 - if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
4996 - if (msb->data_dir == READ) {
4997 -- for (cnt = 0; cnt < msb->current_seg; cnt++)
4998 -+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
4999 - t_len += msb->req_sg[cnt].length
5000 - / msb->page_size;
5001 -
5002 -@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
5003 - t_len += msb->current_page - 1;
5004 -
5005 - t_len *= msb->page_size;
5006 -+ }
5007 - }
5008 - } else
5009 - t_len = blk_rq_bytes(msb->block_req);
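
The mspro_block hunk is a missing-braces fix: without braces a for (or if) controls only the next statement, and the rest of the visually indented block runs unconditionally after the loop. A self-contained illustration of the pitfall (values are arbitrary):

    #include <stdio.h>

    int main(void)
    {
        int t_len = 0, lengths[3] = { 4, 4, 4 };

        for (int i = 0; i < 3; i++) {
            t_len += lengths[i];
            t_len *= 2;       /* meant to run per iteration */
        }
        /* prints 56; delete the braces and it prints 24, because the
         * multiply silently drops out of the loop body */
        printf("%d\n", t_len);
        return 0;
    }
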
5010 -diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
5011 -index 2a87f69..1aed3b7 100644
5012 ---- a/drivers/mfd/mfd-core.c
5013 -+++ b/drivers/mfd/mfd-core.c
5014 -@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
5015 - int platform_id;
5016 - int r;
5017 -
5018 -- if (id < 0)
5019 -+ if (id == PLATFORM_DEVID_AUTO)
5020 - platform_id = id;
5021 - else
5022 - platform_id = id + cell->id;
5023 -diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
5024 -index e8a4218..459ed1b 100644
5025 ---- a/drivers/mmc/host/sunxi-mmc.c
5026 -+++ b/drivers/mmc/host/sunxi-mmc.c
5027 -@@ -930,7 +930,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
5028 - return PTR_ERR(host->clk_sample);
5029 - }
5030 -
5031 -- host->reset = devm_reset_control_get(&pdev->dev, "ahb");
5032 -+ host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
5033 -+ if (PTR_ERR(host->reset) == -EPROBE_DEFER)
5034 -+ return PTR_ERR(host->reset);
5035 -
5036 - ret = clk_prepare_enable(host->clk_ahb);
5037 - if (ret) {
5038 -diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
5039 -index a31c357..dba7e1c 100644
5040 ---- a/drivers/mmc/host/tmio_mmc_pio.c
5041 -+++ b/drivers/mmc/host/tmio_mmc_pio.c
5042 -@@ -1073,8 +1073,6 @@ EXPORT_SYMBOL(tmio_mmc_host_alloc);
5043 - void tmio_mmc_host_free(struct tmio_mmc_host *host)
5044 - {
5045 - mmc_free_host(host->mmc);
5046 --
5047 -- host->mmc = NULL;
5048 - }
5049 - EXPORT_SYMBOL(tmio_mmc_host_free);
5050 -
5051 -diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
5052 -index 9d2e16f..b5e1548 100644
5053 ---- a/drivers/mtd/ubi/attach.c
5054 -+++ b/drivers/mtd/ubi/attach.c
5055 -@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
5056 - second_is_newer = !second_is_newer;
5057 - } else {
5058 - dbg_bld("PEB %d CRC is OK", pnum);
5059 -- bitflips = !!err;
5060 -+ bitflips |= !!err;
5061 - }
5062 - mutex_unlock(&ubi->buf_mutex);
5063 -
5064 -diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
5065 -index d647e50..d16fccf 100644
5066 ---- a/drivers/mtd/ubi/cdev.c
5067 -+++ b/drivers/mtd/ubi/cdev.c
5068 -@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
5069 - /* Validate the request */
5070 - err = -EINVAL;
5071 - if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
5072 -- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
5073 -+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
5074 - break;
5075 -
5076 - err = get_exclusive(desc);
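
The old UBI test compared req.lnum against usable_leb_size, a copy-and-paste slip that re-checked the logical block number and left req.bytes unbounded. A sketch of the corrected validation, with hypothetical constants standing in for the volume fields:

    #include <stdio.h>

    struct req { int lnum; long bytes; };

    /* hypothetical limits, mirroring the shape of the UBI check */
    #define RESERVED_PEBS   128
    #define USABLE_LEB_SIZE 126976L

    static int validate(const struct req *r)
    {
        if (r->lnum < 0 || r->lnum >= RESERVED_PEBS)
            return -1;
        /* the buggy code re-tested lnum here; bytes went unchecked */
        if (r->bytes < 0 || r->bytes > USABLE_LEB_SIZE)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct req bad = { .lnum = 5, .bytes = 1 << 20 };
        printf("%d\n", validate(&bad));   /* -1: oversized write refused */
        return 0;
    }
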
5077 -diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
5078 -index 16e34b3..8c9a710 100644
5079 ---- a/drivers/mtd/ubi/eba.c
5080 -+++ b/drivers/mtd/ubi/eba.c
5081 -@@ -1419,7 +1419,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
5082 - * during re-size.
5083 - */
5084 - ubi_move_aeb_to_list(av, aeb, &ai->erase);
5085 -- vol->eba_tbl[aeb->lnum] = aeb->pnum;
5086 -+ else
5087 -+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
5088 - }
5089 - }
5090 -
5091 -diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
5092 -index 8f7bde6..0bd92d8 100644
5093 ---- a/drivers/mtd/ubi/wl.c
5094 -+++ b/drivers/mtd/ubi/wl.c
5095 -@@ -1002,7 +1002,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
5096 - int shutdown)
5097 - {
5098 - int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
5099 -- int vol_id = -1, uninitialized_var(lnum);
5100 -+ int vol_id = -1, lnum = -1;
5101 - #ifdef CONFIG_MTD_UBI_FASTMAP
5102 - int anchor = wrk->anchor;
5103 - #endif
5104 -diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
5105 -index 81d4153..77bf133 100644
5106 ---- a/drivers/net/ethernet/cadence/macb.c
5107 -+++ b/drivers/net/ethernet/cadence/macb.c
5108 -@@ -2165,7 +2165,7 @@ static void macb_configure_caps(struct macb *bp)
5109 - }
5110 - }
5111 -
5112 -- if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
5113 -+ if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) >= 0x2)
5114 - bp->caps |= MACB_CAPS_MACB_IS_GEM;
5115 -
5116 - if (macb_is_gem(bp)) {
5117 -diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
5118 -index 7f997d3..a71c446 100644
5119 ---- a/drivers/net/ethernet/intel/e1000/e1000_main.c
5120 -+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
5121 -@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
5122 - static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
5123 - struct e1000_rx_ring *rx_ring,
5124 - int *work_done, int work_to_do);
5125 -+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
5126 -+ struct e1000_rx_ring *rx_ring,
5127 -+ int cleaned_count)
5128 -+{
5129 -+}
5130 - static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
5131 - struct e1000_rx_ring *rx_ring,
5132 - int cleaned_count);
5133 -@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5134 - msleep(1);
5135 - /* e1000_down has a dependency on max_frame_size */
5136 - hw->max_frame_size = max_frame;
5137 -- if (netif_running(netdev))
5138 -+ if (netif_running(netdev)) {
5139 -+ /* prevent buffers from being reallocated */
5140 -+ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
5141 - e1000_down(adapter);
5142 -+ }
5143 -
5144 - /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5145 - * means we reserve 2 more, this pushes us to allocate from the next
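
Installing e1000_alloc_dummy_rx_buffers before e1000_down() is a null-object swap: any refill path that still fires during teardown calls a harmless no-op instead of repopulating the RX ring. A minimal sketch of the pattern (the struct and names here are illustrative):

    #include <stdio.h>

    struct adapter {
        void (*alloc_rx_buf)(struct adapter *a, int count);
    };

    static void alloc_real(struct adapter *a, int count)
    {
        (void)a;
        printf("allocating %d buffers\n", count);
    }

    /* harmless stand-in installed before teardown */
    static void alloc_dummy(struct adapter *a, int count)
    {
        (void)a;
        (void)count;
    }

    int main(void)
    {
        struct adapter a = { .alloc_rx_buf = alloc_real };

        a.alloc_rx_buf(&a, 16);
        a.alloc_rx_buf = alloc_dummy;   /* quiesce before shutdown */
        a.alloc_rx_buf(&a, 16);         /* silently ignored */
        return 0;
    }
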
5146 -diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
5147 -index af829c5..7ace07d 100644
5148 ---- a/drivers/net/ethernet/marvell/pxa168_eth.c
5149 -+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
5150 -@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
5151 - np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
5152 - if (!np) {
5153 - dev_err(&pdev->dev, "missing phy-handle\n");
5154 -- return -EINVAL;
5155 -+ err = -EINVAL;
5156 -+ goto err_netdev;
5157 - }
5158 - of_property_read_u32(np, "reg", &pep->phy_addr);
5159 - pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
5160 -@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
5161 - pep->smi_bus = mdiobus_alloc();
5162 - if (pep->smi_bus == NULL) {
5163 - err = -ENOMEM;
5164 -- goto err_base;
5165 -+ goto err_netdev;
5166 - }
5167 - pep->smi_bus->priv = pep;
5168 - pep->smi_bus->name = "pxa168_eth smi";
5169 -@@ -1551,13 +1552,10 @@ err_mdiobus:
5170 - mdiobus_unregister(pep->smi_bus);
5171 - err_free_mdio:
5172 - mdiobus_free(pep->smi_bus);
5173 --err_base:
5174 -- iounmap(pep->base);
5175 - err_netdev:
5176 - free_netdev(dev);
5177 - err_clk:
5178 -- clk_disable(clk);
5179 -- clk_put(clk);
5180 -+ clk_disable_unprepare(clk);
5181 - return err;
5182 - }
5183 -
5184 -@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
5185 - if (pep->phy)
5186 - phy_disconnect(pep->phy);
5187 - if (pep->clk) {
5188 -- clk_disable(pep->clk);
5189 -- clk_put(pep->clk);
5190 -- pep->clk = NULL;
5191 -+ clk_disable_unprepare(pep->clk);
5192 - }
5193 -
5194 -- iounmap(pep->base);
5195 -- pep->base = NULL;
5196 - mdiobus_unregister(pep->smi_bus);
5197 - mdiobus_free(pep->smi_bus);
5198 - unregister_netdev(dev);
5199 -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
5200 -index a7b58ba..3dccf01 100644
5201 ---- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
5202 -+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
5203 -@@ -981,20 +981,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
5204 - struct mlx4_en_priv *priv = netdev_priv(dev);
5205 -
5206 - /* check if requested function is supported by the device */
5207 -- if ((hfunc == ETH_RSS_HASH_TOP &&
5208 -- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
5209 -- (hfunc == ETH_RSS_HASH_XOR &&
5210 -- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
5211 -- return -EINVAL;
5212 -+ if (hfunc == ETH_RSS_HASH_TOP) {
5213 -+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
5214 -+ return -EINVAL;
5215 -+ if (!(dev->features & NETIF_F_RXHASH))
5216 -+ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
5217 -+ return 0;
5218 -+ } else if (hfunc == ETH_RSS_HASH_XOR) {
5219 -+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
5220 -+ return -EINVAL;
5221 -+ if (dev->features & NETIF_F_RXHASH)
5222 -+ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
5223 -+ return 0;
5224 -+ }
5225 -
5226 -- priv->rss_hash_fn = hfunc;
5227 -- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
5228 -- en_warn(priv,
5229 -- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
5230 -- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
5231 -- en_warn(priv,
5232 -- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
5233 -- return 0;
5234 -+ return -EINVAL;
5235 - }
5236 -
5237 - static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
5238 -@@ -1068,6 +1069,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
5239 - priv->prof->rss_rings = rss_rings;
5240 - if (key)
5241 - memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
5242 -+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
5243 -+ priv->rss_hash_fn = hfunc;
5244 -
5245 - if (port_up) {
5246 - err = mlx4_en_start_port(dev);
5247 -diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
5248 -index af034db..9d15566 100644
5249 ---- a/drivers/net/ppp/ppp_generic.c
5250 -+++ b/drivers/net/ppp/ppp_generic.c
5251 -@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
5252 - {
5253 - /* note: a 0-length skb is used as an error indication */
5254 - if (skb->len > 0) {
5255 -+ skb_checksum_complete_unset(skb);
5256 - #ifdef CONFIG_PPP_MULTILINK
5257 - /* XXX do channel-level decompression here */
5258 - if (PPP_PROTO(skb) == PPP_MP)
5259 -diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
5260 -index 90a714c..23806c2 100644
5261 ---- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
5262 -+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
5263 -@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
5264 - {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
5265 - {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
5266 - {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
5267 -+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
5268 - {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
5269 - {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
5270 - {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
5271 -@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
5272 - {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
5273 - {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
5274 - {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
5275 -+ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
5276 - {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
5277 - {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
5278 - {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
5279 -diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
5280 -index c93fae9..5fbd223 100644
5281 ---- a/drivers/net/wireless/ti/wl18xx/debugfs.c
5282 -+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
5283 -@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
5284 - WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
5285 - WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
5286 -
5287 --WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
5288 -+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
5289 -
5290 - WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
5291 - AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
5292 -diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
5293 -index 0f2cfb0..bf14676 100644
5294 ---- a/drivers/net/wireless/ti/wlcore/debugfs.h
5295 -+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
5296 -@@ -26,8 +26,8 @@
5297 -
5298 - #include "wlcore.h"
5299 -
5300 --int wl1271_format_buffer(char __user *userbuf, size_t count,
5301 -- loff_t *ppos, char *fmt, ...);
5302 -+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
5303 -+ loff_t *ppos, char *fmt, ...);
5304 -
5305 - int wl1271_debugfs_init(struct wl1271 *wl);
5306 - void wl1271_debugfs_exit(struct wl1271 *wl);
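
Annotating wl1271_format_buffer with __printf(4, 5) tells the compiler that argument 4 is the format string and the variadic arguments begin at argument 5, so format mismatches like printing the rx_frames_per_rates array with "%u" (corrected in the wl18xx hunk above) get flagged at build time. A compact demonstration of the underlying GCC/Clang attribute:

    #include <stdarg.h>
    #include <stdio.h>

    /* what the kernel's __printf(1, 2) expands to */
    __attribute__((format(printf, 1, 2)))
    static void logf(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        logf("rate %u\n", 42u);
        /* logf("rate %u\n", "oops");  <- now caught at compile time */
        return 0;
    }
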
5307 -diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
5308 -index eb88693..7b53a5c 100644
5309 ---- a/drivers/nfc/st21nfcb/i2c.c
5310 -+++ b/drivers/nfc/st21nfcb/i2c.c
5311 -@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
5312 - return phy->ndlc->hard_fault;
5313 -
5314 - r = i2c_master_send(client, skb->data, skb->len);
5315 -- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
5316 -+ if (r < 0) { /* Retry, chip was in standby */
5317 - usleep_range(1000, 4000);
5318 - r = i2c_master_send(client, skb->data, skb->len);
5319 - }
5320 -@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
5321 - struct i2c_client *client = phy->i2c_dev;
5322 -
5323 - r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
5324 -- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
5325 -+ if (r < 0) { /* Retry, chip was in standby */
5326 - usleep_range(1000, 4000);
5327 - r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
5328 - }
5329 -diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
5330 -index 15c0fab..bceb30b 100644
5331 ---- a/drivers/platform/x86/compal-laptop.c
5332 -+++ b/drivers/platform/x86/compal-laptop.c
5333 -@@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev)
5334 - if (err)
5335 - return err;
5336 -
5337 -- hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
5338 -- "compal", data,
5339 -- compal_hwmon_groups);
5340 -+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
5341 -+ "compal", data,
5342 -+ compal_hwmon_groups);
5343 - if (IS_ERR(hwmon_dev)) {
5344 - err = PTR_ERR(hwmon_dev);
5345 - goto remove;
5346 -@@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev)
5347 -
5348 - /* Power supply */
5349 - initialize_power_supply_data(data);
5350 -- power_supply_register(&compal_device->dev, &data->psy);
5351 -+ err = power_supply_register(&compal_device->dev, &data->psy);
5352 -+ if (err < 0)
5353 -+ goto remove;
5354 -
5355 - platform_set_drvdata(pdev, data);
5356 -
5357 -diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
5358 -index 9d69460..96b15e0 100644
5359 ---- a/drivers/power/ipaq_micro_battery.c
5360 -+++ b/drivers/power/ipaq_micro_battery.c
5361 -@@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
5362 - static int micro_batt_probe(struct platform_device *pdev)
5363 - {
5364 - struct micro_battery *mb;
5365 -+ int ret;
5366 -
5367 - mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
5368 - if (!mb)
5369 -@@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)
5370 -
5371 - mb->micro = dev_get_drvdata(pdev->dev.parent);
5372 - mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
5373 -+ if (!mb->wq)
5374 -+ return -ENOMEM;
5375 -+
5376 - INIT_DELAYED_WORK(&mb->update, micro_battery_work);
5377 - platform_set_drvdata(pdev, mb);
5378 - queue_delayed_work(mb->wq, &mb->update, 1);
5379 -- power_supply_register(&pdev->dev, &micro_batt_power);
5380 -- power_supply_register(&pdev->dev, &micro_ac_power);
5381 -+
5382 -+ ret = power_supply_register(&pdev->dev, &micro_batt_power);
5383 -+ if (ret < 0)
5384 -+ goto batt_err;
5385 -+
5386 -+ ret = power_supply_register(&pdev->dev, &micro_ac_power);
5387 -+ if (ret < 0)
5388 -+ goto ac_err;
5389 -
5390 - dev_info(&pdev->dev, "iPAQ micro battery driver\n");
5391 - return 0;
5392 -+
5393 -+ac_err:
5394 -+ power_supply_unregister(&micro_ac_power);
5395 -+batt_err:
5396 -+ cancel_delayed_work_sync(&mb->update);
5397 -+ destroy_workqueue(mb->wq);
5398 -+ return ret;
5399 - }
5400 -
5401 - static int micro_batt_remove(struct platform_device *pdev)
5402 -@@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
5403 - power_supply_unregister(&micro_ac_power);
5404 - power_supply_unregister(&micro_batt_power);
5405 - cancel_delayed_work_sync(&mb->update);
5406 -+ destroy_workqueue(mb->wq);
5407 -
5408 - return 0;
5409 - }
5410 -diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
5411 -index 21fc233..176dab2 100644
5412 ---- a/drivers/power/lp8788-charger.c
5413 -+++ b/drivers/power/lp8788-charger.c
5414 -@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
5415 - pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
5416 - pchg->battery.get_property = lp8788_battery_get_property;
5417 -
5418 -- if (power_supply_register(&pdev->dev, &pchg->battery))
5419 -+ if (power_supply_register(&pdev->dev, &pchg->battery)) {
5420 -+ power_supply_unregister(&pchg->charger);
5421 - return -EPERM;
5422 -+ }
5423 -
5424 - return 0;
5425 - }
5426 -diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
5427 -index 7ef445a..cf90760 100644
5428 ---- a/drivers/power/twl4030_madc_battery.c
5429 -+++ b/drivers/power/twl4030_madc_battery.c
5430 -@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
5431 - {
5432 - struct twl4030_madc_battery *twl4030_madc_bat;
5433 - struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
5434 -+ int ret = 0;
5435 -
5436 - twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
5437 - if (!twl4030_madc_bat)
5438 -@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
5439 -
5440 - twl4030_madc_bat->pdata = pdata;
5441 - platform_set_drvdata(pdev, twl4030_madc_bat);
5442 -- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
5443 -+ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
5444 -+ if (ret < 0)
5445 -+ kfree(twl4030_madc_bat);
5446 -
5447 -- return 0;
5448 -+ return ret;
5449 - }
5450 -
5451 - static int twl4030_madc_battery_remove(struct platform_device *pdev)
5452 -diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
5453 -index 675b5e7..5a0800d 100644
5454 ---- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
5455 -+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
5456 -@@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
5457 - fp_possible = io_info.fpOkForIo;
5458 - }
5459 -
5460 -- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
5461 -+ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
5462 - id by default, not CPU group id, otherwise all MSI-X queues won't
5463 - be utilized */
5464 - cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
5465 -- smp_processor_id() % instance->msix_vectors : 0;
5466 -+ raw_smp_processor_id() % instance->msix_vectors : 0;
5467 -
5468 - if (fp_possible) {
5469 - megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
5470 -@@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
5471 - << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
5472 - cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
5473 - cmd->request_desc->SCSIIO.MSIxIndex =
5474 -- instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
5475 -+ instance->msix_vectors ?
5476 -+ raw_smp_processor_id() %
5477 -+ instance->msix_vectors :
5478 -+ 0;
5479 - os_timeout_value = scmd->request->timeout / HZ;
5480 -
5481 - if (instance->secure_jbod_support &&
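
smp_processor_id() warns under CONFIG_DEBUG_PREEMPT when called from preemptible context, because the answer can be stale the instant it is returned; raw_smp_processor_id() is the sanctioned variant when a stale value is harmless, as it is here for spreading I/O across MSI-X queues. A user-space analogue with sched_getcpu(), where the same staleness argument applies:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Pick a queue from the current CPU. The thread may migrate before
     * the value is used, which is fine when it is only a load-spreading
     * hint - the reasoning that makes raw_smp_processor_id() safe in
     * the driver. */
    static int pick_queue(int nr_queues)
    {
        int cpu = sched_getcpu();

        return (cpu < 0 || nr_queues <= 0) ? 0 : cpu % nr_queues;
    }

    int main(void)
    {
        printf("queue %d\n", pick_queue(8));
        return 0;
    }
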
5482 -diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
5483 -index 2d5ab6d..454536c 100644
5484 ---- a/drivers/scsi/mvsas/mv_sas.c
5485 -+++ b/drivers/scsi/mvsas/mv_sas.c
5486 -@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
5487 - static int mvs_task_prep_ata(struct mvs_info *mvi,
5488 - struct mvs_task_exec_info *tei)
5489 - {
5490 -- struct sas_ha_struct *sha = mvi->sas;
5491 - struct sas_task *task = tei->task;
5492 - struct domain_device *dev = task->dev;
5493 - struct mvs_device *mvi_dev = dev->lldd_dev;
5494 - struct mvs_cmd_hdr *hdr = tei->hdr;
5495 - struct asd_sas_port *sas_port = dev->port;
5496 -- struct sas_phy *sphy = dev->phy;
5497 -- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
5498 - struct mvs_slot_info *slot;
5499 - void *buf_prd;
5500 - u32 tag = tei->tag, hdr_tag;
5501 -@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
5502 - slot->tx = mvi->tx_prod;
5503 - del_q = TXQ_MODE_I | tag |
5504 - (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
5505 -- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
5506 -+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
5507 - (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
5508 - mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
5509 -
5510 -diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
5511 -index 6b78476..3290a3e 100644
5512 ---- a/drivers/scsi/sd.c
5513 -+++ b/drivers/scsi/sd.c
5514 -@@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev)
5515 - ida_remove(&sd_index_ida, sdkp->index);
5516 - spin_unlock(&sd_index_lock);
5517 -
5518 -+ blk_integrity_unregister(disk);
5519 - disk->private_data = NULL;
5520 - put_disk(disk);
5521 - put_device(&sdkp->device->sdev_gendev);
5522 -diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
5523 -index 14c7d42..5c06d29 100644
5524 ---- a/drivers/scsi/sd_dif.c
5525 -+++ b/drivers/scsi/sd_dif.c
5526 -@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
5527 -
5528 - disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
5529 -
5530 -- if (!sdkp)
5531 -+ if (!sdkp->ATO)
5532 - return;
5533 -
5534 - if (type == SD_DIF_TYPE3_PROTECTION)
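
The old if (!sdkp) could never do useful work: sdkp is dereferenced earlier in sd_dif_config_host(), and after a dereference the compiler is entitled to delete a null check outright. The intended test was the disk's ATO (application-tag ownership) flag. A tiny sketch of the corrected shape, with the struct reduced to that one field:

    #include <stdio.h>

    struct scsi_disk { int ATO; };   /* field name from the driver */

    static void dif_config(struct scsi_disk *sdkp)
    {
        /* test the flag, not a pointer already known non-NULL */
        if (!sdkp->ATO)
            return;
        printf("app-tag ownership enabled\n");
    }

    int main(void)
    {
        struct scsi_disk d = { .ATO = 1 };

        dif_config(&d);
        return 0;
    }
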
5535 -diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
5536 -index efc6e44..bf8c5c1 100644
5537 ---- a/drivers/scsi/storvsc_drv.c
5538 -+++ b/drivers/scsi/storvsc_drv.c
5539 -@@ -746,21 +746,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
5540 - if (bounce_sgl[j].length == PAGE_SIZE) {
5541 - /* full..move to next entry */
5542 - sg_kunmap_atomic(bounce_addr);
5543 -+ bounce_addr = 0;
5544 - j++;
5545 -+ }
5546 -
5547 -- /* if we need to use another bounce buffer */
5548 -- if (srclen || i != orig_sgl_count - 1)
5549 -- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
5550 -+ /* if we need to use another bounce buffer */
5551 -+ if (srclen && bounce_addr == 0)
5552 -+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
5553 -
5554 -- } else if (srclen == 0 && i == orig_sgl_count - 1) {
5555 -- /* unmap the last bounce that is < PAGE_SIZE */
5556 -- sg_kunmap_atomic(bounce_addr);
5557 -- }
5558 - }
5559 -
5560 - sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
5561 - }
5562 -
5563 -+ if (bounce_addr)
5564 -+ sg_kunmap_atomic(bounce_addr);
5565 -+
5566 - local_irq_restore(flags);
5567 -
5568 - return total_copied;
5569 -diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
5570 -index 6fea4af..aea3a67 100644
5571 ---- a/drivers/spi/spi-imx.c
5572 -+++ b/drivers/spi/spi-imx.c
5573 -@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
5574 - if (spi_imx->dma_is_inited) {
5575 - dma = readl(spi_imx->base + MX51_ECSPI_DMA);
5576 -
5577 -- spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5578 -- spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5579 - spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
5580 - rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
5581 - tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
5582 -@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
5583 - master->max_dma_len = MAX_SDMA_BD_BYTES;
5584 - spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
5585 - SPI_MASTER_MUST_TX;
5586 -+ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5587 -+ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5588 - spi_imx->dma_is_inited = 1;
5589 -
5590 - return 0;
5591 -diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
5592 -index 4eb7a98..7bf5186 100644
5593 ---- a/drivers/spi/spidev.c
5594 -+++ b/drivers/spi/spidev.c
5595 -@@ -245,7 +245,10 @@ static int spidev_message(struct spidev_data *spidev,
5596 - k_tmp->len = u_tmp->len;
5597 -
5598 - total += k_tmp->len;
5599 -- if (total > bufsiz) {
5600 -+ /* Check total length of transfers. Also check each
5601 -+ * transfer length to avoid arithmetic overflow.
5602 -+ */
5603 -+ if (total > bufsiz || k_tmp->len > bufsiz) {
5604 - status = -EMSGSIZE;
5605 - goto done;
5606 - }
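
Checking k_tmp->len as well as the running total closes an integer-overflow hole: one huge transfer length can wrap the unsigned sum back under bufsiz, slipping past the total-only check. A stand-alone sketch of the hardened loop (the limit value is arbitrary):

    #include <stdio.h>

    #define LEN_LIMIT 4096u

    /* Sum per-transfer lengths, refusing any single oversized length so
     * a wrapped running total cannot defeat the limit check. */
    static int total_len(const unsigned *lens, int n, unsigned *out)
    {
        unsigned total = 0;

        for (int i = 0; i < n; i++) {
            total += lens[i];
            if (total > LEN_LIMIT || lens[i] > LEN_LIMIT)
                return -1;
        }
        *out = total;
        return 0;
    }

    int main(void)
    {
        unsigned evil[] = { 16u, 0xFFFFFFF0u };   /* sum wraps to 0 */
        unsigned total;

        printf("%d\n", total_len(evil, 2, &total));   /* -1, rejected */
        return 0;
    }
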
5607 -diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
5608 -index 7bdb62b..f83e00c 100644
5609 ---- a/drivers/staging/android/sync.c
5610 -+++ b/drivers/staging/android/sync.c
5611 -@@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
5612 - list_for_each_entry_safe(pt, next, &obj->active_list_head,
5613 - active_list) {
5614 - if (fence_is_signaled_locked(&pt->base))
5615 -- list_del(&pt->active_list);
5616 -+ list_del_init(&pt->active_list);
5617 - }
5618 -
5619 - spin_unlock_irqrestore(&obj->child_list_lock, flags);
5620 -diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
5621 -index 6ed35b6..04fc217 100644
5622 ---- a/drivers/staging/panel/panel.c
5623 -+++ b/drivers/staging/panel/panel.c
5624 -@@ -335,11 +335,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
5625 - * LCD types
5626 - */
5627 - #define LCD_TYPE_NONE 0
5628 --#define LCD_TYPE_OLD 1
5629 --#define LCD_TYPE_KS0074 2
5630 --#define LCD_TYPE_HANTRONIX 3
5631 --#define LCD_TYPE_NEXCOM 4
5632 --#define LCD_TYPE_CUSTOM 5
5633 -+#define LCD_TYPE_CUSTOM 1
5634 -+#define LCD_TYPE_OLD 2
5635 -+#define LCD_TYPE_KS0074 3
5636 -+#define LCD_TYPE_HANTRONIX 4
5637 -+#define LCD_TYPE_NEXCOM 5
5638 -
5639 - /*
5640 - * keypad types
5641 -@@ -502,7 +502,7 @@ MODULE_PARM_DESC(keypad_type,
5642 - static int lcd_type = NOT_SET;
5643 - module_param(lcd_type, int, 0000);
5644 - MODULE_PARM_DESC(lcd_type,
5645 -- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
5646 -+ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
5647 -
5648 - static int lcd_height = NOT_SET;
5649 - module_param(lcd_height, int, 0000);
5650 -diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
5651 -index 07ce3fd..fdf5c56 100644
5652 ---- a/drivers/staging/vt6655/rxtx.c
5653 -+++ b/drivers/staging/vt6655/rxtx.c
5654 -@@ -1308,10 +1308,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
5655 - priv->hw->conf.chandef.chan->hw_value);
5656 - }
5657 -
5658 -- if (current_rate > RATE_11M)
5659 -- pkt_type = (u8)priv->byPacketType;
5660 -- else
5661 -+ if (current_rate > RATE_11M) {
5662 -+ if (info->band == IEEE80211_BAND_5GHZ) {
5663 -+ pkt_type = PK_TYPE_11A;
5664 -+ } else {
5665 -+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
5666 -+ pkt_type = PK_TYPE_11GB;
5667 -+ else
5668 -+ pkt_type = PK_TYPE_11GA;
5669 -+ }
5670 -+ } else {
5671 - pkt_type = PK_TYPE_11B;
5672 -+ }
5673 -
5674 - /*Set fifo controls */
5675 - if (pkt_type == PK_TYPE_11A)
5676 -diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5677 -index 77d6425..5e35612 100644
5678 ---- a/drivers/target/iscsi/iscsi_target.c
5679 -+++ b/drivers/target/iscsi/iscsi_target.c
5680 -@@ -537,7 +537,7 @@ static struct iscsit_transport iscsi_target_transport = {
5681 -
5682 - static int __init iscsi_target_init_module(void)
5683 - {
5684 -- int ret = 0;
5685 -+ int ret = 0, size;
5686 -
5687 - pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
5688 -
5689 -@@ -546,6 +546,7 @@ static int __init iscsi_target_init_module(void)
5690 - pr_err("Unable to allocate memory for iscsit_global\n");
5691 - return -1;
5692 - }
5693 -+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
5694 - mutex_init(&auth_id_lock);
5695 - spin_lock_init(&sess_idr_lock);
5696 - idr_init(&tiqn_idr);
5697 -@@ -555,15 +556,11 @@ static int __init iscsi_target_init_module(void)
5698 - if (ret < 0)
5699 - goto out;
5700 -
5701 -- ret = iscsi_thread_set_init();
5702 -- if (ret < 0)
5703 -+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
5704 -+ iscsit_global->ts_bitmap = vzalloc(size);
5705 -+ if (!iscsit_global->ts_bitmap) {
5706 -+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
5707 - goto configfs_out;
5708 --
5709 -- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
5710 -- TARGET_THREAD_SET_COUNT) {
5711 -- pr_err("iscsi_allocate_thread_sets() returned"
5712 -- " unexpected value!\n");
5713 -- goto ts_out1;
5714 - }
5715 -
5716 - lio_qr_cache = kmem_cache_create("lio_qr_cache",
5717 -@@ -572,7 +569,7 @@ static int __init iscsi_target_init_module(void)
5718 - if (!lio_qr_cache) {
5719 - pr_err("nable to kmem_cache_create() for"
5720 - " lio_qr_cache\n");
5721 -- goto ts_out2;
5722 -+ goto bitmap_out;
5723 - }
5724 -
5725 - lio_dr_cache = kmem_cache_create("lio_dr_cache",
5726 -@@ -617,10 +614,8 @@ dr_out:
5727 - kmem_cache_destroy(lio_dr_cache);
5728 - qr_out:
5729 - kmem_cache_destroy(lio_qr_cache);
5730 --ts_out2:
5731 -- iscsi_deallocate_thread_sets();
5732 --ts_out1:
5733 -- iscsi_thread_set_free();
5734 -+bitmap_out:
5735 -+ vfree(iscsit_global->ts_bitmap);
5736 - configfs_out:
5737 - iscsi_target_deregister_configfs();
5738 - out:
5739 -@@ -630,8 +625,6 @@ out:
5740 -
5741 - static void __exit iscsi_target_cleanup_module(void)
5742 - {
5743 -- iscsi_deallocate_thread_sets();
5744 -- iscsi_thread_set_free();
5745 - iscsit_release_discovery_tpg();
5746 - iscsit_unregister_transport(&iscsi_target_transport);
5747 - kmem_cache_destroy(lio_qr_cache);
5748 -@@ -641,6 +634,7 @@ static void __exit iscsi_target_cleanup_module(void)
5749 -
5750 - iscsi_target_deregister_configfs();
5751 -
5752 -+ vfree(iscsit_global->ts_bitmap);
5753 - kfree(iscsit_global);
5754 - }
5755 -
5756 -@@ -3715,17 +3709,16 @@ static int iscsit_send_reject(
5757 -
5758 - void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
5759 - {
5760 -- struct iscsi_thread_set *ts = conn->thread_set;
5761 - int ord, cpu;
5762 - /*
5763 -- * thread_id is assigned from iscsit_global->ts_bitmap from
5764 -- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
5765 -+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
5766 -+ * within iscsit_start_kthreads()
5767 - *
5768 -- * Here we use thread_id to determine which CPU that this
5769 -- * iSCSI connection's iscsi_thread_set will be scheduled to
5770 -+ * Here we use bitmap_id to determine which CPU that this
5771 -+ * iSCSI connection's RX/TX threads will be scheduled to
5772 - * execute upon.
5773 - */
5774 -- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
5775 -+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
5776 - for_each_online_cpu(cpu) {
5777 - if (ord-- == 0) {
5778 - cpumask_set_cpu(cpu, conn->conn_cpumask);
5779 -@@ -3914,7 +3907,7 @@ check_rsp_state:
5780 - switch (state) {
5781 - case ISTATE_SEND_LOGOUTRSP:
5782 - if (!iscsit_logout_post_handler(cmd, conn))
5783 -- goto restart;
5784 -+ return -ECONNRESET;
5785 - /* fall through */
5786 - case ISTATE_SEND_STATUS:
5787 - case ISTATE_SEND_ASYNCMSG:
5788 -@@ -3942,8 +3935,6 @@ check_rsp_state:
5789 -
5790 - err:
5791 - return -1;
5792 --restart:
5793 -- return -EAGAIN;
5794 - }
5795 -
5796 - static int iscsit_handle_response_queue(struct iscsi_conn *conn)
5797 -@@ -3970,21 +3961,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
5798 - int iscsi_target_tx_thread(void *arg)
5799 - {
5800 - int ret = 0;
5801 -- struct iscsi_conn *conn;
5802 -- struct iscsi_thread_set *ts = arg;
5803 -+ struct iscsi_conn *conn = arg;
5804 - /*
5805 - * Allow ourselves to be interrupted by SIGINT so that a
5806 - * connection recovery / failure event can be triggered externally.
5807 - */
5808 - allow_signal(SIGINT);
5809 -
5810 --restart:
5811 -- conn = iscsi_tx_thread_pre_handler(ts);
5812 -- if (!conn)
5813 -- goto out;
5814 --
5815 -- ret = 0;
5816 --
5817 - while (!kthread_should_stop()) {
5818 - /*
5819 - * Ensure that both TX and RX per connection kthreads
5820 -@@ -3993,11 +3976,9 @@ restart:
5821 - iscsit_thread_check_cpumask(conn, current, 1);
5822 -
5823 - wait_event_interruptible(conn->queues_wq,
5824 -- !iscsit_conn_all_queues_empty(conn) ||
5825 -- ts->status == ISCSI_THREAD_SET_RESET);
5826 -+ !iscsit_conn_all_queues_empty(conn));
5827 -
5828 -- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
5829 -- signal_pending(current))
5830 -+ if (signal_pending(current))
5831 - goto transport_err;
5832 -
5833 - get_immediate:
5834 -@@ -4008,15 +3989,14 @@ get_immediate:
5835 - ret = iscsit_handle_response_queue(conn);
5836 - if (ret == 1)
5837 - goto get_immediate;
5838 -- else if (ret == -EAGAIN)
5839 -- goto restart;
5840 -+ else if (ret == -ECONNRESET)
5841 -+ goto out;
5842 - else if (ret < 0)
5843 - goto transport_err;
5844 - }
5845 -
5846 - transport_err:
5847 - iscsit_take_action_for_connection_exit(conn);
5848 -- goto restart;
5849 - out:
5850 - return 0;
5851 - }
5852 -@@ -4111,8 +4091,7 @@ int iscsi_target_rx_thread(void *arg)
5853 - int ret;
5854 - u8 buffer[ISCSI_HDR_LEN], opcode;
5855 - u32 checksum = 0, digest = 0;
5856 -- struct iscsi_conn *conn = NULL;
5857 -- struct iscsi_thread_set *ts = arg;
5858 -+ struct iscsi_conn *conn = arg;
5859 - struct kvec iov;
5860 - /*
5861 - * Allow ourselves to be interrupted by SIGINT so that a
5862 -@@ -4120,11 +4099,6 @@ int iscsi_target_rx_thread(void *arg)
5863 - */
5864 - allow_signal(SIGINT);
5865 -
5866 --restart:
5867 -- conn = iscsi_rx_thread_pre_handler(ts);
5868 -- if (!conn)
5869 -- goto out;
5870 --
5871 - if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
5872 - struct completion comp;
5873 - int rc;
5874 -@@ -4134,7 +4108,7 @@ restart:
5875 - if (rc < 0)
5876 - goto transport_err;
5877 -
5878 -- goto out;
5879 -+ goto transport_err;
5880 - }
5881 -
5882 - while (!kthread_should_stop()) {
5883 -@@ -4210,8 +4184,6 @@ transport_err:
5884 - if (!signal_pending(current))
5885 - atomic_set(&conn->transport_failed, 1);
5886 - iscsit_take_action_for_connection_exit(conn);
5887 -- goto restart;
5888 --out:
5889 - return 0;
5890 - }
5891 -
5892 -@@ -4273,7 +4245,24 @@ int iscsit_close_connection(
5893 - if (conn->conn_transport->transport_type == ISCSI_TCP)
5894 - complete(&conn->conn_logout_comp);
5895 -
5896 -- iscsi_release_thread_set(conn);
5897 -+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
5898 -+ if (conn->tx_thread &&
5899 -+ cmpxchg(&conn->tx_thread_active, true, false)) {
5900 -+ send_sig(SIGINT, conn->tx_thread, 1);
5901 -+ kthread_stop(conn->tx_thread);
5902 -+ }
5903 -+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
5904 -+ if (conn->rx_thread &&
5905 -+ cmpxchg(&conn->rx_thread_active, true, false)) {
5906 -+ send_sig(SIGINT, conn->rx_thread, 1);
5907 -+ kthread_stop(conn->rx_thread);
5908 -+ }
5909 -+ }
5910 -+
5911 -+ spin_lock(&iscsit_global->ts_bitmap_lock);
5912 -+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
5913 -+ get_order(1));
5914 -+ spin_unlock(&iscsit_global->ts_bitmap_lock);
5915 -
5916 - iscsit_stop_timers_for_cmds(conn);
5917 - iscsit_stop_nopin_response_timer(conn);
5918 -@@ -4551,15 +4540,13 @@ static void iscsit_logout_post_handler_closesession(
5919 - struct iscsi_conn *conn)
5920 - {
5921 - struct iscsi_session *sess = conn->sess;
5922 --
5923 -- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
5924 -- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
5925 -+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
5926 -
5927 - atomic_set(&conn->conn_logout_remove, 0);
5928 - complete(&conn->conn_logout_comp);
5929 -
5930 - iscsit_dec_conn_usage_count(conn);
5931 -- iscsit_stop_session(sess, 1, 1);
5932 -+ iscsit_stop_session(sess, sleep, sleep);
5933 - iscsit_dec_session_usage_count(sess);
5934 - target_put_session(sess->se_sess);
5935 - }
5936 -@@ -4567,13 +4554,12 @@ static void iscsit_logout_post_handler_closesession(
5937 - static void iscsit_logout_post_handler_samecid(
5938 - struct iscsi_conn *conn)
5939 - {
5940 -- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
5941 -- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
5942 -+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
5943 -
5944 - atomic_set(&conn->conn_logout_remove, 0);
5945 - complete(&conn->conn_logout_comp);
5946 -
5947 -- iscsit_cause_connection_reinstatement(conn, 1);
5948 -+ iscsit_cause_connection_reinstatement(conn, sleep);
5949 - iscsit_dec_conn_usage_count(conn);
5950 - }
5951 -
5952 -diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
5953 -index bdd8731..e008ed2 100644
5954 ---- a/drivers/target/iscsi/iscsi_target_erl0.c
5955 -+++ b/drivers/target/iscsi/iscsi_target_erl0.c
5956 -@@ -860,7 +860,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
5957 - }
5958 - spin_unlock_bh(&conn->state_lock);
5959 -
5960 -- iscsi_thread_set_force_reinstatement(conn);
5961 -+ if (conn->tx_thread && conn->tx_thread_active)
5962 -+ send_sig(SIGINT, conn->tx_thread, 1);
5963 -+ if (conn->rx_thread && conn->rx_thread_active)
5964 -+ send_sig(SIGINT, conn->rx_thread, 1);
5965 -
5966 - sleep:
5967 - wait_for_completion(&conn->conn_wait_rcfr_comp);
5968 -@@ -885,10 +888,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
5969 - return;
5970 - }
5971 -
5972 -- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
5973 -- spin_unlock_bh(&conn->state_lock);
5974 -- return;
5975 -- }
5976 -+ if (conn->tx_thread && conn->tx_thread_active)
5977 -+ send_sig(SIGINT, conn->tx_thread, 1);
5978 -+ if (conn->rx_thread && conn->rx_thread_active)
5979 -+ send_sig(SIGINT, conn->rx_thread, 1);
5980 -
5981 - atomic_set(&conn->connection_reinstatement, 1);
5982 - if (!sleep) {
5983 -diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
5984 -index 153fb66..345f073 100644
5985 ---- a/drivers/target/iscsi/iscsi_target_login.c
5986 -+++ b/drivers/target/iscsi/iscsi_target_login.c
5987 -@@ -699,6 +699,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
5988 - iscsit_start_nopin_timer(conn);
5989 - }
5990 -
5991 -+int iscsit_start_kthreads(struct iscsi_conn *conn)
5992 -+{
5993 -+ int ret = 0;
5994 -+
5995 -+ spin_lock(&iscsit_global->ts_bitmap_lock);
5996 -+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
5997 -+ ISCSIT_BITMAP_BITS, get_order(1));
5998 -+ spin_unlock(&iscsit_global->ts_bitmap_lock);
5999 -+
6000 -+ if (conn->bitmap_id < 0) {
6001 -+ pr_err("bitmap_find_free_region() failed for"
6002 -+ " iscsit_start_kthreads()\n");
6003 -+ return -ENOMEM;
6004 -+ }
6005 -+
6006 -+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
6007 -+ "%s", ISCSI_TX_THREAD_NAME);
6008 -+ if (IS_ERR(conn->tx_thread)) {
6009 -+ pr_err("Unable to start iscsi_target_tx_thread\n");
6010 -+ ret = PTR_ERR(conn->tx_thread);
6011 -+ goto out_bitmap;
6012 -+ }
6013 -+ conn->tx_thread_active = true;
6014 -+
6015 -+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
6016 -+ "%s", ISCSI_RX_THREAD_NAME);
6017 -+ if (IS_ERR(conn->rx_thread)) {
6018 -+ pr_err("Unable to start iscsi_target_rx_thread\n");
6019 -+ ret = PTR_ERR(conn->rx_thread);
6020 -+ goto out_tx;
6021 -+ }
6022 -+ conn->rx_thread_active = true;
6023 -+
6024 -+ return 0;
6025 -+out_tx:
6026 -+ kthread_stop(conn->tx_thread);
6027 -+ conn->tx_thread_active = false;
6028 -+out_bitmap:
6029 -+ spin_lock(&iscsit_global->ts_bitmap_lock);
6030 -+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
6031 -+ get_order(1));
6032 -+ spin_unlock(&iscsit_global->ts_bitmap_lock);
6033 -+ return ret;
6034 -+}
6035 -+
6036 - int iscsi_post_login_handler(
6037 - struct iscsi_np *np,
6038 - struct iscsi_conn *conn,
6039 -@@ -709,7 +754,7 @@ int iscsi_post_login_handler(
6040 - struct se_session *se_sess = sess->se_sess;
6041 - struct iscsi_portal_group *tpg = sess->tpg;
6042 - struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
6043 -- struct iscsi_thread_set *ts;
6044 -+ int rc;
6045 -
6046 - iscsit_inc_conn_usage_count(conn);
6047 -
6048 -@@ -724,7 +769,6 @@ int iscsi_post_login_handler(
6049 - /*
6050 - * SCSI Initiator -> SCSI Target Port Mapping
6051 - */
6052 -- ts = iscsi_get_thread_set();
6053 - if (!zero_tsih) {
6054 - iscsi_set_session_parameters(sess->sess_ops,
6055 - conn->param_list, 0);
6056 -@@ -751,9 +795,11 @@ int iscsi_post_login_handler(
6057 - sess->sess_ops->InitiatorName);
6058 - spin_unlock_bh(&sess->conn_lock);
6059 -
6060 -- iscsi_post_login_start_timers(conn);
6061 -+ rc = iscsit_start_kthreads(conn);
6062 -+ if (rc)
6063 -+ return rc;
6064 -
6065 -- iscsi_activate_thread_set(conn, ts);
6066 -+ iscsi_post_login_start_timers(conn);
6067 - /*
6068 - * Determine CPU mask to ensure connection's RX and TX kthreads
6069 - * are scheduled on the same CPU.
6070 -@@ -810,8 +856,11 @@ int iscsi_post_login_handler(
6071 - " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
6072 - spin_unlock_bh(&se_tpg->session_lock);
6073 -
6074 -+ rc = iscsit_start_kthreads(conn);
6075 -+ if (rc)
6076 -+ return rc;
6077 -+
6078 - iscsi_post_login_start_timers(conn);
6079 -- iscsi_activate_thread_set(conn, ts);
6080 - /*
6081 - * Determine CPU mask to ensure connection's RX and TX kthreads
6082 - * are scheduled on the same CPU.
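
iscsit_start_kthreads() above follows the kernel's standard goto-unwind idiom: each acquired resource gets a cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A generic user-space sketch of the shape, with allocations standing in for the bitmap slot and the two kthreads:

    #include <stdio.h>
    #include <stdlib.h>

    static int start_pair(void)
    {
        char *slot, *tx, *rx;

        slot = malloc(64);          /* stands in for the bitmap slot */
        if (!slot)
            return -1;

        tx = malloc(64);            /* stands in for the TX kthread */
        if (!tx)
            goto out_slot;

        rx = malloc(64);            /* stands in for the RX kthread */
        if (!rx)
            goto out_tx;

        free(rx);                   /* success path for the demo */
        free(tx);
        free(slot);
        return 0;

    out_tx:
        free(tx);
    out_slot:
        free(slot);
        return -1;
    }

    int main(void)
    {
        printf("%d\n", start_pair());
        return 0;
    }
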
6083 -diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
6084 -index 44620fb..cbb0cc2 100644
6085 ---- a/drivers/target/target_core_file.c
6086 -+++ b/drivers/target/target_core_file.c
6087 -@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
6088 - struct se_device *se_dev = cmd->se_dev;
6089 - struct fd_dev *dev = FD_DEV(se_dev);
6090 - struct file *prot_fd = dev->fd_prot_file;
6091 -- struct scatterlist *sg;
6092 - loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
6093 - unsigned char *buf;
6094 -- u32 prot_size, len, size;
6095 -- int rc, ret = 1, i;
6096 -+ u32 prot_size;
6097 -+ int rc, ret = 1;
6098 -
6099 - prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
6100 - se_dev->prot_length;
6101 -
6102 - if (!is_write) {
6103 -- fd_prot->prot_buf = vzalloc(prot_size);
6104 -+ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
6105 - if (!fd_prot->prot_buf) {
6106 - pr_err("Unable to allocate fd_prot->prot_buf\n");
6107 - return -ENOMEM;
6108 - }
6109 - buf = fd_prot->prot_buf;
6110 -
6111 -- fd_prot->prot_sg_nents = cmd->t_prot_nents;
6112 -- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
6113 -- fd_prot->prot_sg_nents, GFP_KERNEL);
6114 -+ fd_prot->prot_sg_nents = 1;
6115 -+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
6116 -+ GFP_KERNEL);
6117 - if (!fd_prot->prot_sg) {
6118 - pr_err("Unable to allocate fd_prot->prot_sg\n");
6119 -- vfree(fd_prot->prot_buf);
6120 -+ kfree(fd_prot->prot_buf);
6121 - return -ENOMEM;
6122 - }
6123 -- size = prot_size;
6124 --
6125 -- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
6126 --
6127 -- len = min_t(u32, PAGE_SIZE, size);
6128 -- sg_set_buf(sg, buf, len);
6129 -- size -= len;
6130 -- buf += len;
6131 -- }
6132 -+ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
6133 -+ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
6134 - }
6135 -
6136 - if (is_write) {
6137 -@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
6138 -
6139 - if (is_write || ret < 0) {
6140 - kfree(fd_prot->prot_sg);
6141 -- vfree(fd_prot->prot_buf);
6142 -+ kfree(fd_prot->prot_buf);
6143 - }
6144 -
6145 - return ret;
6146 -@@ -549,6 +541,56 @@ fd_execute_write_same(struct se_cmd *cmd)
6147 - return 0;
6148 - }
6149 -
6150 -+static int
6151 -+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
6152 -+ void *buf, size_t bufsize)
6153 -+{
6154 -+ struct fd_dev *fd_dev = FD_DEV(se_dev);
6155 -+ struct file *prot_fd = fd_dev->fd_prot_file;
6156 -+ sector_t prot_length, prot;
6157 -+ loff_t pos = lba * se_dev->prot_length;
6158 -+
6159 -+ if (!prot_fd) {
6160 -+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
6161 -+ return -ENODEV;
6162 -+ }
6163 -+
6164 -+ prot_length = nolb * se_dev->prot_length;
6165 -+
6166 -+ for (prot = 0; prot < prot_length;) {
6167 -+ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
6168 -+ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
6169 -+
6170 -+ if (ret != len) {
6171 -+ pr_err("vfs_write to prot file failed: %zd\n", ret);
6172 -+ return ret < 0 ? ret : -ENODEV;
6173 -+ }
6174 -+ prot += ret;
6175 -+ }
6176 -+
6177 -+ return 0;
6178 -+}
6179 -+
6180 -+static int
6181 -+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
6182 -+{
6183 -+ void *buf;
6184 -+ int rc;
6185 -+
6186 -+ buf = (void *)__get_free_page(GFP_KERNEL);
6187 -+ if (!buf) {
6188 -+ pr_err("Unable to allocate FILEIO prot buf\n");
6189 -+ return -ENOMEM;
6190 -+ }
6191 -+ memset(buf, 0xff, PAGE_SIZE);
6192 -+
6193 -+ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
6194 -+
6195 -+ free_page((unsigned long)buf);
6196 -+
6197 -+ return rc;
6198 -+}
6199 -+
6200 - static sense_reason_t
6201 - fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
6202 - {
6203 -@@ -556,6 +598,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
6204 - struct inode *inode = file->f_mapping->host;
6205 - int ret;
6206 -
6207 -+ if (cmd->se_dev->dev_attrib.pi_prot_type) {
6208 -+ ret = fd_do_prot_unmap(cmd, lba, nolb);
6209 -+ if (ret)
6210 -+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
6211 -+ }
6212 -+
6213 - if (S_ISBLK(inode->i_mode)) {
6214 - /* The backend is block device, use discard */
6215 - struct block_device *bdev = inode->i_bdev;
6216 -@@ -658,11 +706,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
6217 - 0, fd_prot.prot_sg, 0);
6218 - if (rc) {
6219 - kfree(fd_prot.prot_sg);
6220 -- vfree(fd_prot.prot_buf);
6221 -+ kfree(fd_prot.prot_buf);
6222 - return rc;
6223 - }
6224 - kfree(fd_prot.prot_sg);
6225 -- vfree(fd_prot.prot_buf);
6226 -+ kfree(fd_prot.prot_buf);
6227 - }
6228 - } else {
6229 - memset(&fd_prot, 0, sizeof(struct fd_prot));
6230 -@@ -678,7 +726,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
6231 - 0, fd_prot.prot_sg, 0);
6232 - if (rc) {
6233 - kfree(fd_prot.prot_sg);
6234 -- vfree(fd_prot.prot_buf);
6235 -+ kfree(fd_prot.prot_buf);
6236 - return rc;
6237 - }
6238 - }
6239 -@@ -714,7 +762,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
6240 -
6241 - if (ret < 0) {
6242 - kfree(fd_prot.prot_sg);
6243 -- vfree(fd_prot.prot_buf);
6244 -+ kfree(fd_prot.prot_buf);
6245 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
6246 - }
6247 -
6248 -@@ -878,48 +926,28 @@ static int fd_init_prot(struct se_device *dev)
6249 -
6250 - static int fd_format_prot(struct se_device *dev)
6251 - {
6252 -- struct fd_dev *fd_dev = FD_DEV(dev);
6253 -- struct file *prot_fd = fd_dev->fd_prot_file;
6254 -- sector_t prot_length, prot;
6255 - unsigned char *buf;
6256 -- loff_t pos = 0;
6257 - int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
6258 -- int rc, ret = 0, size, len;
6259 -+ int ret;
6260 -
6261 - if (!dev->dev_attrib.pi_prot_type) {
6262 - pr_err("Unable to format_prot while pi_prot_type == 0\n");
6263 - return -ENODEV;
6264 - }
6265 -- if (!prot_fd) {
6266 -- pr_err("Unable to locate fd_dev->fd_prot_file\n");
6267 -- return -ENODEV;
6268 -- }
6269 -
6270 - buf = vzalloc(unit_size);
6271 - if (!buf) {
6272 - pr_err("Unable to allocate FILEIO prot buf\n");
6273 - return -ENOMEM;
6274 - }
6275 -- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
6276 -- size = prot_length;
6277 -
6278 - pr_debug("Using FILEIO prot_length: %llu\n",
6279 -- (unsigned long long)prot_length);
6280 -+ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
6281 -+ dev->prot_length);
6282 -
6283 - memset(buf, 0xff, unit_size);
6284 -- for (prot = 0; prot < prot_length; prot += unit_size) {
6285 -- len = min(unit_size, size);
6286 -- rc = kernel_write(prot_fd, buf, len, pos);
6287 -- if (rc != len) {
6288 -- pr_err("vfs_write to prot file failed: %d\n", rc);
6289 -- ret = -ENODEV;
6290 -- goto out;
6291 -- }
6292 -- pos += len;
6293 -- size -= len;
6294 -- }
6295 --
6296 --out:
6297 -+ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
6298 -+ buf, unit_size);
6299 - vfree(buf);
6300 - return ret;
6301 - }
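
The fd_format_prot() hunk above replaces its open-coded write loop with the new fd_do_prot_fill() helper, which writes the 0xff protection pattern in bufsize-sized chunks until prot_length bytes are covered. A minimal userspace sketch of the same chunking pattern (hypothetical names; plain write(2) stands in for kernel_write()):

        #include <errno.h>
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        /* Fill "total" bytes of fd with 0xff via a fixed-size bounce buffer,
         * mirroring the min_t(bufsize, remaining) loop in fd_do_prot_fill(). */
        static int fill_prot_pattern(int fd, size_t total, size_t bufsize)
        {
                char buf[4096];
                size_t done = 0;

                if (bufsize > sizeof(buf))
                        return -EINVAL;
                memset(buf, 0xff, bufsize);

                while (done < total) {
                        size_t len = total - done < bufsize ? total - done : bufsize;
                        ssize_t ret = write(fd, buf, len);

                        if (ret != (ssize_t)len)
                                return ret < 0 ? -errno : -EIO; /* short write */
                        done += ret;
                }
                return 0;
        }

        int main(void)
        {
                int fd = open("/dev/null", O_WRONLY);

                printf("fill: %d\n", fill_prot_pattern(fd, 10000, 4096));
                close(fd);
                return 0;
        }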
6302 -diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
6303 -index 3e72974..755bd9b3 100644
6304 ---- a/drivers/target/target_core_sbc.c
6305 -+++ b/drivers/target/target_core_sbc.c
6306 -@@ -312,7 +312,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
6307 - return 0;
6308 - }
6309 -
6310 --static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
6311 -+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
6312 - {
6313 - unsigned char *buf, *addr;
6314 - struct scatterlist *sg;
6315 -@@ -376,7 +376,7 @@ sbc_execute_rw(struct se_cmd *cmd)
6316 - cmd->data_direction);
6317 - }
6318 -
6319 --static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
6320 -+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
6321 - {
6322 - struct se_device *dev = cmd->se_dev;
6323 -
6324 -@@ -399,7 +399,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
6325 - return TCM_NO_SENSE;
6326 - }
6327 -
6328 --static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
6329 -+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
6330 - {
6331 - struct se_device *dev = cmd->se_dev;
6332 - struct scatterlist *write_sg = NULL, *sg;
6333 -@@ -414,11 +414,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
6334 -
6335 - /*
6336 - * Handle early failure in transport_generic_request_failure(),
6337 -- * which will not have taken ->caw_mutex yet..
6338 -+ * which will not have taken ->caw_sem yet..
6339 - */
6340 -- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
6341 -+ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
6342 - return TCM_NO_SENSE;
6343 - /*
6344 -+ * Handle special case for zero-length COMPARE_AND_WRITE
6345 -+ */
6346 -+ if (!cmd->data_length)
6347 -+ goto out;
6348 -+ /*
6349 - * Immediately exit + release dev->caw_sem if command has already
6350 - * been failed with a non-zero SCSI status.
6351 - */
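
All three callbacks above gain a second parameter because the ->transport_complete_callback() hook now reports whether the command completed successfully, letting compare_and_write_callback() tell the early-failure path (which, per the comment, may not hold ->caw_sem yet) apart from normal completion. A small sketch of the widened callback contract (types and names are illustrative, not the target-core API):

        #include <stdbool.h>
        #include <stdio.h>

        struct cmd { bool sem_held; };

        typedef int (*complete_cb)(struct cmd *cmd, bool success);

        static int caw_callback(struct cmd *cmd, bool success)
        {
                if (!success) {
                        /* failure path: release what is held and bail out */
                        cmd->sem_held = false;
                        return 0;
                }
                /* normal path: compare-and-write post-processing, then release */
                cmd->sem_held = false;
                return 0;
        }

        int main(void)
        {
                struct cmd c = { .sem_held = true };
                complete_cb cb = caw_callback;

                cb(&c, true);   /* as called from target_complete_ok_work() */
                cb(&c, false);  /* as called from the request-failure path */
                printf("sem held: %d\n", c.sem_held);
                return 0;
        }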
6352 -diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
6353 -index ac3cbab..f786de0 100644
6354 ---- a/drivers/target/target_core_transport.c
6355 -+++ b/drivers/target/target_core_transport.c
6356 -@@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
6357 - transport_complete_task_attr(cmd);
6358 - /*
6359 - * Handle special case for COMPARE_AND_WRITE failure, where the
6360 -- * callback is expected to drop the per device ->caw_mutex.
6361 -+ * callback is expected to drop the per device ->caw_sem.
6362 - */
6363 - if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
6364 - cmd->transport_complete_callback)
6365 -- cmd->transport_complete_callback(cmd);
6366 -+ cmd->transport_complete_callback(cmd, false);
6367 -
6368 - switch (sense_reason) {
6369 - case TCM_NON_EXISTENT_LUN:
6370 -@@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
6371 - if (cmd->transport_complete_callback) {
6372 - sense_reason_t rc;
6373 -
6374 -- rc = cmd->transport_complete_callback(cmd);
6375 -+ rc = cmd->transport_complete_callback(cmd, true);
6376 - if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
6377 -+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
6378 -+ !cmd->data_length)
6379 -+ goto queue_rsp;
6380 -+
6381 - return;
6382 - } else if (rc) {
6383 - ret = transport_send_check_condition_and_sense(cmd,
6384 -@@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
6385 - }
6386 - }
6387 -
6388 -+queue_rsp:
6389 - switch (cmd->data_direction) {
6390 - case DMA_FROM_DEVICE:
6391 - spin_lock(&cmd->se_lun->lun_sep_lock);
6392 -@@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
6393 - static inline void transport_free_pages(struct se_cmd *cmd)
6394 - {
6395 - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
6396 -+ /*
6397 -+ * Release special case READ buffer payload required for
6398 -+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
6399 -+ */
6400 -+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
6401 -+ transport_free_sgl(cmd->t_bidi_data_sg,
6402 -+ cmd->t_bidi_data_nents);
6403 -+ cmd->t_bidi_data_sg = NULL;
6404 -+ cmd->t_bidi_data_nents = 0;
6405 -+ }
6406 - transport_reset_sgl_orig(cmd);
6407 - return;
6408 - }
6409 -@@ -2246,6 +2261,7 @@ sense_reason_t
6410 - transport_generic_new_cmd(struct se_cmd *cmd)
6411 - {
6412 - int ret = 0;
6413 -+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
6414 -
6415 - /*
6416 - * Determine if the TCM fabric module has already allocated physical

6417 -@@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
6418 - */
6419 - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
6420 - cmd->data_length) {
6421 -- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
6422 -
6423 - if ((cmd->se_cmd_flags & SCF_BIDI) ||
6424 - (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
6425 -@@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
6426 - cmd->data_length, zero_flag);
6427 - if (ret < 0)
6428 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
6429 -+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
6430 -+ cmd->data_length) {
6431 -+ /*
6432 -+ * Special case for COMPARE_AND_WRITE with fabrics
6433 -+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
6434 -+ */
6435 -+ u32 caw_length = cmd->t_task_nolb *
6436 -+ cmd->se_dev->dev_attrib.block_size;
6437 -+
6438 -+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
6439 -+ &cmd->t_bidi_data_nents,
6440 -+ caw_length, zero_flag);
6441 -+ if (ret < 0)
6442 -+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
6443 - }
6444 - /*
6445 - * If this command is not a write we can execute it right here,
6446 -diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
6447 -index deae122..d465ace 100644
6448 ---- a/drivers/tty/serial/8250/8250_core.c
6449 -+++ b/drivers/tty/serial/8250/8250_core.c
6450 -@@ -3444,7 +3444,8 @@ void serial8250_suspend_port(int line)
6451 - port->type != PORT_8250) {
6452 - unsigned char canary = 0xa5;
6453 - serial_out(up, UART_SCR, canary);
6454 -- up->canary = canary;
6455 -+ if (serial_in(up, UART_SCR) == canary)
6456 -+ up->canary = canary;
6457 - }
6458 -
6459 - uart_suspend_port(&serial8250_reg, port);
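
The suspend hunk above stops trusting the scratch-register canary blindly: it is recorded only if reading UART_SCR back returns the value just written, since some UARTs have no working scratch register. The same read-back idiom in a standalone sketch (the register accessors are stand-ins for the real serial I/O helpers):

        #include <stdio.h>

        static unsigned char scr;       /* models the UART_SCR register */

        static void serial_out(unsigned char v) { scr = v; }
        static unsigned char serial_in(void)    { return scr; }

        int main(void)
        {
                unsigned char canary = 0xa5, saved = 0;

                serial_out(canary);
                if (serial_in() == canary)      /* the check the hunk adds */
                        saved = canary;
                printf("canary %s\n", saved ? "recorded" : "unsupported");
                return 0;
        }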
6460 -diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
6461 -index 6ae5b85..7a80250 100644
6462 ---- a/drivers/tty/serial/8250/8250_dw.c
6463 -+++ b/drivers/tty/serial/8250/8250_dw.c
6464 -@@ -629,6 +629,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
6465 - { "80860F0A", 0 },
6466 - { "8086228A", 0 },
6467 - { "APMC0D08", 0},
6468 -+ { "AMD0020", 0 },
6469 - { },
6470 - };
6471 - MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
6472 -diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
6473 -index 0eb29b1..2306191 100644
6474 ---- a/drivers/tty/serial/imx.c
6475 -+++ b/drivers/tty/serial/imx.c
6476 -@@ -818,7 +818,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
6477 - if (sts2 & USR2_ORE) {
6478 - dev_err(sport->port.dev, "Rx FIFO overrun\n");
6479 - sport->port.icount.overrun++;
6480 -- writel(sts2 | USR2_ORE, sport->port.membase + USR2);
6481 -+ writel(USR2_ORE, sport->port.membase + USR2);
6482 - }
6483 -
6484 - return IRQ_HANDLED;
6485 -@@ -1181,10 +1181,12 @@ static int imx_startup(struct uart_port *port)
6486 - imx_uart_dma_init(sport);
6487 -
6488 - spin_lock_irqsave(&sport->port.lock, flags);
6489 -+
6490 - /*
6491 - * Finally, clear and enable interrupts
6492 - */
6493 - writel(USR1_RTSD, sport->port.membase + USR1);
6494 -+ writel(USR2_ORE, sport->port.membase + USR2);
6495 -
6496 - if (sport->dma_is_inited && !sport->dma_is_enabled)
6497 - imx_enable_dma(sport);
6498 -@@ -1199,10 +1201,6 @@ static int imx_startup(struct uart_port *port)
6499 -
6500 - writel(temp, sport->port.membase + UCR1);
6501 -
6502 -- /* Clear any pending ORE flag before enabling interrupt */
6503 -- temp = readl(sport->port.membase + USR2);
6504 -- writel(temp | USR2_ORE, sport->port.membase + USR2);
6505 --
6506 - temp = readl(sport->port.membase + UCR4);
6507 - temp |= UCR4_OREN;
6508 - writel(temp, sport->port.membase + UCR4);
6509 -diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
6510 -index a051a7a..a81f9dd 100644
6511 ---- a/drivers/usb/class/cdc-wdm.c
6512 -+++ b/drivers/usb/class/cdc-wdm.c
6513 -@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
6514 - case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
6515 - dev_dbg(&desc->intf->dev,
6516 - "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
6517 -- dr->wIndex, dr->wLength);
6518 -+ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
6519 - break;
6520 -
6521 - case USB_CDC_NOTIFY_NETWORK_CONNECTION:
6522 -@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
6523 - clear_bit(WDM_POLL_RUNNING, &desc->flags);
6524 - dev_err(&desc->intf->dev,
6525 - "unknown notification %d received: index %d len %d\n",
6526 -- dr->bNotificationType, dr->wIndex, dr->wLength);
6527 -+ dr->bNotificationType,
6528 -+ le16_to_cpu(dr->wIndex),
6529 -+ le16_to_cpu(dr->wLength));
6530 - goto exit;
6531 - }
6532 -
6533 -@@ -408,7 +410,7 @@ static ssize_t wdm_write
6534 - USB_RECIP_INTERFACE);
6535 - req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
6536 - req->wValue = 0;
6537 -- req->wIndex = desc->inum;
6538 -+ req->wIndex = desc->inum; /* already converted */
6539 - req->wLength = cpu_to_le16(count);
6540 - set_bit(WDM_IN_USE, &desc->flags);
6541 - desc->outbuf = buf;
6542 -@@ -422,7 +424,7 @@ static ssize_t wdm_write
6543 - rv = usb_translate_errors(rv);
6544 - } else {
6545 - dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
6546 -- req->wIndex);
6547 -+ le16_to_cpu(req->wIndex));
6548 - }
6549 - out:
6550 - usb_autopm_put_interface(desc->intf);
6551 -@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
6552 - desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
6553 - desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
6554 - desc->irq->wValue = 0;
6555 -- desc->irq->wIndex = desc->inum;
6556 -+ desc->irq->wIndex = desc->inum; /* already converted */
6557 - desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
6558 -
6559 - usb_fill_control_urb(
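
The cdc-wdm hunks are all one fix: wIndex and wLength arrive little-endian on the wire, so they must pass through le16_to_cpu() before being logged, while fields the driver stores already converted (desc->inum, per the new comments) are left alone. A tiny sketch of the conversion, with glibc's <endian.h> standing in for the kernel helpers:

        #include <endian.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t windex = htole16(3);   /* little-endian wire field */

                /* printing windex raw would be wrong on big-endian hosts */
                printf("index %u\n", (unsigned)le16toh(windex));
                return 0;
        }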
6560 -diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
6561 -index d7c3d5a..3b71516 100644
6562 ---- a/drivers/usb/core/hub.c
6563 -+++ b/drivers/usb/core/hub.c
6564 -@@ -3406,10 +3406,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
6565 - if (status) {
6566 - dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
6567 - } else {
6568 -- /* drive resume for at least 20 msec */
6569 -+ /* drive resume for USB_RESUME_TIMEOUT msec */
6570 - dev_dbg(&udev->dev, "usb %sresume\n",
6571 - (PMSG_IS_AUTO(msg) ? "auto-" : ""));
6572 -- msleep(25);
6573 -+ msleep(USB_RESUME_TIMEOUT);
6574 -
6575 - /* Virtual root hubs can trigger on GET_PORT_STATUS to
6576 - * stop resume signaling. Then finish the resume
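
This hub.c change and the host-controller hunks that follow are a single theme: every hard-coded 20/25/50 ms resume-signaling delay becomes the shared USB_RESUME_TIMEOUT constant, so all HCDs drive resume long enough for devices that need more than the bare USB 2.0 minimum. A sketch of the deadline computation they converge on (the HZ and 40 ms values here are assumptions for illustration, not quoted from the headers):

        #include <stdio.h>

        #define HZ                      250
        #define USB_RESUME_TIMEOUT      40      /* ms; illustrative value */
        #define msecs_to_jiffies(m)     ((m) * HZ / 1000)

        int main(void)
        {
                unsigned long jiffies = 100000; /* pretend current tick count */
                unsigned long reset_done = jiffies +
                                           msecs_to_jiffies(USB_RESUME_TIMEOUT);

                printf("stop resume signaling at jiffy %lu\n", reset_done);
                return 0;
        }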
6577 -diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
6578 -index c78c874..758b7e0 100644
6579 ---- a/drivers/usb/dwc2/hcd.c
6580 -+++ b/drivers/usb/dwc2/hcd.c
6581 -@@ -1521,7 +1521,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
6582 - dev_dbg(hsotg->dev,
6583 - "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
6584 - writel(0, hsotg->regs + PCGCTL);
6585 -- usleep_range(20000, 40000);
6586 -+ msleep(USB_RESUME_TIMEOUT);
6587 -
6588 - hprt0 = dwc2_read_hprt0(hsotg);
6589 - hprt0 |= HPRT0_RES;
6590 -diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
6591 -index 9054598..6385c19 100644
6592 ---- a/drivers/usb/gadget/legacy/printer.c
6593 -+++ b/drivers/usb/gadget/legacy/printer.c
6594 -@@ -1031,6 +1031,15 @@ unknown:
6595 - break;
6596 - }
6597 - /* host either stalls (value < 0) or reports success */
6598 -+ if (value >= 0) {
6599 -+ req->length = value;
6600 -+ req->zero = value < wLength;
6601 -+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
6602 -+ if (value < 0) {
6603 -+ ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
6604 -+ req->status = 0;
6605 -+ }
6606 -+ }
6607 - return value;
6608 - }
6609 -
6610 -diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
6611 -index 85e56d1..f4d88df 100644
6612 ---- a/drivers/usb/host/ehci-hcd.c
6613 -+++ b/drivers/usb/host/ehci-hcd.c
6614 -@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
6615 - ehci->reset_done[i] == 0))
6616 - continue;
6617 -
6618 -- /* start 20 msec resume signaling from this port,
6619 -- * and make hub_wq collect PORT_STAT_C_SUSPEND to
6620 -- * stop that signaling. Use 5 ms extra for safety,
6621 -- * like usb_port_resume() does.
6622 -+ /* start USB_RESUME_TIMEOUT msec resume signaling from
6623 -+ * this port, and make hub_wq collect
6624 -+ * PORT_STAT_C_SUSPEND to stop that signaling.
6625 - */
6626 -- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
6627 -+ ehci->reset_done[i] = jiffies +
6628 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
6629 - set_bit(i, &ehci->resuming_ports);
6630 - ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
6631 - usb_hcd_start_port_resume(&hcd->self, i);
6632 -diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
6633 -index 87cf86f..7354d01 100644
6634 ---- a/drivers/usb/host/ehci-hub.c
6635 -+++ b/drivers/usb/host/ehci-hub.c
6636 -@@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
6637 - ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
6638 - }
6639 -
6640 -- /* msleep for 20ms only if code is trying to resume port */
6641 -+ /*
6642 -+ * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
6643 -+ * port
6644 -+ */
6645 - if (resume_needed) {
6646 - spin_unlock_irq(&ehci->lock);
6647 -- msleep(20);
6648 -+ msleep(USB_RESUME_TIMEOUT);
6649 - spin_lock_irq(&ehci->lock);
6650 - if (ehci->shutdown)
6651 - goto shutdown;
6652 -@@ -942,7 +945,7 @@ int ehci_hub_control(
6653 - temp &= ~PORT_WAKE_BITS;
6654 - ehci_writel(ehci, temp | PORT_RESUME, status_reg);
6655 - ehci->reset_done[wIndex] = jiffies
6656 -- + msecs_to_jiffies(20);
6657 -+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6658 - set_bit(wIndex, &ehci->resuming_ports);
6659 - usb_hcd_start_port_resume(&hcd->self, wIndex);
6660 - break;
6661 -diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
6662 -index 475b21f..7a6681f 100644
6663 ---- a/drivers/usb/host/fotg210-hcd.c
6664 -+++ b/drivers/usb/host/fotg210-hcd.c
6665 -@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
6666 - /* resume signaling for 20 msec */
6667 - fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
6668 - fotg210->reset_done[wIndex] = jiffies
6669 -- + msecs_to_jiffies(20);
6670 -+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6671 - break;
6672 - case USB_PORT_FEAT_C_SUSPEND:
6673 - clear_bit(wIndex, &fotg210->port_c_suspend);
6674 -diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
6675 -index a83eefe..ba77e2e 100644
6676 ---- a/drivers/usb/host/fusbh200-hcd.c
6677 -+++ b/drivers/usb/host/fusbh200-hcd.c
6678 -@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
6679 - if ((temp & PORT_PE) == 0)
6680 - goto error;
6681 -
6682 -- /* resume signaling for 20 msec */
6683 - fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
6684 - fusbh200->reset_done[wIndex] = jiffies
6685 -- + msecs_to_jiffies(20);
6686 -+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6687 - break;
6688 - case USB_PORT_FEAT_C_SUSPEND:
6689 - clear_bit(wIndex, &fusbh200->port_c_suspend);
6690 -diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
6691 -index 113d0cc..9ef5644 100644
6692 ---- a/drivers/usb/host/isp116x-hcd.c
6693 -+++ b/drivers/usb/host/isp116x-hcd.c
6694 -@@ -1490,7 +1490,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
6695 - spin_unlock_irq(&isp116x->lock);
6696 -
6697 - hcd->state = HC_STATE_RESUMING;
6698 -- msleep(20);
6699 -+ msleep(USB_RESUME_TIMEOUT);
6700 -
6701 - /* Go operational */
6702 - spin_lock_irq(&isp116x->lock);
6703 -diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
6704 -index ef7efb2..28a2866 100644
6705 ---- a/drivers/usb/host/oxu210hp-hcd.c
6706 -+++ b/drivers/usb/host/oxu210hp-hcd.c
6707 -@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
6708 - || oxu->reset_done[i] != 0)
6709 - continue;
6710 -
6711 -- /* start 20 msec resume signaling from this port,
6712 -- * and make hub_wq collect PORT_STAT_C_SUSPEND to
6713 -+ /* start USB_RESUME_TIMEOUT resume signaling from this
6714 -+ * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
6715 - * stop that signaling.
6716 - */
6717 -- oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
6718 -+ oxu->reset_done[i] = jiffies +
6719 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
6720 - oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
6721 - mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
6722 - }
6723 -diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
6724 -index bdc82fe..54a4170 100644
6725 ---- a/drivers/usb/host/r8a66597-hcd.c
6726 -+++ b/drivers/usb/host/r8a66597-hcd.c
6727 -@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
6728 - rh->port &= ~USB_PORT_STAT_SUSPEND;
6729 - rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
6730 - r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
6731 -- msleep(50);
6732 -+ msleep(USB_RESUME_TIMEOUT);
6733 - r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
6734 - }
6735 -
6736 -diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
6737 -index 4f4ba1e..9118cd8 100644
6738 ---- a/drivers/usb/host/sl811-hcd.c
6739 -+++ b/drivers/usb/host/sl811-hcd.c
6740 -@@ -1259,7 +1259,7 @@ sl811h_hub_control(
6741 - sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
6742 -
6743 - mod_timer(&sl811->timer, jiffies
6744 -- + msecs_to_jiffies(20));
6745 -+ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
6746 - break;
6747 - case USB_PORT_FEAT_POWER:
6748 - port_power(sl811, 0);
6749 -diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
6750 -index 19ba5ea..7b3d1af 100644
6751 ---- a/drivers/usb/host/uhci-hub.c
6752 -+++ b/drivers/usb/host/uhci-hub.c
6753 -@@ -166,7 +166,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
6754 - /* Port received a wakeup request */
6755 - set_bit(port, &uhci->resuming_ports);
6756 - uhci->ports_timeout = jiffies +
6757 -- msecs_to_jiffies(25);
6758 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
6759 - usb_hcd_start_port_resume(
6760 - &uhci_to_hcd(uhci)->self, port);
6761 -
6762 -@@ -338,7 +338,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
6763 - uhci_finish_suspend(uhci, port, port_addr);
6764 -
6765 - /* USB v2.0 7.1.7.5 */
6766 -- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
6767 -+ uhci->ports_timeout = jiffies +
6768 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
6769 - break;
6770 - case USB_PORT_FEAT_POWER:
6771 - /* UHCI has no power switching */
6772 -diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
6773 -index 73485fa..eeedde8 100644
6774 ---- a/drivers/usb/host/xhci-ring.c
6775 -+++ b/drivers/usb/host/xhci-ring.c
6776 -@@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
6777 - } else {
6778 - xhci_dbg(xhci, "resume HS port %d\n", port_id);
6779 - bus_state->resume_done[faked_port_index] = jiffies +
6780 -- msecs_to_jiffies(20);
6781 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
6782 - set_bit(faked_port_index, &bus_state->resuming_ports);
6783 - mod_timer(&hcd->rh_timer,
6784 - bus_state->resume_done[faked_port_index]);
6785 -diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
6786 -index 3cb98b1..7911b6b 100644
6787 ---- a/drivers/usb/isp1760/isp1760-hcd.c
6788 -+++ b/drivers/usb/isp1760/isp1760-hcd.c
6789 -@@ -1869,7 +1869,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
6790 - reg_write32(hcd->regs, HC_PORTSC1,
6791 - temp | PORT_RESUME);
6792 - priv->reset_done = jiffies +
6793 -- msecs_to_jiffies(20);
6794 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
6795 - }
6796 - break;
6797 - case USB_PORT_FEAT_C_SUSPEND:
6798 -diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
6799 -index 067920f..ec0ee3b 100644
6800 ---- a/drivers/usb/musb/musb_core.c
6801 -+++ b/drivers/usb/musb/musb_core.c
6802 -@@ -99,6 +99,7 @@
6803 - #include <linux/platform_device.h>
6804 - #include <linux/io.h>
6805 - #include <linux/dma-mapping.h>
6806 -+#include <linux/usb.h>
6807 -
6808 - #include "musb_core.h"
6809 -
6810 -@@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
6811 - (USB_PORT_STAT_C_SUSPEND << 16)
6812 - | MUSB_PORT_STAT_RESUME;
6813 - musb->rh_timer = jiffies
6814 -- + msecs_to_jiffies(20);
6815 -+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6816 - musb->need_finish_resume = 1;
6817 -
6818 - musb->xceiv->otg->state = OTG_STATE_A_HOST;
6819 -@@ -1597,16 +1598,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
6820 - is_host_active(musb) ? "host" : "peripheral",
6821 - musb->int_usb, musb->int_tx, musb->int_rx);
6822 -
6823 -- /* the core can interrupt us for multiple reasons; docs have
6824 -- * a generic interrupt flowchart to follow
6825 -+ /**
6826 -+ * According to Mentor Graphics' documentation, flowchart on page 98,
6827 -+ * IRQ should be handled as follows:
6828 -+ *
6829 -+ * . Resume IRQ
6830 -+ * . Session Request IRQ
6831 -+ * . VBUS Error IRQ
6832 -+ * . Suspend IRQ
6833 -+ * . Connect IRQ
6834 -+ * . Disconnect IRQ
6835 -+ * . Reset/Babble IRQ
6836 -+ * . SOF IRQ (we're not using this one)
6837 -+ * . Endpoint 0 IRQ
6838 -+ * . TX Endpoints
6839 -+ * . RX Endpoints
6840 -+ *
6841 -+ * We will be following that flowchart in order to avoid any problems
6842 -+ * that might arise with the internal Finite State Machine.
6843 - */
6844 -+
6845 - if (musb->int_usb)
6846 - retval |= musb_stage0_irq(musb, musb->int_usb,
6847 - devctl);
6848 -
6849 -- /* "stage 1" is handling endpoint irqs */
6850 --
6851 -- /* handle endpoint 0 first */
6852 - if (musb->int_tx & 1) {
6853 - if (is_host_active(musb))
6854 - retval |= musb_h_ep0_irq(musb);
6855 -@@ -1614,37 +1629,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
6856 - retval |= musb_g_ep0_irq(musb);
6857 - }
6858 -
6859 -- /* RX on endpoints 1-15 */
6860 -- reg = musb->int_rx >> 1;
6861 -+ reg = musb->int_tx >> 1;
6862 - ep_num = 1;
6863 - while (reg) {
6864 - if (reg & 1) {
6865 -- /* musb_ep_select(musb->mregs, ep_num); */
6866 -- /* REVISIT just retval = ep->rx_irq(...) */
6867 - retval = IRQ_HANDLED;
6868 - if (is_host_active(musb))
6869 -- musb_host_rx(musb, ep_num);
6870 -+ musb_host_tx(musb, ep_num);
6871 - else
6872 -- musb_g_rx(musb, ep_num);
6873 -+ musb_g_tx(musb, ep_num);
6874 - }
6875 --
6876 - reg >>= 1;
6877 - ep_num++;
6878 - }
6879 -
6880 -- /* TX on endpoints 1-15 */
6881 -- reg = musb->int_tx >> 1;
6882 -+ reg = musb->int_rx >> 1;
6883 - ep_num = 1;
6884 - while (reg) {
6885 - if (reg & 1) {
6886 -- /* musb_ep_select(musb->mregs, ep_num); */
6887 -- /* REVISIT just retval |= ep->tx_irq(...) */
6888 - retval = IRQ_HANDLED;
6889 - if (is_host_active(musb))
6890 -- musb_host_tx(musb, ep_num);
6891 -+ musb_host_rx(musb, ep_num);
6892 - else
6893 -- musb_g_tx(musb, ep_num);
6894 -+ musb_g_rx(musb, ep_num);
6895 - }
6896 -+
6897 - reg >>= 1;
6898 - ep_num++;
6899 - }
6900 -@@ -2463,7 +2472,7 @@ static int musb_resume(struct device *dev)
6901 - if (musb->need_finish_resume) {
6902 - musb->need_finish_resume = 0;
6903 - schedule_delayed_work(&musb->finish_resume_work,
6904 -- msecs_to_jiffies(20));
6905 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
6906 - }
6907 -
6908 - /*
6909 -@@ -2506,7 +2515,7 @@ static int musb_runtime_resume(struct device *dev)
6910 - if (musb->need_finish_resume) {
6911 - musb->need_finish_resume = 0;
6912 - schedule_delayed_work(&musb->finish_resume_work,
6913 -- msecs_to_jiffies(20));
6914 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
6915 - }
6916 -
6917 - return 0;
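
Besides documenting the flowchart, the musb_interrupt() hunks swap the two endpoint loops so TX endpoints are serviced before RX, matching the order in the new comment. The loop shape itself is a plain walk over an IRQ status bitmask; a sketch with fake status values and no hardware access:

        #include <stdio.h>

        static void service(const char *dir, unsigned int ep)
        {
                printf("%s ep %u\n", dir, ep);
        }

        /* bit 0 is endpoint 0 (handled separately); walk bits 1..n in order */
        static void walk(unsigned int mask, const char *dir)
        {
                unsigned int ep = 1;

                for (mask >>= 1; mask; mask >>= 1, ep++)
                        if (mask & 1)
                                service(dir, ep);
        }

        int main(void)
        {
                unsigned int int_tx = 0x06, int_rx = 0x0a;      /* fake status */

                walk(int_tx, "tx");     /* TX first, per the flowchart */
                walk(int_rx, "rx");
                return 0;
        }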
6918 -diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
6919 -index 294e159..5428ed1 100644
6920 ---- a/drivers/usb/musb/musb_virthub.c
6921 -+++ b/drivers/usb/musb/musb_virthub.c
6922 -@@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
6923 - /* later, GetPortStatus will stop RESUME signaling */
6924 - musb->port1_status |= MUSB_PORT_STAT_RESUME;
6925 - schedule_delayed_work(&musb->finish_resume_work,
6926 -- msecs_to_jiffies(20));
6927 -+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
6928 - }
6929 - }
6930 -
6931 -diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
6932 -index 2f9735b..d1cd6b5 100644
6933 ---- a/drivers/usb/phy/phy.c
6934 -+++ b/drivers/usb/phy/phy.c
6935 -@@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
6936 -
6937 - static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
6938 - {
6939 -- return res == match_data;
6940 -+ struct usb_phy **phy = res;
6941 -+
6942 -+ return *phy == match_data;
6943 - }
6944 -
6945 - /**
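
The one-line phy.c fix is easy to miss: devres hands the match callback a pointer to the stored resource, and here the resource is itself a usb_phy pointer, so res must be dereferenced once before comparing against match_data. A standalone sketch of the difference (simplified types):

        #include <stdbool.h>
        #include <stdio.h>

        struct phy { int id; };

        /* devres-style match: "res" points at the slot holding the pointer */
        static bool match(void *res, void *match_data)
        {
                struct phy **slot = res;

                return *slot == match_data;     /* compare the stored pointer */
        }

        int main(void)
        {
                struct phy p = { 1 };
                struct phy *stored = &p;        /* what the release slot holds */

                printf("old compare: %d\n", (void *)&stored == (void *)&p); /* 0 */
                printf("new compare: %d\n", (int)match(&stored, &p));       /* 1 */
                return 0;
        }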
6946 -diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
6947 -index 995986b..d925f55 100644
6948 ---- a/fs/binfmt_elf.c
6949 -+++ b/fs/binfmt_elf.c
6950 -@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
6951 - i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
6952 - int elf_prot = 0, elf_flags;
6953 - unsigned long k, vaddr;
6954 -+ unsigned long total_size = 0;
6955 -
6956 - if (elf_ppnt->p_type != PT_LOAD)
6957 - continue;
6958 -@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
6959 - #else
6960 - load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
6961 - #endif
6962 -+ total_size = total_mapping_size(elf_phdata,
6963 -+ loc->elf_ex.e_phnum);
6964 -+ if (!total_size) {
6965 -+ error = -EINVAL;
6966 -+ goto out_free_dentry;
6967 -+ }
6968 - }
6969 -
6970 - error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
6971 -- elf_prot, elf_flags, 0);
6972 -+ elf_prot, elf_flags, total_size);
6973 - if (BAD_ADDR(error)) {
6974 - retval = IS_ERR((void *)error) ?
6975 - PTR_ERR((void*)error) : -EINVAL;
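
For ET_DYN binaries the loader now reserves the span of all PT_LOAD segments up front by passing total_size to elf_map(), and rejects a zero span with -EINVAL, so later segments cannot land on top of unrelated mappings. A simplified sketch of the span computation (hypothetical phdr layout; the kernel's version also page-aligns the bounds):

        #include <stdio.h>

        struct phdr { unsigned long p_vaddr, p_memsz; int is_load; };

        static unsigned long total_mapping_size(const struct phdr *ph, int n)
        {
                unsigned long lo = ~0UL, hi = 0;
                int i;

                for (i = 0; i < n; i++) {
                        if (!ph[i].is_load)
                                continue;
                        if (ph[i].p_vaddr < lo)
                                lo = ph[i].p_vaddr;
                        if (ph[i].p_vaddr + ph[i].p_memsz > hi)
                                hi = ph[i].p_vaddr + ph[i].p_memsz;
                }
                return hi > lo ? hi - lo : 0;   /* 0 means no PT_LOAD: -EINVAL */
        }

        int main(void)
        {
                struct phdr ph[] = { { 0x0, 0x1000, 1 }, { 0x200000, 0x800, 1 } };

                printf("span 0x%lx\n", total_mapping_size(ph, 2));
                return 0;
        }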
6976 -diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
6977 -index 8b353ad..0a795c9 100644
6978 ---- a/fs/btrfs/extent-tree.c
6979 -+++ b/fs/btrfs/extent-tree.c
6980 -@@ -6956,12 +6956,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6981 - return -ENOSPC;
6982 - }
6983 -
6984 -- if (btrfs_test_opt(root, DISCARD))
6985 -- ret = btrfs_discard_extent(root, start, len, NULL);
6986 --
6987 - if (pin)
6988 - pin_down_extent(root, cache, start, len, 1);
6989 - else {
6990 -+ if (btrfs_test_opt(root, DISCARD))
6991 -+ ret = btrfs_discard_extent(root, start, len, NULL);
6992 - btrfs_add_free_space(cache, start, len);
6993 - btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6994 - }
6995 -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
6996 -index 74609b9..f23d4be 100644
6997 ---- a/fs/btrfs/ioctl.c
6998 -+++ b/fs/btrfs/ioctl.c
6999 -@@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
7000 - if (src == dst)
7001 - return -EINVAL;
7002 -
7003 -+ if (len == 0)
7004 -+ return 0;
7005 -+
7006 - btrfs_double_lock(src, loff, dst, dst_loff, len);
7007 -
7008 - ret = extent_same_check_offsets(src, loff, len);
7009 -@@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
7010 - if (off + len == src->i_size)
7011 - len = ALIGN(src->i_size, bs) - off;
7012 -
7013 -+ if (len == 0) {
7014 -+ ret = 0;
7015 -+ goto out_unlock;
7016 -+ }
7017 -+
7018 - /* verify the end result is block aligned */
7019 - if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
7020 - !IS_ALIGNED(destoff, bs))
7021 -diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
7022 -index 883b936..45ea704 100644
7023 ---- a/fs/btrfs/xattr.c
7024 -+++ b/fs/btrfs/xattr.c
7025 -@@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
7026 - /*
7027 - * Check if the attribute is in a supported namespace.
7028 - *
7029 -- * This applied after the check for the synthetic attributes in the system
7030 -+ * This is applied after the check for the synthetic attributes in the system
7031 - * namespace.
7032 - */
7033 --static bool btrfs_is_valid_xattr(const char *name)
7034 -+static int btrfs_is_valid_xattr(const char *name)
7035 - {
7036 -- return !strncmp(name, XATTR_SECURITY_PREFIX,
7037 -- XATTR_SECURITY_PREFIX_LEN) ||
7038 -- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
7039 -- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
7040 -- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
7041 -- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
7042 -+ int len = strlen(name);
7043 -+ int prefixlen = 0;
7044 -+
7045 -+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
7046 -+ XATTR_SECURITY_PREFIX_LEN))
7047 -+ prefixlen = XATTR_SECURITY_PREFIX_LEN;
7048 -+ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
7049 -+ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
7050 -+ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
7051 -+ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
7052 -+ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
7053 -+ prefixlen = XATTR_USER_PREFIX_LEN;
7054 -+ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
7055 -+ prefixlen = XATTR_BTRFS_PREFIX_LEN;
7056 -+ else
7057 -+ return -EOPNOTSUPP;
7058 -+
7059 -+ /*
7060 -+ * The name cannot consist of just prefix
7061 -+ */
7062 -+ if (len <= prefixlen)
7063 -+ return -EINVAL;
7064 -+
7065 -+ return 0;
7066 - }
7067 -
7068 - ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
7069 - void *buffer, size_t size)
7070 - {
7071 -+ int ret;
7072 -+
7073 - /*
7074 - * If this is a request for a synthetic attribute in the system.*
7075 - * namespace use the generic infrastructure to resolve a handler
7076 -@@ -388,8 +408,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
7077 - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
7078 - return generic_getxattr(dentry, name, buffer, size);
7079 -
7080 -- if (!btrfs_is_valid_xattr(name))
7081 -- return -EOPNOTSUPP;
7082 -+ ret = btrfs_is_valid_xattr(name);
7083 -+ if (ret)
7084 -+ return ret;
7085 - return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
7086 - }
7087 -
7088 -@@ -397,6 +418,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
7089 - size_t size, int flags)
7090 - {
7091 - struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
7092 -+ int ret;
7093 -
7094 - /*
7095 - * The permission on security.* and system.* is not checked
7096 -@@ -413,8 +435,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
7097 - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
7098 - return generic_setxattr(dentry, name, value, size, flags);
7099 -
7100 -- if (!btrfs_is_valid_xattr(name))
7101 -- return -EOPNOTSUPP;
7102 -+ ret = btrfs_is_valid_xattr(name);
7103 -+ if (ret)
7104 -+ return ret;
7105 -
7106 - if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
7107 - return btrfs_set_prop(dentry->d_inode, name,
7108 -@@ -430,6 +453,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
7109 - int btrfs_removexattr(struct dentry *dentry, const char *name)
7110 - {
7111 - struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
7112 -+ int ret;
7113 -
7114 - /*
7115 - * The permission on security.* and system.* is not checked
7116 -@@ -446,8 +470,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
7117 - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
7118 - return generic_removexattr(dentry, name);
7119 -
7120 -- if (!btrfs_is_valid_xattr(name))
7121 -- return -EOPNOTSUPP;
7122 -+ ret = btrfs_is_valid_xattr(name);
7123 -+ if (ret)
7124 -+ return ret;
7125 -
7126 - if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
7127 - return btrfs_set_prop(dentry->d_inode, name,
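
The btrfs_is_valid_xattr() rework changes both the return type and the semantics: an unknown namespace still yields -EOPNOTSUPP, but a name that is nothing beyond its prefix (e.g. just "user.") now fails with -EINVAL instead of reaching the xattr code. A sketch of the distinction, using a reduced prefix list rather than the full set the patch checks:

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>

        static int valid_xattr(const char *name)
        {
                static const char *prefixes[] = { "user.", "trusted.", "security." };
                size_t i;

                for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
                        size_t plen = strlen(prefixes[i]);

                        if (!strncmp(name, prefixes[i], plen))
                                return strlen(name) > plen ? 0 : -EINVAL;
                }
                return -EOPNOTSUPP;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       valid_xattr("user.foo"), /* 0 */
                       valid_xattr("user."),    /* -EINVAL */
                       valid_xattr("weird.x")); /* -EOPNOTSUPP */
                return 0;
        }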
7128 -diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
7129 -index 28fe71a..aae7011 100644
7130 ---- a/fs/ext4/namei.c
7131 -+++ b/fs/ext4/namei.c
7132 -@@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
7133 - struct inode *inode)
7134 - {
7135 - struct inode *dir = dentry->d_parent->d_inode;
7136 -- struct buffer_head *bh;
7137 -+ struct buffer_head *bh = NULL;
7138 - struct ext4_dir_entry_2 *de;
7139 - struct ext4_dir_entry_tail *t;
7140 - struct super_block *sb;
7141 -@@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
7142 - return retval;
7143 - if (retval == 1) {
7144 - retval = 0;
7145 -- return retval;
7146 -+ goto out;
7147 - }
7148 - }
7149 -
7150 - if (is_dx(dir)) {
7151 - retval = ext4_dx_add_entry(handle, dentry, inode);
7152 - if (!retval || (retval != ERR_BAD_DX_DIR))
7153 -- return retval;
7154 -+ goto out;
7155 - ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
7156 - dx_fallback++;
7157 - ext4_mark_inode_dirty(handle, dir);
7158 -@@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
7159 - return PTR_ERR(bh);
7160 -
7161 - retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
7162 -- if (retval != -ENOSPC) {
7163 -- brelse(bh);
7164 -- return retval;
7165 -- }
7166 -+ if (retval != -ENOSPC)
7167 -+ goto out;
7168 -
7169 - if (blocks == 1 && !dx_fallback &&
7170 -- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
7171 -- return make_indexed_dir(handle, dentry, inode, bh);
7172 -+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
7173 -+ retval = make_indexed_dir(handle, dentry, inode, bh);
7174 -+ bh = NULL; /* make_indexed_dir releases bh */
7175 -+ goto out;
7176 -+ }
7177 - brelse(bh);
7178 - }
7179 - bh = ext4_append(handle, dir, &block);
7180 -@@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
7181 - }
7182 -
7183 - retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
7184 -+out:
7185 - brelse(bh);
7186 - if (retval == 0)
7187 - ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
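
The ext4_add_entry() hunks are a leak fix expressed as control-flow cleanup: bh starts out NULL, the early returns become goto out, and the single exit path does brelse(bh); the make_indexed_dir() case nulls bh first because that callee consumes the reference. The same single-exit pattern in miniature, with free() standing in for brelse():

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        static int add_entry(int hand_off)
        {
                char *bh = malloc(16);  /* stands in for the buffer head */
                int retval;

                if (!bh)
                        return -ENOMEM;
                if (hand_off) {
                        free(bh);       /* callee would consume the reference */
                        bh = NULL;      /* so the exit path must not drop it */
                        retval = 0;
                        goto out;
                }
                retval = 0;
        out:
                free(bh);               /* free(NULL) is a harmless no-op */
                return retval;
        }

        int main(void)
        {
                printf("%d %d\n", add_entry(0), add_entry(1));
                return 0;
        }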
7188 -diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
7189 -index 665ef5a..a563ddb 100644
7190 ---- a/fs/lockd/svcsubs.c
7191 -+++ b/fs/lockd/svcsubs.c
7192 -@@ -31,7 +31,7 @@
7193 - static struct hlist_head nlm_files[FILE_NRHASH];
7194 - static DEFINE_MUTEX(nlm_file_mutex);
7195 -
7196 --#ifdef NFSD_DEBUG
7197 -+#ifdef CONFIG_SUNRPC_DEBUG
7198 - static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
7199 - {
7200 - u32 *fhp = (u32*)f->data;
7201 -diff --git a/fs/namei.c b/fs/namei.c
7202 -index c83145a..caa38a2 100644
7203 ---- a/fs/namei.c
7204 -+++ b/fs/namei.c
7205 -@@ -1591,7 +1591,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
7206 -
7207 - if (should_follow_link(path->dentry, follow)) {
7208 - if (nd->flags & LOOKUP_RCU) {
7209 -- if (unlikely(unlazy_walk(nd, path->dentry))) {
7210 -+ if (unlikely(nd->path.mnt != path->mnt ||
7211 -+ unlazy_walk(nd, path->dentry))) {
7212 - err = -ECHILD;
7213 - goto out_err;
7214 - }
7215 -@@ -3047,7 +3048,8 @@ finish_lookup:
7216 -
7217 - if (should_follow_link(path->dentry, !symlink_ok)) {
7218 - if (nd->flags & LOOKUP_RCU) {
7219 -- if (unlikely(unlazy_walk(nd, path->dentry))) {
7220 -+ if (unlikely(nd->path.mnt != path->mnt ||
7221 -+ unlazy_walk(nd, path->dentry))) {
7222 - error = -ECHILD;
7223 - goto out;
7224 - }
7225 -diff --git a/fs/namespace.c b/fs/namespace.c
7226 -index 82ef140..4622ee3 100644
7227 ---- a/fs/namespace.c
7228 -+++ b/fs/namespace.c
7229 -@@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
7230 - */
7231 - struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
7232 - {
7233 -- struct mount *p, *res;
7234 -- res = p = __lookup_mnt(mnt, dentry);
7235 -+ struct mount *p, *res = NULL;
7236 -+ p = __lookup_mnt(mnt, dentry);
7237 - if (!p)
7238 - goto out;
7239 -+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
7240 -+ res = p;
7241 - hlist_for_each_entry_continue(p, mnt_hash) {
7242 - if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
7243 - break;
7244 -- res = p;
7245 -+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
7246 -+ res = p;
7247 - }
7248 - out:
7249 - return res;
7250 -@@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
7251 - /*
7252 - * vfsmount lock must be held for write
7253 - */
7254 --static void detach_mnt(struct mount *mnt, struct path *old_path)
7255 -+static void unhash_mnt(struct mount *mnt)
7256 - {
7257 -- old_path->dentry = mnt->mnt_mountpoint;
7258 -- old_path->mnt = &mnt->mnt_parent->mnt;
7259 - mnt->mnt_parent = mnt;
7260 - mnt->mnt_mountpoint = mnt->mnt.mnt_root;
7261 - list_del_init(&mnt->mnt_child);
7262 -@@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
7263 - /*
7264 - * vfsmount lock must be held for write
7265 - */
7266 -+static void detach_mnt(struct mount *mnt, struct path *old_path)
7267 -+{
7268 -+ old_path->dentry = mnt->mnt_mountpoint;
7269 -+ old_path->mnt = &mnt->mnt_parent->mnt;
7270 -+ unhash_mnt(mnt);
7271 -+}
7272 -+
7273 -+/*
7274 -+ * vfsmount lock must be held for write
7275 -+ */
7276 -+static void umount_mnt(struct mount *mnt)
7277 -+{
7278 -+ /* old mountpoint will be dropped when we can do that */
7279 -+ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
7280 -+ unhash_mnt(mnt);
7281 -+}
7282 -+
7283 -+/*
7284 -+ * vfsmount lock must be held for write
7285 -+ */
7286 - void mnt_set_mountpoint(struct mount *mnt,
7287 - struct mountpoint *mp,
7288 - struct mount *child_mnt)
7289 -@@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
7290 - rcu_read_unlock();
7291 -
7292 - list_del(&mnt->mnt_instance);
7293 -+
7294 -+ if (unlikely(!list_empty(&mnt->mnt_mounts))) {
7295 -+ struct mount *p, *tmp;
7296 -+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
7297 -+ umount_mnt(p);
7298 -+ }
7299 -+ }
7300 - unlock_mount_hash();
7301 -
7302 - if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
7303 -@@ -1319,49 +1347,63 @@ static inline void namespace_lock(void)
7304 - down_write(&namespace_sem);
7305 - }
7306 -
7307 -+enum umount_tree_flags {
7308 -+ UMOUNT_SYNC = 1,
7309 -+ UMOUNT_PROPAGATE = 2,
7310 -+ UMOUNT_CONNECTED = 4,
7311 -+};
7312 - /*
7313 - * mount_lock must be held
7314 - * namespace_sem must be held for write
7315 -- * how = 0 => just this tree, don't propagate
7316 -- * how = 1 => propagate; we know that nobody else has reference to any victims
7317 -- * how = 2 => lazy umount
7318 - */
7319 --void umount_tree(struct mount *mnt, int how)
7320 -+static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
7321 - {
7322 -- HLIST_HEAD(tmp_list);
7323 -+ LIST_HEAD(tmp_list);
7324 - struct mount *p;
7325 -
7326 -+ if (how & UMOUNT_PROPAGATE)
7327 -+ propagate_mount_unlock(mnt);
7328 -+
7329 -+ /* Gather the mounts to umount */
7330 - for (p = mnt; p; p = next_mnt(p, mnt)) {
7331 -- hlist_del_init_rcu(&p->mnt_hash);
7332 -- hlist_add_head(&p->mnt_hash, &tmp_list);
7333 -+ p->mnt.mnt_flags |= MNT_UMOUNT;
7334 -+ list_move(&p->mnt_list, &tmp_list);
7335 - }
7336 -
7337 -- hlist_for_each_entry(p, &tmp_list, mnt_hash)
7338 -+ /* Hide the mounts from mnt_mounts */
7339 -+ list_for_each_entry(p, &tmp_list, mnt_list) {
7340 - list_del_init(&p->mnt_child);
7341 -+ }
7342 -
7343 -- if (how)
7344 -+ /* Add propagated mounts to the tmp_list */
7345 -+ if (how & UMOUNT_PROPAGATE)
7346 - propagate_umount(&tmp_list);
7347 -
7348 -- while (!hlist_empty(&tmp_list)) {
7349 -- p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
7350 -- hlist_del_init_rcu(&p->mnt_hash);
7351 -+ while (!list_empty(&tmp_list)) {
7352 -+ bool disconnect;
7353 -+ p = list_first_entry(&tmp_list, struct mount, mnt_list);
7354 - list_del_init(&p->mnt_expire);
7355 - list_del_init(&p->mnt_list);
7356 - __touch_mnt_namespace(p->mnt_ns);
7357 - p->mnt_ns = NULL;
7358 -- if (how < 2)
7359 -+ if (how & UMOUNT_SYNC)
7360 - p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
7361 -
7362 -- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
7363 -+ disconnect = !(((how & UMOUNT_CONNECTED) &&
7364 -+ mnt_has_parent(p) &&
7365 -+ (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
7366 -+ IS_MNT_LOCKED_AND_LAZY(p));
7367 -+
7368 -+ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
7369 -+ disconnect ? &unmounted : NULL);
7370 - if (mnt_has_parent(p)) {
7371 -- hlist_del_init(&p->mnt_mp_list);
7372 -- put_mountpoint(p->mnt_mp);
7373 - mnt_add_count(p->mnt_parent, -1);
7374 -- /* old mountpoint will be dropped when we can do that */
7375 -- p->mnt_ex_mountpoint = p->mnt_mountpoint;
7376 -- p->mnt_mountpoint = p->mnt.mnt_root;
7377 -- p->mnt_parent = p;
7378 -- p->mnt_mp = NULL;
7379 -+ if (!disconnect) {
7380 -+ /* Don't forget about p */
7381 -+ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
7382 -+ } else {
7383 -+ umount_mnt(p);
7384 -+ }
7385 - }
7386 - change_mnt_propagation(p, MS_PRIVATE);
7387 - }
7388 -@@ -1447,14 +1489,14 @@ static int do_umount(struct mount *mnt, int flags)
7389 -
7390 - if (flags & MNT_DETACH) {
7391 - if (!list_empty(&mnt->mnt_list))
7392 -- umount_tree(mnt, 2);
7393 -+ umount_tree(mnt, UMOUNT_PROPAGATE);
7394 - retval = 0;
7395 - } else {
7396 - shrink_submounts(mnt);
7397 - retval = -EBUSY;
7398 - if (!propagate_mount_busy(mnt, 2)) {
7399 - if (!list_empty(&mnt->mnt_list))
7400 -- umount_tree(mnt, 1);
7401 -+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
7402 - retval = 0;
7403 - }
7404 - }
7405 -@@ -1480,13 +1522,20 @@ void __detach_mounts(struct dentry *dentry)
7406 -
7407 - namespace_lock();
7408 - mp = lookup_mountpoint(dentry);
7409 -- if (!mp)
7410 -+ if (IS_ERR_OR_NULL(mp))
7411 - goto out_unlock;
7412 -
7413 - lock_mount_hash();
7414 - while (!hlist_empty(&mp->m_list)) {
7415 - mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
7416 -- umount_tree(mnt, 2);
7417 -+ if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
7418 -+ struct mount *p, *tmp;
7419 -+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
7420 -+ hlist_add_head(&p->mnt_umount.s_list, &unmounted);
7421 -+ umount_mnt(p);
7422 -+ }
7423 -+ }
7424 -+ else umount_tree(mnt, UMOUNT_CONNECTED);
7425 - }
7426 - unlock_mount_hash();
7427 - put_mountpoint(mp);
7428 -@@ -1648,7 +1697,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
7429 - out:
7430 - if (res) {
7431 - lock_mount_hash();
7432 -- umount_tree(res, 0);
7433 -+ umount_tree(res, UMOUNT_SYNC);
7434 - unlock_mount_hash();
7435 - }
7436 - return q;
7437 -@@ -1672,7 +1721,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
7438 - {
7439 - namespace_lock();
7440 - lock_mount_hash();
7441 -- umount_tree(real_mount(mnt), 0);
7442 -+ umount_tree(real_mount(mnt), UMOUNT_SYNC);
7443 - unlock_mount_hash();
7444 - namespace_unlock();
7445 - }
7446 -@@ -1855,7 +1904,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
7447 - out_cleanup_ids:
7448 - while (!hlist_empty(&tree_list)) {
7449 - child = hlist_entry(tree_list.first, struct mount, mnt_hash);
7450 -- umount_tree(child, 0);
7451 -+ umount_tree(child, UMOUNT_SYNC);
7452 - }
7453 - unlock_mount_hash();
7454 - cleanup_group_ids(source_mnt, NULL);
7455 -@@ -2035,7 +2084,7 @@ static int do_loopback(struct path *path, const char *old_name,
7456 - err = graft_tree(mnt, parent, mp);
7457 - if (err) {
7458 - lock_mount_hash();
7459 -- umount_tree(mnt, 0);
7460 -+ umount_tree(mnt, UMOUNT_SYNC);
7461 - unlock_mount_hash();
7462 - }
7463 - out2:
7464 -@@ -2406,7 +2455,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
7465 - while (!list_empty(&graveyard)) {
7466 - mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
7467 - touch_mnt_namespace(mnt->mnt_ns);
7468 -- umount_tree(mnt, 1);
7469 -+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
7470 - }
7471 - unlock_mount_hash();
7472 - namespace_unlock();
7473 -@@ -2477,7 +2526,7 @@ static void shrink_submounts(struct mount *mnt)
7474 - m = list_first_entry(&graveyard, struct mount,
7475 - mnt_expire);
7476 - touch_mnt_namespace(m->mnt_ns);
7477 -- umount_tree(m, 1);
7478 -+ umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
7479 - }
7480 - }
7481 - }
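
The namespace.c rework above retires the old how = 0/1/2 argument of umount_tree() in favor of or-able flags, so call sites now spell out intent (UMOUNT_PROPAGATE | UMOUNT_SYNC) and the new UMOUNT_CONNECTED case fits in without renumbering. A sketch of the flag-based interface (the enum values match the hunk; the body is illustrative):

        #include <stdio.h>

        enum umount_tree_flags {
                UMOUNT_SYNC             = 1,
                UMOUNT_PROPAGATE        = 2,
                UMOUNT_CONNECTED        = 4,
        };

        static void umount_tree(enum umount_tree_flags how)
        {
                printf("umount:%s%s%s\n",
                       how & UMOUNT_PROPAGATE ? " propagate" : "",
                       how & UMOUNT_SYNC ? " sync" : "",
                       how & UMOUNT_CONNECTED ? " connected" : "");
        }

        int main(void)
        {
                umount_tree(UMOUNT_PROPAGATE | UMOUNT_SYNC);    /* old how = 1 */
                umount_tree(UMOUNT_PROPAGATE);                  /* old how = 2 */
                umount_tree(UMOUNT_SYNC);                       /* old how = 0 */
                return 0;
        }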
7482 -diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
7483 -index 351be920..8d129bb 100644
7484 ---- a/fs/nfs/callback.c
7485 -+++ b/fs/nfs/callback.c
7486 -@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
7487 - if (try_to_freeze())
7488 - continue;
7489 -
7490 -- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
7491 -+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
7492 - spin_lock_bh(&serv->sv_cb_lock);
7493 - if (!list_empty(&serv->sv_cb_list)) {
7494 - req = list_first_entry(&serv->sv_cb_list,
7495 -@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
7496 - error);
7497 - } else {
7498 - spin_unlock_bh(&serv->sv_cb_lock);
7499 -- /* schedule_timeout to game the hung task watchdog */
7500 -- schedule_timeout(60 * HZ);
7501 -+ schedule();
7502 - finish_wait(&serv->sv_cb_waitq, &wq);
7503 - }
7504 -+ flush_signals(current);
7505 - }
7506 - return 0;
7507 - }
7508 -diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
7509 -index e907c8c..ab21ef1 100644
7510 ---- a/fs/nfs/direct.c
7511 -+++ b/fs/nfs/direct.c
7512 -@@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
7513 - int i;
7514 - ssize_t count;
7515 -
7516 -- WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
7517 --
7518 -- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
7519 -- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
7520 -- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
7521 -- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
7522 -- }
7523 --
7524 -- /* update the dreq->count by finding the minimum agreed count from all
7525 -- * mirrors */
7526 -- count = dreq->mirrors[0].count;
7527 -+ if (dreq->mirror_count == 1) {
7528 -+ dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
7529 -+ dreq->count += hdr->good_bytes;
7530 -+ } else {
7531 -+ /* mirrored writes */
7532 -+ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
7533 -+ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
7534 -+ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
7535 -+ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
7536 -+ }
7537 -+ /* update the dreq->count by finding the minimum agreed count from all
7538 -+ * mirrors */
7539 -+ count = dreq->mirrors[0].count;
7540 -
7541 -- for (i = 1; i < dreq->mirror_count; i++)
7542 -- count = min(count, dreq->mirrors[i].count);
7543 -+ for (i = 1; i < dreq->mirror_count; i++)
7544 -+ count = min(count, dreq->mirrors[i].count);
7545 -
7546 -- dreq->count = count;
7547 -+ dreq->count = count;
7548 -+ }
7549 - }
7550 -
7551 - /*
7552 -diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
7553 -index 5c399ec..d494ea2 100644
7554 ---- a/fs/nfs/nfs4xdr.c
7555 -+++ b/fs/nfs/nfs4xdr.c
7556 -@@ -7365,6 +7365,11 @@ nfs4_stat_to_errno(int stat)
7557 - .p_name = #proc, \
7558 - }
7559 -
7560 -+#define STUB(proc) \
7561 -+[NFSPROC4_CLNT_##proc] = { \
7562 -+ .p_name = #proc, \
7563 -+}
7564 -+
7565 - struct rpc_procinfo nfs4_procedures[] = {
7566 - PROC(READ, enc_read, dec_read),
7567 - PROC(WRITE, enc_write, dec_write),
7568 -@@ -7417,6 +7422,7 @@ struct rpc_procinfo nfs4_procedures[] = {
7569 - PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
7570 - PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
7571 - PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
7572 -+ STUB(GETDEVICELIST),
7573 - PROC(BIND_CONN_TO_SESSION,
7574 - enc_bind_conn_to_session, dec_bind_conn_to_session),
7575 - PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
7576 -diff --git a/fs/nfs/read.c b/fs/nfs/read.c
7577 -index 568ecf0..848d8b1 100644
7578 ---- a/fs/nfs/read.c
7579 -+++ b/fs/nfs/read.c
7580 -@@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page)
7581 - dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
7582 - page, PAGE_CACHE_SIZE, page_file_index(page));
7583 - nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
7584 -- nfs_inc_stats(inode, NFSIOS_READPAGES);
7585 -+ nfs_add_stats(inode, NFSIOS_READPAGES, 1);
7586 -
7587 - /*
7588 - * Try to flush any pending writes to the file..
7589 -diff --git a/fs/nfs/write.c b/fs/nfs/write.c
7590 -index 849ed78..41b3f1096 100644
7591 ---- a/fs/nfs/write.c
7592 -+++ b/fs/nfs/write.c
7593 -@@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
7594 - int ret;
7595 -
7596 - nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
7597 -- nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
7598 -+ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
7599 -
7600 - nfs_pageio_cond_complete(pgio, page_file_index(page));
7601 - ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
7602 -diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
7603 -index 92b9d97..5416968 100644
7604 ---- a/fs/nfsd/nfs4proc.c
7605 -+++ b/fs/nfsd/nfs4proc.c
7606 -@@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7607 - dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
7608 - return status;
7609 - }
7610 -+ if (!file)
7611 -+ return nfserr_bad_stateid;
7612 -
7613 - status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
7614 - fallocate->falloc_offset,
7615 -@@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7616 - dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
7617 - return status;
7618 - }
7619 -+ if (!file)
7620 -+ return nfserr_bad_stateid;
7621 -
7622 - switch (seek->seek_whence) {
7623 - case NFS4_CONTENT_DATA:
7624 -diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
7625 -index 8ba1d88..ee1cccd 100644
7626 ---- a/fs/nfsd/nfs4state.c
7627 -+++ b/fs/nfsd/nfs4state.c
7628 -@@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
7629 - return sid->sequence % SESSION_HASH_SIZE;
7630 - }
7631 -
7632 --#ifdef NFSD_DEBUG
7633 -+#ifdef CONFIG_SUNRPC_DEBUG
7634 - static inline void
7635 - dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
7636 - {
7637 -diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
7638 -index 5fb7e78..5b33ce1 100644
7639 ---- a/fs/nfsd/nfs4xdr.c
7640 -+++ b/fs/nfsd/nfs4xdr.c
7641 -@@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
7642 - unsigned long maxcount;
7643 - struct xdr_stream *xdr = &resp->xdr;
7644 - struct file *file = read->rd_filp;
7645 -+ struct svc_fh *fhp = read->rd_fhp;
7646 - int starting_len = xdr->buf->len;
7647 - struct raparms *ra;
7648 - __be32 *p;
7649 -@@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
7650 - maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
7651 - maxcount = min_t(unsigned long, maxcount, read->rd_length);
7652 -
7653 -- if (!read->rd_filp) {
7654 -+ if (read->rd_filp)
7655 -+ err = nfsd_permission(resp->rqstp, fhp->fh_export,
7656 -+ fhp->fh_dentry,
7657 -+ NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
7658 -+ else
7659 - err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
7660 - &file, &ra);
7661 -- if (err)
7662 -- goto err_truncate;
7663 -- }
7664 -+ if (err)
7665 -+ goto err_truncate;
7666 -
7667 - if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
7668 - err = nfsd4_encode_splice_read(resp, read, file, maxcount);
7669 -diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
7670 -index aa47d75..9690cb4 100644
7671 ---- a/fs/nfsd/nfsctl.c
7672 -+++ b/fs/nfsd/nfsctl.c
7673 -@@ -1250,15 +1250,15 @@ static int __init init_nfsd(void)
7674 - int retval;
7675 - printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
7676 -
7677 -- retval = register_cld_notifier();
7678 -- if (retval)
7679 -- return retval;
7680 - retval = register_pernet_subsys(&nfsd_net_ops);
7681 - if (retval < 0)
7682 -- goto out_unregister_notifier;
7683 -- retval = nfsd4_init_slabs();
7684 -+ return retval;
7685 -+ retval = register_cld_notifier();
7686 - if (retval)
7687 - goto out_unregister_pernet;
7688 -+ retval = nfsd4_init_slabs();
7689 -+ if (retval)
7690 -+ goto out_unregister_notifier;
7691 - retval = nfsd4_init_pnfs();
7692 - if (retval)
7693 - goto out_free_slabs;
7694 -@@ -1290,10 +1290,10 @@ out_exit_pnfs:
7695 - nfsd4_exit_pnfs();
7696 - out_free_slabs:
7697 - nfsd4_free_slabs();
7698 --out_unregister_pernet:
7699 -- unregister_pernet_subsys(&nfsd_net_ops);
7700 - out_unregister_notifier:
7701 - unregister_cld_notifier();
7702 -+out_unregister_pernet:
7703 -+ unregister_pernet_subsys(&nfsd_net_ops);
7704 - return retval;
7705 - }
7706 -
7707 -@@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void)
7708 - nfsd4_exit_pnfs();
7709 - nfsd_fault_inject_cleanup();
7710 - unregister_filesystem(&nfsd_fs_type);
7711 -- unregister_pernet_subsys(&nfsd_net_ops);
7712 - unregister_cld_notifier();
7713 -+ unregister_pernet_subsys(&nfsd_net_ops);
7714 - }
7715 -
7716 - MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
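
The init_nfsd()/exit_nfsd() hunks above restore the usual kernel cleanup idiom: resources are released in the reverse order they were acquired, both on the error path (goto labels stacked in reverse) and in the module exit routine. A minimal sketch of the idiom, with hypothetical setup_a()/setup_b() helpers standing in for register_pernet_subsys() and friends:

    int setup_a(void);  void teardown_a(void);   /* hypothetical helpers */
    int setup_b(void);  void teardown_b(void);

    int my_init(void)
    {
        int err = setup_a();        /* acquired first */
        if (err)
            return err;
        err = setup_b();            /* acquired second */
        if (err)
            goto out_a;             /* undo only what already succeeded */
        return 0;
    out_a:
        teardown_a();
        return err;
    }

    void my_exit(void)
    {
        teardown_b();               /* release in reverse acquisition order */
        teardown_a();
    }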
7717 -diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
7718 -index 565c4da..cf98052 100644
7719 ---- a/fs/nfsd/nfsd.h
7720 -+++ b/fs/nfsd/nfsd.h
7721 -@@ -24,7 +24,7 @@
7722 - #include "export.h"
7723 -
7724 - #undef ifdebug
7725 --#ifdef NFSD_DEBUG
7726 -+#ifdef CONFIG_SUNRPC_DEBUG
7727 - # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag)
7728 - #else
7729 - # define ifdebug(flag) if (0)
7730 -diff --git a/fs/open.c b/fs/open.c
7731 -index 33f9cbf..44a3be1 100644
7732 ---- a/fs/open.c
7733 -+++ b/fs/open.c
7734 -@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
7735 - uid = make_kuid(current_user_ns(), user);
7736 - gid = make_kgid(current_user_ns(), group);
7737 -
7738 -+retry_deleg:
7739 - newattrs.ia_valid = ATTR_CTIME;
7740 - if (user != (uid_t) -1) {
7741 - if (!uid_valid(uid))
7742 -@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
7743 - if (!S_ISDIR(inode->i_mode))
7744 - newattrs.ia_valid |=
7745 - ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
7746 --retry_deleg:
7747 - mutex_lock(&inode->i_mutex);
7748 - error = security_path_chown(path, uid, gid);
7749 - if (!error)
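
In the chown_common() hunk above, the retry_deleg label moves up so that newattrs is rebuilt from scratch on every pass: notify_change() can rewrite ia_valid (for instance, replacing the ATTR_KILL_* bits) before a delegation break forces a retry, so re-entering the loop with the mutated attributes would be wrong. A minimal sketch of the pattern, using hypothetical build_attrs()/try_op() helpers:

    #include <errno.h>

    struct attrs { unsigned int valid; };
    struct attrs build_attrs(void);            /* hypothetical */
    int try_op(struct attrs *a);               /* may mutate *a; -EAGAIN = retry */

    int do_op_with_retry(void)
    {
        struct attrs a;
        int err;
    retry:
        a = build_attrs();   /* rebuilt each pass; the last try may have mutated it */
        err = try_op(&a);
        if (err == -EAGAIN)  /* e.g. a delegation was broken */
            goto retry;
        return err;
    }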
7750 -diff --git a/fs/pnode.c b/fs/pnode.c
7751 -index 260ac8f..6367e1e 100644
7752 ---- a/fs/pnode.c
7753 -+++ b/fs/pnode.c
7754 -@@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
7755 - }
7756 -
7757 - /*
7758 -+ * Clear MNT_LOCKED when it can be shown to be safe.
7759 -+ *
7760 -+ * mount_lock lock must be held for write
7761 -+ */
7762 -+void propagate_mount_unlock(struct mount *mnt)
7763 -+{
7764 -+ struct mount *parent = mnt->mnt_parent;
7765 -+ struct mount *m, *child;
7766 -+
7767 -+ BUG_ON(parent == mnt);
7768 -+
7769 -+ for (m = propagation_next(parent, parent); m;
7770 -+ m = propagation_next(m, parent)) {
7771 -+ child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
7772 -+ if (child)
7773 -+ child->mnt.mnt_flags &= ~MNT_LOCKED;
7774 -+ }
7775 -+}
7776 -+
7777 -+/*
7778 -+ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
7779 -+ */
7780 -+static void mark_umount_candidates(struct mount *mnt)
7781 -+{
7782 -+ struct mount *parent = mnt->mnt_parent;
7783 -+ struct mount *m;
7784 -+
7785 -+ BUG_ON(parent == mnt);
7786 -+
7787 -+ for (m = propagation_next(parent, parent); m;
7788 -+ m = propagation_next(m, parent)) {
7789 -+ struct mount *child = __lookup_mnt_last(&m->mnt,
7790 -+ mnt->mnt_mountpoint);
7791 -+ if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
7792 -+ SET_MNT_MARK(child);
7793 -+ }
7794 -+ }
7795 -+}
7796 -+
7797 -+/*
7798 - * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
7799 - * parent propagates to.
7800 - */
7801 -@@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt)
7802 - struct mount *child = __lookup_mnt_last(&m->mnt,
7803 - mnt->mnt_mountpoint);
7804 - /*
7805 -- * umount the child only if the child has no
7806 -- * other children
7807 -+ * umount the child only if the child has no children
7808 -+ * and the child is marked safe to unmount.
7809 - */
7810 -- if (child && list_empty(&child->mnt_mounts)) {
7811 -+ if (!child || !IS_MNT_MARKED(child))
7812 -+ continue;
7813 -+ CLEAR_MNT_MARK(child);
7814 -+ if (list_empty(&child->mnt_mounts)) {
7815 - list_del_init(&child->mnt_child);
7816 -- hlist_del_init_rcu(&child->mnt_hash);
7817 -- hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
7818 -+ child->mnt.mnt_flags |= MNT_UMOUNT;
7819 -+ list_move_tail(&child->mnt_list, &mnt->mnt_list);
7820 - }
7821 - }
7822 - }
7823 -@@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt)
7824 - *
7825 - * vfsmount lock must be held for write
7826 - */
7827 --int propagate_umount(struct hlist_head *list)
7828 -+int propagate_umount(struct list_head *list)
7829 - {
7830 - struct mount *mnt;
7831 -
7832 -- hlist_for_each_entry(mnt, list, mnt_hash)
7833 -+ list_for_each_entry_reverse(mnt, list, mnt_list)
7834 -+ mark_umount_candidates(mnt);
7835 -+
7836 -+ list_for_each_entry(mnt, list, mnt_list)
7837 - __propagate_umount(mnt);
7838 - return 0;
7839 - }
7840 -diff --git a/fs/pnode.h b/fs/pnode.h
7841 -index 4a24635..7114ce6 100644
7842 ---- a/fs/pnode.h
7843 -+++ b/fs/pnode.h
7844 -@@ -19,6 +19,9 @@
7845 - #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
7846 - #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
7847 - #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
7848 -+#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
7849 -+#define IS_MNT_LOCKED_AND_LAZY(m) \
7850 -+ (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
7851 -
7852 - #define CL_EXPIRE 0x01
7853 - #define CL_SLAVE 0x02
7854 -@@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt)
7855 - void change_mnt_propagation(struct mount *, int);
7856 - int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
7857 - struct hlist_head *);
7858 --int propagate_umount(struct hlist_head *);
7859 -+int propagate_umount(struct list_head *);
7860 - int propagate_mount_busy(struct mount *, int);
7861 -+void propagate_mount_unlock(struct mount *);
7862 - void mnt_release_group_id(struct mount *);
7863 - int get_dominating_id(struct mount *mnt, const struct path *root);
7864 - unsigned int mnt_get_count(struct mount *mnt);
7865 - void mnt_set_mountpoint(struct mount *, struct mountpoint *,
7866 - struct mount *);
7867 --void umount_tree(struct mount *, int);
7868 - struct mount *copy_tree(struct mount *, struct dentry *, int);
7869 - bool is_path_reachable(struct mount *, struct dentry *,
7870 - const struct path *root);
7871 -diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
7872 -index b034f10..0d58525 100644
7873 ---- a/include/acpi/actypes.h
7874 -+++ b/include/acpi/actypes.h
7875 -@@ -199,9 +199,29 @@ typedef int s32;
7876 - typedef s32 acpi_native_int;
7877 -
7878 - typedef u32 acpi_size;
7879 -+
7880 -+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
7881 -+
7882 -+/*
7883 -+ * OSPMs can define this to shrink the size of the structures for 32-bit
7884 -+ * none PAE environment. ASL compiler may always define this to generate
7885 -+ * 32-bit OSPM compliant tables.
7886 -+ */
7887 - typedef u32 acpi_io_address;
7888 - typedef u32 acpi_physical_address;
7889 -
7890 -+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
7891 -+
7892 -+/*
7893 -+ * It is reported that, after some calculations, the physical addresses can
7894 -+ * wrap over the 32-bit boundary on 32-bit PAE environment.
7895 -+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
7896 -+ */
7897 -+typedef u64 acpi_io_address;
7898 -+typedef u64 acpi_physical_address;
7899 -+
7900 -+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
7901 -+
7902 - #define ACPI_MAX_PTR ACPI_UINT32_MAX
7903 - #define ACPI_SIZE_MAX ACPI_UINT32_MAX
7904 -
7905 -@@ -736,10 +756,6 @@ typedef u32 acpi_event_status;
7906 - #define ACPI_GPE_ENABLE 0
7907 - #define ACPI_GPE_DISABLE 1
7908 - #define ACPI_GPE_CONDITIONAL_ENABLE 2
7909 --#define ACPI_GPE_SAVE_MASK 4
7910 --
7911 --#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
7912 --#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
7913 -
7914 - /*
7915 - * GPE info flags - Per GPE
7916 -diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
7917 -index ad74dc5..ecdf940 100644
7918 ---- a/include/acpi/platform/acenv.h
7919 -+++ b/include/acpi/platform/acenv.h
7920 -@@ -76,6 +76,7 @@
7921 - #define ACPI_LARGE_NAMESPACE_NODE
7922 - #define ACPI_DATA_TABLE_DISASSEMBLY
7923 - #define ACPI_SINGLE_THREADED
7924 -+#define ACPI_32BIT_PHYSICAL_ADDRESS
7925 - #endif
7926 -
7927 - /* acpi_exec configuration. Multithreaded with full AML debugger */
7928 -diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
7929 -index ae2eb17..a215609 100644
7930 ---- a/include/dt-bindings/clock/tegra124-car-common.h
7931 -+++ b/include/dt-bindings/clock/tegra124-car-common.h
7932 -@@ -297,7 +297,7 @@
7933 - #define TEGRA124_CLK_PLL_C4 270
7934 - #define TEGRA124_CLK_PLL_DP 271
7935 - #define TEGRA124_CLK_PLL_E_MUX 272
7936 --#define TEGRA124_CLK_PLLD_DSI 273
7937 -+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
7938 - /* 274 */
7939 - /* 275 */
7940 - /* 276 */
7941 -diff --git a/include/linux/bpf.h b/include/linux/bpf.h
7942 -index bbfceb7..33b52fb 100644
7943 ---- a/include/linux/bpf.h
7944 -+++ b/include/linux/bpf.h
7945 -@@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);
7946 -
7947 - /* function argument constraints */
7948 - enum bpf_arg_type {
7949 -- ARG_ANYTHING = 0, /* any argument is ok */
7950 -+ ARG_DONTCARE = 0, /* unused argument in helper function */
7951 -
7952 - /* the following constraints used to prototype
7953 - * bpf_map_lookup/update/delete_elem() functions
7954 -@@ -62,6 +62,8 @@ enum bpf_arg_type {
7955 - */
7956 - ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
7957 - ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
7958 -+
7959 -+ ARG_ANYTHING, /* any (initialized) argument is ok */
7960 - };
7961 -
7962 - /* type of values returned from helper functions */
7963 -diff --git a/include/linux/mount.h b/include/linux/mount.h
7964 -index c2c561d..564beee 100644
7965 ---- a/include/linux/mount.h
7966 -+++ b/include/linux/mount.h
7967 -@@ -61,6 +61,7 @@ struct mnt_namespace;
7968 - #define MNT_DOOMED 0x1000000
7969 - #define MNT_SYNC_UMOUNT 0x2000000
7970 - #define MNT_MARKED 0x4000000
7971 -+#define MNT_UMOUNT 0x8000000
7972 -
7973 - struct vfsmount {
7974 - struct dentry *mnt_root; /* root of the mounted tree */
7975 -diff --git a/include/linux/sched.h b/include/linux/sched.h
7976 -index a419b65..51348f7 100644
7977 ---- a/include/linux/sched.h
7978 -+++ b/include/linux/sched.h
7979 -@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
7980 - extern void calc_global_load(unsigned long ticks);
7981 - extern void update_cpu_load_nohz(void);
7982 -
7983 -+/* Notifier for when a task gets migrated to a new CPU */
7984 -+struct task_migration_notifier {
7985 -+ struct task_struct *task;
7986 -+ int from_cpu;
7987 -+ int to_cpu;
7988 -+};
7989 -+extern void register_task_migration_notifier(struct notifier_block *n);
7990 -+
7991 - extern unsigned long get_parent_ip(unsigned long addr);
7992 -
7993 - extern void dump_cpu_task(int cpu);
7994 -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
7995 -index f54d665..bdccc4b 100644
7996 ---- a/include/linux/skbuff.h
7997 -+++ b/include/linux/skbuff.h
7998 -@@ -769,6 +769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
7999 -
8000 - struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
8001 - int node);
8002 -+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
8003 - struct sk_buff *build_skb(void *data, unsigned int frag_size);
8004 - static inline struct sk_buff *alloc_skb(unsigned int size,
8005 - gfp_t priority)
8006 -@@ -3013,6 +3014,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
8007 - */
8008 - #define CHECKSUM_BREAK 76
8009 -
8010 -+/* Unset checksum-complete
8011 -+ *
8012 -+ * Unset checksum complete can be done when packet is being modified
8013 -+ * (uncompressed for instance) and checksum-complete value is
8014 -+ * invalidated.
8015 -+ */
8016 -+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
8017 -+{
8018 -+ if (skb->ip_summed == CHECKSUM_COMPLETE)
8019 -+ skb->ip_summed = CHECKSUM_NONE;
8020 -+}
8021 -+
8022 - /* Validate (init) checksum based on checksum complete.
8023 - *
8024 - * Return values:
8025 -diff --git a/include/linux/usb.h b/include/linux/usb.h
8026 -index 7ee1b5c..447fe29 100644
8027 ---- a/include/linux/usb.h
8028 -+++ b/include/linux/usb.h
8029 -@@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
8030 - #define USB_MAXINTERFACES 32
8031 - #define USB_MAXIADS (USB_MAXINTERFACES/2)
8032 -
8033 -+/*
8034 -+ * USB Resume Timer: Every Host controller driver should drive the resume
8035 -+ * signalling on the bus for the amount of time defined by this macro.
8036 -+ *
8037 -+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
8038 -+ *
8039 -+ * Note that the USB Specification states we should drive resume for *at least*
8040 -+ * 20 ms, but it doesn't give an upper bound. This creates two possible
8041 -+ * situations which we want to avoid:
8042 -+ *
8043 -+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
8044 -+ * us to fail USB Electrical Tests, thus failing Certification
8045 -+ *
8046 -+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
8047 -+ * and while we can argue that's against the USB Specification, we don't have
8048 -+ * control over which devices a certification laboratory will be using for
8049 -+ * certification. If CertLab uses a device which was tested against Windows and
8050 -+ * that happens to have relaxed resume signalling rules, we might fall into
8051 -+ * situations where we fail interoperability and electrical tests.
8052 -+ *
8053 -+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
8054 -+ * should cope with both LPJ calibration errors and devices not following every
8055 -+ * detail of the USB Specification.
8056 -+ */
8057 -+#define USB_RESUME_TIMEOUT 40 /* ms */
8058 -+
8059 - /**
8060 - * struct usb_interface_cache - long-term representation of a device interface
8061 - * @num_altsetting: number of altsettings defined.
8062 -diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
8063 -index d3583d3..dd0f3ab 100644
8064 ---- a/include/target/iscsi/iscsi_target_core.h
8065 -+++ b/include/target/iscsi/iscsi_target_core.h
8066 -@@ -602,6 +602,11 @@ struct iscsi_conn {
8067 - struct iscsi_session *sess;
8068 - /* Pointer to thread_set in use for this conn's threads */
8069 - struct iscsi_thread_set *thread_set;
8070 -+ int bitmap_id;
8071 -+ int rx_thread_active;
8072 -+ struct task_struct *rx_thread;
8073 -+ int tx_thread_active;
8074 -+ struct task_struct *tx_thread;
8075 - /* list_head for session connection list */
8076 - struct list_head conn_list;
8077 - } ____cacheline_aligned;
8078 -@@ -871,10 +876,12 @@ struct iscsit_global {
8079 - /* Unique identifier used for the authentication daemon */
8080 - u32 auth_id;
8081 - u32 inactive_ts;
8082 -+#define ISCSIT_BITMAP_BITS 262144
8083 - /* Thread Set bitmap count */
8084 - int ts_bitmap_count;
8085 - /* Thread Set bitmap pointer */
8086 - unsigned long *ts_bitmap;
8087 -+ spinlock_t ts_bitmap_lock;
8088 - /* Used for iSCSI discovery session authentication */
8089 - struct iscsi_node_acl discovery_acl;
8090 - struct iscsi_portal_group *discovery_tpg;
8091 -diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
8092 -index 672150b..985ca4c 100644
8093 ---- a/include/target/target_core_base.h
8094 -+++ b/include/target/target_core_base.h
8095 -@@ -524,7 +524,7 @@ struct se_cmd {
8096 - sense_reason_t (*execute_cmd)(struct se_cmd *);
8097 - sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
8098 - u32, enum dma_data_direction);
8099 -- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
8100 -+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
8101 -
8102 - unsigned char *t_task_cdb;
8103 - unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
8104 -diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
8105 -index 0bf130a..28ec6c9 100644
8106 ---- a/include/uapi/linux/nfsd/debug.h
8107 -+++ b/include/uapi/linux/nfsd/debug.h
8108 -@@ -12,14 +12,6 @@
8109 - #include <linux/sunrpc/debug.h>
8110 -
8111 - /*
8112 -- * Enable debugging for nfsd.
8113 -- * Requires RPC_DEBUG.
8114 -- */
8115 --#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
8116 --# define NFSD_DEBUG 1
8117 --#endif
8118 --
8119 --/*
8120 - * knfsd debug flags
8121 - */
8122 - #define NFSDDBG_SOCK 0x0001
8123 -diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
8124 -index a20e4a3..847a0a2 100644
8125 ---- a/include/video/samsung_fimd.h
8126 -+++ b/include/video/samsung_fimd.h
8127 -@@ -436,6 +436,12 @@
8128 - #define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
8129 - #define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
8130 -
8131 -+/* Display port clock control */
8132 -+#define DP_MIE_CLKCON 0x27c
8133 -+#define DP_MIE_CLK_DISABLE 0x0
8134 -+#define DP_MIE_CLK_DP_ENABLE 0x2
8135 -+#define DP_MIE_CLK_MIE_ENABLE 0x3
8136 -+
8137 - /* Notes on per-window bpp settings
8138 - *
8139 - * Value Win0 Win1 Win2 Win3 Win 4
8140 -diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
8141 -index 36508e6..5d8ea3d 100644
8142 ---- a/kernel/bpf/verifier.c
8143 -+++ b/kernel/bpf/verifier.c
8144 -@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
8145 - enum bpf_reg_type expected_type;
8146 - int err = 0;
8147 -
8148 -- if (arg_type == ARG_ANYTHING)
8149 -+ if (arg_type == ARG_DONTCARE)
8150 - return 0;
8151 -
8152 - if (reg->type == NOT_INIT) {
8153 -@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
8154 - return -EACCES;
8155 - }
8156 -
8157 -+ if (arg_type == ARG_ANYTHING)
8158 -+ return 0;
8159 -+
8160 - if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
8161 - arg_type == ARG_PTR_TO_MAP_VALUE) {
8162 - expected_type = PTR_TO_STACK;
8163 -diff --git a/kernel/ptrace.c b/kernel/ptrace.c
8164 -index 227fec3..9a34bd8 100644
8165 ---- a/kernel/ptrace.c
8166 -+++ b/kernel/ptrace.c
8167 -@@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
8168 - static int ptrace_resume(struct task_struct *child, long request,
8169 - unsigned long data)
8170 - {
8171 -+ bool need_siglock;
8172 -+
8173 - if (!valid_signal(data))
8174 - return -EIO;
8175 -
8176 -@@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request,
8177 - user_disable_single_step(child);
8178 - }
8179 -
8180 -+ /*
8181 -+ * Change ->exit_code and ->state under siglock to avoid the race
8182 -+ * with wait_task_stopped() in between; a non-zero ->exit_code will
8183 -+ * wrongly look like another report from tracee.
8184 -+ *
8185 -+ * Note that we need siglock even if ->exit_code == data and/or this
8186 -+ * status was not reported yet, the new status must not be cleared by
8187 -+ * wait_task_stopped() after resume.
8188 -+ *
8189 -+ * If data == 0 we do not care if wait_task_stopped() reports the old
8190 -+ * status and clears the code too; this can't race with the tracee, it
8191 -+ * takes siglock after resume.
8192 -+ */
8193 -+ need_siglock = data && !thread_group_empty(current);
8194 -+ if (need_siglock)
8195 -+ spin_lock_irq(&child->sighand->siglock);
8196 - child->exit_code = data;
8197 - wake_up_state(child, __TASK_TRACED);
8198 -+ if (need_siglock)
8199 -+ spin_unlock_irq(&child->sighand->siglock);
8200 -
8201 - return 0;
8202 - }
8203 -diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8204 -index 62671f5..3d5f6f6 100644
8205 ---- a/kernel/sched/core.c
8206 -+++ b/kernel/sched/core.c
8207 -@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
8208 - rq_clock_skip_update(rq, true);
8209 - }
8210 -
8211 -+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
8212 -+
8213 -+void register_task_migration_notifier(struct notifier_block *n)
8214 -+{
8215 -+ atomic_notifier_chain_register(&task_migration_notifier, n);
8216 -+}
8217 -+
8218 - #ifdef CONFIG_SMP
8219 - void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
8220 - {
8221 -@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
8222 - trace_sched_migrate_task(p, new_cpu);
8223 -
8224 - if (task_cpu(p) != new_cpu) {
8225 -+ struct task_migration_notifier tmn;
8226 -+
8227 - if (p->sched_class->migrate_task_rq)
8228 - p->sched_class->migrate_task_rq(p, new_cpu);
8229 - p->se.nr_migrations++;
8230 - perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
8231 -+
8232 -+ tmn.task = p;
8233 -+ tmn.from_cpu = task_cpu(p);
8234 -+ tmn.to_cpu = new_cpu;
8235 -+
8236 -+ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
8237 - }
8238 -
8239 - __set_task_cpu(p, new_cpu);
8240 -diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
8241 -index 3fa8fa6..f670cbb 100644
8242 ---- a/kernel/sched/deadline.c
8243 -+++ b/kernel/sched/deadline.c
8244 -@@ -514,7 +514,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
8245 - unsigned long flags;
8246 - struct rq *rq;
8247 -
8248 -- rq = task_rq_lock(current, &flags);
8249 -+ rq = task_rq_lock(p, &flags);
8250 -
8251 - /*
8252 - * We need to take care of several possible races here:
8253 -@@ -569,7 +569,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
8254 - push_dl_task(rq);
8255 - #endif
8256 - unlock:
8257 -- task_rq_unlock(rq, current, &flags);
8258 -+ task_rq_unlock(rq, p, &flags);
8259 -
8260 - return HRTIMER_NORESTART;
8261 - }
8262 -diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
8263 -index 5040d44..922048a 100644
8264 ---- a/kernel/trace/ring_buffer.c
8265 -+++ b/kernel/trace/ring_buffer.c
8266 -@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
8267 -
8268 - static __always_inline int trace_recursive_lock(void)
8269 - {
8270 -- unsigned int val = this_cpu_read(current_context);
8271 -+ unsigned int val = __this_cpu_read(current_context);
8272 - int bit;
8273 -
8274 - if (in_interrupt()) {
8275 -@@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void)
8276 - return 1;
8277 -
8278 - val |= (1 << bit);
8279 -- this_cpu_write(current_context, val);
8280 -+ __this_cpu_write(current_context, val);
8281 -
8282 - return 0;
8283 - }
8284 -
8285 - static __always_inline void trace_recursive_unlock(void)
8286 - {
8287 -- unsigned int val = this_cpu_read(current_context);
8288 -+ unsigned int val = __this_cpu_read(current_context);
8289 -
8290 -- val--;
8291 -- val &= this_cpu_read(current_context);
8292 -- this_cpu_write(current_context, val);
8293 -+ val &= val & (val - 1);
8294 -+ __this_cpu_write(current_context, val);
8295 - }
8296 -
8297 - #else
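
The rewritten trace_recursive_unlock() above drops the most recently taken recursion bit with val & (val - 1), the classic clear-lowest-set-bit identity (the extra "val &=" is redundant but harmless), while the __this_cpu_*() forms skip preemption-safe accessors on a path the ring buffer already runs with preemption disabled. A standalone demonstration of the bit identity:

    #include <stdio.h>

    int main(void)
    {
        unsigned int v = 0xb4;                   /* 1011 0100 */
        printf("%#x -> %#x\n", v, v & (v - 1));  /* prints 0xb4 -> 0xb0 */
        return 0;                                /* lowest set bit cleared */
    }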
8298 -diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
8299 -index db54dda..a9c10a3 100644
8300 ---- a/kernel/trace/trace_events.c
8301 -+++ b/kernel/trace/trace_events.c
8302 -@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
8303 - static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
8304 - {
8305 - char *event = NULL, *sub = NULL, *match;
8306 -+ int ret;
8307 -
8308 - /*
8309 - * The buf format can be <subsystem>:<event-name>
8310 -@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
8311 - event = NULL;
8312 - }
8313 -
8314 -- return __ftrace_set_clr_event(tr, match, sub, event, set);
8315 -+ ret = __ftrace_set_clr_event(tr, match, sub, event, set);
8316 -+
8317 -+ /* Put back the colon to allow this to be called again */
8318 -+ if (buf)
8319 -+ *(buf - 1) = ':';
8320 -+
8321 -+ return ret;
8322 - }
8323 -
8324 - /**
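
The ftrace_set_clr_event() fix above is about in-place tokenizing: the earlier strsep() split "subsystem:event" by overwriting the ':' with a NUL, so writing the ':' back afterwards lets the caller parse the same buffer again. A minimal standalone illustration of the restore-the-separator pattern:

    #include <stdio.h>
    #include <string.h>

    static void handle(char *buf)
    {
        char *colon = strchr(buf, ':');
        if (colon)
            *colon = '\0';              /* terminate the first token in place */
        printf("sub=%s event=%s\n", buf, colon ? colon + 1 : "(none)");
        if (colon)
            *colon = ':';               /* put the separator back for reuse */
    }

    int main(void)
    {
        char buf[] = "sched:sched_switch";
        handle(buf);
        handle(buf);                    /* parses correctly a second time */
        return 0;
    }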
8325 -diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
8326 -index 2d25ad1..b6fce36 100644
8327 ---- a/kernel/trace/trace_functions_graph.c
8328 -+++ b/kernel/trace/trace_functions_graph.c
8329 -@@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter)
8330 - {
8331 - /* pid and depth on the last trace processed */
8332 - struct fgraph_data *data;
8333 -+ gfp_t gfpflags;
8334 - int cpu;
8335 -
8336 - iter->private = NULL;
8337 -
8338 -- data = kzalloc(sizeof(*data), GFP_KERNEL);
8339 -+ /* We can be called in atomic context via ftrace_dump() */
8340 -+ gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
8341 -+
8342 -+ data = kzalloc(sizeof(*data), gfpflags);
8343 - if (!data)
8344 - goto out_err;
8345 -
8346 -- data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
8347 -+ data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
8348 - if (!data->cpu_data)
8349 - goto out_err_free;
8350 -
8351 -diff --git a/lib/string.c b/lib/string.c
8352 -index ce81aae..a579201 100644
8353 ---- a/lib/string.c
8354 -+++ b/lib/string.c
8355 -@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
8356 - void memzero_explicit(void *s, size_t count)
8357 - {
8358 - memset(s, 0, count);
8359 -- OPTIMIZER_HIDE_VAR(s);
8360 -+ barrier();
8361 - }
8362 - EXPORT_SYMBOL(memzero_explicit);
8363 -
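
The memzero_explicit() change above swaps OPTIMIZER_HIDE_VAR() for barrier(); the full compiler barrier appears intended to guarantee that the preceding memset() of a dying buffer (an on-stack key, say) cannot be proven a dead store and deleted. A userspace analogue using the same GCC/Clang empty-asm memory clobber that the kernel's barrier() expands to:

    #include <string.h>

    static void memzero_explicit(void *s, size_t n)
    {
        memset(s, 0, n);
        /* empty asm with a "memory" clobber: the compiler must assume the
         * zeroed bytes are observable, so the memset cannot be elided */
        __asm__ __volatile__("" ::: "memory");
    }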
8364 -diff --git a/mm/huge_memory.c b/mm/huge_memory.c
8365 -index 6817b03..956d4db 100644
8366 ---- a/mm/huge_memory.c
8367 -+++ b/mm/huge_memory.c
8368 -@@ -2316,8 +2316,14 @@ static struct page
8369 - struct vm_area_struct *vma, unsigned long address,
8370 - int node)
8371 - {
8372 -+ gfp_t flags;
8373 -+
8374 - VM_BUG_ON_PAGE(*hpage, *hpage);
8375 -
8376 -+ /* Only allocate from the target node */
8377 -+ flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
8378 -+ __GFP_THISNODE;
8379 -+
8380 - /*
8381 - * Before allocating the hugepage, release the mmap_sem read lock.
8382 - * The allocation can take potentially a long time if it involves
8383 -@@ -2326,8 +2332,7 @@ static struct page
8384 - */
8385 - up_read(&mm->mmap_sem);
8386 -
8387 -- *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
8388 -- khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
8389 -+ *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
8390 - if (unlikely(!*hpage)) {
8391 - count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
8392 - *hpage = ERR_PTR(-ENOMEM);
8393 -diff --git a/mm/hugetlb.c b/mm/hugetlb.c
8394 -index c41b2a0..caad3c5 100644
8395 ---- a/mm/hugetlb.c
8396 -+++ b/mm/hugetlb.c
8397 -@@ -3735,8 +3735,7 @@ retry:
8398 - if (!pmd_huge(*pmd))
8399 - goto out;
8400 - if (pmd_present(*pmd)) {
8401 -- page = pte_page(*(pte_t *)pmd) +
8402 -- ((address & ~PMD_MASK) >> PAGE_SHIFT);
8403 -+ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
8404 - if (flags & FOLL_GET)
8405 - get_page(page);
8406 - } else {
8407 -diff --git a/mm/mempolicy.c b/mm/mempolicy.c
8408 -index 4721046..de5dc5e 100644
8409 ---- a/mm/mempolicy.c
8410 -+++ b/mm/mempolicy.c
8411 -@@ -1985,7 +1985,8 @@ retry_cpuset:
8412 - nmask = policy_nodemask(gfp, pol);
8413 - if (!nmask || node_isset(node, *nmask)) {
8414 - mpol_cond_put(pol);
8415 -- page = alloc_pages_exact_node(node, gfp, order);
8416 -+ page = alloc_pages_exact_node(node,
8417 -+ gfp | __GFP_THISNODE, order);
8418 - goto out;
8419 - }
8420 - }
8421 -diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
8422 -index 0ee453f..f371cbf 100644
8423 ---- a/net/bridge/br_netfilter.c
8424 -+++ b/net/bridge/br_netfilter.c
8425 -@@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
8426 - struct net_device *in;
8427 -
8428 - if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
8429 -+ int frag_max_size;
8430 -+
8431 -+ if (skb->protocol == htons(ETH_P_IP)) {
8432 -+ frag_max_size = IPCB(skb)->frag_max_size;
8433 -+ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
8434 -+ }
8435 -+
8436 - in = nf_bridge->physindev;
8437 - if (nf_bridge->mask & BRNF_PKT_TYPE) {
8438 - skb->pkt_type = PACKET_OTHERHOST;
8439 -@@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
8440 - nf_bridge->mask |= BRNF_PKT_TYPE;
8441 - }
8442 -
8443 -- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
8444 -- return NF_DROP;
8445 -+ if (pf == NFPROTO_IPV4) {
8446 -+ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
8447 -+
8448 -+ if (br_parse_ip_options(skb))
8449 -+ return NF_DROP;
8450 -+
8451 -+ IPCB(skb)->frag_max_size = frag_max;
8452 -+ }
8453 -
8454 - /* The physdev module checks on this */
8455 - nf_bridge->mask |= BRNF_BRIDGED;
8456 -diff --git a/net/core/dev.c b/net/core/dev.c
8457 -index 45109b7..22a53ac 100644
8458 ---- a/net/core/dev.c
8459 -+++ b/net/core/dev.c
8460 -@@ -3041,7 +3041,7 @@ static struct rps_dev_flow *
8461 - set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
8462 - struct rps_dev_flow *rflow, u16 next_cpu)
8463 - {
8464 -- if (next_cpu != RPS_NO_CPU) {
8465 -+ if (next_cpu < nr_cpu_ids) {
8466 - #ifdef CONFIG_RFS_ACCEL
8467 - struct netdev_rx_queue *rxqueue;
8468 - struct rps_dev_flow_table *flow_table;
8469 -@@ -3146,7 +3146,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
8470 - * If the desired CPU (where last recvmsg was done) is
8471 - * different from current CPU (one in the rx-queue flow
8472 - * table entry), switch if one of the following holds:
8473 -- * - Current CPU is unset (equal to RPS_NO_CPU).
8474 -+ * - Current CPU is unset (>= nr_cpu_ids).
8475 - * - Current CPU is offline.
8476 - * - The current CPU's queue tail has advanced beyond the
8477 - * last packet that was enqueued using this table entry.
8478 -@@ -3154,14 +3154,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
8479 - * have been dequeued, thus preserving in order delivery.
8480 - */
8481 - if (unlikely(tcpu != next_cpu) &&
8482 -- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
8483 -+ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
8484 - ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
8485 - rflow->last_qtail)) >= 0)) {
8486 - tcpu = next_cpu;
8487 - rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
8488 - }
8489 -
8490 -- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
8491 -+ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
8492 - *rflowp = rflow;
8493 - cpu = tcpu;
8494 - goto done;
8495 -@@ -3202,14 +3202,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
8496 - struct rps_dev_flow_table *flow_table;
8497 - struct rps_dev_flow *rflow;
8498 - bool expire = true;
8499 -- int cpu;
8500 -+ unsigned int cpu;
8501 -
8502 - rcu_read_lock();
8503 - flow_table = rcu_dereference(rxqueue->rps_flow_table);
8504 - if (flow_table && flow_id <= flow_table->mask) {
8505 - rflow = &flow_table->flows[flow_id];
8506 - cpu = ACCESS_ONCE(rflow->cpu);
8507 -- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
8508 -+ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
8509 - ((int)(per_cpu(softnet_data, cpu).input_queue_head -
8510 - rflow->last_qtail) <
8511 - (int)(10 * flow_table->mask)))
8512 -diff --git a/net/core/skbuff.c b/net/core/skbuff.c
8513 -index 98d45fe..e9f9a15 100644
8514 ---- a/net/core/skbuff.c
8515 -+++ b/net/core/skbuff.c
8516 -@@ -280,13 +280,14 @@ nodata:
8517 - EXPORT_SYMBOL(__alloc_skb);
8518 -
8519 - /**
8520 -- * build_skb - build a network buffer
8521 -+ * __build_skb - build a network buffer
8522 - * @data: data buffer provided by caller
8523 -- * @frag_size: size of fragment, or 0 if head was kmalloced
8524 -+ * @frag_size: size of data, or 0 if head was kmalloced
8525 - *
8526 - * Allocate a new &sk_buff. Caller provides space holding head and
8527 - * skb_shared_info. @data must have been allocated by kmalloc() only if
8528 -- * @frag_size is 0, otherwise data should come from the page allocator.
8529 -+ * @frag_size is 0, otherwise data should come from the page allocator
8530 -+ * or vmalloc()
8531 - * The return is the new skb buffer.
8532 - * On a failure the return is %NULL, and @data is not freed.
8533 - * Notes :
8534 -@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
8535 - * before giving packet to stack.
8536 - * RX rings only contains data buffers, not full skbs.
8537 - */
8538 --struct sk_buff *build_skb(void *data, unsigned int frag_size)
8539 -+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
8540 - {
8541 - struct skb_shared_info *shinfo;
8542 - struct sk_buff *skb;
8543 -@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
8544 -
8545 - memset(skb, 0, offsetof(struct sk_buff, tail));
8546 - skb->truesize = SKB_TRUESIZE(size);
8547 -- skb->head_frag = frag_size != 0;
8548 - atomic_set(&skb->users, 1);
8549 - skb->head = data;
8550 - skb->data = data;
8551 -@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
8552 -
8553 - return skb;
8554 - }
8555 -+
8556 -+/* build_skb() is wrapper over __build_skb(), that specifically
8557 -+ * takes care of skb->head and skb->pfmemalloc
8558 -+ * This means that if @frag_size is not zero, then @data must be backed
8559 -+ * by a page fragment, not kmalloc() or vmalloc()
8560 -+ */
8561 -+struct sk_buff *build_skb(void *data, unsigned int frag_size)
8562 -+{
8563 -+ struct sk_buff *skb = __build_skb(data, frag_size);
8564 -+
8565 -+ if (skb && frag_size) {
8566 -+ skb->head_frag = 1;
8567 -+ if (virt_to_head_page(data)->pfmemalloc)
8568 -+ skb->pfmemalloc = 1;
8569 -+ }
8570 -+ return skb;
8571 -+}
8572 - EXPORT_SYMBOL(build_skb);
8573 -
8574 - struct netdev_alloc_cache {
8575 -@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
8576 - gfp_t gfp = gfp_mask;
8577 -
8578 - if (order) {
8579 -- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
8580 -+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
8581 -+ __GFP_NOMEMALLOC;
8582 - page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
8583 - nc->frag.size = PAGE_SIZE << (page ? order : 0);
8584 - }
8585 -diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
8586 -index d9bc28a..53bd53f 100644
8587 ---- a/net/ipv4/ip_forward.c
8588 -+++ b/net/ipv4/ip_forward.c
8589 -@@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb)
8590 - if (skb->pkt_type != PACKET_HOST)
8591 - goto drop;
8592 -
8593 -+ if (unlikely(skb->sk))
8594 -+ goto drop;
8595 -+
8596 - if (skb_warn_if_lro(skb))
8597 - goto drop;
8598 -
8599 -diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
8600 -index d520492..9d48dc4 100644
8601 ---- a/net/ipv4/tcp_output.c
8602 -+++ b/net/ipv4/tcp_output.c
8603 -@@ -2751,39 +2751,65 @@ begin_fwd:
8604 - }
8605 - }
8606 -
8607 --/* Send a fin. The caller locks the socket for us. This cannot be
8608 -- * allowed to fail queueing a FIN frame under any circumstances.
8609 -+/* We allow to exceed memory limits for FIN packets to expedite
8610 -+ * connection tear down and (memory) recovery.
8611 -+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
8612 -+ * or even be forced to close flow without any FIN.
8613 -+ */
8614 -+static void sk_forced_wmem_schedule(struct sock *sk, int size)
8615 -+{
8616 -+ int amt, status;
8617 -+
8618 -+ if (size <= sk->sk_forward_alloc)
8619 -+ return;
8620 -+ amt = sk_mem_pages(size);
8621 -+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
8622 -+ sk_memory_allocated_add(sk, amt, &status);
8623 -+}
8624 -+
8625 -+/* Send a FIN. The caller locks the socket for us.
8626 -+ * We should try to send a FIN packet really hard, but eventually give up.
8627 - */
8628 - void tcp_send_fin(struct sock *sk)
8629 - {
8630 -+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
8631 - struct tcp_sock *tp = tcp_sk(sk);
8632 -- struct sk_buff *skb = tcp_write_queue_tail(sk);
8633 -- int mss_now;
8634 -
8635 -- /* Optimization, tack on the FIN if we have a queue of
8636 -- * unsent frames. But be careful about outgoing SACKS
8637 -- * and IP options.
8638 -+ /* Optimization, tack on the FIN if we have one skb in write queue and
8639 -+ * this skb was not yet sent, or we are under memory pressure.
8640 -+ * Note: in the latter case, FIN packet will be sent after a timeout,
8641 -+ * as TCP stack thinks it has already been transmitted.
8642 - */
8643 -- mss_now = tcp_current_mss(sk);
8644 --
8645 -- if (tcp_send_head(sk) != NULL) {
8646 -- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
8647 -- TCP_SKB_CB(skb)->end_seq++;
8648 -+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
8649 -+coalesce:
8650 -+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
8651 -+ TCP_SKB_CB(tskb)->end_seq++;
8652 - tp->write_seq++;
8653 -+ if (!tcp_send_head(sk)) {
8654 -+ /* This means tskb was already sent.
8655 -+ * Pretend we included the FIN on previous transmit.
8656 -+ * We need to set tp->snd_nxt to the value it would have
8657 -+ * if FIN had been sent. This is because retransmit path
8658 -+ * does not change tp->snd_nxt.
8659 -+ */
8660 -+ tp->snd_nxt++;
8661 -+ return;
8662 -+ }
8663 - } else {
8664 -- /* Socket is locked, keep trying until memory is available. */
8665 -- for (;;) {
8666 -- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
8667 -- if (skb)
8668 -- break;
8669 -- yield();
8670 -+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
8671 -+ if (unlikely(!skb)) {
8672 -+ if (tskb)
8673 -+ goto coalesce;
8674 -+ return;
8675 - }
8676 -+ skb_reserve(skb, MAX_TCP_HEADER);
8677 -+ sk_forced_wmem_schedule(sk, skb->truesize);
8678 - /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
8679 - tcp_init_nondata_skb(skb, tp->write_seq,
8680 - TCPHDR_ACK | TCPHDR_FIN);
8681 - tcp_queue_skb(sk, skb);
8682 - }
8683 -- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
8684 -+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
8685 - }
8686 -
8687 - /* We get here when a process closes a file descriptor (either due to
8688 -diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
8689 -index 142f66a..0ca013d 100644
8690 ---- a/net/mac80211/mlme.c
8691 -+++ b/net/mac80211/mlme.c
8692 -@@ -2260,7 +2260,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
8693 - else
8694 - ssid_len = ssid[1];
8695 -
8696 -- ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
8697 -+ ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
8698 - ssid + 2, ssid_len, NULL,
8699 - 0, (u32) -1, true, 0,
8700 - ifmgd->associated->channel, false);
8701 -diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
8702 -index 05919bf..d1d7a81 100644
8703 ---- a/net/netlink/af_netlink.c
8704 -+++ b/net/netlink/af_netlink.c
8705 -@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
8706 - if (data == NULL)
8707 - return NULL;
8708 -
8709 -- skb = build_skb(data, size);
8710 -+ skb = __build_skb(data, size);
8711 - if (skb == NULL)
8712 - vfree(data);
8713 -- else {
8714 -- skb->head_frag = 0;
8715 -+ else
8716 - skb->destructor = netlink_skb_destructor;
8717 -- }
8718 -
8719 - return skb;
8720 - }
8721 -diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
8722 -index 2ca9f2e..53745f4 100644
8723 ---- a/sound/pci/emu10k1/emuproc.c
8724 -+++ b/sound/pci/emu10k1/emuproc.c
8725 -@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
8726 - struct snd_emu10k1 *emu = entry->private_data;
8727 - u32 value;
8728 - u32 value2;
8729 -- unsigned long flags;
8730 - u32 rate;
8731 -
8732 - if (emu->card_capabilities->emu_model) {
8733 -- spin_lock_irqsave(&emu->emu_lock, flags);
8734 - snd_emu1010_fpga_read(emu, 0x38, &value);
8735 -- spin_unlock_irqrestore(&emu->emu_lock, flags);
8736 - if ((value & 0x1) == 0) {
8737 -- spin_lock_irqsave(&emu->emu_lock, flags);
8738 - snd_emu1010_fpga_read(emu, 0x2a, &value);
8739 - snd_emu1010_fpga_read(emu, 0x2b, &value2);
8740 -- spin_unlock_irqrestore(&emu->emu_lock, flags);
8741 - rate = 0x1770000 / (((value << 5) | value2)+1);
8742 - snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
8743 - } else {
8744 - snd_iprintf(buffer, "ADAT Unlocked\n");
8745 - }
8746 -- spin_lock_irqsave(&emu->emu_lock, flags);
8747 - snd_emu1010_fpga_read(emu, 0x20, &value);
8748 -- spin_unlock_irqrestore(&emu->emu_lock, flags);
8749 - if ((value & 0x4) == 0) {
8750 -- spin_lock_irqsave(&emu->emu_lock, flags);
8751 - snd_emu1010_fpga_read(emu, 0x28, &value);
8752 - snd_emu1010_fpga_read(emu, 0x29, &value2);
8753 -- spin_unlock_irqrestore(&emu->emu_lock, flags);
8754 - rate = 0x1770000 / (((value << 5) | value2)+1);
8755 - snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
8756 - } else {
8757 -@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
8758 - {
8759 - struct snd_emu10k1 *emu = entry->private_data;
8760 - u32 value;
8761 -- unsigned long flags;
8762 - int i;
8763 - snd_iprintf(buffer, "EMU1010 Registers:\n\n");
8764 -
8765 - for(i = 0; i < 0x40; i+=1) {
8766 -- spin_lock_irqsave(&emu->emu_lock, flags);
8767 - snd_emu1010_fpga_read(emu, i, &value);
8768 -- spin_unlock_irqrestore(&emu->emu_lock, flags);
8769 - snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
8770 - }
8771 - }
8772 -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8773 -index f9d12c0..2fd490b 100644
8774 ---- a/sound/pci/hda/patch_realtek.c
8775 -+++ b/sound/pci/hda/patch_realtek.c
8776 -@@ -5047,12 +5047,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8777 - SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
8778 - SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
8779 - SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8780 -+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
8781 - SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
8782 - SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
8783 - SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8784 - SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
8785 - SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
8786 - SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8787 -+ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
8788 - SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
8789 - SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8790 - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
8791 -@@ -5142,6 +5144,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
8792 - {0x1b, 0x411111f0}, \
8793 - {0x1e, 0x411111f0}
8794 -
8795 -+#define ALC256_STANDARD_PINS \
8796 -+ {0x12, 0x90a60140}, \
8797 -+ {0x14, 0x90170110}, \
8798 -+ {0x19, 0x411111f0}, \
8799 -+ {0x1a, 0x411111f0}, \
8800 -+ {0x1b, 0x411111f0}, \
8801 -+ {0x1d, 0x40700001}, \
8802 -+ {0x1e, 0x411111f0}, \
8803 -+ {0x21, 0x02211020}
8804 -+
8805 - #define ALC282_STANDARD_PINS \
8806 - {0x14, 0x90170110}, \
8807 - {0x18, 0x411111f0}, \
8808 -@@ -5235,15 +5247,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
8809 - {0x1d, 0x40700001},
8810 - {0x21, 0x02211050}),
8811 - SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
8812 -- {0x12, 0x90a60140},
8813 -- {0x13, 0x40000000},
8814 -- {0x14, 0x90170110},
8815 -- {0x19, 0x411111f0},
8816 -- {0x1a, 0x411111f0},
8817 -- {0x1b, 0x411111f0},
8818 -- {0x1d, 0x40700001},
8819 -- {0x1e, 0x411111f0},
8820 -- {0x21, 0x02211020}),
8821 -+ ALC256_STANDARD_PINS,
8822 -+ {0x13, 0x40000000}),
8823 -+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
8824 -+ ALC256_STANDARD_PINS,
8825 -+ {0x13, 0x411111f0}),
8826 - SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
8827 - {0x12, 0x90a60130},
8828 - {0x13, 0x40000000},
8829 -@@ -5563,6 +5571,8 @@ static int patch_alc269(struct hda_codec *codec)
8830 - break;
8831 - case 0x10ec0256:
8832 - spec->codec_variant = ALC269_TYPE_ALC256;
8833 -+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
8834 -+ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
8835 - break;
8836 - }
8837 -
8838 -@@ -5576,8 +5586,8 @@ static int patch_alc269(struct hda_codec *codec)
8839 - if (err < 0)
8840 - goto error;
8841 -
8842 -- if (!spec->gen.no_analog && spec->gen.beep_nid)
8843 -- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
8844 -+ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
8845 -+ set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
8846 -
8847 - codec->patch_ops = alc_patch_ops;
8848 - #ifdef CONFIG_PM
8849 -diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
8850 -index 7d3a6ac..e770ee6 100644
8851 ---- a/sound/soc/codecs/cs4271.c
8852 -+++ b/sound/soc/codecs/cs4271.c
8853 -@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
8854 - if (gpio_is_valid(cs4271->gpio_nreset)) {
8855 - /* Reset codec */
8856 - gpio_direction_output(cs4271->gpio_nreset, 0);
8857 -- udelay(1);
8858 -+ mdelay(1);
8859 - gpio_set_value(cs4271->gpio_nreset, 1);
8860 - /* Give the codec time to wake up */
8861 -- udelay(1);
8862 -+ mdelay(1);
8863 - }
8864 -
8865 - ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
8866 -diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
8867 -index 474cae8..8c09e3f 100644
8868 ---- a/sound/soc/codecs/pcm512x.c
8869 -+++ b/sound/soc/codecs/pcm512x.c
8870 -@@ -304,9 +304,9 @@ static const struct soc_enum pcm512x_veds =
8871 - static const struct snd_kcontrol_new pcm512x_controls[] = {
8872 - SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
8873 - PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
8874 --SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
8875 -+SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
8876 - PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
8877 --SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
8878 -+SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
8879 - PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
8880 - SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
8881 - PCM512x_RQMR_SHIFT, 1, 1),
8882 -@@ -576,8 +576,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
8883 -
8884 - /* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
8885 - if (pllin_rate / den > 20000000 && num < 8) {
8886 -- num *= 20000000 / (pllin_rate / den);
8887 -- den *= 20000000 / (pllin_rate / den);
8888 -+ num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
8889 -+ den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
8890 - }
8891 - dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
8892 -
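
In the pcm512x_find_pll_coeff() hunk above, the old scaling factor 20000000 / (pllin_rate / den) is an integer division of 20 MHz by a value already known to exceed 20 MHz, so it always evaluated to 0 and wiped out num and den; DIV_ROUND_UP() computes the intended scale-up factor instead. A worked example, assuming pllin_rate / den = 25 MHz:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long rate = 25000000UL;   /* pllin_rate / den, > 20 MHz */

        /* old code: 20 MHz / 25 MHz == 0, zeroing num and den */
        printf("old factor: %lu\n", 20000000UL / rate);

        /* fixed code: scale num and den up so rate / k <= 20 MHz */
        unsigned long k = DIV_ROUND_UP(rate, 20000000UL);            /* 2 */
        printf("new factor: %lu (rate drops to %lu Hz)\n", k, rate / k);
        return 0;                                        /* 12500000 Hz */
    }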
8893 -diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
8894 -index 31bb480..9e71c76 100644
8895 ---- a/sound/soc/codecs/wm8741.c
8896 -+++ b/sound/soc/codecs/wm8741.c
8897 -@@ -123,7 +123,7 @@ static struct {
8898 - };
8899 -
8900 - static const unsigned int rates_11289[] = {
8901 -- 44100, 88235,
8902 -+ 44100, 88200,
8903 - };
8904 -
8905 - static const struct snd_pcm_hw_constraint_list constraints_11289 = {
8906 -@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
8907 - };
8908 -
8909 - static const unsigned int rates_16934[] = {
8910 -- 44100, 88235,
8911 -+ 44100, 88200,
8912 - };
8913 -
8914 - static const struct snd_pcm_hw_constraint_list constraints_16934 = {
8915 -@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
8916 - };
8917 -
8918 - static const unsigned int rates_22579[] = {
8919 -- 44100, 88235, 1764000
8920 -+ 44100, 88200, 176400
8921 - };
8922 -
8923 - static const struct snd_pcm_hw_constraint_list constraints_22579 = {
8924 -@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
8925 - };
8926 -
8927 - static const unsigned int rates_36864[] = {
8928 -- 48000, 96000, 19200
8929 -+ 48000, 96000, 192000
8930 - };
8931 -
8932 - static const struct snd_pcm_hw_constraint_list constraints_36864 = {
8933 -diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
8934 -index b6bb594..8c2b9be 100644
8935 ---- a/sound/soc/davinci/davinci-evm.c
8936 -+++ b/sound/soc/davinci/davinci-evm.c
8937 -@@ -425,18 +425,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
8938 - return ret;
8939 - }
8940 -
8941 --static int davinci_evm_remove(struct platform_device *pdev)
8942 --{
8943 -- struct snd_soc_card *card = platform_get_drvdata(pdev);
8944 --
8945 -- snd_soc_unregister_card(card);
8946 --
8947 -- return 0;
8948 --}
8949 --
8950 - static struct platform_driver davinci_evm_driver = {
8951 - .probe = davinci_evm_probe,
8952 -- .remove = davinci_evm_remove,
8953 - .driver = {
8954 - .name = "davinci_evm",
8955 - .pm = &snd_soc_pm_ops,
8956 -diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
8957 -index 9a28365..32631a8 100644
8958 ---- a/sound/usb/quirks.c
8959 -+++ b/sound/usb/quirks.c
8960 -@@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
8961 - {
8962 - /* devices which do not support reading the sample rate. */
8963 - switch (chip->usb_id) {
8964 -+ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
8965 - case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
8966 - case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
8967 - return true;
8968 -diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
8969 -index dcc6652..deb3569 100644
8970 ---- a/tools/lib/traceevent/kbuffer-parse.c
8971 -+++ b/tools/lib/traceevent/kbuffer-parse.c
8972 -@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
8973 - switch (type_len) {
8974 - case KBUFFER_TYPE_PADDING:
8975 - *length = read_4(kbuf, data);
8976 -- data += *length;
8977 - break;
8978 -
8979 - case KBUFFER_TYPE_TIME_EXTEND:
8980 -diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
8981 -index cc22408..0884d31 100644
8982 ---- a/tools/perf/config/Makefile
8983 -+++ b/tools/perf/config/Makefile
8984 -@@ -651,7 +651,7 @@ ifeq (${IS_64_BIT}, 1)
8985 - NO_PERF_READ_VDSO32 := 1
8986 - endif
8987 - endif
8988 -- ifneq (${IS_X86_64}, 1)
8989 -+ ifneq ($(ARCH), x86)
8990 - NO_PERF_READ_VDSOX32 := 1
8991 - endif
8992 - ifndef NO_PERF_READ_VDSOX32
8993 -@@ -699,7 +699,7 @@ sysconfdir = $(prefix)/etc
8994 - ETC_PERFCONFIG = etc/perfconfig
8995 - endif
8996 - ifndef lib
8997 --ifeq ($(IS_X86_64),1)
8998 -+ifeq ($(ARCH)$(IS_64_BIT), x861)
8999 - lib = lib64
9000 - else
9001 - lib = lib
9002 -diff --git a/tools/perf/tests/make b/tools/perf/tests/make
9003 -index 75709d2..bff8532 100644
9004 ---- a/tools/perf/tests/make
9005 -+++ b/tools/perf/tests/make
9006 -@@ -5,7 +5,7 @@ include config/Makefile.arch
9007 -
9008 - # FIXME looks like x86 is the only arch running tests ;-)
9009 - # we need some IS_(32/64) flag to make this generic
9010 --ifeq ($(IS_X86_64),1)
9011 -+ifeq ($(ARCH)$(IS_64_BIT), x861)
9012 - lib = lib64
9013 - else
9014 - lib = lib
9015 -diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
9016 -index 6da965b..85b5238 100644
9017 ---- a/tools/perf/util/cloexec.c
9018 -+++ b/tools/perf/util/cloexec.c
9019 -@@ -7,6 +7,12 @@
9020 -
9021 - static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
9022 -
9023 -+int __weak sched_getcpu(void)
9024 -+{
9025 -+ errno = ENOSYS;
9026 -+ return -1;
9027 -+}
9028 -+
9029 - static int perf_flag_probe(void)
9030 - {
9031 - /* use 'safest' configuration as used in perf_evsel__fallback() */
9032 -diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
9033 -index 94a5a7d..68888c2 100644
9034 ---- a/tools/perf/util/cloexec.h
9035 -+++ b/tools/perf/util/cloexec.h
9036 -@@ -3,4 +3,10 @@
9037 -
9038 - unsigned long perf_event_open_cloexec_flag(void);
9039 -
9040 -+#ifdef __GLIBC_PREREQ
9041 -+#if !__GLIBC_PREREQ(2, 6)
9042 -+extern int sched_getcpu(void) __THROW;
9043 -+#endif
9044 -+#endif
9045 -+
9046 - #endif /* __PERF_CLOEXEC_H */
9047 -diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
9048 -index 33b7a2a..9bdf007 100644
9049 ---- a/tools/perf/util/symbol-elf.c
9050 -+++ b/tools/perf/util/symbol-elf.c
9051 -@@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
9052 - return GELF_ST_TYPE(sym->st_info);
9053 - }
9054 -
9055 -+#ifndef STT_GNU_IFUNC
9056 -+#define STT_GNU_IFUNC 10
9057 -+#endif
9058 -+
9059 - static inline int elf_sym__is_function(const GElf_Sym *sym)
9060 - {
9061 - return (elf_sym__type(sym) == STT_FUNC ||
9062 -diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
9063 -index d1b3a36..4039854 100644
9064 ---- a/tools/power/x86/turbostat/Makefile
9065 -+++ b/tools/power/x86/turbostat/Makefile
9066 -@@ -1,8 +1,12 @@
9067 - CC = $(CROSS_COMPILE)gcc
9068 --BUILD_OUTPUT := $(PWD)
9069 -+BUILD_OUTPUT := $(CURDIR)
9070 - PREFIX := /usr
9071 - DESTDIR :=
9072 -
9073 -+ifeq ("$(origin O)", "command line")
9074 -+ BUILD_OUTPUT := $(O)
9075 -+endif
9076 -+
9077 - turbostat : turbostat.c
9078 - CFLAGS += -Wall
9079 - CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
9080 -diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
9081 -index c9f60f5..e5abe7c 100644
9082 ---- a/virt/kvm/arm/vgic.c
9083 -+++ b/virt/kvm/arm/vgic.c
9084 -@@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
9085 - goto out;
9086 - }
9087 -
9088 -+ if (irq_num >= kvm->arch.vgic.nr_irqs)
9089 -+ return -EINVAL;
9090 -+
9091 - vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
9092 - if (vcpu_id >= 0) {
9093 - /* kick the specified vcpu */
9094 -diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9095 -index cc6a25d..f8f3f5f 100644
9096 ---- a/virt/kvm/kvm_main.c
9097 -+++ b/virt/kvm/kvm_main.c
9098 -@@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
9099 - ghc->generation = slots->generation;
9100 - ghc->len = len;
9101 - ghc->memslot = gfn_to_memslot(kvm, start_gfn);
9102 -- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
9103 -- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
9104 -+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
9105 -+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
9106 - ghc->hva += offset;
9107 - } else {
9108 - /*
9109
9110 diff --git a/1002_linux-4.0.3.patch b/1002_linux-4.0.3.patch
9111 deleted file mode 100644
9112 index d137bf2..0000000
9113 --- a/1002_linux-4.0.3.patch
9114 +++ /dev/null
9115 @@ -1,2827 +0,0 @@
9116 -diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
9117 -index bfcb1a62a7b4..4d68ec841304 100644
9118 ---- a/Documentation/kernel-parameters.txt
9119 -+++ b/Documentation/kernel-parameters.txt
9120 -@@ -3746,6 +3746,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
9121 - READ_CAPACITY_16 command);
9122 - f = NO_REPORT_OPCODES (don't use report opcodes
9123 - command, uas only);
9124 -+ g = MAX_SECTORS_240 (don't transfer more than
9125 -+ 240 sectors at a time, uas only);
9126 - h = CAPACITY_HEURISTICS (decrease the
9127 - reported device capacity by one
9128 - sector if the number is odd);
9129 -diff --git a/Makefile b/Makefile
9130 -index 0649a6011a76..dc9f43a019d6 100644
9131 ---- a/Makefile
9132 -+++ b/Makefile
9133 -@@ -1,6 +1,6 @@
9134 - VERSION = 4
9135 - PATCHLEVEL = 0
9136 --SUBLEVEL = 2
9137 -+SUBLEVEL = 3
9138 - EXTRAVERSION =
9139 - NAME = Hurr durr I'ma sheep
9140 -
9141 -diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
9142 -index ef7d112f5ce0..b0bd4e5fd5cf 100644
9143 ---- a/arch/arm64/mm/dma-mapping.c
9144 -+++ b/arch/arm64/mm/dma-mapping.c
9145 -@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
9146 -
9147 - *ret_page = phys_to_page(phys);
9148 - ptr = (void *)val;
9149 -- if (flags & __GFP_ZERO)
9150 -- memset(ptr, 0, size);
9151 -+ memset(ptr, 0, size);
9152 - }
9153 -
9154 - return ptr;
9155 -@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
9156 - struct page *page;
9157 - void *addr;
9158 -
9159 -- size = PAGE_ALIGN(size);
9160 - page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
9161 - get_order(size));
9162 - if (!page)
9163 -@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
9164 -
9165 - *dma_handle = phys_to_dma(dev, page_to_phys(page));
9166 - addr = page_address(page);
9167 -- if (flags & __GFP_ZERO)
9168 -- memset(addr, 0, size);
9169 -+ memset(addr, 0, size);
9170 - return addr;
9171 - } else {
9172 - return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
9173 -@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
9174 - {
9175 - void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
9176 -
9177 -+ size = PAGE_ALIGN(size);
9178 -+
9179 - if (!is_device_dma_coherent(dev)) {
9180 - if (__free_from_pool(vaddr, size))
9181 - return;
9182 -diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
9183 -index c7a16904cd03..1a313c468d65 100644
9184 ---- a/arch/mips/Kconfig
9185 -+++ b/arch/mips/Kconfig
9186 -@@ -2072,7 +2072,7 @@ config MIPSR2_TO_R6_EMULATOR
9187 - help
9188 - Choose this option if you want to run non-R6 MIPS userland code.
9189 - Even if you say 'Y' here, the emulator will still be disabled by
9190 -- default. You can enable it using the 'mipsr2emul' kernel option.
9191 -+ default. You can enable it using the 'mipsr2emu' kernel option.
9192 - The only reason this is a build-time option is to save ~14K from the
9193 - final kernel image.
9194 - comment "MIPS R2-to-R6 emulator is only available for UP kernels"
9195 -@@ -2142,7 +2142,7 @@ config MIPS_CMP
9196 -
9197 - config MIPS_CPS
9198 - bool "MIPS Coherent Processing System support"
9199 -- depends on SYS_SUPPORTS_MIPS_CPS
9200 -+ depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
9201 - select MIPS_CM
9202 - select MIPS_CPC
9203 - select MIPS_CPS_PM if HOTPLUG_CPU
9204 -diff --git a/arch/mips/Makefile b/arch/mips/Makefile
9205 -index 8f57fc72d62c..1b4dab1e6ab8 100644
9206 ---- a/arch/mips/Makefile
9207 -+++ b/arch/mips/Makefile
9208 -@@ -197,11 +197,17 @@ endif
9209 - # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
9210 - # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
9211 - # been fixed properly.
9212 --mips-cflags := "$(cflags-y)"
9213 --cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
9214 --cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
9215 -+mips-cflags := $(cflags-y)
9216 -+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
9217 -+smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
9218 -+cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
9219 -+endif
9220 -+ifeq ($(CONFIG_CPU_MICROMIPS),y)
9221 -+micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
9222 -+cflags-$(micromips-ase) += -mmicromips
9223 -+endif
9224 - ifeq ($(CONFIG_CPU_HAS_MSA),y)
9225 --toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
9226 -+toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
9227 - cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
9228 - endif
9229 -
9230 -diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
9231 -index b3ae068ca4fa..3fd369d74444 100644
9232 ---- a/arch/mips/bcm47xx/board.c
9233 -+++ b/arch/mips/bcm47xx/board.c
9234 -@@ -247,8 +247,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
9235 - }
9236 -
9237 - if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
9238 -- bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
9239 -- for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
9240 -+ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
9241 -+ for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
9242 - if (!strstarts(buf1, e2->value1) &&
9243 - !strcmp(buf2, e2->value2))
9244 - return &e2->board;
9245 -diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
9246 -index e1f27d653f60..7019e2967009 100644
9247 ---- a/arch/mips/bcm63xx/prom.c
9248 -+++ b/arch/mips/bcm63xx/prom.c
9249 -@@ -17,7 +17,6 @@
9250 - #include <bcm63xx_cpu.h>
9251 - #include <bcm63xx_io.h>
9252 - #include <bcm63xx_regs.h>
9253 --#include <bcm63xx_gpio.h>
9254 -
9255 - void __init prom_init(void)
9256 - {
9257 -@@ -53,9 +52,6 @@ void __init prom_init(void)
9258 - reg &= ~mask;
9259 - bcm_perf_writel(reg, PERF_CKCTL_REG);
9260 -
9261 -- /* register gpiochip */
9262 -- bcm63xx_gpio_init();
9263 --
9264 - /* do low level board init */
9265 - board_prom_init();
9266 -
9267 -diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
9268 -index 6660c7ddf87b..240fb4ffa55c 100644
9269 ---- a/arch/mips/bcm63xx/setup.c
9270 -+++ b/arch/mips/bcm63xx/setup.c
9271 -@@ -20,6 +20,7 @@
9272 - #include <bcm63xx_cpu.h>
9273 - #include <bcm63xx_regs.h>
9274 - #include <bcm63xx_io.h>
9275 -+#include <bcm63xx_gpio.h>
9276 -
9277 - void bcm63xx_machine_halt(void)
9278 - {
9279 -@@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
9280 -
9281 - int __init bcm63xx_register_devices(void)
9282 - {
9283 -+ /* register gpiochip */
9284 -+ bcm63xx_gpio_init();
9285 -+
9286 - return board_register_devices();
9287 - }
9288 -
9289 -diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
9290 -index 7d8987818ccf..d8960d46417b 100644
9291 ---- a/arch/mips/cavium-octeon/dma-octeon.c
9292 -+++ b/arch/mips/cavium-octeon/dma-octeon.c
9293 -@@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
9294 - swiotlbsize = 64 * (1<<20);
9295 - }
9296 - #endif
9297 --#ifdef CONFIG_USB_OCTEON_OHCI
9298 -+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
9299 - /* OCTEON II ohci is only 32-bit. */
9300 - if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
9301 - swiotlbsize = 64 * (1<<20);
9302 -diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
9303 -index a42110e7edbc..a7f40820e567 100644
9304 ---- a/arch/mips/cavium-octeon/setup.c
9305 -+++ b/arch/mips/cavium-octeon/setup.c
9306 -@@ -413,7 +413,10 @@ static void octeon_restart(char *command)
9307 -
9308 - mb();
9309 - while (1)
9310 -- cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
9311 -+ if (OCTEON_IS_OCTEON3())
9312 -+ cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
9313 -+ else
9314 -+ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
9315 - }
9316 -
9317 -
9318 -diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
9319 -index e08381a37f8b..723229f4cf27 100644
9320 ---- a/arch/mips/include/asm/cacheflush.h
9321 -+++ b/arch/mips/include/asm/cacheflush.h
9322 -@@ -29,6 +29,20 @@
9323 - * - flush_icache_all() flush the entire instruction cache
9324 - * - flush_data_cache_page() flushes a page from the data cache
9325 - */
9326 -+
9327 -+ /*
9328 -+ * This flag is used to indicate that the page pointed to by a pte
9329 -+ * is dirty and requires cleaning before returning it to the user.
9330 -+ */
9331 -+#define PG_dcache_dirty PG_arch_1
9332 -+
9333 -+#define Page_dcache_dirty(page) \
9334 -+ test_bit(PG_dcache_dirty, &(page)->flags)
9335 -+#define SetPageDcacheDirty(page) \
9336 -+ set_bit(PG_dcache_dirty, &(page)->flags)
9337 -+#define ClearPageDcacheDirty(page) \
9338 -+ clear_bit(PG_dcache_dirty, &(page)->flags)
9339 -+
9340 - extern void (*flush_cache_all)(void);
9341 - extern void (*__flush_cache_all)(void);
9342 - extern void (*flush_cache_mm)(struct mm_struct *mm);
9343 -@@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
9344 - unsigned long start, unsigned long end);
9345 - extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
9346 - extern void __flush_dcache_page(struct page *page);
9347 -+extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
9348 -
9349 - #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
9350 - static inline void flush_dcache_page(struct page *page)
9351 - {
9352 -- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
9353 -+ if (cpu_has_dc_aliases)
9354 - __flush_dcache_page(page);
9355 --
9356 -+ else if (!cpu_has_ic_fills_f_dc)
9357 -+ SetPageDcacheDirty(page);
9358 - }
9359 -
9360 - #define flush_dcache_mmap_lock(mapping) do { } while (0)
9361 -@@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
9362 - static inline void flush_icache_page(struct vm_area_struct *vma,
9363 - struct page *page)
9364 - {
9365 -+ if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
9366 -+ Page_dcache_dirty(page)) {
9367 -+ __flush_icache_page(vma, page);
9368 -+ ClearPageDcacheDirty(page);
9369 -+ }
9370 - }
9371 -
9372 - extern void (*flush_icache_range)(unsigned long start, unsigned long end);
9373 -@@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void);
9374 - extern void (*local_flush_data_cache_page)(void * addr);
9375 - extern void (*flush_data_cache_page)(unsigned long addr);
9376 -
9377 --/*
9378 -- * This flag is used to indicate that the page pointed to by a pte
9379 -- * is dirty and requires cleaning before returning it to the user.
9380 -- */
9381 --#define PG_dcache_dirty PG_arch_1
9382 --
9383 --#define Page_dcache_dirty(page) \
9384 -- test_bit(PG_dcache_dirty, &(page)->flags)
9385 --#define SetPageDcacheDirty(page) \
9386 -- set_bit(PG_dcache_dirty, &(page)->flags)
9387 --#define ClearPageDcacheDirty(page) \
9388 -- clear_bit(PG_dcache_dirty, &(page)->flags)
9389 --
9390 - /* Run kernel code uncached, useful for cache probing functions. */
9391 - unsigned long run_uncached(void *func);
9392 -
9393 -diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
9394 -index 0d8208de9a3f..345fd7f80730 100644
9395 ---- a/arch/mips/include/asm/cpu-features.h
9396 -+++ b/arch/mips/include/asm/cpu-features.h
9397 -@@ -235,8 +235,39 @@
9398 - /* MIPSR2 and MIPSR6 have a lot of similarities */
9399 - #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
9400 -
9401 -+/*
9402 -+ * cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
9403 -+ *
9404 -+ * Returns non-zero value if the current processor implementation requires
9405 -+ * an IHB instruction to deal with an instruction hazard as per MIPS R2
9406 -+ * architecture specification, zero otherwise.
9407 -+ */
9408 - #ifndef cpu_has_mips_r2_exec_hazard
9409 --#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
9410 -+#define cpu_has_mips_r2_exec_hazard \
9411 -+({ \
9412 -+ int __res; \
9413 -+ \
9414 -+ switch (current_cpu_type()) { \
9415 -+ case CPU_M14KC: \
9416 -+ case CPU_74K: \
9417 -+ case CPU_1074K: \
9418 -+ case CPU_PROAPTIV: \
9419 -+ case CPU_P5600: \
9420 -+ case CPU_M5150: \
9421 -+ case CPU_QEMU_GENERIC: \
9422 -+ case CPU_CAVIUM_OCTEON: \
9423 -+ case CPU_CAVIUM_OCTEON_PLUS: \
9424 -+ case CPU_CAVIUM_OCTEON2: \
9425 -+ case CPU_CAVIUM_OCTEON3: \
9426 -+ __res = 0; \
9427 -+ break; \
9428 -+ \
9429 -+ default: \
9430 -+ __res = 1; \
9431 -+ } \
9432 -+ \
9433 -+ __res; \
9434 -+})
9435 - #endif
9436 -
9437 - /*
9438 -diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
9439 -index 535f196ffe02..694925a26924 100644
9440 ---- a/arch/mips/include/asm/elf.h
9441 -+++ b/arch/mips/include/asm/elf.h
9442 -@@ -294,6 +294,9 @@ do { \
9443 - if (personality(current->personality) != PER_LINUX) \
9444 - set_personality(PER_LINUX); \
9445 - \
9446 -+ clear_thread_flag(TIF_HYBRID_FPREGS); \
9447 -+ set_thread_flag(TIF_32BIT_FPREGS); \
9448 -+ \
9449 - mips_set_personality_fp(state); \
9450 - \
9451 - current->thread.abi = &mips_abi; \
9452 -@@ -319,6 +322,8 @@ do { \
9453 - do { \
9454 - set_thread_flag(TIF_32BIT_REGS); \
9455 - set_thread_flag(TIF_32BIT_ADDR); \
9456 -+ clear_thread_flag(TIF_HYBRID_FPREGS); \
9457 -+ set_thread_flag(TIF_32BIT_FPREGS); \
9458 - \
9459 - mips_set_personality_fp(state); \
9460 - \
9461 -diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
9462 -index fa1f3cfbae8d..d68e685cde60 100644
9463 ---- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
9464 -+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
9465 -@@ -50,7 +50,6 @@
9466 - #define cpu_has_mips32r2 0
9467 - #define cpu_has_mips64r1 0
9468 - #define cpu_has_mips64r2 1
9469 --#define cpu_has_mips_r2_exec_hazard 0
9470 - #define cpu_has_dsp 0
9471 - #define cpu_has_dsp2 0
9472 - #define cpu_has_mipsmt 0
9473 -diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
9474 -index 33db1c806b01..774bb45834cb 100644
9475 ---- a/arch/mips/include/asm/octeon/cvmx.h
9476 -+++ b/arch/mips/include/asm/octeon/cvmx.h
9477 -@@ -436,14 +436,6 @@ static inline uint64_t cvmx_get_cycle_global(void)
9478 -
9479 - /***************************************************************************/
9480 -
9481 --static inline void cvmx_reset_octeon(void)
9482 --{
9483 -- union cvmx_ciu_soft_rst ciu_soft_rst;
9484 -- ciu_soft_rst.u64 = 0;
9485 -- ciu_soft_rst.s.soft_rst = 1;
9486 -- cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
9487 --}
9488 --
9489 - /* Return the number of cores available in the chip */
9490 - static inline uint32_t cvmx_octeon_num_cores(void)
9491 - {
9492 -diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
9493 -index 64ba56a02843..1884609741a8 100644
9494 ---- a/arch/mips/include/asm/octeon/pci-octeon.h
9495 -+++ b/arch/mips/include/asm/octeon/pci-octeon.h
9496 -@@ -11,9 +11,6 @@
9497 -
9498 - #include <linux/pci.h>
9499 -
9500 --/* Some PCI cards require delays when accessing config space. */
9501 --#define PCI_CONFIG_SPACE_DELAY 10000
9502 --
9503 - /*
9504 - * The physical memory base mapped by BAR1. 256MB at the end of the
9505 - * first 4GB.
9506 -diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
9507 -index bef782c4a44b..f8f809fd6c6d 100644
9508 ---- a/arch/mips/include/asm/pgtable.h
9509 -+++ b/arch/mips/include/asm/pgtable.h
9510 -@@ -127,10 +127,6 @@ do { \
9511 - } \
9512 - } while(0)
9513 -
9514 --
9515 --extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
9516 -- pte_t pteval);
9517 --
9518 - #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
9519 -
9520 - #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
9521 -@@ -154,6 +150,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
9522 - }
9523 - }
9524 - }
9525 -+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
9526 -
9527 - static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
9528 - {
9529 -@@ -192,6 +189,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
9530 - }
9531 - #endif
9532 - }
9533 -+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
9534 -
9535 - static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
9536 - {
9537 -@@ -407,12 +405,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
9538 -
9539 - extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
9540 - pte_t pte);
9541 -+extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
9542 -+ pte_t pte);
9543 -
9544 - static inline void update_mmu_cache(struct vm_area_struct *vma,
9545 - unsigned long address, pte_t *ptep)
9546 - {
9547 - pte_t pte = *ptep;
9548 - __update_tlb(vma, address, pte);
9549 -+ __update_cache(vma, address, pte);
9550 - }
9551 -
9552 - static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
9553 -diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
9554 -index 1b22d2da88a1..38902bf97adc 100644
9555 ---- a/arch/mips/include/asm/r4kcache.h
9556 -+++ b/arch/mips/include/asm/r4kcache.h
9557 -@@ -12,6 +12,8 @@
9558 - #ifndef _ASM_R4KCACHE_H
9559 - #define _ASM_R4KCACHE_H
9560 -
9561 -+#include <linux/stringify.h>
9562 -+
9563 - #include <asm/asm.h>
9564 - #include <asm/cacheops.h>
9565 - #include <asm/compiler.h>
9566 -@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
9567 - " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
9568 - " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
9569 - " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
9570 -- " addiu $1, $0, 0x100 \n" \
9571 -+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
9572 - " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
9573 - " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
9574 - " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
9575 -@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
9576 - " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
9577 - " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
9578 - " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
9579 -- " addiu $1, %0, 0x100\n" \
9580 -+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
9581 - " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
9582 - " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
9583 - " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
9584 - " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
9585 -- " addiu $1, $1, 0x100\n" \
9586 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9587 - " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
9588 - " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
9589 - " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
9590 - " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
9591 -- " addiu $1, $1, 0x100\n" \
9592 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
9593 - " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
9594 - " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
9595 - " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
9596 -@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
9597 - " .set noat\n" \
9598 - " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
9599 - " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
9600 -- " addiu $1, %0, 0x100\n" \
9601 -+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
9602 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9603 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9604 -- " addiu $1, %0, 0x100\n" \
9605 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9606 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9607 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9608 -- " addiu $1, %0, 0x100\n" \
9609 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9610 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9611 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9612 -- " addiu $1, %0, 0x100\n" \
9613 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9614 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9615 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9616 -- " addiu $1, %0, 0x100\n" \
9617 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9618 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9619 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9620 -- " addiu $1, %0, 0x100\n" \
9621 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9622 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9623 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9624 -- " addiu $1, %0, 0x100\n" \
9625 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9626 - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
9627 - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
9628 - " .set pop\n" \
9629 -@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
9630 - " .set mips64r6\n" \
9631 - " .set noat\n" \
9632 - " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9633 -- " addiu $1, %0, 0x100\n" \
9634 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9635 -- " addiu $1, %0, 0x100\n" \
9636 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9637 -- " addiu $1, %0, 0x100\n" \
9638 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9639 -- " addiu $1, %0, 0x100\n" \
9640 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9641 -- " addiu $1, %0, 0x100\n" \
9642 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9643 -- " addiu $1, %0, 0x100\n" \
9644 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9645 -- " addiu $1, %0, 0x100\n" \
9646 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9647 -- " addiu $1, %0, 0x100\n" \
9648 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9649 -- " addiu $1, %0, 0x100\n" \
9650 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9651 -- " addiu $1, %0, 0x100\n" \
9652 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9653 -- " addiu $1, %0, 0x100\n" \
9654 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9655 -- " addiu $1, %0, 0x100\n" \
9656 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9657 -- " addiu $1, %0, 0x100\n" \
9658 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9659 -- " addiu $1, %0, 0x100\n" \
9660 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9661 -- " addiu $1, %0, 0x100\n" \
9662 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9663 -- " addiu $1, %0, 0x100\n" \
9664 -- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
9665 -- " addiu $1, %0, 0x100\n" \
9666 -+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
9667 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9668 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9669 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9670 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9671 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9672 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9673 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9674 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9675 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9676 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9677 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9678 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9679 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9680 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9681 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9682 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9683 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9684 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9685 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9686 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9687 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9688 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9689 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9690 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9691 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9692 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9693 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9694 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9695 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9696 -+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
9697 -+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
9698 - " .set pop\n" \
9699 - : \
9700 - : "r" (base), \
9701 -diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
9702 -index b4548690ade9..1fca2e0793dc 100644
9703 ---- a/arch/mips/include/asm/spinlock.h
9704 -+++ b/arch/mips/include/asm/spinlock.h
9705 -@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
9706 - if (R10000_LLSC_WAR) {
9707 - __asm__ __volatile__(
9708 - "1: ll %1, %2 # arch_read_unlock \n"
9709 -- " addiu %1, 1 \n"
9710 -+ " addiu %1, -1 \n"
9711 - " sc %1, %0 \n"
9712 - " beqzl %1, 1b \n"
9713 - : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
9714 -diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
9715 -index af41ba6db960..7791840cf22c 100644
9716 ---- a/arch/mips/kernel/entry.S
9717 -+++ b/arch/mips/kernel/entry.S
9718 -@@ -10,6 +10,7 @@
9719 -
9720 - #include <asm/asm.h>
9721 - #include <asm/asmmacro.h>
9722 -+#include <asm/compiler.h>
9723 - #include <asm/regdef.h>
9724 - #include <asm/mipsregs.h>
9725 - #include <asm/stackframe.h>
9726 -@@ -185,7 +186,7 @@ syscall_exit_work:
9727 - * For C code use the inline version named instruction_hazard().
9728 - */
9729 - LEAF(mips_ihb)
9730 -- .set mips32r2
9731 -+ .set MIPS_ISA_LEVEL_RAW
9732 - jr.hb ra
9733 - nop
9734 - END(mips_ihb)
9735 -diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
9736 -index bed7590e475f..d5589bedd0a4 100644
9737 ---- a/arch/mips/kernel/smp-cps.c
9738 -+++ b/arch/mips/kernel/smp-cps.c
9739 -@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
9740 -
9741 - /* Make core 0 coherent with everything */
9742 - write_gcr_cl_coherence(0xff);
9743 -+
9744 -+#ifdef CONFIG_MIPS_MT_FPAFF
9745 -+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
9746 -+ if (cpu_has_fpu)
9747 -+ cpu_set(0, mt_fpu_cpumask);
9748 -+#endif /* CONFIG_MIPS_MT_FPAFF */
9749 - }
9750 -
9751 - static void __init cps_prepare_cpus(unsigned int max_cpus)
9752 -diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
9753 -index 7e3ea7766822..77d96db8253c 100644
9754 ---- a/arch/mips/mm/cache.c
9755 -+++ b/arch/mips/mm/cache.c
9756 -@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
9757 -
9758 - EXPORT_SYMBOL(__flush_anon_page);
9759 -
9760 --static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
9761 -+void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
9762 -+{
9763 -+ unsigned long addr;
9764 -+
9765 -+ if (PageHighMem(page))
9766 -+ return;
9767 -+
9768 -+ addr = (unsigned long) page_address(page);
9769 -+ flush_data_cache_page(addr);
9770 -+}
9771 -+EXPORT_SYMBOL_GPL(__flush_icache_page);
9772 -+
9773 -+void __update_cache(struct vm_area_struct *vma, unsigned long address,
9774 -+ pte_t pte)
9775 - {
9776 - struct page *page;
9777 -- unsigned long pfn = pte_pfn(pteval);
9778 -+ unsigned long pfn, addr;
9779 -+ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
9780 -
9781 -+ pfn = pte_pfn(pte);
9782 - if (unlikely(!pfn_valid(pfn)))
9783 - return;
9784 --
9785 - page = pfn_to_page(pfn);
9786 - if (page_mapping(page) && Page_dcache_dirty(page)) {
9787 -- unsigned long page_addr = (unsigned long) page_address(page);
9788 --
9789 -- if (!cpu_has_ic_fills_f_dc ||
9790 -- pages_do_alias(page_addr, address & PAGE_MASK))
9791 -- flush_data_cache_page(page_addr);
9792 -+ addr = (unsigned long) page_address(page);
9793 -+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
9794 -+ flush_data_cache_page(addr);
9795 - ClearPageDcacheDirty(page);
9796 - }
9797 - }
9798 -
9799 --void set_pte_at(struct mm_struct *mm, unsigned long addr,
9800 -- pte_t *ptep, pte_t pteval)
9801 --{
9802 -- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
9803 -- if (pte_present(pteval))
9804 -- mips_flush_dcache_from_pte(pteval, addr);
9805 -- }
9806 --
9807 -- set_pte(ptep, pteval);
9808 --}
9809 --
9810 - unsigned long _page_cachable_default;
9811 - EXPORT_SYMBOL(_page_cachable_default);
9812 -
9813 -diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
9814 -index d75ff73a2012..a79fd0af0224 100644
9815 ---- a/arch/mips/mm/tlbex.c
9816 -+++ b/arch/mips/mm/tlbex.c
9817 -@@ -501,26 +501,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
9818 - case tlb_indexed: tlbw = uasm_i_tlbwi; break;
9819 - }
9820 -
9821 -- if (cpu_has_mips_r2_exec_hazard) {
9822 -- /*
9823 -- * The architecture spec says an ehb is required here,
9824 -- * but a number of cores do not have the hazard and
9825 -- * using an ehb causes an expensive pipeline stall.
9826 -- */
9827 -- switch (current_cpu_type()) {
9828 -- case CPU_M14KC:
9829 -- case CPU_74K:
9830 -- case CPU_1074K:
9831 -- case CPU_PROAPTIV:
9832 -- case CPU_P5600:
9833 -- case CPU_M5150:
9834 -- case CPU_QEMU_GENERIC:
9835 -- break;
9836 --
9837 -- default:
9838 -+ if (cpu_has_mips_r2_r6) {
9839 -+ if (cpu_has_mips_r2_exec_hazard)
9840 - uasm_i_ehb(p);
9841 -- break;
9842 -- }
9843 - tlbw(p);
9844 - return;
9845 - }
9846 -diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
9847 -index c83dbf3689e2..7b066a44e679 100644
9848 ---- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
9849 -+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
9850 -@@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
9851 - static void config_sata_phy(u64 regbase)
9852 - {
9853 - u32 port, i, reg;
9854 -+ u8 val;
9855 -
9856 - for (port = 0; port < 2; port++) {
9857 - for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
9858 -@@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
9859 -
9860 - for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
9861 - write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
9862 -+
9863 -+ /* Fix for PHY link up failures at lower temperatures */
9864 -+ write_phy_reg(regbase, 0x800F, port, 0x1f);
9865 -+
9866 -+ val = read_phy_reg(regbase, 0x0029, port);
9867 -+ write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
9868 -+
9869 -+ val = read_phy_reg(regbase, 0x0056, port);
9870 -+ write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
9871 -+
9872 -+ val = read_phy_reg(regbase, 0x0018, port);
9873 -+ write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
9874 - }
9875 - }
9876 -
9877 -diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
9878 -index 300591c6278d..2eda01e6e08f 100644
9879 ---- a/arch/mips/pci/Makefile
9880 -+++ b/arch/mips/pci/Makefile
9881 -@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
9882 - obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
9883 - obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
9884 - obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
9885 --obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
9886 -+obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
9887 - obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
9888 - obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
9889 - obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
9890 -diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
9891 -index a04af55d89f1..c258cd406fbb 100644
9892 ---- a/arch/mips/pci/pci-octeon.c
9893 -+++ b/arch/mips/pci/pci-octeon.c
9894 -@@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
9895 - return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
9896 - case CVMX_BOARD_TYPE_BBGW_REF:
9897 - return "AABCD";
9898 -+ case CVMX_BOARD_TYPE_CUST_DSR1000N:
9899 -+ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
9900 - case CVMX_BOARD_TYPE_THUNDER:
9901 - case CVMX_BOARD_TYPE_EBH3000:
9902 - default:
9903 -@@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
9904 - pci_addr.s.func = devfn & 0x7;
9905 - pci_addr.s.reg = reg;
9906 -
9907 --#if PCI_CONFIG_SPACE_DELAY
9908 -- udelay(PCI_CONFIG_SPACE_DELAY);
9909 --#endif
9910 - switch (size) {
9911 - case 4:
9912 - *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
9913 -@@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
9914 - pci_addr.s.func = devfn & 0x7;
9915 - pci_addr.s.reg = reg;
9916 -
9917 --#if PCI_CONFIG_SPACE_DELAY
9918 -- udelay(PCI_CONFIG_SPACE_DELAY);
9919 --#endif
9920 - switch (size) {
9921 - case 4:
9922 - cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
9923 -diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
9924 -index 1bb0b2bf8d6e..99f3db4f0a9b 100644
9925 ---- a/arch/mips/pci/pcie-octeon.c
9926 -+++ b/arch/mips/pci/pcie-octeon.c
9927 -@@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
9928 - default:
9929 - return PCIBIOS_FUNC_NOT_SUPPORTED;
9930 - }
9931 --#if PCI_CONFIG_SPACE_DELAY
9932 -- /*
9933 -- * Delay on writes so that devices have time to come up. Some
9934 -- * bridges need this to allow time for the secondary busses to
9935 -- * work
9936 -- */
9937 -- udelay(PCI_CONFIG_SPACE_DELAY);
9938 --#endif
9939 - return PCIBIOS_SUCCESSFUL;
9940 - }
9941 -
9942 -diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
9943 -index b1c52ca580f9..e9bc8c96174e 100644
9944 ---- a/arch/mips/ralink/Kconfig
9945 -+++ b/arch/mips/ralink/Kconfig
9946 -@@ -7,6 +7,11 @@ config CLKEVT_RT3352
9947 - select CLKSRC_OF
9948 - select CLKSRC_MMIO
9949 -
9950 -+config RALINK_ILL_ACC
9951 -+ bool
9952 -+ depends on SOC_RT305X
9953 -+ default y
9954 -+
9955 - choice
9956 - prompt "Ralink SoC selection"
9957 - default SOC_RT305X
9958 -diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
9959 -index a7a3edd28beb..f23179e84128 100644
9960 ---- a/drivers/acpi/sbs.c
9961 -+++ b/drivers/acpi/sbs.c
9962 -@@ -670,7 +670,7 @@ static int acpi_sbs_add(struct acpi_device *device)
9963 - if (!sbs_manager_broken) {
9964 - result = acpi_manager_get_info(sbs);
9965 - if (!result) {
9966 -- sbs->manager_present = 0;
9967 -+ sbs->manager_present = 1;
9968 - for (id = 0; id < MAX_SBS_BAT; ++id)
9969 - if ((sbs->batteries_supported & (1 << id)))
9970 - acpi_battery_add(sbs, id);
9971 -diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
9972 -index b40af3203089..b67066d0d9a6 100644
9973 ---- a/drivers/block/rbd.c
9974 -+++ b/drivers/block/rbd.c
9975 -@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
9976 - result, xferred);
9977 - if (!img_request->result)
9978 - img_request->result = result;
9979 -+ /*
9980 -+ * Need to end I/O on the entire obj_request worth of
9981 -+ * bytes in case of error.
9982 -+ */
9983 -+ xferred = obj_request->length;
9984 - }
9985 -
9986 - /* Image object requests don't own their page array */
9987 -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
9988 -index 9bd56116fd5a..1afc0b419da2 100644
9989 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c
9990 -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
9991 -@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
9992 - else
9993 - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
9994 -
9995 -+ /* if there is no audio, set MINM_OVER_MAXP */
9996 -+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
9997 -+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
9998 - if (rdev->family < CHIP_RV770)
9999 - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
10000 - /* use frac fb div on APUs */
10001 -diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
10002 -index c39c1d0d9d4e..f20eb32406d1 100644
10003 ---- a/drivers/gpu/drm/radeon/atombios_encoders.c
10004 -+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
10005 -@@ -1729,17 +1729,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
10006 - struct drm_device *dev = encoder->dev;
10007 - struct radeon_device *rdev = dev->dev_private;
10008 - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
10009 -- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
10010 - int encoder_mode = atombios_get_encoder_mode(encoder);
10011 -
10012 - DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
10013 - radeon_encoder->encoder_id, mode, radeon_encoder->devices,
10014 - radeon_encoder->active_device);
10015 -
10016 -- if (connector && (radeon_audio != 0) &&
10017 -+ if ((radeon_audio != 0) &&
10018 - ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
10019 -- (ENCODER_MODE_IS_DP(encoder_mode) &&
10020 -- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
10021 -+ ENCODER_MODE_IS_DP(encoder_mode)))
10022 - radeon_audio_dpms(encoder, mode);
10023 -
10024 - switch (radeon_encoder->encoder_id) {
10025 -diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
10026 -index 3adc2afe32aa..68fd9fc677e3 100644
10027 ---- a/drivers/gpu/drm/radeon/dce6_afmt.c
10028 -+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
10029 -@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
10030 - WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
10031 - }
10032 - }
10033 --
10034 --void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
10035 --{
10036 -- struct drm_device *dev = encoder->dev;
10037 -- struct radeon_device *rdev = dev->dev_private;
10038 -- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
10039 -- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
10040 --
10041 -- if (!dig || !dig->afmt)
10042 -- return;
10043 --
10044 -- if (enable) {
10045 -- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
10046 -- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
10047 -- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
10048 -- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
10049 -- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
10050 -- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
10051 -- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
10052 -- } else {
10053 -- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
10054 -- }
10055 --
10056 -- dig->afmt->enabled = enable;
10057 --}
10058 -diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
10059 -index c18d4ecbd95d..0926739c9fa7 100644
10060 ---- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
10061 -+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
10062 -@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
10063 - WREG32(AFMT_AVI_INFO3 + offset,
10064 - frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
10065 -
10066 -- WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
10067 -- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
10068 -- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
10069 --
10070 - WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
10071 -- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
10072 -- ~HDMI_AVI_INFO_LINE_MASK);
10073 -+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
10074 -+ ~HDMI_AVI_INFO_LINE_MASK);
10075 - }
10076 -
10077 - void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
10078 -@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
10079 - WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
10080 - AFMT_AUDIO_CHANNEL_ENABLE(0xff));
10081 -
10082 -+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
10083 -+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
10084 -+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
10085 -+
10086 - /* allow 60958 channel status and send audio packets fields to be updated */
10087 -- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
10088 -- AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
10089 -+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
10090 -+ AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
10091 - }
10092 -
10093 -
10094 -@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
10095 - return;
10096 -
10097 - if (enable) {
10098 -- WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
10099 -- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
10100 --
10101 -- WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
10102 -- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
10103 -- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
10104 -+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
10105 -
10106 -- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
10107 -- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
10108 -- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
10109 -+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
10110 -+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
10111 -+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
10112 -+ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
10113 -+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
10114 -+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
10115 -+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
10116 -+ AFMT_AUDIO_SAMPLE_SEND);
10117 -+ } else {
10118 -+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
10119 -+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
10120 -+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
10121 -+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
10122 -+ ~AFMT_AUDIO_SAMPLE_SEND);
10123 -+ }
10124 - } else {
10125 -+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
10126 -+ ~AFMT_AUDIO_SAMPLE_SEND);
10127 - WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
10128 - }
10129 -
10130 -@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
10131 - struct radeon_device *rdev = dev->dev_private;
10132 - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
10133 - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
10134 -+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
10135 -
10136 - if (!dig || !dig->afmt)
10137 - return;
10138 -
10139 -- if (enable) {
10140 -+ if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
10141 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
10142 - struct radeon_connector *radeon_connector = to_radeon_connector(connector);
10143 - struct radeon_connector_atom_dig *dig_connector;
10144 - uint32_t val;
10145 -
10146 -+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
10147 -+ AFMT_AUDIO_SAMPLE_SEND);
10148 -+
10149 - WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
10150 - EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
10151 -
10152 -- if (radeon_connector->con_priv) {
10153 -+ if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
10154 - dig_connector = radeon_connector->con_priv;
10155 - val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
10156 - val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
10157 -@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
10158 - EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
10159 - } else {
10160 - WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
10161 -+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
10162 -+ ~AFMT_AUDIO_SAMPLE_SEND);
10163 - }
10164 -
10165 - dig->afmt->enabled = enable;
10166 -diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
10167 -index dd6606b8e23c..e85894ade95c 100644
10168 ---- a/drivers/gpu/drm/radeon/r600_hdmi.c
10169 -+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
10170 -@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
10171 - WREG32(HDMI0_AVI_INFO3 + offset,
10172 - frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
10173 -
10174 -+ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
10175 -+ HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
10176 -+
10177 - WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
10178 -- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
10179 -- HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
10180 -+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
10181 -+ HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
10182 -
10183 -- WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
10184 -- HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
10185 - }
10186 -
10187 - /*
10188 -diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
10189 -index b21ef69a34ac..b7d33a13db9f 100644
10190 ---- a/drivers/gpu/drm/radeon/radeon_audio.c
10191 -+++ b/drivers/gpu/drm/radeon/radeon_audio.c
10192 -@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
10193 - void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
10194 - void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
10195 - void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
10196 --void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
10197 -
10198 - static const u32 pin_offsets[7] =
10199 - {
10200 -@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
10201 - .set_avi_packet = evergreen_set_avi_packet,
10202 - .set_audio_packet = dce4_set_audio_packet,
10203 - .mode_set = radeon_audio_dp_mode_set,
10204 -- .dpms = dce6_dp_enable,
10205 -+ .dpms = evergreen_dp_enable,
10206 - };
10207 -
10208 - static void radeon_audio_interface_init(struct radeon_device *rdev)
10209 -@@ -461,30 +460,33 @@ void radeon_audio_detect(struct drm_connector *connector,
10210 - if (!connector || !connector->encoder)
10211 - return;
10212 -
10213 -+ if (!radeon_encoder_is_digital(connector->encoder))
10214 -+ return;
10215 -+
10216 - rdev = connector->encoder->dev->dev_private;
10217 - radeon_encoder = to_radeon_encoder(connector->encoder);
10218 - dig = radeon_encoder->enc_priv;
10219 -
10220 -- if (status == connector_status_connected) {
10221 -- struct radeon_connector *radeon_connector;
10222 -- int sink_type;
10223 --
10224 -- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
10225 -- radeon_encoder->audio = NULL;
10226 -- return;
10227 -- }
10228 -+ if (!dig->afmt)
10229 -+ return;
10230 -
10231 -- radeon_connector = to_radeon_connector(connector);
10232 -- sink_type = radeon_dp_getsinktype(radeon_connector);
10233 -+ if (status == connector_status_connected) {
10234 -+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
10235 -
10236 - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
10237 -- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
10238 -+ radeon_dp_getsinktype(radeon_connector) ==
10239 -+ CONNECTOR_OBJECT_ID_DISPLAYPORT)
10240 - radeon_encoder->audio = rdev->audio.dp_funcs;
10241 - else
10242 - radeon_encoder->audio = rdev->audio.hdmi_funcs;
10243 -
10244 - dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
10245 -- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
10246 -+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
10247 -+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
10248 -+ } else {
10249 -+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
10250 -+ dig->afmt->pin = NULL;
10251 -+ }
10252 - } else {
10253 - radeon_audio_enable(rdev, dig->afmt->pin, 0);
10254 - dig->afmt->pin = NULL;
10255 -diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
10256 -index 27def67cb6be..27973e3faf0e 100644
10257 ---- a/drivers/gpu/drm/radeon/radeon_connectors.c
10258 -+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
10259 -@@ -1333,8 +1333,10 @@ out:
10260 - /* updated in get modes as well since we need to know if it's analog or digital */
10261 - radeon_connector_update_scratch_regs(connector, ret);
10262 -
10263 -- if (radeon_audio != 0)
10264 -+ if (radeon_audio != 0) {
10265 -+ radeon_connector_get_edid(connector);
10266 - radeon_audio_detect(connector, ret);
10267 -+ }
10268 -
10269 - exit:
10270 - pm_runtime_mark_last_busy(connector->dev->dev);
10271 -@@ -1659,8 +1661,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
10272 -
10273 - radeon_connector_update_scratch_regs(connector, ret);
10274 -
10275 -- if (radeon_audio != 0)
10276 -+ if (radeon_audio != 0) {
10277 -+ radeon_connector_get_edid(connector);
10278 - radeon_audio_detect(connector, ret);
10279 -+ }
10280 -
10281 - out:
10282 - pm_runtime_mark_last_busy(connector->dev->dev);
10283 -diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
10284 -index 4d0f96cc3da4..ab39b85e0f76 100644
10285 ---- a/drivers/gpu/drm/radeon/radeon_cs.c
10286 -+++ b/drivers/gpu/drm/radeon/radeon_cs.c
10287 -@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
10288 - p->dma_reloc_idx = 0;
10289 - /* FIXME: we assume that each relocs use 4 dwords */
10290 - p->nrelocs = chunk->length_dw / 4;
10291 -- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
10292 -+ p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
10293 - if (p->relocs == NULL) {
10294 - return -ENOMEM;
10295 - }
10296 -@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
10297 - }
10298 - }
10299 - kfree(parser->track);
10300 -- kfree(parser->relocs);
10301 -+ drm_free_large(parser->relocs);
10302 - drm_free_large(parser->vm_bos);
10303 - for (i = 0; i < parser->nchunks; i++)
10304 - drm_free_large(parser->chunks[i].kdata);
10305 -diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
10306 -index 2a5a4a9e772d..de42fc4a22b8 100644
10307 ---- a/drivers/gpu/drm/radeon/radeon_vm.c
10308 -+++ b/drivers/gpu/drm/radeon/radeon_vm.c
10309 -@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
10310 - }
10311 -
10312 - mutex_lock(&vm->mutex);
10313 -+ soffset /= RADEON_GPU_PAGE_SIZE;
10314 -+ eoffset /= RADEON_GPU_PAGE_SIZE;
10315 -+ if (soffset || eoffset) {
10316 -+ struct interval_tree_node *it;
10317 -+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
10318 -+ if (it && it != &bo_va->it) {
10319 -+ struct radeon_bo_va *tmp;
10320 -+ tmp = container_of(it, struct radeon_bo_va, it);
10321 -+ /* bo and tmp overlap, invalid offset */
10322 -+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
10323 -+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
10324 -+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
10325 -+ mutex_unlock(&vm->mutex);
10326 -+ return -EINVAL;
10327 -+ }
10328 -+ }
10329 -+
10330 - if (bo_va->it.start || bo_va->it.last) {
10331 - if (bo_va->addr) {
10332 - /* add a clone of the bo_va to clear the old address */
10333 -@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
10334 - spin_lock(&vm->status_lock);
10335 - list_add(&tmp->vm_status, &vm->freed);
10336 - spin_unlock(&vm->status_lock);
10337 -+
10338 -+ bo_va->addr = 0;
10339 - }
10340 -
10341 - interval_tree_remove(&bo_va->it, &vm->va);
10342 -@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
10343 - bo_va->it.last = 0;
10344 - }
10345 -
10346 -- soffset /= RADEON_GPU_PAGE_SIZE;
10347 -- eoffset /= RADEON_GPU_PAGE_SIZE;
10348 - if (soffset || eoffset) {
10349 -- struct interval_tree_node *it;
10350 -- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
10351 -- if (it) {
10352 -- struct radeon_bo_va *tmp;
10353 -- tmp = container_of(it, struct radeon_bo_va, it);
10354 -- /* bo and tmp overlap, invalid offset */
10355 -- dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
10356 -- "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
10357 -- soffset, tmp->bo, tmp->it.start, tmp->it.last);
10358 -- mutex_unlock(&vm->mutex);
10359 -- return -EINVAL;
10360 -- }
10361 - bo_va->it.start = soffset;
10362 - bo_va->it.last = eoffset - 1;
10363 - interval_tree_insert(&bo_va->it, &vm->va);
10364 -@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
10365 - list_del(&bo_va->bo_list);
10366 -
10367 - mutex_lock(&vm->mutex);
10368 -- interval_tree_remove(&bo_va->it, &vm->va);
10369 -+ if (bo_va->it.start || bo_va->it.last)
10370 -+ interval_tree_remove(&bo_va->it, &vm->va);
10371 - spin_lock(&vm->status_lock);
10372 - list_del(&bo_va->vm_status);
10373 -
10374 -diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
10375 -index 7be11651b7e6..9dbb3154d559 100644
10376 ---- a/drivers/gpu/drm/radeon/si_dpm.c
10377 -+++ b/drivers/gpu/drm/radeon/si_dpm.c
10378 -@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
10379 - static struct si_dpm_quirk si_dpm_quirk_list[] = {
10380 - /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
10381 - { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
10382 -+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
10383 - { 0, 0, 0, 0 },
10384 - };
10385 -
10386 -diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
10387 -index 3736f71bdec5..18def3022f6e 100644
10388 ---- a/drivers/hv/channel_mgmt.c
10389 -+++ b/drivers/hv/channel_mgmt.c
10390 -@@ -787,7 +787,7 @@ int vmbus_request_offers(void)
10391 - {
10392 - struct vmbus_channel_message_header *msg;
10393 - struct vmbus_channel_msginfo *msginfo;
10394 -- int ret, t;
10395 -+ int ret;
10396 -
10397 - msginfo = kmalloc(sizeof(*msginfo) +
10398 - sizeof(struct vmbus_channel_message_header),
10399 -@@ -795,8 +795,6 @@ int vmbus_request_offers(void)
10400 - if (!msginfo)
10401 - return -ENOMEM;
10402 -
10403 -- init_completion(&msginfo->waitevent);
10404 --
10405 - msg = (struct vmbus_channel_message_header *)msginfo->msg;
10406 -
10407 - msg->msgtype = CHANNELMSG_REQUESTOFFERS;
10408 -@@ -810,14 +808,6 @@ int vmbus_request_offers(void)
10409 - goto cleanup;
10410 - }
10411 -
10412 -- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
10413 -- if (t == 0) {
10414 -- ret = -ETIMEDOUT;
10415 -- goto cleanup;
10416 -- }
10417 --
10418 --
10419 --
10420 - cleanup:
10421 - kfree(msginfo);
10422 -
10423 -diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10424 -index ee394dc68303..ec1ea8ba7aac 100644
10425 ---- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10426 -+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10427 -@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
10428 - memoffset = (mtype * (edc_size * 1024 * 1024));
10429 - else {
10430 - mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
10431 -- MA_EXT_MEMORY1_BAR_A));
10432 -+ MA_EXT_MEMORY0_BAR_A));
10433 - memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
10434 - }
10435 -
10436 -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
10437 -index 3485acf03014..2f1324bed7b3 100644
10438 ---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
10439 -+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
10440 -@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
10441 - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
10442 - mlx4_en_ptp_overflow_check(mdev);
10443 -
10444 -+ mlx4_en_recover_from_oom(priv);
10445 - queue_delayed_work(mdev->workqueue, &priv->service_task,
10446 - SERVICE_TASK_DELAY);
10447 - }
10448 -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
10449 -index 698d60de1255..05ec5e151ded 100644
10450 ---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
10451 -+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
10452 -@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
10453 - return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
10454 - }
10455 -
10456 -+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
10457 -+{
10458 -+ BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
10459 -+ return ring->prod == ring->cons;
10460 -+}
10461 -+
10462 - static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
10463 - {
10464 - *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
10465 -@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
10466 - ring->cons, ring->prod);
10467 -
10468 - /* Unmap and free Rx buffers */
10469 -- BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
10470 -- while (ring->cons != ring->prod) {
10471 -+ while (!mlx4_en_is_ring_empty(ring)) {
10472 - index = ring->cons & ring->size_mask;
10473 - en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
10474 - mlx4_en_free_rx_desc(priv, ring, index);
10475 -@@ -491,6 +496,23 @@ err_allocator:
10476 - return err;
10477 - }
10478 -
10479 -+/* We recover from out of memory by scheduling our napi poll
10480 -+ * function (mlx4_en_process_cq), which tries to allocate
10481 -+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
10482 -+ */
10483 -+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
10484 -+{
10485 -+ int ring;
10486 -+
10487 -+ if (!priv->port_up)
10488 -+ return;
10489 -+
10490 -+ for (ring = 0; ring < priv->rx_ring_num; ring++) {
10491 -+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
10492 -+ napi_reschedule(&priv->rx_cq[ring]->napi);
10493 -+ }
10494 -+}
10495 -+
10496 - void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
10497 - struct mlx4_en_rx_ring **pring,
10498 - u32 size, u16 stride)
10499 -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
10500 -index 55f9f5c5344e..8c234ec1d8aa 100644
10501 ---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
10502 -+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
10503 -@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
10504 - ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
10505 - ring->queue_index = queue_index;
10506 -
10507 -- if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
10508 -- cpumask_set_cpu(queue_index, &ring->affinity_mask);
10509 -+ if (queue_index < priv->num_tx_rings_p_up)
10510 -+ cpumask_set_cpu_local_first(queue_index,
10511 -+ priv->mdev->dev->numa_node,
10512 -+ &ring->affinity_mask);
10513 -
10514 - *pring = ring;
10515 - return 0;
10516 -@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
10517 -
10518 - err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
10519 - &ring->qp, &ring->qp_state);
10520 -- if (!user_prio && cpu_online(ring->queue_index))
10521 -+ if (!cpumask_empty(&ring->affinity_mask))
10522 - netif_set_xps_queue(priv->dev, &ring->affinity_mask,
10523 - ring->queue_index);
10524 -
10525 -diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
10526 -index ebbe244e80dd..8687c8d54227 100644
10527 ---- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
10528 -+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
10529 -@@ -790,6 +790,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
10530 - void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
10531 - struct mlx4_en_tx_ring *ring);
10532 - void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
10533 -+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
10534 - int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
10535 - struct mlx4_en_rx_ring **pring,
10536 - u32 size, u16 stride, int node);
10537 -diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
10538 -index 7600639db4c4..add419d6ff34 100644
10539 ---- a/drivers/scsi/3w-9xxx.c
10540 -+++ b/drivers/scsi/3w-9xxx.c
10541 -@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
10542 - static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
10543 - static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
10544 - static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
10545 --static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
10546 -
10547 - /* Functions */
10548 -
10549 -@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
10550 - }
10551 -
10552 - /* Now complete the io */
10553 -+ scsi_dma_unmap(cmd);
10554 -+ cmd->scsi_done(cmd);
10555 - tw_dev->state[request_id] = TW_S_COMPLETED;
10556 - twa_free_request_id(tw_dev, request_id);
10557 - tw_dev->posted_request_count--;
10558 -- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
10559 -- twa_unmap_scsi_data(tw_dev, request_id);
10560 - }
10561 -
10562 - /* Check for valid status after each drain */
10563 -@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
10564 - }
10565 - } /* End twa_load_sgl() */
10566 -
10567 --/* This function will perform a pci-dma mapping for a scatter gather list */
10568 --static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
10569 --{
10570 -- int use_sg;
10571 -- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
10572 --
10573 -- use_sg = scsi_dma_map(cmd);
10574 -- if (!use_sg)
10575 -- return 0;
10576 -- else if (use_sg < 0) {
10577 -- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
10578 -- return 0;
10579 -- }
10580 --
10581 -- cmd->SCp.phase = TW_PHASE_SGLIST;
10582 -- cmd->SCp.have_data_in = use_sg;
10583 --
10584 -- return use_sg;
10585 --} /* End twa_map_scsi_sg_data() */
10586 --
10587 - /* This function will poll for a response interrupt of a request */
10588 - static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
10589 - {
10590 -@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
10591 - (tw_dev->state[i] != TW_S_INITIAL) &&
10592 - (tw_dev->state[i] != TW_S_COMPLETED)) {
10593 - if (tw_dev->srb[i]) {
10594 -- tw_dev->srb[i]->result = (DID_RESET << 16);
10595 -- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
10596 -- twa_unmap_scsi_data(tw_dev, i);
10597 -+ struct scsi_cmnd *cmd = tw_dev->srb[i];
10598 -+
10599 -+ cmd->result = (DID_RESET << 16);
10600 -+ scsi_dma_unmap(cmd);
10601 -+ cmd->scsi_done(cmd);
10602 - }
10603 - }
10604 - }
10605 -@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
10606 - /* Save the scsi command for use by the ISR */
10607 - tw_dev->srb[request_id] = SCpnt;
10608 -
10609 -- /* Initialize phase to zero */
10610 -- SCpnt->SCp.phase = TW_PHASE_INITIAL;
10611 --
10612 - retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
10613 - switch (retval) {
10614 - case SCSI_MLQUEUE_HOST_BUSY:
10615 -+ scsi_dma_unmap(SCpnt);
10616 - twa_free_request_id(tw_dev, request_id);
10617 -- twa_unmap_scsi_data(tw_dev, request_id);
10618 - break;
10619 - case 1:
10620 -- tw_dev->state[request_id] = TW_S_COMPLETED;
10621 -- twa_free_request_id(tw_dev, request_id);
10622 -- twa_unmap_scsi_data(tw_dev, request_id);
10623 - SCpnt->result = (DID_ERROR << 16);
10624 -+ scsi_dma_unmap(SCpnt);
10625 - done(SCpnt);
10626 -+ tw_dev->state[request_id] = TW_S_COMPLETED;
10627 -+ twa_free_request_id(tw_dev, request_id);
10628 - retval = 0;
10629 - }
10630 - out:
10631 -@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
10632 - command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
10633 - command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
10634 - } else {
10635 -- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
10636 -- if (sg_count == 0)
10637 -+ sg_count = scsi_dma_map(srb);
10638 -+ if (sg_count < 0)
10639 - goto out;
10640 -
10641 - scsi_for_each_sg(srb, sg, sg_count, i) {
10642 -@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
10643 - return(table[index].text);
10644 - } /* End twa_string_lookup() */
10645 -
10646 --/* This function will perform a pci-dma unmap */
10647 --static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
10648 --{
10649 -- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
10650 --
10651 -- if (cmd->SCp.phase == TW_PHASE_SGLIST)
10652 -- scsi_dma_unmap(cmd);
10653 --} /* End twa_unmap_scsi_data() */
10654 --
10655 - /* This function gets called when a disk is coming on-line */
10656 - static int twa_slave_configure(struct scsi_device *sdev)
10657 - {
10658 -diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
10659 -index 040f7214e5b7..0fdc83cfa0e1 100644
10660 ---- a/drivers/scsi/3w-9xxx.h
10661 -+++ b/drivers/scsi/3w-9xxx.h
10662 -@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
10663 - #define TW_CURRENT_DRIVER_BUILD 0
10664 - #define TW_CURRENT_DRIVER_BRANCH 0
10665 -
10666 --/* Phase defines */
10667 --#define TW_PHASE_INITIAL 0
10668 --#define TW_PHASE_SINGLE 1
10669 --#define TW_PHASE_SGLIST 2
10670 --
10671 - /* Misc defines */
10672 - #define TW_9550SX_DRAIN_COMPLETED 0xFFFF
10673 - #define TW_SECTOR_SIZE 512
10674 -diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
10675 -index 2361772d5909..f8374850f714 100644
10676 ---- a/drivers/scsi/3w-sas.c
10677 -+++ b/drivers/scsi/3w-sas.c
10678 -@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
10679 - return 0;
10680 - } /* End twl_post_command_packet() */
10681 -
10682 --/* This function will perform a pci-dma mapping for a scatter gather list */
10683 --static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
10684 --{
10685 -- int use_sg;
10686 -- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
10687 --
10688 -- use_sg = scsi_dma_map(cmd);
10689 -- if (!use_sg)
10690 -- return 0;
10691 -- else if (use_sg < 0) {
10692 -- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
10693 -- return 0;
10694 -- }
10695 --
10696 -- cmd->SCp.phase = TW_PHASE_SGLIST;
10697 -- cmd->SCp.have_data_in = use_sg;
10698 --
10699 -- return use_sg;
10700 --} /* End twl_map_scsi_sg_data() */
10701 --
10702 - /* This function hands scsi cdb's to the firmware */
10703 - static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
10704 - {
10705 -@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
10706 - if (!sglistarg) {
10707 - /* Map sglist from scsi layer to cmd packet */
10708 - if (scsi_sg_count(srb)) {
10709 -- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
10710 -- if (sg_count == 0)
10711 -+ sg_count = scsi_dma_map(srb);
10712 -+ if (sg_count <= 0)
10713 - goto out;
10714 -
10715 - scsi_for_each_sg(srb, sg, sg_count, i) {
10716 -@@ -1102,15 +1082,6 @@ out:
10717 - return retval;
10718 - } /* End twl_initialize_device_extension() */
10719 -
10720 --/* This function will perform a pci-dma unmap */
10721 --static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
10722 --{
10723 -- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
10724 --
10725 -- if (cmd->SCp.phase == TW_PHASE_SGLIST)
10726 -- scsi_dma_unmap(cmd);
10727 --} /* End twl_unmap_scsi_data() */
10728 --
10729 - /* This function will handle attention interrupts */
10730 - static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
10731 - {
10732 -@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
10733 - }
10734 -
10735 - /* Now complete the io */
10736 -+ scsi_dma_unmap(cmd);
10737 -+ cmd->scsi_done(cmd);
10738 - tw_dev->state[request_id] = TW_S_COMPLETED;
10739 - twl_free_request_id(tw_dev, request_id);
10740 - tw_dev->posted_request_count--;
10741 -- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
10742 -- twl_unmap_scsi_data(tw_dev, request_id);
10743 - }
10744 -
10745 - /* Check for another response interrupt */
10746 -@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
10747 - if ((tw_dev->state[i] != TW_S_FINISHED) &&
10748 - (tw_dev->state[i] != TW_S_INITIAL) &&
10749 - (tw_dev->state[i] != TW_S_COMPLETED)) {
10750 -- if (tw_dev->srb[i]) {
10751 -- tw_dev->srb[i]->result = (DID_RESET << 16);
10752 -- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
10753 -- twl_unmap_scsi_data(tw_dev, i);
10754 -+ struct scsi_cmnd *cmd = tw_dev->srb[i];
10755 -+
10756 -+ if (cmd) {
10757 -+ cmd->result = (DID_RESET << 16);
10758 -+ scsi_dma_unmap(cmd);
10759 -+ cmd->scsi_done(cmd);
10760 - }
10761 - }
10762 - }
10763 -@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
10764 - /* Save the scsi command for use by the ISR */
10765 - tw_dev->srb[request_id] = SCpnt;
10766 -
10767 -- /* Initialize phase to zero */
10768 -- SCpnt->SCp.phase = TW_PHASE_INITIAL;
10769 --
10770 - retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
10771 - if (retval) {
10772 - tw_dev->state[request_id] = TW_S_COMPLETED;
10773 -diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
10774 -index d474892701d4..fec6449c7595 100644
10775 ---- a/drivers/scsi/3w-sas.h
10776 -+++ b/drivers/scsi/3w-sas.h
10777 -@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
10778 - #define TW_CURRENT_DRIVER_BUILD 0
10779 - #define TW_CURRENT_DRIVER_BRANCH 0
10780 -
10781 --/* Phase defines */
10782 --#define TW_PHASE_INITIAL 0
10783 --#define TW_PHASE_SGLIST 2
10784 --
10785 - /* Misc defines */
10786 - #define TW_SECTOR_SIZE 512
10787 - #define TW_MAX_UNITS 32
10788 -diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
10789 -index c75f2048319f..2940bd769936 100644
10790 ---- a/drivers/scsi/3w-xxxx.c
10791 -+++ b/drivers/scsi/3w-xxxx.c
10792 -@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
10793 - return 0;
10794 - } /* End tw_initialize_device_extension() */
10795 -
10796 --static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
10797 --{
10798 -- int use_sg;
10799 --
10800 -- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
10801 --
10802 -- use_sg = scsi_dma_map(cmd);
10803 -- if (use_sg < 0) {
10804 -- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
10805 -- return 0;
10806 -- }
10807 --
10808 -- cmd->SCp.phase = TW_PHASE_SGLIST;
10809 -- cmd->SCp.have_data_in = use_sg;
10810 --
10811 -- return use_sg;
10812 --} /* End tw_map_scsi_sg_data() */
10813 --
10814 --static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
10815 --{
10816 -- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
10817 --
10818 -- if (cmd->SCp.phase == TW_PHASE_SGLIST)
10819 -- scsi_dma_unmap(cmd);
10820 --} /* End tw_unmap_scsi_data() */
10821 --
10822 - /* This function will reset a device extension */
10823 - static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
10824 - {
10825 -@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
10826 - srb = tw_dev->srb[i];
10827 - if (srb != NULL) {
10828 - srb->result = (DID_RESET << 16);
10829 -- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
10830 -- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
10831 -+ scsi_dma_unmap(srb);
10832 -+ srb->scsi_done(srb);
10833 - }
10834 - }
10835 - }
10836 -@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
10837 - command_packet->byte8.io.lba = lba;
10838 - command_packet->byte6.block_count = num_sectors;
10839 -
10840 -- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
10841 -- if (!use_sg)
10842 -+ use_sg = scsi_dma_map(srb);
10843 -+ if (use_sg <= 0)
10844 - return 1;
10845 -
10846 - scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
10847 -@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
10848 - /* Save the scsi command for use by the ISR */
10849 - tw_dev->srb[request_id] = SCpnt;
10850 -
10851 -- /* Initialize phase to zero */
10852 -- SCpnt->SCp.phase = TW_PHASE_INITIAL;
10853 --
10854 - switch (*command) {
10855 - case READ_10:
10856 - case READ_6:
10857 -@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
10858 -
10859 - /* Now complete the io */
10860 - if ((error != TW_ISR_DONT_COMPLETE)) {
10861 -+ scsi_dma_unmap(tw_dev->srb[request_id]);
10862 -+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
10863 - tw_dev->state[request_id] = TW_S_COMPLETED;
10864 - tw_state_request_finish(tw_dev, request_id);
10865 - tw_dev->posted_request_count--;
10866 -- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
10867 --
10868 -- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
10869 - }
10870 - }
10871 -
10872 -diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
10873 -index 29b0b84ed69e..6f65e663d393 100644
10874 ---- a/drivers/scsi/3w-xxxx.h
10875 -+++ b/drivers/scsi/3w-xxxx.h
10876 -@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
10877 - #define TW_AEN_SMART_FAIL 0x000F
10878 - #define TW_AEN_SBUF_FAIL 0x0024
10879 -
10880 --/* Phase defines */
10881 --#define TW_PHASE_INITIAL 0
10882 --#define TW_PHASE_SINGLE 1
10883 --#define TW_PHASE_SGLIST 2
10884 --
10885 - /* Misc defines */
10886 - #define TW_ALIGNMENT_6000 64 /* 64 bytes */
10887 - #define TW_ALIGNMENT_7000 4 /* 4 bytes */
10888 -diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
10889 -index 262ab837a704..9f77d23239a2 100644
10890 ---- a/drivers/scsi/scsi_devinfo.c
10891 -+++ b/drivers/scsi/scsi_devinfo.c
10892 -@@ -226,6 +226,7 @@ static struct {
10893 - {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
10894 - {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
10895 - {"Promise", "", NULL, BLIST_SPARSELUN},
10896 -+ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
10897 - {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
10898 - {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
10899 - {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
10900 -diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
10901 -index 9c0a520d933c..3e6142f61499 100644
10902 ---- a/drivers/scsi/scsi_scan.c
10903 -+++ b/drivers/scsi/scsi_scan.c
10904 -@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
10905 - */
10906 - if (*bflags & BLIST_MAX_512)
10907 - blk_queue_max_hw_sectors(sdev->request_queue, 512);
10908 -+ /*
10909 -+ * Max 1024 sector transfer length for targets that report incorrect
10910 -+ * max/optimal lengths and relied on the old block layer safe default
10911 -+ */
10912 -+ else if (*bflags & BLIST_MAX_1024)
10913 -+ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
10914 -
10915 - /*
10916 - * Some devices may not want to have a start command automatically
10917 -diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
10918 -index 75b3603906c1..f0d22cdb51cd 100644
10919 ---- a/drivers/ssb/Kconfig
10920 -+++ b/drivers/ssb/Kconfig
10921 -@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
10922 - bool "SSB Broadcom MIPS core driver"
10923 - depends on SSB && MIPS
10924 - select SSB_SERIAL
10925 -+ select SSB_SFLASH
10926 - help
10927 - Driver for the Sonics Silicon Backplane attached
10928 - Broadcom MIPS core.
10929 -diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
10930 -index 4e959c43f680..6afce7eb3d74 100644
10931 ---- a/drivers/tty/serial/atmel_serial.c
10932 -+++ b/drivers/tty/serial/atmel_serial.c
10933 -@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
10934 - config.direction = DMA_MEM_TO_DEV;
10935 - config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
10936 - config.dst_addr = port->mapbase + ATMEL_US_THR;
10937 -+ config.dst_maxburst = 1;
10938 -
10939 - ret = dmaengine_slave_config(atmel_port->chan_tx,
10940 - &config);
10941 -@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
10942 - config.direction = DMA_DEV_TO_MEM;
10943 - config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
10944 - config.src_addr = port->mapbase + ATMEL_US_RHR;
10945 -+ config.src_maxburst = 1;
10946 -
10947 - ret = dmaengine_slave_config(atmel_port->chan_rx,
10948 - &config);
10949 -diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
10950 -index 33fb94f78967..0a52c8b55a5f 100644
10951 ---- a/drivers/tty/serial/of_serial.c
10952 -+++ b/drivers/tty/serial/of_serial.c
10953 -@@ -344,7 +344,6 @@ static struct of_device_id of_platform_serial_table[] = {
10954 - { .compatible = "ibm,qpace-nwp-serial",
10955 - .data = (void *)PORT_NWPSERIAL, },
10956 - #endif
10957 -- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
10958 - { /* end of list */ },
10959 - };
10960 -
10961 -diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
10962 -index 189f52e3111f..a0099a7f60d4 100644
10963 ---- a/drivers/tty/serial/uartlite.c
10964 -+++ b/drivers/tty/serial/uartlite.c
10965 -@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
10966 -
10967 - static int ulite_probe(struct platform_device *pdev)
10968 - {
10969 -- struct resource *res, *res2;
10970 -+ struct resource *res;
10971 -+ int irq;
10972 - int id = pdev->id;
10973 - #ifdef CONFIG_OF
10974 - const __be32 *prop;
10975 -@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
10976 - if (!res)
10977 - return -ENODEV;
10978 -
10979 -- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
10980 -- if (!res2)
10981 -- return -ENODEV;
10982 -+ irq = platform_get_irq(pdev, 0);
10983 -+ if (irq <= 0)
10984 -+ return -ENXIO;
10985 -
10986 -- return ulite_assign(&pdev->dev, id, res->start, res2->start);
10987 -+ return ulite_assign(&pdev->dev, id, res->start, irq);
10988 - }
10989 -
10990 - static int ulite_remove(struct platform_device *pdev)
10991 -diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
10992 -index cff531a51a78..54853a02ce9e 100644
10993 ---- a/drivers/tty/serial/xilinx_uartps.c
10994 -+++ b/drivers/tty/serial/xilinx_uartps.c
10995 -@@ -1325,9 +1325,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
10996 - */
10997 - static int cdns_uart_probe(struct platform_device *pdev)
10998 - {
10999 -- int rc, id;
11000 -+ int rc, id, irq;
11001 - struct uart_port *port;
11002 -- struct resource *res, *res2;
11003 -+ struct resource *res;
11004 - struct cdns_uart *cdns_uart_data;
11005 -
11006 - cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
11007 -@@ -1374,9 +1374,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
11008 - goto err_out_clk_disable;
11009 - }
11010 -
11011 -- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
11012 -- if (!res2) {
11013 -- rc = -ENODEV;
11014 -+ irq = platform_get_irq(pdev, 0);
11015 -+ if (irq <= 0) {
11016 -+ rc = -ENXIO;
11017 - goto err_out_clk_disable;
11018 - }
11019 -
11020 -@@ -1405,7 +1405,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
11021 - * and triggers invocation of the config_port() entry point.
11022 - */
11023 - port->mapbase = res->start;
11024 -- port->irq = res2->start;
11025 -+ port->irq = irq;
11026 - port->dev = &pdev->dev;
11027 - port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
11028 - port->private_data = cdns_uart_data;
11029 -diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
11030 -index 562e581f6765..3770330a2201 100644
11031 ---- a/drivers/usb/chipidea/otg_fsm.c
11032 -+++ b/drivers/usb/chipidea/otg_fsm.c
11033 -@@ -537,7 +537,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
11034 - {
11035 - struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
11036 -
11037 -- mutex_unlock(&fsm->lock);
11038 - if (on) {
11039 - ci_role_stop(ci);
11040 - ci_role_start(ci, CI_ROLE_HOST);
11041 -@@ -546,7 +545,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
11042 - hw_device_reset(ci);
11043 - ci_role_start(ci, CI_ROLE_GADGET);
11044 - }
11045 -- mutex_lock(&fsm->lock);
11046 - return 0;
11047 - }
11048 -
11049 -@@ -554,12 +552,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
11050 - {
11051 - struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
11052 -
11053 -- mutex_unlock(&fsm->lock);
11054 - if (on)
11055 - usb_gadget_vbus_connect(&ci->gadget);
11056 - else
11057 - usb_gadget_vbus_disconnect(&ci->gadget);
11058 -- mutex_lock(&fsm->lock);
11059 -
11060 - return 0;
11061 - }
11062 -diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
11063 -index 683617714e7c..220c0fd059bb 100644
11064 ---- a/drivers/usb/class/cdc-acm.c
11065 -+++ b/drivers/usb/class/cdc-acm.c
11066 -@@ -1133,11 +1133,16 @@ static int acm_probe(struct usb_interface *intf,
11067 - }
11068 -
11069 - while (buflen > 0) {
11070 -+ elength = buffer[0];
11071 -+ if (!elength) {
11072 -+ dev_err(&intf->dev, "skipping garbage byte\n");
11073 -+ elength = 1;
11074 -+ goto next_desc;
11075 -+ }
11076 - if (buffer[1] != USB_DT_CS_INTERFACE) {
11077 - dev_err(&intf->dev, "skipping garbage\n");
11078 - goto next_desc;
11079 - }
11080 -- elength = buffer[0];
11081 -
11082 - switch (buffer[2]) {
11083 - case USB_CDC_UNION_TYPE: /* we've found it */
11084 -diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
11085 -index 9893d696fc97..f58caa9e6a27 100644
11086 ---- a/drivers/usb/storage/uas-detect.h
11087 -+++ b/drivers/usb/storage/uas-detect.h
11088 -@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
11089 - }
11090 -
11091 - static int uas_use_uas_driver(struct usb_interface *intf,
11092 -- const struct usb_device_id *id)
11093 -+ const struct usb_device_id *id,
11094 -+ unsigned long *flags_ret)
11095 - {
11096 - struct usb_host_endpoint *eps[4] = { };
11097 - struct usb_device *udev = interface_to_usbdev(intf);
11098 -@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
11099 - * this writing the following versions exist:
11100 - * ASM1051 - no uas support version
11101 - * ASM1051 - with broken (*) uas support
11102 -- * ASM1053 - with working uas support
11103 -+ * ASM1053 - with working uas support, but problems with large xfers
11104 - * ASM1153 - with working uas support
11105 - *
11106 - * Devices with these chips re-use a number of device-ids over the
11107 -@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
11108 - } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
11109 - /* Possibly an ASM1051, disable uas */
11110 - flags |= US_FL_IGNORE_UAS;
11111 -+ } else {
11112 -+ /* ASM1053, these have issues with large transfers */
11113 -+ flags |= US_FL_MAX_SECTORS_240;
11114 - }
11115 - }
11116 -
11117 -@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
11118 - return 0;
11119 - }
11120 -
11121 -+ if (flags_ret)
11122 -+ *flags_ret = flags;
11123 -+
11124 - return 1;
11125 - }
11126 -diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
11127 -index 6cdabdc119a7..6d3122afeed3 100644
11128 ---- a/drivers/usb/storage/uas.c
11129 -+++ b/drivers/usb/storage/uas.c
11130 -@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
11131 -
11132 - static int uas_slave_alloc(struct scsi_device *sdev)
11133 - {
11134 -- sdev->hostdata = (void *)sdev->host->hostdata;
11135 -+ struct uas_dev_info *devinfo =
11136 -+ (struct uas_dev_info *)sdev->host->hostdata;
11137 -+
11138 -+ sdev->hostdata = devinfo;
11139 -
11140 - /* USB has unusual DMA-alignment requirements: Although the
11141 - * starting address of each scatter-gather element doesn't matter,
11142 -@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
11143 - */
11144 - blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
11145 -
11146 -+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
11147 -+ blk_queue_max_hw_sectors(sdev->request_queue, 64);
11148 -+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
11149 -+ blk_queue_max_hw_sectors(sdev->request_queue, 240);
11150 -+
11151 - return 0;
11152 - }
11153 -
11154 -@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
11155 - struct Scsi_Host *shost = NULL;
11156 - struct uas_dev_info *devinfo;
11157 - struct usb_device *udev = interface_to_usbdev(intf);
11158 -+ unsigned long dev_flags;
11159 -
11160 -- if (!uas_use_uas_driver(intf, id))
11161 -+ if (!uas_use_uas_driver(intf, id, &dev_flags))
11162 - return -ENODEV;
11163 -
11164 - if (uas_switch_interface(udev, intf))
11165 -@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
11166 - devinfo->udev = udev;
11167 - devinfo->resetting = 0;
11168 - devinfo->shutdown = 0;
11169 -- devinfo->flags = id->driver_info;
11170 -- usb_stor_adjust_quirks(udev, &devinfo->flags);
11171 -+ devinfo->flags = dev_flags;
11172 - init_usb_anchor(&devinfo->cmd_urbs);
11173 - init_usb_anchor(&devinfo->sense_urbs);
11174 - init_usb_anchor(&devinfo->data_urbs);
11175 -diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
11176 -index 5600c33fcadb..6c10c888f35f 100644
11177 ---- a/drivers/usb/storage/usb.c
11178 -+++ b/drivers/usb/storage/usb.c
11179 -@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
11180 - US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
11181 - US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
11182 - US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
11183 -- US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
11184 -+ US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
11185 -+ US_FL_MAX_SECTORS_240);
11186 -
11187 - p = quirks;
11188 - while (*p) {
11189 -@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
11190 - case 'f':
11191 - f |= US_FL_NO_REPORT_OPCODES;
11192 - break;
11193 -+ case 'g':
11194 -+ f |= US_FL_MAX_SECTORS_240;
11195 -+ break;
11196 - case 'h':
11197 - f |= US_FL_CAPACITY_HEURISTICS;
11198 - break;
11199 -@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
11200 -
11201 - /* If uas is enabled and this device can do uas then ignore it. */
11202 - #if IS_ENABLED(CONFIG_USB_UAS)
11203 -- if (uas_use_uas_driver(intf, id))
11204 -+ if (uas_use_uas_driver(intf, id, NULL))
11205 - return -ENXIO;
11206 - #endif
11207 -
11208 -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
11209 -index f23d4be3280e..2b4c5423672d 100644
11210 ---- a/fs/btrfs/ioctl.c
11211 -+++ b/fs/btrfs/ioctl.c
11212 -@@ -2403,7 +2403,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
11213 - "Attempt to delete subvolume %llu during send",
11214 - dest->root_key.objectid);
11215 - err = -EPERM;
11216 -- goto out_dput;
11217 -+ goto out_unlock_inode;
11218 - }
11219 -
11220 - d_invalidate(dentry);
11221 -@@ -2498,6 +2498,7 @@ out_up_write:
11222 - root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
11223 - spin_unlock(&dest->root_item_lock);
11224 - }
11225 -+out_unlock_inode:
11226 - mutex_unlock(&inode->i_mutex);
11227 - if (!err) {
11228 - shrink_dcache_sb(root->fs_info->sb);
11229 -diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
11230 -index bed43081720f..16f6365f65e7 100644
11231 ---- a/fs/ext4/extents.c
11232 -+++ b/fs/ext4/extents.c
11233 -@@ -4934,13 +4934,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
11234 - if (ret)
11235 - return ret;
11236 -
11237 -- /*
11238 -- * currently supporting (pre)allocate mode for extent-based
11239 -- * files _only_
11240 -- */
11241 -- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
11242 -- return -EOPNOTSUPP;
11243 --
11244 - if (mode & FALLOC_FL_COLLAPSE_RANGE)
11245 - return ext4_collapse_range(inode, offset, len);
11246 -
11247 -@@ -4962,6 +4955,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
11248 -
11249 - mutex_lock(&inode->i_mutex);
11250 -
11251 -+ /*
11252 -+	 * We only support preallocation for extent-based files
11253 -+ */
11254 -+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
11255 -+ ret = -EOPNOTSUPP;
11256 -+ goto out;
11257 -+ }
11258 -+
11259 - if (!(mode & FALLOC_FL_KEEP_SIZE) &&
11260 - offset + len > i_size_read(inode)) {
11261 - new_size = offset + len;
11262 -diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
11263 -index e04d45733976..9a0121376358 100644
11264 ---- a/fs/ext4/extents_status.c
11265 -+++ b/fs/ext4/extents_status.c
11266 -@@ -705,6 +705,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
11267 -
11268 - BUG_ON(end < lblk);
11269 -
11270 -+ if ((status & EXTENT_STATUS_DELAYED) &&
11271 -+ (status & EXTENT_STATUS_WRITTEN)) {
11272 -+		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
11273 -+			     "delayed and written which can potentially "
11274 -+			     "cause data loss.\n", lblk, len);
11275 -+ WARN_ON(1);
11276 -+ }
11277 -+
11278 - newes.es_lblk = lblk;
11279 - newes.es_len = len;
11280 - ext4_es_store_pblock_status(&newes, pblk, status);
11281 -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
11282 -index 5cb9a212b86f..852cc521f327 100644
11283 ---- a/fs/ext4/inode.c
11284 -+++ b/fs/ext4/inode.c
11285 -@@ -534,6 +534,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
11286 - status = map->m_flags & EXT4_MAP_UNWRITTEN ?
11287 - EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
11288 - if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
11289 -+ !(status & EXTENT_STATUS_WRITTEN) &&
11290 - ext4_find_delalloc_range(inode, map->m_lblk,
11291 - map->m_lblk + map->m_len - 1))
11292 - status |= EXTENT_STATUS_DELAYED;
11293 -@@ -638,6 +639,7 @@ found:
11294 - status = map->m_flags & EXT4_MAP_UNWRITTEN ?
11295 - EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
11296 - if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
11297 -+ !(status & EXTENT_STATUS_WRITTEN) &&
11298 - ext4_find_delalloc_range(inode, map->m_lblk,
11299 - map->m_lblk + map->m_len - 1))
11300 - status |= EXTENT_STATUS_DELAYED;
11301 -diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
11302 -index d98094a9f476..ff10f3decbc9 100644
11303 ---- a/fs/hfsplus/xattr.c
11304 -+++ b/fs/hfsplus/xattr.c
11305 -@@ -806,9 +806,6 @@ end_removexattr:
11306 - static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
11307 - void *buffer, size_t size, int type)
11308 - {
11309 -- char *xattr_name;
11310 -- int res;
11311 --
11312 - if (!strcmp(name, ""))
11313 - return -EINVAL;
11314 -
11315 -@@ -818,24 +815,19 @@ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
11316 - */
11317 - if (is_known_namespace(name))
11318 - return -EOPNOTSUPP;
11319 -- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
11320 -- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
11321 -- if (!xattr_name)
11322 -- return -ENOMEM;
11323 -- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
11324 -- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
11325 -
11326 -- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
11327 -- kfree(xattr_name);
11328 -- return res;
11329 -+ /*
11330 -+ * osx is the namespace we use to indicate an unprefixed
11331 -+ * attribute on the filesystem (like the ones that OS X
11332 -+ * creates), so we pass the name through unmodified (after
11333 -+ * ensuring it doesn't conflict with another namespace).
11334 -+ */
11335 -+ return hfsplus_getxattr(dentry, name, buffer, size);
11336 - }
11337 -
11338 - static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
11339 - const void *buffer, size_t size, int flags, int type)
11340 - {
11341 -- char *xattr_name;
11342 -- int res;
11343 --
11344 - if (!strcmp(name, ""))
11345 - return -EINVAL;
11346 -
11347 -@@ -845,16 +837,14 @@ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
11348 - */
11349 - if (is_known_namespace(name))
11350 - return -EOPNOTSUPP;
11351 -- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
11352 -- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
11353 -- if (!xattr_name)
11354 -- return -ENOMEM;
11355 -- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
11356 -- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
11357 -
11358 -- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
11359 -- kfree(xattr_name);
11360 -- return res;
11361 -+ /*
11362 -+ * osx is the namespace we use to indicate an unprefixed
11363 -+ * attribute on the filesystem (like the ones that OS X
11364 -+ * creates), so we pass the name through unmodified (after
11365 -+ * ensuring it doesn't conflict with another namespace).
11366 -+ */
11367 -+ return hfsplus_setxattr(dentry, name, buffer, size, flags);
11368 - }
11369 -
11370 - static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
11371 -diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
11372 -index a7f2604c5f25..7f5f78bd15ad 100644
11373 ---- a/include/linux/usb_usual.h
11374 -+++ b/include/linux/usb_usual.h
11375 -@@ -77,6 +77,8 @@
11376 - /* Cannot handle ATA_12 or ATA_16 CDBs */ \
11377 - US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
11378 - /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
11379 -+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
11380 -+ /* Sets max_sectors to 240 */ \
11381 -
11382 - #define US_FLAG(name, value) US_FL_##name = value ,
11383 - enum { US_DO_ALL_FLAGS };
11384 -diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
11385 -index 183eaab7c380..96e3f56519e7 100644
11386 ---- a/include/scsi/scsi_devinfo.h
11387 -+++ b/include/scsi/scsi_devinfo.h
11388 -@@ -36,5 +36,6 @@
11389 - for sequential scan */
11390 - #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
11391 - #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
11392 -+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
11393 -
11394 - #endif
11395 -diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
11396 -index 0de95ccb92cf..5bd134651f5e 100644
11397 ---- a/include/sound/emu10k1.h
11398 -+++ b/include/sound/emu10k1.h
11399 -@@ -41,7 +41,8 @@
11400 -
11401 - #define EMUPAGESIZE 4096
11402 - #define MAXREQVOICES 8
11403 --#define MAXPAGES 8192
11404 -+#define MAXPAGES0 4096 /* 32 bit mode */
11405 -+#define MAXPAGES1 8192 /* 31 bit mode */
11406 - #define RESERVED 0
11407 - #define NUM_MIDI 16
11408 - #define NUM_G 64 /* use all channels */
11409 -@@ -50,8 +51,7 @@
11410 -
11411 - /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
11412 - #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
11413 --#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
11414 -- /* See ALSA bug #1276 - rlrevell */
11415 -+#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
11416 -
11417 - #define TMEMSIZE 256*1024
11418 - #define TMEMSIZEREG 4
11419 -@@ -466,8 +466,11 @@
11420 -
11421 - #define MAPB 0x0d /* Cache map B */
11422 -
11423 --#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
11424 --#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
11425 -+#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
11426 -+#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
11427 -+
11428 -+#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
11429 -+#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
11430 -
11431 - /* 0x0e, 0x0f: Not used */
11432 -
11433 -@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
11434 - unsigned short model; /* subsystem id */
11435 - unsigned int card_type; /* EMU10K1_CARD_* */
11436 - unsigned int ecard_ctrl; /* ecard control bits */
11437 -+ unsigned int address_mode; /* address mode */
11438 - unsigned long dma_mask; /* PCI DMA mask */
11439 - unsigned int delay_pcm_irq; /* in samples */
11440 - int max_cache_pages; /* max memory size / PAGE_SIZE */
11441 -diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
11442 -index 8d7416e46861..15355892a0ff 100644
11443 ---- a/include/sound/soc-dapm.h
11444 -+++ b/include/sound/soc-dapm.h
11445 -@@ -287,7 +287,7 @@ struct device;
11446 - .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
11447 - .tlv.p = (tlv_array), \
11448 - .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
11449 -- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
11450 -+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
11451 - #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
11452 - SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
11453 - #define SOC_DAPM_ENUM(xname, xenum) \
11454 -diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
11455 -index a64e7a207d2b..0c5796eadae1 100644
11456 ---- a/kernel/bpf/core.c
11457 -+++ b/kernel/bpf/core.c
11458 -@@ -357,8 +357,8 @@ select_insn:
11459 - ALU64_MOD_X:
11460 - if (unlikely(SRC == 0))
11461 - return 0;
11462 -- tmp = DST;
11463 -- DST = do_div(tmp, SRC);
11464 -+ div64_u64_rem(DST, SRC, &tmp);
11465 -+ DST = tmp;
11466 - CONT;
11467 - ALU_MOD_X:
11468 - if (unlikely(SRC == 0))
11469 -@@ -367,8 +367,8 @@ select_insn:
11470 - DST = do_div(tmp, (u32) SRC);
11471 - CONT;
11472 - ALU64_MOD_K:
11473 -- tmp = DST;
11474 -- DST = do_div(tmp, IMM);
11475 -+ div64_u64_rem(DST, IMM, &tmp);
11476 -+ DST = tmp;
11477 - CONT;
11478 - ALU_MOD_K:
11479 - tmp = (u32) DST;
11480 -@@ -377,7 +377,7 @@ select_insn:
11481 - ALU64_DIV_X:
11482 - if (unlikely(SRC == 0))
11483 - return 0;
11484 -- do_div(DST, SRC);
11485 -+ DST = div64_u64(DST, SRC);
11486 - CONT;
11487 - ALU_DIV_X:
11488 - if (unlikely(SRC == 0))
11489 -@@ -387,7 +387,7 @@ select_insn:
11490 - DST = (u32) tmp;
11491 - CONT;
11492 - ALU64_DIV_K:
11493 -- do_div(DST, IMM);
11494 -+ DST = div64_u64(DST, IMM);
11495 - CONT;
11496 - ALU_DIV_K:
11497 - tmp = (u32) DST;
11498 -diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
11499 -index 208d5439e59b..787b0d699969 100644
11500 ---- a/net/ipv4/ping.c
11501 -+++ b/net/ipv4/ping.c
11502 -@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
11503 - if (sk_hashed(sk)) {
11504 - write_lock_bh(&ping_table.lock);
11505 - hlist_nulls_del(&sk->sk_nulls_node);
11506 -+ sk_nulls_node_init(&sk->sk_nulls_node);
11507 - sock_put(sk);
11508 - isk->inet_num = 0;
11509 - isk->inet_sport = 0;
11510 -diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11511 -index ad5064362c5c..20fc0202cbbe 100644
11512 ---- a/net/ipv4/route.c
11513 -+++ b/net/ipv4/route.c
11514 -@@ -963,10 +963,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
11515 - if (dst_metric_locked(dst, RTAX_MTU))
11516 - return;
11517 -
11518 -- if (dst->dev->mtu < mtu)
11519 -- return;
11520 --
11521 -- if (rt->rt_pmtu && rt->rt_pmtu < mtu)
11522 -+ if (ipv4_mtu(dst) < mtu)
11523 - return;
11524 -
11525 - if (mtu < ip_rt_min_pmtu)
11526 -diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
11527 -index 37d0220a094c..db7a2e5e4a14 100644
11528 ---- a/sound/pci/emu10k1/emu10k1.c
11529 -+++ b/sound/pci/emu10k1/emu10k1.c
11530 -@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
11531 - }
11532 - #endif
11533 -
11534 -- strcpy(card->driver, emu->card_capabilities->driver);
11535 -- strcpy(card->shortname, emu->card_capabilities->name);
11536 -+ strlcpy(card->driver, emu->card_capabilities->driver,
11537 -+ sizeof(card->driver));
11538 -+ strlcpy(card->shortname, emu->card_capabilities->name,
11539 -+ sizeof(card->shortname));
11540 - snprintf(card->longname, sizeof(card->longname),
11541 - "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
11542 - card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
11543 -diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
11544 -index 874cd76c7b7f..d2c7ea3a7610 100644
11545 ---- a/sound/pci/emu10k1/emu10k1_callback.c
11546 -+++ b/sound/pci/emu10k1/emu10k1_callback.c
11547 -@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
11548 - snd_emu10k1_ptr_write(hw, Z2, ch, 0);
11549 -
11550 - /* invalidate maps */
11551 -- temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
11552 -+ temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
11553 - snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
11554 - snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
11555 - #if 0
11556 -@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
11557 - snd_emu10k1_ptr_write(hw, CDF, ch, sample);
11558 -
11559 - /* invalidate maps */
11560 -- temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
11561 -+	temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
11562 - snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
11563 - snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
11564 -
11565 -diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
11566 -index b4458a630a7c..df9f5c7c9c77 100644
11567 ---- a/sound/pci/emu10k1/emu10k1_main.c
11568 -+++ b/sound/pci/emu10k1/emu10k1_main.c
11569 -@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
11570 - snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
11571 - snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
11572 -
11573 -- silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
11574 -+ silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
11575 - for (ch = 0; ch < NUM_G; ch++) {
11576 - snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
11577 - snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
11578 -@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
11579 - outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
11580 - }
11581 -
11582 -+ if (emu->address_mode == 0) {
11583 -+ /* use 16M in 4G */
11584 -+ outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
11585 -+ }
11586 -+
11587 - return 0;
11588 - }
11589 -
11590 -@@ -1421,7 +1426,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
11591 - *
11592 - */
11593 - {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
11594 -- .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
11595 -+ .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
11596 - .id = "Audigy2",
11597 - .emu10k2_chip = 1,
11598 - .ca0108_chip = 1,
11599 -@@ -1571,7 +1576,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
11600 - .adc_1361t = 1, /* 24 bit capture instead of 16bit */
11601 - .ac97_chip = 1} ,
11602 - {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
11603 -- .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
11604 -+ .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
11605 - .id = "Audigy2",
11606 - .emu10k2_chip = 1,
11607 - .ca0102_chip = 1,
11608 -@@ -1877,8 +1882,10 @@ int snd_emu10k1_create(struct snd_card *card,
11609 -
11610 - is_audigy = emu->audigy = c->emu10k2_chip;
11611 -
11612 -+ /* set addressing mode */
11613 -+ emu->address_mode = is_audigy ? 0 : 1;
11614 - /* set the DMA transfer mask */
11615 -- emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
11616 -+ emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
11617 - if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
11618 - pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
11619 - dev_err(card->dev,
11620 -@@ -1903,7 +1910,7 @@ int snd_emu10k1_create(struct snd_card *card,
11621 -
11622 - emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
11623 - if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
11624 -- 32 * 1024, &emu->ptb_pages) < 0) {
11625 -+ (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
11626 - err = -ENOMEM;
11627 - goto error;
11628 - }
11629 -@@ -2002,8 +2009,8 @@ int snd_emu10k1_create(struct snd_card *card,
11630 -
11631 - /* Clear silent pages and set up pointers */
11632 - memset(emu->silent_page.area, 0, PAGE_SIZE);
11633 -- silent_page = emu->silent_page.addr << 1;
11634 -- for (idx = 0; idx < MAXPAGES; idx++)
11635 -+ silent_page = emu->silent_page.addr << emu->address_mode;
11636 -+ for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
11637 - ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
11638 -
11639 - /* set up voice indices */
11640 -diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
11641 -index 0dc07385af0e..14a305bd8a98 100644
11642 ---- a/sound/pci/emu10k1/emupcm.c
11643 -+++ b/sound/pci/emu10k1/emupcm.c
11644 -@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
11645 - snd_emu10k1_ptr_write(emu, Z1, voice, 0);
11646 - snd_emu10k1_ptr_write(emu, Z2, voice, 0);
11647 - /* invalidate maps */
11648 -- silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
11649 -+ silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
11650 - snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
11651 - snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
11652 - /* modulation envelope */
11653 -diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
11654 -index c68e6dd2fa67..4f1f69be1865 100644
11655 ---- a/sound/pci/emu10k1/memory.c
11656 -+++ b/sound/pci/emu10k1/memory.c
11657 -@@ -34,10 +34,11 @@
11658 - * aligned pages in others
11659 - */
11660 - #define __set_ptb_entry(emu,page,addr) \
11661 -- (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
11662 -+ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
11663 -
11664 - #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
11665 --#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
11666 -+#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
11667 -+#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
11668 - /* get aligned page from offset address */
11669 - #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
11670 - /* get offset address from aligned page */
11671 -@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
11672 - }
11673 - page = blk->mapped_page + blk->pages;
11674 - }
11675 -- size = MAX_ALIGN_PAGES - page;
11676 -+ size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
11677 - if (size >= max_size) {
11678 - *nextp = pos;
11679 - return page;
11680 -@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
11681 - q = get_emu10k1_memblk(p, mapped_link);
11682 - end_page = q->mapped_page;
11683 - } else
11684 -- end_page = MAX_ALIGN_PAGES;
11685 -+ end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
11686 -
11687 - /* remove links */
11688 - list_del(&blk->mapped_link);
11689 -@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
11690 - if (snd_BUG_ON(!emu))
11691 - return NULL;
11692 - if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
11693 -- runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
11694 -+ runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
11695 - return NULL;
11696 - hdr = emu->memhdr;
11697 - if (snd_BUG_ON(!hdr))
11698 -diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
11699 -index 2fe86d2e1b09..a63a86332deb 100644
11700 ---- a/sound/pci/hda/hda_codec.c
11701 -+++ b/sound/pci/hda/hda_codec.c
11702 -@@ -3027,6 +3027,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
11703 - .put = vmaster_mute_mode_put,
11704 - };
11705 -
11706 -+/* meta hook to call each driver's vmaster hook */
11707 -+static void vmaster_hook(void *private_data, int enabled)
11708 -+{
11709 -+ struct hda_vmaster_mute_hook *hook = private_data;
11710 -+
11711 -+ if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
11712 -+ enabled = hook->mute_mode;
11713 -+ hook->hook(hook->codec, enabled);
11714 -+}
11715 -+
11716 - /**
11717 - * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
11718 - * @codec: the HDA codec
11719 -@@ -3045,9 +3055,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
11720 -
11721 - if (!hook->hook || !hook->sw_kctl)
11722 - return 0;
11723 -- snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
11724 - hook->codec = codec;
11725 - hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
11726 -+ snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
11727 - if (!expose_enum_ctl)
11728 - return 0;
11729 - kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
11730 -@@ -3073,14 +3083,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
11731 - */
11732 - if (hook->codec->bus->shutdown)
11733 - return;
11734 -- switch (hook->mute_mode) {
11735 -- case HDA_VMUTE_FOLLOW_MASTER:
11736 -- snd_ctl_sync_vmaster_hook(hook->sw_kctl);
11737 -- break;
11738 -- default:
11739 -- hook->hook(hook->codec, hook->mute_mode);
11740 -- break;
11741 -- }
11742 -+ snd_ctl_sync_vmaster_hook(hook->sw_kctl);
11743 - }
11744 - EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
11745 -
11746 -diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
11747 -index 6ba0b5517c40..2341fc334163 100644
11748 ---- a/sound/pci/hda/thinkpad_helper.c
11749 -+++ b/sound/pci/hda/thinkpad_helper.c
11750 -@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
11751 - if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
11752 - old_vmaster_hook = spec->vmaster_mute.hook;
11753 - spec->vmaster_mute.hook = update_tpacpi_mute_led;
11754 -+ spec->vmaster_mute_enum = 1;
11755 - removefunc = false;
11756 - }
11757 - if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
11758 -diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
11759 -index fb9c20eace3f..97b33e96439a 100644
11760 ---- a/sound/soc/codecs/rt5677.c
11761 -+++ b/sound/soc/codecs/rt5677.c
11762 -@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = {
11763 - {RT5677_PR_BASE + 0x1e, 0x0000},
11764 - {RT5677_PR_BASE + 0x12, 0x0eaa},
11765 - {RT5677_PR_BASE + 0x14, 0x018a},
11766 -+ {RT5677_PR_BASE + 0x15, 0x0490},
11767 -+ {RT5677_PR_BASE + 0x38, 0x0f71},
11768 -+ {RT5677_PR_BASE + 0x39, 0x0f71},
11769 - };
11770 - #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
11771 -
11772 -@@ -901,7 +904,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
11773 - {
11774 - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
11775 - struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
11776 -- int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
11777 -+ int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
11778 -
11779 - if (idx < 0)
11780 - dev_err(codec->dev, "Failed to set DMIC clock\n");
11781 -diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
11782 -index 16f1b71edb55..aab0af681e8c 100644
11783 ---- a/sound/soc/codecs/tfa9879.c
11784 -+++ b/sound/soc/codecs/tfa9879.c
11785 -@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c,
11786 - int i;
11787 -
11788 - tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
11789 -- if (IS_ERR(tfa9879))
11790 -- return PTR_ERR(tfa9879);
11791 -+ if (!tfa9879)
11792 -+ return -ENOMEM;
11793 -
11794 - i2c_set_clientdata(i2c, tfa9879);
11795 -
11796 -diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
11797 -index 326d3c3804e3..5bf723689692 100644
11798 ---- a/sound/soc/samsung/s3c24xx-i2s.c
11799 -+++ b/sound/soc/samsung/s3c24xx-i2s.c
11800 -@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
11801 - return -ENOENT;
11802 - }
11803 - s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
11804 -- if (s3c24xx_i2s.regs == NULL)
11805 -- return -ENXIO;
11806 -+ if (IS_ERR(s3c24xx_i2s.regs))
11807 -+ return PTR_ERR(s3c24xx_i2s.regs);
11808 -
11809 - s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
11810 - s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
11811 -diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
11812 -index ab37add269ae..82e350e9501c 100644
11813 ---- a/sound/synth/emux/emux_oss.c
11814 -+++ b/sound/synth/emux/emux_oss.c
11815 -@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
11816 - if (snd_BUG_ON(!arg || !emu))
11817 - return -ENXIO;
11818 -
11819 -- mutex_lock(&emu->register_mutex);
11820 --
11821 -- if (!snd_emux_inc_count(emu)) {
11822 -- mutex_unlock(&emu->register_mutex);
11823 -+ if (!snd_emux_inc_count(emu))
11824 - return -EFAULT;
11825 -- }
11826 -
11827 - memset(&callback, 0, sizeof(callback));
11828 - callback.owner = THIS_MODULE;
11829 -@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
11830 - if (p == NULL) {
11831 - snd_printk(KERN_ERR "can't create port\n");
11832 - snd_emux_dec_count(emu);
11833 -- mutex_unlock(&emu->register_mutex);
11834 - return -ENOMEM;
11835 - }
11836 -
11837 -@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
11838 - reset_port_mode(p, arg->seq_mode);
11839 -
11840 - snd_emux_reset_port(p);
11841 --
11842 -- mutex_unlock(&emu->register_mutex);
11843 - return 0;
11844 - }
11845 -
11846 -@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
11847 - if (snd_BUG_ON(!emu))
11848 - return -ENXIO;
11849 -
11850 -- mutex_lock(&emu->register_mutex);
11851 - snd_emux_sounds_off_all(p);
11852 - snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
11853 - snd_seq_event_port_detach(p->chset.client, p->chset.port);
11854 - snd_emux_dec_count(emu);
11855 -
11856 -- mutex_unlock(&emu->register_mutex);
11857 - return 0;
11858 - }
11859 -
11860 -diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
11861 -index 7778b8e19782..a0209204ae48 100644
11862 ---- a/sound/synth/emux/emux_seq.c
11863 -+++ b/sound/synth/emux/emux_seq.c
11864 -@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
11865 - if (emu->voices)
11866 - snd_emux_terminate_all(emu);
11867 -
11868 -- mutex_lock(&emu->register_mutex);
11869 - if (emu->client >= 0) {
11870 - snd_seq_delete_kernel_client(emu->client);
11871 - emu->client = -1;
11872 - }
11873 -- mutex_unlock(&emu->register_mutex);
11874 - }
11875 -
11876 -
11877 -@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
11878 - /*
11879 - * increment usage count
11880 - */
11881 --int
11882 --snd_emux_inc_count(struct snd_emux *emu)
11883 -+static int
11884 -+__snd_emux_inc_count(struct snd_emux *emu)
11885 - {
11886 - emu->used++;
11887 - if (!try_module_get(emu->ops.owner))
11888 -@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
11889 - return 1;
11890 - }
11891 -
11892 -+int snd_emux_inc_count(struct snd_emux *emu)
11893 -+{
11894 -+ int ret;
11895 -+
11896 -+ mutex_lock(&emu->register_mutex);
11897 -+ ret = __snd_emux_inc_count(emu);
11898 -+ mutex_unlock(&emu->register_mutex);
11899 -+ return ret;
11900 -+}
11901 -
11902 - /*
11903 - * decrease usage count
11904 - */
11905 --void
11906 --snd_emux_dec_count(struct snd_emux *emu)
11907 -+static void
11908 -+__snd_emux_dec_count(struct snd_emux *emu)
11909 - {
11910 - module_put(emu->card->module);
11911 - emu->used--;
11912 -@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
11913 - module_put(emu->ops.owner);
11914 - }
11915 -
11916 -+void snd_emux_dec_count(struct snd_emux *emu)
11917 -+{
11918 -+ mutex_lock(&emu->register_mutex);
11919 -+ __snd_emux_dec_count(emu);
11920 -+ mutex_unlock(&emu->register_mutex);
11921 -+}
11922 -
11923 - /*
11924 - * Routine that is called upon a first use of a particular port
11925 -@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
11926 -
11927 - mutex_lock(&emu->register_mutex);
11928 - snd_emux_init_port(p);
11929 -- snd_emux_inc_count(emu);
11930 -+ __snd_emux_inc_count(emu);
11931 - mutex_unlock(&emu->register_mutex);
11932 - return 0;
11933 - }
11934 -@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
11935 -
11936 - mutex_lock(&emu->register_mutex);
11937 - snd_emux_sounds_off_all(p);
11938 -- snd_emux_dec_count(emu);
11939 -+ __snd_emux_dec_count(emu);
11940 - mutex_unlock(&emu->register_mutex);
11941 - return 0;
11942 - }
11943
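
The emux_oss/emux_seq hunks above apply the usual locked/unlocked split: the public snd_emux_inc_count()/snd_emux_dec_count() now take register_mutex themselves, while paths that already hold the mutex (snd_emux_use/snd_emux_unuse) call the double-underscore variants, removing the deadlock-prone locking from the OSS open/close paths. A small pthread sketch of the same pattern; names here are illustrative, not the kernel's:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int used;

    /* Caller must already hold register_mutex
     * (the kernel names this __snd_emux_inc_count). */
    static int inc_count_unlocked(void)
    {
            return ++used;
    }

    /* Public entry point: takes the lock itself,
     * as snd_emux_inc_count now does. */
    static int inc_count(void)
    {
            int ret;

            pthread_mutex_lock(&register_mutex);
            ret = inc_count_unlocked();
            pthread_mutex_unlock(&register_mutex);
            return ret;
    }

    int main(void)
    {
            printf("count = %d\n", inc_count());
            return 0;
    }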
11944 diff --git a/1003_linux-4.0.4.patch b/1003_linux-4.0.4.patch
11945 deleted file mode 100644
11946 index e5c793a..0000000
11947 --- a/1003_linux-4.0.4.patch
11948 +++ /dev/null
11949 @@ -1,2713 +0,0 @@
11950 -diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
11951 -index a4873e5e3e36..e30e184f50c7 100644
11952 ---- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
11953 -+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
11954 -@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
11955 - 80 81 68 69
11956 - 70 71 72 73
11957 - 74 75 76 77>;
11958 -- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
11959 -+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
11960 - "saif0", "saif1", "i2c0", "i2c1",
11961 - "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
11962 - "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
11963 -diff --git a/Makefile b/Makefile
11964 -index dc9f43a019d6..3d16bcc87585 100644
11965 ---- a/Makefile
11966 -+++ b/Makefile
11967 -@@ -1,6 +1,6 @@
11968 - VERSION = 4
11969 - PATCHLEVEL = 0
11970 --SUBLEVEL = 3
11971 -+SUBLEVEL = 4
11972 - EXTRAVERSION =
11973 - NAME = Hurr durr I'ma sheep
11974 -
11975 -diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
11976 -index 0c76d9f05fd0..f4838ebd918b 100644
11977 ---- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
11978 -+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
11979 -@@ -105,6 +105,10 @@
11980 - };
11981 -
11982 - internal-regs {
11983 -+ rtc@10300 {
11984 -+ /* No crystal connected to the internal RTC */
11985 -+ status = "disabled";
11986 -+ };
11987 - serial@12000 {
11988 - status = "okay";
11989 - };
11990 -diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
11991 -index 7e6eef2488e8..82045398bf1f 100644
11992 ---- a/arch/arm/boot/dts/imx23-olinuxino.dts
11993 -+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
11994 -@@ -12,6 +12,7 @@
11995 - */
11996 -
11997 - /dts-v1/;
11998 -+#include <dt-bindings/gpio/gpio.h>
11999 - #include "imx23.dtsi"
12000 -
12001 - / {
12002 -@@ -93,6 +94,7 @@
12003 -
12004 - ahb@80080000 {
12005 - usb0: usb@80080000 {
12006 -+ dr_mode = "host";
12007 - vbus-supply = <&reg_usb0_vbus>;
12008 - status = "okay";
12009 - };
12010 -@@ -122,7 +124,7 @@
12011 -
12012 - user {
12013 - label = "green";
12014 -- gpios = <&gpio2 1 1>;
12015 -+ gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
12016 - };
12017 - };
12018 - };
12019 -diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
12020 -index e4d3aecc4ed2..677f81d9dcd5 100644
12021 ---- a/arch/arm/boot/dts/imx25.dtsi
12022 -+++ b/arch/arm/boot/dts/imx25.dtsi
12023 -@@ -428,6 +428,7 @@
12024 -
12025 - pwm4: pwm@53fc8000 {
12026 - compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
12027 -+ #pwm-cells = <2>;
12028 - reg = <0x53fc8000 0x4000>;
12029 - clocks = <&clks 108>, <&clks 52>;
12030 - clock-names = "ipg", "per";
12031 -diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
12032 -index 47f68ac868d4..5ed245a3f9ac 100644
12033 ---- a/arch/arm/boot/dts/imx28.dtsi
12034 -+++ b/arch/arm/boot/dts/imx28.dtsi
12035 -@@ -900,7 +900,7 @@
12036 - 80 81 68 69
12037 - 70 71 72 73
12038 - 74 75 76 77>;
12039 -- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
12040 -+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
12041 - "saif0", "saif1", "i2c0", "i2c1",
12042 - "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
12043 - "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
12044 -diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
12045 -index 19cc269a08d4..1ce6133b67f5 100644
12046 ---- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
12047 -+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
12048 -@@ -31,6 +31,7 @@
12049 - regulator-min-microvolt = <5000000>;
12050 - regulator-max-microvolt = <5000000>;
12051 - gpio = <&gpio4 15 0>;
12052 -+ enable-active-high;
12053 - };
12054 -
12055 - reg_usb_h1_vbus: regulator@1 {
12056 -@@ -40,6 +41,7 @@
12057 - regulator-min-microvolt = <5000000>;
12058 - regulator-max-microvolt = <5000000>;
12059 - gpio = <&gpio1 0 0>;
12060 -+ enable-active-high;
12061 - };
12062 - };
12063 -
12064 -diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
12065 -index db80f9d376fa..9c8bdf2c93a1 100644
12066 ---- a/arch/arm/boot/dts/omap3-n900.dts
12067 -+++ b/arch/arm/boot/dts/omap3-n900.dts
12068 -@@ -484,6 +484,8 @@
12069 - DRVDD-supply = <&vmmc2>;
12070 - IOVDD-supply = <&vio>;
12071 - DVDD-supply = <&vio>;
12072 -+
12073 -+ ai3x-micbias-vg = <1>;
12074 - };
12075 -
12076 - tlv320aic3x_aux: tlv320aic3x@19 {
12077 -@@ -495,6 +497,8 @@
12078 - DRVDD-supply = <&vmmc2>;
12079 - IOVDD-supply = <&vio>;
12080 - DVDD-supply = <&vio>;
12081 -+
12082 -+ ai3x-micbias-vg = <2>;
12083 - };
12084 -
12085 - tsl2563: tsl2563@29 {
12086 -diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
12087 -index bfd3f1c734b8..2201cd5da3bb 100644
12088 ---- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
12089 -+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
12090 -@@ -1017,23 +1017,6 @@
12091 - status = "disabled";
12092 - };
12093 -
12094 -- vmmci: regulator-gpio {
12095 -- compatible = "regulator-gpio";
12096 --
12097 -- regulator-min-microvolt = <1800000>;
12098 -- regulator-max-microvolt = <2900000>;
12099 -- regulator-name = "mmci-reg";
12100 -- regulator-type = "voltage";
12101 --
12102 -- startup-delay-us = <100>;
12103 -- enable-active-high;
12104 --
12105 -- states = <1800000 0x1
12106 -- 2900000 0x0>;
12107 --
12108 -- status = "disabled";
12109 -- };
12110 --
12111 - mcde@a0350000 {
12112 - compatible = "stericsson,mcde";
12113 - reg = <0xa0350000 0x1000>, /* MCDE */
12114 -diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
12115 -index bf8f0eddc2c0..744c1e3a744d 100644
12116 ---- a/arch/arm/boot/dts/ste-href.dtsi
12117 -+++ b/arch/arm/boot/dts/ste-href.dtsi
12118 -@@ -111,6 +111,21 @@
12119 - pinctrl-1 = <&i2c3_sleep_mode>;
12120 - };
12121 -
12122 -+ vmmci: regulator-gpio {
12123 -+ compatible = "regulator-gpio";
12124 -+
12125 -+ regulator-min-microvolt = <1800000>;
12126 -+ regulator-max-microvolt = <2900000>;
12127 -+ regulator-name = "mmci-reg";
12128 -+ regulator-type = "voltage";
12129 -+
12130 -+ startup-delay-us = <100>;
12131 -+ enable-active-high;
12132 -+
12133 -+ states = <1800000 0x1
12134 -+ 2900000 0x0>;
12135 -+ };
12136 -+
12137 - // External Micro SD slot
12138 - sdi0_per1@80126000 {
12139 - arm,primecell-periphid = <0x10480180>;
12140 -diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
12141 -index 206826a855c0..1bc84ebdccaa 100644
12142 ---- a/arch/arm/boot/dts/ste-snowball.dts
12143 -+++ b/arch/arm/boot/dts/ste-snowball.dts
12144 -@@ -146,8 +146,21 @@
12145 - };
12146 -
12147 - vmmci: regulator-gpio {
12148 -+ compatible = "regulator-gpio";
12149 -+
12150 - gpios = <&gpio7 4 0x4>;
12151 - enable-gpio = <&gpio6 25 0x4>;
12152 -+
12153 -+ regulator-min-microvolt = <1800000>;
12154 -+ regulator-max-microvolt = <2900000>;
12155 -+ regulator-name = "mmci-reg";
12156 -+ regulator-type = "voltage";
12157 -+
12158 -+ startup-delay-us = <100>;
12159 -+ enable-active-high;
12160 -+
12161 -+ states = <1800000 0x1
12162 -+ 2900000 0x0>;
12163 - };
12164 -
12165 - // External Micro SD slot
12166 -diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
12167 -index 902397dd1000..1c1cdfa566ac 100644
12168 ---- a/arch/arm/kernel/Makefile
12169 -+++ b/arch/arm/kernel/Makefile
12170 -@@ -86,7 +86,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
12171 -
12172 - obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
12173 - ifeq ($(CONFIG_ARM_PSCI),y)
12174 --obj-y += psci.o
12175 -+obj-y += psci.o psci-call.o
12176 - obj-$(CONFIG_SMP) += psci_smp.o
12177 - endif
12178 -
12179 -diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
12180 -new file mode 100644
12181 -index 000000000000..a78e9e1e206d
12182 ---- /dev/null
12183 -+++ b/arch/arm/kernel/psci-call.S
12184 -@@ -0,0 +1,31 @@
12185 -+/*
12186 -+ * This program is free software; you can redistribute it and/or modify
12187 -+ * it under the terms of the GNU General Public License version 2 as
12188 -+ * published by the Free Software Foundation.
12189 -+ *
12190 -+ * This program is distributed in the hope that it will be useful,
12191 -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12192 -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12193 -+ * GNU General Public License for more details.
12194 -+ *
12195 -+ * Copyright (C) 2015 ARM Limited
12196 -+ *
12197 -+ * Author: Mark Rutland <mark.rutland@×××.com>
12198 -+ */
12199 -+
12200 -+#include <linux/linkage.h>
12201 -+
12202 -+#include <asm/opcodes-sec.h>
12203 -+#include <asm/opcodes-virt.h>
12204 -+
12205 -+/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
12206 -+ENTRY(__invoke_psci_fn_hvc)
12207 -+ __HVC(0)
12208 -+ bx lr
12209 -+ENDPROC(__invoke_psci_fn_hvc)
12210 -+
12211 -+/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
12212 -+ENTRY(__invoke_psci_fn_smc)
12213 -+ __SMC(0)
12214 -+ bx lr
12215 -+ENDPROC(__invoke_psci_fn_smc)
12216 -diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
12217 -index f73891b6b730..f90fdf4ce7c7 100644
12218 ---- a/arch/arm/kernel/psci.c
12219 -+++ b/arch/arm/kernel/psci.c
12220 -@@ -23,8 +23,6 @@
12221 -
12222 - #include <asm/compiler.h>
12223 - #include <asm/errno.h>
12224 --#include <asm/opcodes-sec.h>
12225 --#include <asm/opcodes-virt.h>
12226 - #include <asm/psci.h>
12227 - #include <asm/system_misc.h>
12228 -
12229 -@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
12230 - static int (*invoke_psci_fn)(u32, u32, u32, u32);
12231 - typedef int (*psci_initcall_t)(const struct device_node *);
12232 -
12233 -+asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
12234 -+asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
12235 -+
12236 - enum psci_function {
12237 - PSCI_FN_CPU_SUSPEND,
12238 - PSCI_FN_CPU_ON,
12239 -@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
12240 - & PSCI_0_2_POWER_STATE_AFFL_MASK);
12241 - }
12242 -
12243 --/*
12244 -- * The following two functions are invoked via the invoke_psci_fn pointer
12245 -- * and will not be inlined, allowing us to piggyback on the AAPCS.
12246 -- */
12247 --static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
12248 -- u32 arg2)
12249 --{
12250 -- asm volatile(
12251 -- __asmeq("%0", "r0")
12252 -- __asmeq("%1", "r1")
12253 -- __asmeq("%2", "r2")
12254 -- __asmeq("%3", "r3")
12255 -- __HVC(0)
12256 -- : "+r" (function_id)
12257 -- : "r" (arg0), "r" (arg1), "r" (arg2));
12258 --
12259 -- return function_id;
12260 --}
12261 --
12262 --static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
12263 -- u32 arg2)
12264 --{
12265 -- asm volatile(
12266 -- __asmeq("%0", "r0")
12267 -- __asmeq("%1", "r1")
12268 -- __asmeq("%2", "r2")
12269 -- __asmeq("%3", "r3")
12270 -- __SMC(0)
12271 -- : "+r" (function_id)
12272 -- : "r" (arg0), "r" (arg1), "r" (arg2));
12273 --
12274 -- return function_id;
12275 --}
12276 --
12277 - static int psci_get_version(void)
12278 - {
12279 - int err;
12280 -diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
12281 -index cbefbd7cfdb5..661d753df584 100644
12282 ---- a/arch/arm/mach-omap2/prm-regbits-34xx.h
12283 -+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
12284 -@@ -112,6 +112,7 @@
12285 - #define OMAP3430_VC_CMD_ONLP_SHIFT 16
12286 - #define OMAP3430_VC_CMD_RET_SHIFT 8
12287 - #define OMAP3430_VC_CMD_OFF_SHIFT 0
12288 -+#define OMAP3430_SREN_MASK (1 << 4)
12289 - #define OMAP3430_HSEN_MASK (1 << 3)
12290 - #define OMAP3430_MCODE_MASK (0x7 << 0)
12291 - #define OMAP3430_VALID_MASK (1 << 24)
12292 -diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
12293 -index b1c7a33e00e7..e794828dee55 100644
12294 ---- a/arch/arm/mach-omap2/prm-regbits-44xx.h
12295 -+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
12296 -@@ -35,6 +35,7 @@
12297 - #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
12298 - #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
12299 - #define OMAP4430_HSMCODE_MASK (0x7 << 0)
12300 -+#define OMAP4430_SRMODEEN_MASK (1 << 4)
12301 - #define OMAP4430_HSMODEEN_MASK (1 << 3)
12302 - #define OMAP4430_HSSCLL_SHIFT 24
12303 - #define OMAP4430_ICEPICK_RST_SHIFT 9
12304 -diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
12305 -index be9ef834fa81..076fd20d7e5a 100644
12306 ---- a/arch/arm/mach-omap2/vc.c
12307 -+++ b/arch/arm/mach-omap2/vc.c
12308 -@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
12309 - * idle. And we can also scale voltages to zero for off-idle.
12310 - * Note that no actual voltage scaling during off-idle will
12311 - * happen unless the board specific twl4030 PMIC scripts are
12312 -- * loaded.
12313 -+ * loaded. See also omap_vc_i2c_init for comments regarding
12314 -+ * erratum i531.
12315 - */
12316 - val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
12317 - if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
12318 -@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
12319 - return;
12320 - }
12321 -
12322 -+ /*
12323 -+ * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
12324 -+ * erratum i531 "Extra Power Consumed When Repeated Start Operation
12325 -+ * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
12326 -+ * Otherwise I2C4 eventually leads into about 23mW extra power being
12327 -+ * consumed even during off idle using VMODE.
12328 -+ */
12329 - i2c_high_speed = voltdm->pmic->i2c_high_speed;
12330 - if (i2c_high_speed)
12331 -- voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
12332 -+ voltdm->rmw(vc->common->i2c_cfg_clear_mask,
12333 - vc->common->i2c_cfg_hsen_mask,
12334 - vc->common->i2c_cfg_reg);
12335 -
12336 -diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
12337 -index cdbdd78e755e..89b83b7ff3ec 100644
12338 ---- a/arch/arm/mach-omap2/vc.h
12339 -+++ b/arch/arm/mach-omap2/vc.h
12340 -@@ -34,6 +34,7 @@ struct voltagedomain;
12341 - * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
12342 - * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
12343 - * @i2c_cfg_reg: I2C configuration register offset
12344 -+ * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
12345 - * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
12346 - * @i2c_mcode_mask: MCODE field mask for I2C config register
12347 - *
12348 -@@ -52,6 +53,7 @@ struct omap_vc_common {
12349 - u8 cmd_ret_shift;
12350 - u8 cmd_off_shift;
12351 - u8 i2c_cfg_reg;
12352 -+ u8 i2c_cfg_clear_mask;
12353 - u8 i2c_cfg_hsen_mask;
12354 - u8 i2c_mcode_mask;
12355 - };
12356 -diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
12357 -index 75bc4aa22b3a..71d74c9172c1 100644
12358 ---- a/arch/arm/mach-omap2/vc3xxx_data.c
12359 -+++ b/arch/arm/mach-omap2/vc3xxx_data.c
12360 -@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
12361 - .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
12362 - .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
12363 - .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
12364 -+ .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
12365 - .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
12366 - .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
12367 - .i2c_mcode_mask = OMAP3430_MCODE_MASK,
12368 -diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
12369 -index 085e5d6a04fd..2abd5fa8a697 100644
12370 ---- a/arch/arm/mach-omap2/vc44xx_data.c
12371 -+++ b/arch/arm/mach-omap2/vc44xx_data.c
12372 -@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
12373 - .cmd_ret_shift = OMAP4430_RET_SHIFT,
12374 - .cmd_off_shift = OMAP4430_OFF_SHIFT,
12375 - .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
12376 -+ .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
12377 - .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
12378 - .i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
12379 - };
12380 -diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
12381 -index e1268f905026..f412b53ed268 100644
12382 ---- a/arch/arm/net/bpf_jit_32.c
12383 -+++ b/arch/arm/net/bpf_jit_32.c
12384 -@@ -449,10 +449,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
12385 - return;
12386 - }
12387 - #endif
12388 -- if (rm != ARM_R0)
12389 -- emit(ARM_MOV_R(ARM_R0, rm), ctx);
12390 -+
12391 -+ /*
12392 -+ * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
12393 -+ * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
12394 -+ * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
12395 -+ * before using it as a source for ARM_R1.
12396 -+ *
12397 -+ * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
12398 -+ * ARM_R5 (r_X) so there is no particular register overlap
12399 -+ * issues.
12400 -+ */
12401 - if (rn != ARM_R1)
12402 - emit(ARM_MOV_R(ARM_R1, rn), ctx);
12403 -+ if (rm != ARM_R0)
12404 -+ emit(ARM_MOV_R(ARM_R0, rm), ctx);
12405 -
12406 - ctx->seen |= SEEN_CALL;
12407 - emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
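
The comment in the hunk above is the whole fix: for BPF_DIV | BPF_K, rm is r_A (R4) and rn is r_scratch (R0), so emitting the move into ARM_R0 first clobbers the value still needed as the source for ARM_R1. A standalone C model of the corrected ordering, with registers as a plain array (indices illustrative):

    #include <stdio.h>

    enum { R0, R1, R2, R3, R4, R5, NREGS };

    /* Load call arguments: r1 from rn first, because rn may itself be R0;
     * writing r0 first would destroy it before it is copied. */
    static void emit_args(unsigned *reg, int rm, int rn)
    {
            if (rn != R1)
                    reg[R1] = reg[rn];
            if (rm != R0)
                    reg[R0] = reg[rm];
    }

    int main(void)
    {
            /* r0 = divisor (r_scratch), r4 = dividend (r_A) */
            unsigned reg[NREGS] = { 7, 0, 0, 0, 42, 0 };

            emit_args(reg, R4, R0);
            printf("r0=%u r1=%u\n", reg[R0], reg[R1]);  /* r0=42 r1=7 */
            return 0;
    }

With the old ordering the same call would print r0=42 r1=42, the divisor lost.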
12408 -diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12409 -index cf87de3fc390..64b611782ef0 100644
12410 ---- a/arch/x86/include/asm/spinlock.h
12411 -+++ b/arch/x86/include/asm/spinlock.h
12412 -@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
12413 - struct __raw_tickets tmp = READ_ONCE(lock->tickets);
12414 -
12415 - tmp.head &= ~TICKET_SLOWPATH_FLAG;
12416 -- return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
12417 -+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
12418 - }
12419 - #define arch_spin_is_contended arch_spin_is_contended
12420 -
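
The spinlock change above hinges on modular ticket arithmetic: once tail wraps around head, the uncast subtraction is promoted to signed int and goes negative, hiding the contention. A runnable demo, assuming 16-bit tickets (the kernel's __ticket_t can be 8- or 16-bit depending on configuration):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t ticket_t;      /* stands in for the kernel's __ticket_t */
    #define TICKET_LOCK_INC 1       /* assumption: no paravirt slowpath */

    int main(void)
    {
            ticket_t head = 0xfffe, tail = 2;   /* tail has wrapped past head */

            /* Integer promotion makes the uncast difference negative, so a
             * contended lock looks uncontended; the cast keeps modulo-2^16
             * arithmetic, matching how tickets are actually handed out. */
            printf("without cast: %d\n", (tail - head) > TICKET_LOCK_INC);
            printf("with cast:    %d\n",
                   (ticket_t)(tail - head) > TICKET_LOCK_INC);
            return 0;
    }

This prints 0 then 1: only the cast form sees the four waiters between 0xfffe and 2.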
12421 -diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
12422 -index e4695985f9de..d93963340c3c 100644
12423 ---- a/arch/x86/pci/acpi.c
12424 -+++ b/arch/x86/pci/acpi.c
12425 -@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
12426 - kfree(info);
12427 - }
12428 -
12429 -+/*
12430 -+ * An IO port or MMIO resource assigned to a PCI host bridge may be
12431 -+ * consumed by the host bridge itself or available to its child
12432 -+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
12433 -+ * to tell whether the resource is consumed by the host bridge itself,
12434 -+ * but firmware hasn't used that bit consistently, so we can't rely on it.
12435 -+ *
12436 -+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
12437 -+ * to be available to child bus/devices except one special case:
12438 -+ * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
12439 -+ * to access PCI configuration space.
12440 -+ *
12441 -+ * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
12442 -+ */
12443 -+static bool resource_is_pcicfg_ioport(struct resource *res)
12444 -+{
12445 -+ return (res->flags & IORESOURCE_IO) &&
12446 -+ res->start == 0xCF8 && res->end == 0xCFF;
12447 -+}
12448 -+
12449 - static void probe_pci_root_info(struct pci_root_info *info,
12450 - struct acpi_device *device,
12451 - int busnum, int domain,
12452 -@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
12453 - "no IO and memory resources present in _CRS\n");
12454 - else
12455 - resource_list_for_each_entry_safe(entry, tmp, list) {
12456 -- if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
12457 -- (entry->res->flags & IORESOURCE_DISABLED))
12458 -+ if ((entry->res->flags & IORESOURCE_DISABLED) ||
12459 -+ resource_is_pcicfg_ioport(entry->res))
12460 - resource_list_destroy_entry(entry);
12461 - else
12462 - entry->res->name = info->name;
12463 -diff --git a/block/blk-core.c b/block/blk-core.c
12464 -index 794c3e7f01cf..66406474f0c4 100644
12465 ---- a/block/blk-core.c
12466 -+++ b/block/blk-core.c
12467 -@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
12468 - q->queue_lock = &q->__queue_lock;
12469 - spin_unlock_irq(lock);
12470 -
12471 -+ bdi_destroy(&q->backing_dev_info);
12472 -+
12473 - /* @q is and will stay empty, shutdown and put */
12474 - blk_put_queue(q);
12475 - }
12476 -diff --git a/block/blk-mq.c b/block/blk-mq.c
12477 -index 33c428530193..5c39703e644f 100644
12478 ---- a/block/blk-mq.c
12479 -+++ b/block/blk-mq.c
12480 -@@ -675,8 +675,11 @@ static void blk_mq_rq_timer(unsigned long priv)
12481 - data.next = blk_rq_timeout(round_jiffies_up(data.next));
12482 - mod_timer(&q->timeout, data.next);
12483 - } else {
12484 -- queue_for_each_hw_ctx(q, hctx, i)
12485 -- blk_mq_tag_idle(hctx);
12486 -+ queue_for_each_hw_ctx(q, hctx, i) {
12487 -+ /* the hctx may be unmapped, so check it here */
12488 -+ if (blk_mq_hw_queue_mapped(hctx))
12489 -+ blk_mq_tag_idle(hctx);
12490 -+ }
12491 - }
12492 - }
12493 -
12494 -@@ -1570,22 +1573,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
12495 - return NOTIFY_OK;
12496 - }
12497 -
12498 --static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
12499 --{
12500 -- struct request_queue *q = hctx->queue;
12501 -- struct blk_mq_tag_set *set = q->tag_set;
12502 --
12503 -- if (set->tags[hctx->queue_num])
12504 -- return NOTIFY_OK;
12505 --
12506 -- set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
12507 -- if (!set->tags[hctx->queue_num])
12508 -- return NOTIFY_STOP;
12509 --
12510 -- hctx->tags = set->tags[hctx->queue_num];
12511 -- return NOTIFY_OK;
12512 --}
12513 --
12514 - static int blk_mq_hctx_notify(void *data, unsigned long action,
12515 - unsigned int cpu)
12516 - {
12517 -@@ -1593,8 +1580,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
12518 -
12519 - if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
12520 - return blk_mq_hctx_cpu_offline(hctx, cpu);
12521 -- else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
12522 -- return blk_mq_hctx_cpu_online(hctx, cpu);
12523 -+
12524 -+ /*
12525 -+ * In case of CPU online, tags may be reallocated
12526 -+ * in blk_mq_map_swqueue() after mapping is updated.
12527 -+ */
12528 -
12529 - return NOTIFY_OK;
12530 - }
12531 -@@ -1776,6 +1766,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
12532 - unsigned int i;
12533 - struct blk_mq_hw_ctx *hctx;
12534 - struct blk_mq_ctx *ctx;
12535 -+ struct blk_mq_tag_set *set = q->tag_set;
12536 -
12537 - queue_for_each_hw_ctx(q, hctx, i) {
12538 - cpumask_clear(hctx->cpumask);
12539 -@@ -1802,16 +1793,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
12540 - * disable it and free the request entries.
12541 - */
12542 - if (!hctx->nr_ctx) {
12543 -- struct blk_mq_tag_set *set = q->tag_set;
12544 --
12545 - if (set->tags[i]) {
12546 - blk_mq_free_rq_map(set, set->tags[i], i);
12547 - set->tags[i] = NULL;
12548 -- hctx->tags = NULL;
12549 - }
12550 -+ hctx->tags = NULL;
12551 - continue;
12552 - }
12553 -
12554 -+ /* unmapped hw queue can be remapped after CPU topo changed */
12555 -+ if (!set->tags[i])
12556 -+ set->tags[i] = blk_mq_init_rq_map(set, i);
12557 -+ hctx->tags = set->tags[i];
12558 -+ WARN_ON(!hctx->tags);
12559 -+
12560 - /*
12561 - * Initialize batch roundrobin counts
12562 - */
12563 -@@ -2075,9 +2070,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
12564 - */
12565 - list_for_each_entry(q, &all_q_list, all_q_node)
12566 - blk_mq_freeze_queue_start(q);
12567 -- list_for_each_entry(q, &all_q_list, all_q_node)
12568 -+ list_for_each_entry(q, &all_q_list, all_q_node) {
12569 - blk_mq_freeze_queue_wait(q);
12570 -
12571 -+ /*
12572 -+ * timeout handler can't touch hw queue during the
12573 -+ * reinitialization
12574 -+ */
12575 -+ del_timer_sync(&q->timeout);
12576 -+ }
12577 -+
12578 - list_for_each_entry(q, &all_q_list, all_q_node)
12579 - blk_mq_queue_reinit(q);
12580 -
12581 -diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
12582 -index faaf36ade7eb..2b8fd302f677 100644
12583 ---- a/block/blk-sysfs.c
12584 -+++ b/block/blk-sysfs.c
12585 -@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
12586 -
12587 - blk_trace_shutdown(q);
12588 -
12589 -- bdi_destroy(&q->backing_dev_info);
12590 --
12591 - ida_simple_remove(&blk_queue_ida, q->id);
12592 - call_rcu(&q->rcu_head, blk_free_queue_rcu);
12593 - }
12594 -diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
12595 -index b193f8425999..ff6d8adc9cda 100644
12596 ---- a/drivers/acpi/acpi_pnp.c
12597 -+++ b/drivers/acpi/acpi_pnp.c
12598 -@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
12599 - {"PNPb006"},
12600 - /* cs423x-pnpbios */
12601 - {"CSC0100"},
12602 -+ {"CSC0103"},
12603 -+ {"CSC0110"},
12604 - {"CSC0000"},
12605 - {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
12606 - /* es18xx-pnpbios */
12607 -diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
12608 -index cf607fe69dbd..c240bdf824f2 100644
12609 ---- a/drivers/acpi/acpica/acmacros.h
12610 -+++ b/drivers/acpi/acpica/acmacros.h
12611 -@@ -63,23 +63,12 @@
12612 - #define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
12613 -
12614 - /*
12615 -- * printf() format helpers. These macros are workarounds for the difficulties
12616 -+ * printf() format helper. This macros is a workaround for the difficulties
12617 - * with emitting 64-bit integers and 64-bit pointers with the same code
12618 - * for both 32-bit and 64-bit hosts.
12619 - */
12620 - #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
12621 -
12622 --#if ACPI_MACHINE_WIDTH == 64
12623 --#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
12624 --#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
12625 --#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
12626 --
12627 --#else
12628 --#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
12629 --#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
12630 --#define ACPI_PRINTF_UINT "0x%8.8X"
12631 --#endif
12632 --
12633 - /*
12634 - * Macros for moving data around to/from buffers that are possibly unaligned.
12635 - * If the hardware supports the transfer of unaligned data, just do the store.
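
The ACPICA hunks that follow convert the %p and ACPI_FORMAT_NATIVE_UINT users to ACPI_FORMAT_UINT64, which splits a 64-bit value into high and low dwords so a single "%8.8X%8.8X" format prints full-width addresses identically on 32-bit and 64-bit hosts (the removed tbprint.c comment notes that %p zero-padding is not uniform across hosts). A userspace model of the macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Emit hi/lo dwords, mirroring ACPI_HIDWORD(i), ACPI_LODWORD(i). */
    #define FORMAT_UINT64(i) \
            (unsigned)((uint64_t)(i) >> 32), (unsigned)(uint32_t)(i)

    int main(void)
    {
            uint64_t address = 0x00000000fed40000ULL;  /* illustrative address */

            printf("Addr 0x%8.8X%8.8X\n", FORMAT_UINT64(address));
            return 0;
    }

The macro expands to two arguments, which is why every call site pairs it with a doubled %8.8X in the format string.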
12636 -diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
12637 -index 77244182ff02..ea0cc4e08f80 100644
12638 ---- a/drivers/acpi/acpica/dsopcode.c
12639 -+++ b/drivers/acpi/acpica/dsopcode.c
12640 -@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
12641 -
12642 - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
12643 - obj_desc,
12644 -- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
12645 -+ ACPI_FORMAT_UINT64(obj_desc->region.address),
12646 - obj_desc->region.length));
12647 -
12648 - /* Now the address and length are valid for this opregion */
12649 -@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
12650 - return_ACPI_STATUS(AE_NOT_EXIST);
12651 - }
12652 -
12653 -- obj_desc->region.address =
12654 -- (acpi_physical_address) ACPI_TO_INTEGER(table);
12655 -+ obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
12656 - obj_desc->region.length = table->length;
12657 -
12658 - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
12659 - obj_desc,
12660 -- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
12661 -+ ACPI_FORMAT_UINT64(obj_desc->region.address),
12662 - obj_desc->region.length));
12663 -
12664 - /* Now the address and length are valid for this opregion */
12665 -diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
12666 -index 9abace3401f9..2ba28a63fb68 100644
12667 ---- a/drivers/acpi/acpica/evregion.c
12668 -+++ b/drivers/acpi/acpica/evregion.c
12669 -@@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
12670 - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
12671 - "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
12672 - &region_obj->region.handler->address_space, handler,
12673 -- ACPI_FORMAT_NATIVE_UINT(address),
12674 -+ ACPI_FORMAT_UINT64(address),
12675 - acpi_ut_get_region_name(region_obj->region.
12676 - space_id)));
12677 -
12678 -diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
12679 -index 7c213b6b6472..1da52bef632e 100644
12680 ---- a/drivers/acpi/acpica/exdump.c
12681 -+++ b/drivers/acpi/acpica/exdump.c
12682 -@@ -767,8 +767,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
12683 - acpi_os_printf("\n");
12684 - } else {
12685 - acpi_os_printf(" base %8.8X%8.8X Length %X\n",
12686 -- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
12687 -- address),
12688 -+ ACPI_FORMAT_UINT64(obj_desc->region.
12689 -+ address),
12690 - obj_desc->region.length);
12691 - }
12692 - break;
12693 -diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
12694 -index 49479927e7f7..725a3746a2df 100644
12695 ---- a/drivers/acpi/acpica/exfldio.c
12696 -+++ b/drivers/acpi/acpica/exfldio.c
12697 -@@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
12698 - }
12699 -
12700 - ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
12701 -- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
12702 -+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
12703 - acpi_ut_get_region_name(rgn_desc->region.
12704 - space_id),
12705 - rgn_desc->region.space_id,
12706 - obj_desc->common_field.access_byte_width,
12707 - obj_desc->common_field.base_byte_offset,
12708 -- field_datum_byte_offset, ACPI_CAST_PTR(void,
12709 -- (rgn_desc->
12710 -- region.
12711 -- address +
12712 -- region_offset))));
12713 -+ field_datum_byte_offset,
12714 -+ ACPI_FORMAT_UINT64(rgn_desc->region.address +
12715 -+ region_offset)));
12716 -
12717 - /* Invoke the appropriate address_space/op_region handler */
12718 -
12719 -diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
12720 -index 0fe188e238ef..b4bbf3150bc1 100644
12721 ---- a/drivers/acpi/acpica/exregion.c
12722 -+++ b/drivers/acpi/acpica/exregion.c
12723 -@@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function,
12724 - if (!mem_info->mapped_logical_address) {
12725 - ACPI_ERROR((AE_INFO,
12726 - "Could not map memory at 0x%8.8X%8.8X, size %u",
12727 -- ACPI_FORMAT_NATIVE_UINT(address),
12728 -+ ACPI_FORMAT_UINT64(address),
12729 - (u32) map_length));
12730 - mem_info->mapped_length = 0;
12731 - return_ACPI_STATUS(AE_NO_MEMORY);
12732 -@@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function,
12733 -
12734 - ACPI_DEBUG_PRINT((ACPI_DB_INFO,
12735 - "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
12736 -- bit_width, function,
12737 -- ACPI_FORMAT_NATIVE_UINT(address)));
12738 -+ bit_width, function, ACPI_FORMAT_UINT64(address)));
12739 -
12740 - /*
12741 - * Perform the memory read or write
12742 -@@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function,
12743 -
12744 - ACPI_DEBUG_PRINT((ACPI_DB_INFO,
12745 - "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
12746 -- bit_width, function,
12747 -- ACPI_FORMAT_NATIVE_UINT(address)));
12748 -+ bit_width, function, ACPI_FORMAT_UINT64(address)));
12749 -
12750 - /* Decode the function parameter */
12751 -
12752 -diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
12753 -index 2bd33fe56cb3..29033d71417b 100644
12754 ---- a/drivers/acpi/acpica/hwvalid.c
12755 -+++ b/drivers/acpi/acpica/hwvalid.c
12756 -@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
12757 - byte_width = ACPI_DIV_8(bit_width);
12758 - last_address = address + byte_width - 1;
12759 -
12760 -- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
12761 -- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
12762 -- last_address),
12763 -- byte_width));
12764 -+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
12765 -+ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
12766 -+ ACPI_FORMAT_UINT64(address),
12767 -+ ACPI_FORMAT_UINT64(last_address), byte_width));
12768 -
12769 - /* Maximum 16-bit address in I/O space */
12770 -
12771 - if (last_address > ACPI_UINT16_MAX) {
12772 - ACPI_ERROR((AE_INFO,
12773 -- "Illegal I/O port address/length above 64K: %p/0x%X",
12774 -- ACPI_CAST_PTR(void, address), byte_width));
12775 -+ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
12776 -+ ACPI_FORMAT_UINT64(address), byte_width));
12777 - return_ACPI_STATUS(AE_LIMIT);
12778 - }
12779 -
12780 -@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
12781 -
12782 - if (acpi_gbl_osi_data >= port_info->osi_dependency) {
12783 - ACPI_DEBUG_PRINT((ACPI_DB_IO,
12784 -- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
12785 -- ACPI_CAST_PTR(void, address),
12786 -+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
12787 -+ ACPI_FORMAT_UINT64(address),
12788 - byte_width, port_info->name,
12789 - port_info->start,
12790 - port_info->end));
12791 -diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
12792 -index 80f097eb7381..d259393505fa 100644
12793 ---- a/drivers/acpi/acpica/nsdump.c
12794 -+++ b/drivers/acpi/acpica/nsdump.c
12795 -@@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
12796 - switch (type) {
12797 - case ACPI_TYPE_PROCESSOR:
12798 -
12799 -- acpi_os_printf("ID %02X Len %02X Addr %p\n",
12800 -+ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
12801 - obj_desc->processor.proc_id,
12802 - obj_desc->processor.length,
12803 -- ACPI_CAST_PTR(void,
12804 -- obj_desc->processor.
12805 -- address));
12806 -+ ACPI_FORMAT_UINT64(obj_desc->processor.
12807 -+ address));
12808 - break;
12809 -
12810 - case ACPI_TYPE_DEVICE:
12811 -@@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
12812 - space_id));
12813 - if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
12814 - acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
12815 -- ACPI_FORMAT_NATIVE_UINT
12816 -- (obj_desc->region.address),
12817 -+ ACPI_FORMAT_UINT64(obj_desc->
12818 -+ region.
12819 -+ address),
12820 - obj_desc->region.length);
12821 - } else {
12822 - acpi_os_printf
12823 -diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
12824 -index 6a144957aadd..fd5998b2b46b 100644
12825 ---- a/drivers/acpi/acpica/tbdata.c
12826 -+++ b/drivers/acpi/acpica/tbdata.c
12827 -@@ -113,9 +113,9 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
12828 - case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
12829 - case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
12830 -
12831 -- table =
12832 -- ACPI_CAST_PTR(struct acpi_table_header,
12833 -- table_desc->address);
12834 -+ table = ACPI_CAST_PTR(struct acpi_table_header,
12835 -+ ACPI_PHYSADDR_TO_PTR(table_desc->
12836 -+ address));
12837 - break;
12838 -
12839 - default:
12840 -@@ -214,7 +214,8 @@ acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
12841 - case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
12842 - case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
12843 -
12844 -- table_header = ACPI_CAST_PTR(struct acpi_table_header, address);
12845 -+ table_header = ACPI_CAST_PTR(struct acpi_table_header,
12846 -+ ACPI_PHYSADDR_TO_PTR(address));
12847 - if (!table_header) {
12848 - return (AE_NO_MEMORY);
12849 - }
12850 -@@ -398,14 +399,14 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
12851 - table_desc->length);
12852 - if (ACPI_FAILURE(status)) {
12853 - ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
12854 -- "%4.4s " ACPI_PRINTF_UINT
12855 -+ "%4.4s 0x%8.8X%8.8X"
12856 - " Attempted table install failed",
12857 - acpi_ut_valid_acpi_name(table_desc->
12858 - signature.
12859 - ascii) ?
12860 - table_desc->signature.ascii : "????",
12861 -- ACPI_FORMAT_TO_UINT(table_desc->
12862 -- address)));
12863 -+ ACPI_FORMAT_UINT64(table_desc->
12864 -+ address)));
12865 - goto invalidate_and_exit;
12866 - }
12867 - }
12868 -diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
12869 -index 7fbc2b9dcbbb..7e69bc73bd16 100644
12870 ---- a/drivers/acpi/acpica/tbinstal.c
12871 -+++ b/drivers/acpi/acpica/tbinstal.c
12872 -@@ -187,8 +187,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
12873 - status = acpi_tb_acquire_temp_table(&new_table_desc, address,
12874 - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
12875 - if (ACPI_FAILURE(status)) {
12876 -- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
12877 -- ACPI_CAST_PTR(void, address)));
12878 -+ ACPI_ERROR((AE_INFO,
12879 -+ "Could not acquire table length at %8.8X%8.8X",
12880 -+ ACPI_FORMAT_UINT64(address)));
12881 - return_ACPI_STATUS(status);
12882 - }
12883 -
12884 -@@ -246,8 +247,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
12885 -
12886 - status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
12887 - if (ACPI_FAILURE(status)) {
12888 -- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
12889 -- ACPI_CAST_PTR(void, address)));
12890 -+ ACPI_ERROR((AE_INFO,
12891 -+ "Could not acquire table length at %8.8X%8.8X",
12892 -+ ACPI_FORMAT_UINT64(address)));
12893 - return_ACPI_STATUS(status);
12894 - }
12895 -
12896 -@@ -258,9 +260,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
12897 - if (!reload &&
12898 - acpi_gbl_disable_ssdt_table_install &&
12899 - ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
12900 -- ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p",
12901 -- new_table_desc.signature.ascii, ACPI_CAST_PTR(void,
12902 -- address)));
12903 -+ ACPI_INFO((AE_INFO,
12904 -+ "Ignoring installation of %4.4s at %8.8X%8.8X",
12905 -+ new_table_desc.signature.ascii,
12906 -+ ACPI_FORMAT_UINT64(address)));
12907 - goto release_and_exit;
12908 - }
12909 -
12910 -@@ -428,11 +431,11 @@ finish_override:
12911 - return;
12912 - }
12913 -
12914 -- ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
12915 -- " %s table override, new table: " ACPI_PRINTF_UINT,
12916 -+ ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
12917 -+ " %s table override, new table: 0x%8.8X%8.8X",
12918 - old_table_desc->signature.ascii,
12919 -- ACPI_FORMAT_TO_UINT(old_table_desc->address),
12920 -- override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address)));
12921 -+ ACPI_FORMAT_UINT64(old_table_desc->address),
12922 -+ override_type, ACPI_FORMAT_UINT64(new_table_desc.address)));
12923 -
12924 - /* We can now uninstall the original table */
12925 -
12926 -@@ -516,7 +519,7 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
12927 -
12928 - if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
12929 - ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
12930 -- ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address));
12931 -+ ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
12932 - }
12933 -
12934 - table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
12935 -diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
12936 -index ef16c06e5091..77ba5c71c6e7 100644
12937 ---- a/drivers/acpi/acpica/tbprint.c
12938 -+++ b/drivers/acpi/acpica/tbprint.c
12939 -@@ -127,18 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
12940 - {
12941 - struct acpi_table_header local_header;
12942 -
12943 -- /*
12944 -- * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
12945 -- * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
12946 -- * The %p specifier does not emit uniform output on all hosts. On some,
12947 -- * leading zeros are not supported.
12948 -- */
12949 - if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
12950 -
12951 - /* FACS only has signature and length fields */
12952 -
12953 -- ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
12954 -- header->signature, ACPI_FORMAT_TO_UINT(address),
12955 -+ ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
12956 -+ header->signature, ACPI_FORMAT_UINT64(address),
12957 - header->length));
12958 - } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
12959 -
12960 -@@ -149,9 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
12961 - header)->oem_id, ACPI_OEM_ID_SIZE);
12962 - acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
12963 -
12964 -- ACPI_INFO((AE_INFO,
12965 -- "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
12966 -- ACPI_FORMAT_TO_UINT(address),
12967 -+ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
12968 -+ ACPI_FORMAT_UINT64(address),
12969 - (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
12970 - revision >
12971 - 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
12972 -@@ -165,9 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
12973 - acpi_tb_cleanup_table_header(&local_header, header);
12974 -
12975 - ACPI_INFO((AE_INFO,
12976 -- "%-4.4s " ACPI_PRINTF_UINT
12977 -+ "%-4.4s 0x%8.8X%8.8X"
12978 - " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
12979 -- local_header.signature, ACPI_FORMAT_TO_UINT(address),
12980 -+ local_header.signature, ACPI_FORMAT_UINT64(address),
12981 - local_header.length, local_header.revision,
12982 - local_header.oem_id, local_header.oem_table_id,
12983 - local_header.oem_revision,
12984 -diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
12985 -index eac52cf14f1a..fa76a3603aa1 100644
12986 ---- a/drivers/acpi/acpica/tbxfroot.c
12987 -+++ b/drivers/acpi/acpica/tbxfroot.c
12988 -@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
12989 - *
12990 - ******************************************************************************/
12991 -
12992 --acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
12993 -+acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
12994 - {
12995 - u8 *table_ptr;
12996 - u8 *mem_rover;
12997 -@@ -200,7 +200,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
12998 - physical_address +=
12999 - (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
13000 -
13001 -- *table_address = physical_address;
13002 -+ *table_address =
13003 -+ (acpi_physical_address) physical_address;
13004 - return_ACPI_STATUS(AE_OK);
13005 - }
13006 - }
13007 -@@ -233,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
13008 - (ACPI_HI_RSDP_WINDOW_BASE +
13009 - ACPI_PTR_DIFF(mem_rover, table_ptr));
13010 -
13011 -- *table_address = physical_address;
13012 -+ *table_address = (acpi_physical_address) physical_address;
13013 - return_ACPI_STATUS(AE_OK);
13014 - }
13015 -
13016 -diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
13017 -index 1279f50da757..911ea8e7fe87 100644
13018 ---- a/drivers/acpi/acpica/utaddress.c
13019 -+++ b/drivers/acpi/acpica/utaddress.c
13020 -@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
13021 - acpi_gbl_address_range_list[space_id] = range_info;
13022 -
13023 - ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
13024 -- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
13025 -+ "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
13026 - acpi_ut_get_node_name(range_info->region_node),
13027 -- ACPI_CAST_PTR(void, address),
13028 -- ACPI_CAST_PTR(void, range_info->end_address)));
13029 -+ ACPI_FORMAT_UINT64(address),
13030 -+ ACPI_FORMAT_UINT64(range_info->end_address)));
13031 -
13032 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
13033 - return_ACPI_STATUS(AE_OK);
13034 -@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
13035 - }
13036 -
13037 - ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
13038 -- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
13039 -+ "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
13040 - acpi_ut_get_node_name(range_info->
13041 - region_node),
13042 -- ACPI_CAST_PTR(void,
13043 -- range_info->
13044 -- start_address),
13045 -- ACPI_CAST_PTR(void,
13046 -- range_info->
13047 -- end_address)));
13048 -+ ACPI_FORMAT_UINT64(range_info->
13049 -+ start_address),
13050 -+ ACPI_FORMAT_UINT64(range_info->
13051 -+ end_address)));
13052 -
13053 - ACPI_FREE(range_info);
13054 - return_VOID;
13055 -@@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
13056 - region_node);
13057 -
13058 - ACPI_WARNING((AE_INFO,
13059 -- "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
13060 -+ "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
13061 - acpi_ut_get_region_name(space_id),
13062 -- ACPI_CAST_PTR(void, address),
13063 -- ACPI_CAST_PTR(void, end_address),
13064 -- ACPI_CAST_PTR(void,
13065 -- range_info->
13066 -- start_address),
13067 -- ACPI_CAST_PTR(void,
13068 -- range_info->
13069 -- end_address),
13070 -+ ACPI_FORMAT_UINT64(address),
13071 -+ ACPI_FORMAT_UINT64(end_address),
13072 -+ ACPI_FORMAT_UINT64(range_info->
13073 -+ start_address),
13074 -+ ACPI_FORMAT_UINT64(range_info->
13075 -+ end_address),
13076 - pathname));
13077 - ACPI_FREE(pathname);
13078 - }
13079 -diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
13080 -index 5589a6e2a023..8244f013f210 100644
13081 ---- a/drivers/acpi/resource.c
13082 -+++ b/drivers/acpi/resource.c
13083 -@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
13084 - * @ares: Input ACPI resource object.
13085 - * @types: Valid resource types of IORESOURCE_XXX
13086 - *
13087 -- * This is a hepler function to support acpi_dev_get_resources(), which filters
13088 -+ * This is a helper function to support acpi_dev_get_resources(), which filters
13089 - * ACPI resource objects according to resource types.
13090 - */
13091 - int acpi_dev_filter_resource_type(struct acpi_resource *ares,
13092 -diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
13093 -index 26e5b5060523..bf034f8b7c1a 100644
13094 ---- a/drivers/acpi/sbshc.c
13095 -+++ b/drivers/acpi/sbshc.c
13096 -@@ -14,6 +14,7 @@
13097 - #include <linux/delay.h>
13098 - #include <linux/module.h>
13099 - #include <linux/interrupt.h>
13100 -+#include <linux/dmi.h>
13101 - #include "sbshc.h"
13102 -
13103 - #define PREFIX "ACPI: "
13104 -@@ -87,6 +88,8 @@ enum acpi_smb_offset {
13105 - ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
13106 - };
13107 -
13108 -+static bool macbook;
13109 -+
13110 - static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
13111 - {
13112 - return ec_read(hc->offset + address, data);
13113 -@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
13114 - }
13115 -
13116 - mutex_lock(&hc->lock);
13117 -+ if (macbook)
13118 -+ udelay(5);
13119 - if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
13120 - goto end;
13121 - if (temp) {
13122 -@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
13123 - acpi_handle handle, acpi_ec_query_func func,
13124 - void *data);
13125 -
13126 -+static int macbook_dmi_match(const struct dmi_system_id *d)
13127 -+{
13128 -+ pr_debug("Detected MacBook, enabling workaround\n");
13129 -+ macbook = true;
13130 -+ return 0;
13131 -+}
13132 -+
13133 -+static struct dmi_system_id acpi_smbus_dmi_table[] = {
13134 -+ { macbook_dmi_match, "Apple MacBook", {
13135 -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
13136 -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
13137 -+ },
13138 -+ { },
13139 -+};
13140 -+
13141 - static int acpi_smbus_hc_add(struct acpi_device *device)
13142 - {
13143 - int status;
13144 - unsigned long long val;
13145 - struct acpi_smb_hc *hc;
13146 -
13147 -+ dmi_check_system(acpi_smbus_dmi_table);
13148 -+
13149 - if (!device)
13150 - return -EINVAL;
13151 -
13152 -diff --git a/drivers/block/loop.c b/drivers/block/loop.c
13153 -index d1f168b73634..773e964f14d9 100644
13154 ---- a/drivers/block/loop.c
13155 -+++ b/drivers/block/loop.c
13156 -@@ -1672,8 +1672,8 @@ out:
13157 -
13158 - static void loop_remove(struct loop_device *lo)
13159 - {
13160 -- del_gendisk(lo->lo_disk);
13161 - blk_cleanup_queue(lo->lo_queue);
13162 -+ del_gendisk(lo->lo_disk);
13163 - blk_mq_free_tag_set(&lo->tag_set);
13164 - put_disk(lo->lo_disk);
13165 - kfree(lo);
13166 -diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
13167 -index 7722ed53bd65..af3bc7a8033b 100644
13168 ---- a/drivers/gpio/gpiolib-sysfs.c
13169 -+++ b/drivers/gpio/gpiolib-sysfs.c
13170 -@@ -551,6 +551,7 @@ static struct class gpio_class = {
13171 - */
13172 - int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
13173 - {
13174 -+ struct gpio_chip *chip;
13175 - unsigned long flags;
13176 - int status;
13177 - const char *ioname = NULL;
13178 -@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
13179 - return -EINVAL;
13180 - }
13181 -
13182 -+ chip = desc->chip;
13183 -+
13184 - mutex_lock(&sysfs_lock);
13185 -
13186 -+ /* check if chip is being removed */
13187 -+ if (!chip || !chip->exported) {
13188 -+ status = -ENODEV;
13189 -+ goto fail_unlock;
13190 -+ }
13191 -+
13192 - spin_lock_irqsave(&gpio_lock, flags);
13193 - if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
13194 - test_bit(FLAG_EXPORT, &desc->flags)) {
13195 -@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
13196 - {
13197 - int status;
13198 - struct device *dev;
13199 -+ struct gpio_desc *desc;
13200 -+ unsigned int i;
13201 -
13202 - mutex_lock(&sysfs_lock);
13203 - dev = class_find_device(&gpio_class, NULL, chip, match_export);
13204 - if (dev) {
13205 - put_device(dev);
13206 - device_unregister(dev);
13207 -+ /* prevent further gpiod exports */
13208 - chip->exported = false;
13209 - status = 0;
13210 - } else
13211 -@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
13212 -
13213 - if (status)
13214 - chip_dbg(chip, "%s: status %d\n", __func__, status);
13215 -+
13216 -+ /* unregister gpiod class devices owned by sysfs */
13217 -+ for (i = 0; i < chip->ngpio; i++) {
13218 -+ desc = &chip->desc[i];
13219 -+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
13220 -+ gpiod_free(desc);
13221 -+ }
13222 - }
13223 -
13224 - static int __init gpiolib_sysfs_init(void)
13225 -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
13226 -index d8135adb2238..39762a7d2ec7 100644
13227 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
13228 -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
13229 -@@ -429,9 +429,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
13230 -
13231 - BUG_ON(!dqm || !qpd);
13232 -
13233 -- BUG_ON(!list_empty(&qpd->queues_list));
13234 -+ pr_debug("In func %s\n", __func__);
13235 -
13236 -- pr_debug("kfd: In func %s\n", __func__);
13237 -+ pr_debug("qpd->queues_list is %s\n",
13238 -+ list_empty(&qpd->queues_list) ? "empty" : "not empty");
13239 -
13240 - retval = 0;
13241 - mutex_lock(&dqm->lock);
13242 -@@ -878,6 +879,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
13243 - return -ENOMEM;
13244 - }
13245 -
13246 -+ init_sdma_vm(dqm, q, qpd);
13247 -+
13248 - retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
13249 - &q->gart_mqd_addr, &q->properties);
13250 - if (retval != 0)
13251 -diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
13252 -index 10574a0c3a55..5769db4f51f3 100644
13253 ---- a/drivers/gpu/drm/drm_irq.c
13254 -+++ b/drivers/gpu/drm/drm_irq.c
13255 -@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
13256 -
13257 - /* Reinitialize corresponding vblank timestamp if high-precision query
13258 - * available. Skip this step if query unsupported or failed. Will
13259 -- * reinitialize delayed at next vblank interrupt in that case.
13260 -+ * reinitialize delayed at next vblank interrupt in that case and
13261 -+ * assign 0 for now, to mark the vblanktimestamp as invalid.
13262 - */
13263 -- if (rc) {
13264 -- tslot = atomic_read(&vblank->count) + diff;
13265 -- vblanktimestamp(dev, crtc, tslot) = t_vblank;
13266 -- }
13267 -+ tslot = atomic_read(&vblank->count) + diff;
13268 -+ vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
13269 -
13270 - smp_mb__before_atomic();
13271 - atomic_add(diff, &vblank->count);
13272 -diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
13273 -index a74aaf9242b9..88b36a9173c9 100644
13274 ---- a/drivers/gpu/drm/i915/intel_dp.c
13275 -+++ b/drivers/gpu/drm/i915/intel_dp.c
13276 -@@ -1176,7 +1176,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
13277 -
13278 - pipe_config->has_dp_encoder = true;
13279 - pipe_config->has_drrs = false;
13280 -- pipe_config->has_audio = intel_dp->has_audio;
13281 -+ pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
13282 -
13283 - if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
13284 - intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
13285 -@@ -2026,8 +2026,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
13286 - int dotclock;
13287 -
13288 - tmp = I915_READ(intel_dp->output_reg);
13289 -- if (tmp & DP_AUDIO_OUTPUT_ENABLE)
13290 -- pipe_config->has_audio = true;
13291 -+
13292 -+ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
13293 -
13294 - if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
13295 - if (tmp & DP_SYNC_HS_HIGH)
13296 -diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
13297 -index 071b96d6e146..fbc2a83795fa 100644
13298 ---- a/drivers/gpu/drm/i915/intel_lvds.c
13299 -+++ b/drivers/gpu/drm/i915/intel_lvds.c
13300 -@@ -812,12 +812,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
13301 - static const struct dmi_system_id intel_dual_link_lvds[] = {
13302 - {
13303 - .callback = intel_dual_link_lvds_callback,
13304 -- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
13305 -+ .ident = "Apple MacBook Pro 15\" (2010)",
13306 -+ .matches = {
13307 -+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
13308 -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
13309 -+ },
13310 -+ },
13311 -+ {
13312 -+ .callback = intel_dual_link_lvds_callback,
13313 -+ .ident = "Apple MacBook Pro 15\" (2011)",
13314 - .matches = {
13315 - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
13316 - DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
13317 - },
13318 - },
13319 -+ {
13320 -+ .callback = intel_dual_link_lvds_callback,
13321 -+ .ident = "Apple MacBook Pro 15\" (2012)",
13322 -+ .matches = {
13323 -+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
13324 -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
13325 -+ },
13326 -+ },
13327 - { } /* terminating entry */
13328 - };
13329 -
13330 -@@ -847,6 +863,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
13331 - if (i915.lvds_channel_mode > 0)
13332 - return i915.lvds_channel_mode == 2;
13333 -
13334 -+ /* single channel LVDS is limited to 112 MHz */
13335 -+ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
13336 -+ > 112999)
13337 -+ return true;
13338 -+
13339 - if (dmi_check_system(intel_dual_link_lvds))
13340 - return true;
13341 -
13342 -@@ -1104,6 +1125,8 @@ void intel_lvds_init(struct drm_device *dev)
13343 - out:
13344 - mutex_unlock(&dev->mode_config.mutex);
13345 -
13346 -+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
13347 -+
13348 - lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
13349 - DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
13350 - lvds_encoder->is_dual_link ? "dual" : "single");
13351 -@@ -1118,7 +1141,6 @@ out:
13352 - }
13353 - drm_connector_register(connector);
13354 -
13355 -- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
13356 - intel_panel_setup_backlight(connector, INVALID_PIPE);
13357 -
13358 - return;
13359 -diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
13360 -index c0ecd128b14b..7348f222684d 100644
13361 ---- a/drivers/gpu/drm/radeon/radeon_asic.c
13362 -+++ b/drivers/gpu/drm/radeon/radeon_asic.c
13363 -@@ -1180,7 +1180,7 @@ static struct radeon_asic rs780_asic = {
13364 - static struct radeon_asic_ring rv770_uvd_ring = {
13365 - .ib_execute = &uvd_v1_0_ib_execute,
13366 - .emit_fence = &uvd_v2_2_fence_emit,
13367 -- .emit_semaphore = &uvd_v1_0_semaphore_emit,
13368 -+ .emit_semaphore = &uvd_v2_2_semaphore_emit,
13369 - .cs_parse = &radeon_uvd_cs_parse,
13370 - .ring_test = &uvd_v1_0_ring_test,
13371 - .ib_test = &uvd_v1_0_ib_test,
13372 -diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
13373 -index 72bdd3bf0d8e..c2fd3a5e6c55 100644
13374 ---- a/drivers/gpu/drm/radeon/radeon_asic.h
13375 -+++ b/drivers/gpu/drm/radeon/radeon_asic.h
13376 -@@ -919,6 +919,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
13377 - int uvd_v2_2_resume(struct radeon_device *rdev);
13378 - void uvd_v2_2_fence_emit(struct radeon_device *rdev,
13379 - struct radeon_fence *fence);
13380 -+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
13381 -+ struct radeon_ring *ring,
13382 -+ struct radeon_semaphore *semaphore,
13383 -+ bool emit_wait);
13384 -
13385 - /* uvd v3.1 */
13386 - bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
13387 -diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
13388 -index b7d33a13db9f..b7c6bb69f3c7 100644
13389 ---- a/drivers/gpu/drm/radeon/radeon_audio.c
13390 -+++ b/drivers/gpu/drm/radeon/radeon_audio.c
13391 -@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector,
13392 - return;
13393 -
13394 - rdev = connector->encoder->dev->dev_private;
13395 -+
13396 -+ if (!radeon_audio_chipset_supported(rdev))
13397 -+ return;
13398 -+
13399 - radeon_encoder = to_radeon_encoder(connector->encoder);
13400 - dig = radeon_encoder->enc_priv;
13401 -
13402 -diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
13403 -index b292aca0f342..edafd3c2b170 100644
13404 ---- a/drivers/gpu/drm/radeon/radeon_ttm.c
13405 -+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
13406 -@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
13407 - {
13408 - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
13409 - struct radeon_ttm_tt *gtt = (void *)ttm;
13410 -- struct scatterlist *sg;
13411 -- int i;
13412 -+ struct sg_page_iter sg_iter;
13413 -
13414 - int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
13415 - enum dma_data_direction direction = write ?
13416 -@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
13417 - /* free the sg table and pages again */
13418 - dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
13419 -
13420 -- for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
13421 -- struct page *page = sg_page(sg);
13422 --
13423 -+ for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
13424 -+ struct page *page = sg_page_iter_page(&sg_iter);
13425 - if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
13426 - set_page_dirty(page);
13427 -
13428 -diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
13429 -index c10b2aec6450..cd630287cf0a 100644
13430 ---- a/drivers/gpu/drm/radeon/radeon_uvd.c
13431 -+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
13432 -@@ -396,6 +396,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
13433 - return 0;
13434 - }
13435 -
13436 -+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
13437 -+ unsigned stream_type)
13438 -+{
13439 -+ switch (stream_type) {
13440 -+ case 0: /* H264 */
13441 -+ case 1: /* VC1 */
13442 -+ /* always supported */
13443 -+ return 0;
13444 -+
13445 -+ case 3: /* MPEG2 */
13446 -+ case 4: /* MPEG4 */
13447 -+ /* only since UVD 3 */
13448 -+ if (p->rdev->family >= CHIP_PALM)
13449 -+ return 0;
13450 -+
13451 -+ /* fall through */
13452 -+ default:
13453 -+ DRM_ERROR("UVD codec not supported by hardware %d!\n",
13454 -+ stream_type);
13455 -+ return -EINVAL;
13456 -+ }
13457 -+}
13458 -+
13459 - static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
13460 - unsigned offset, unsigned buf_sizes[])
13461 - {
13462 -@@ -436,50 +459,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
13463 - return -EINVAL;
13464 - }
13465 -
13466 -- if (msg_type == 1) {
13467 -- /* it's a decode msg, calc buffer sizes */
13468 -- r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
13469 -- /* calc image size (width * height) */
13470 -- img_size = msg[6] * msg[7];
13471 -+ switch (msg_type) {
13472 -+ case 0:
13473 -+ /* it's a create msg, calc image size (width * height) */
13474 -+ img_size = msg[7] * msg[8];
13475 -+
13476 -+ r = radeon_uvd_validate_codec(p, msg[4]);
13477 -+ radeon_bo_kunmap(bo);
13478 -+ if (r)
13479 -+ return r;
13480 -+
13481 -+ /* try to alloc a new handle */
13482 -+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
13483 -+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
13484 -+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
13485 -+ return -EINVAL;
13486 -+ }
13487 -+
13488 -+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
13489 -+ p->rdev->uvd.filp[i] = p->filp;
13490 -+ p->rdev->uvd.img_size[i] = img_size;
13491 -+ return 0;
13492 -+ }
13493 -+ }
13494 -+
13495 -+ DRM_ERROR("No more free UVD handles!\n");
13496 -+ return -EINVAL;
13497 -+
13498 -+ case 1:
13499 -+ /* it's a decode msg, validate codec and calc buffer sizes */
13500 -+ r = radeon_uvd_validate_codec(p, msg[4]);
13501 -+ if (!r)
13502 -+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
13503 - radeon_bo_kunmap(bo);
13504 - if (r)
13505 - return r;
13506 -
13507 -- } else if (msg_type == 2) {
13508 -+ /* validate the handle */
13509 -+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
13510 -+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
13511 -+ if (p->rdev->uvd.filp[i] != p->filp) {
13512 -+ DRM_ERROR("UVD handle collision detected!\n");
13513 -+ return -EINVAL;
13514 -+ }
13515 -+ return 0;
13516 -+ }
13517 -+ }
13518 -+
13519 -+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
13520 -+ return -ENOENT;
13521 -+
13522 -+ case 2:
13523 - /* it's a destroy msg, free the handle */
13524 - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
13525 - atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
13526 - radeon_bo_kunmap(bo);
13527 - return 0;
13528 -- } else {
13529 -- /* it's a create msg, calc image size (width * height) */
13530 -- img_size = msg[7] * msg[8];
13531 -- radeon_bo_kunmap(bo);
13532 -
13533 -- if (msg_type != 0) {
13534 -- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
13535 -- return -EINVAL;
13536 -- }
13537 --
13538 -- /* it's a create msg, no special handling needed */
13539 -- }
13540 --
13541 -- /* create or decode, validate the handle */
13542 -- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
13543 -- if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
13544 -- return 0;
13545 -- }
13546 -+ default:
13547 -
13548 -- /* handle not found try to alloc a new one */
13549 -- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
13550 -- if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
13551 -- p->rdev->uvd.filp[i] = p->filp;
13552 -- p->rdev->uvd.img_size[i] = img_size;
13553 -- return 0;
13554 -- }
13555 -+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
13556 -+ return -EINVAL;
13557 - }
13558 -
13559 -- DRM_ERROR("No more free UVD handles!\n");
13560 -+ BUG();
13561 - return -EINVAL;
13562 - }
13563 -
13564 -diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
13565 -index 976fe432f4e2..7ed561225007 100644
13566 ---- a/drivers/gpu/drm/radeon/radeon_vce.c
13567 -+++ b/drivers/gpu/drm/radeon/radeon_vce.c
13568 -@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
13569 - *
13570 - * @p: parser context
13571 - * @handle: handle to validate
13572 -+ * @allocated: allocated a new handle?
13573 - *
13574 - * Validates the handle and returns the found session index or -EINVAL
13575 - * when we don't have another free session index.
13576 - */
13577 --int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
13578 -+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
13579 -+ uint32_t handle, bool *allocated)
13580 - {
13581 - unsigned i;
13582 -
13583 -+ *allocated = false;
13584 -+
13585 - /* validate the handle */
13586 - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
13587 -- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
13588 -+ if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
13589 -+ if (p->rdev->vce.filp[i] != p->filp) {
13590 -+ DRM_ERROR("VCE handle collision detected!\n");
13591 -+ return -EINVAL;
13592 -+ }
13593 - return i;
13594 -+ }
13595 - }
13596 -
13597 - /* handle not found try to alloc a new one */
13598 -@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
13599 - if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
13600 - p->rdev->vce.filp[i] = p->filp;
13601 - p->rdev->vce.img_size[i] = 0;
13602 -+ *allocated = true;
13603 - return i;
13604 - }
13605 - }
13606 -@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
13607 - int radeon_vce_cs_parse(struct radeon_cs_parser *p)
13608 - {
13609 - int session_idx = -1;
13610 -- bool destroyed = false;
13611 -+ bool destroyed = false, created = false, allocated = false;
13612 - uint32_t tmp, handle = 0;
13613 - uint32_t *size = &tmp;
13614 -- int i, r;
13615 -+ int i, r = 0;
13616 -
13617 - while (p->idx < p->chunk_ib->length_dw) {
13618 - uint32_t len = radeon_get_ib_value(p, p->idx);
13619 -@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
13620 -
13621 - if ((len < 8) || (len & 3)) {
13622 - DRM_ERROR("invalid VCE command length (%d)!\n", len);
13623 -- return -EINVAL;
13624 -+ r = -EINVAL;
13625 -+ goto out;
13626 - }
13627 -
13628 - if (destroyed) {
13629 - DRM_ERROR("No other command allowed after destroy!\n");
13630 -- return -EINVAL;
13631 -+ r = -EINVAL;
13632 -+ goto out;
13633 - }
13634 -
13635 - switch (cmd) {
13636 - case 0x00000001: // session
13637 - handle = radeon_get_ib_value(p, p->idx + 2);
13638 -- session_idx = radeon_vce_validate_handle(p, handle);
13639 -+ session_idx = radeon_vce_validate_handle(p, handle,
13640 -+ &allocated);
13641 - if (session_idx < 0)
13642 - return session_idx;
13643 - size = &p->rdev->vce.img_size[session_idx];
13644 -@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
13645 - break;
13646 -
13647 - case 0x01000001: // create
13648 -+ created = true;
13649 -+ if (!allocated) {
13650 -+ DRM_ERROR("Handle already in use!\n");
13651 -+ r = -EINVAL;
13652 -+ goto out;
13653 -+ }
13654 -+
13655 - *size = radeon_get_ib_value(p, p->idx + 8) *
13656 - radeon_get_ib_value(p, p->idx + 10) *
13657 - 8 * 3 / 2;
13658 -@@ -577,12 +597,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
13659 - r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
13660 - *size);
13661 - if (r)
13662 -- return r;
13663 -+ goto out;
13664 -
13665 - r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
13666 - *size / 3);
13667 - if (r)
13668 -- return r;
13669 -+ goto out;
13670 - break;
13671 -
13672 - case 0x02000001: // destroy
13673 -@@ -593,7 +613,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
13674 - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
13675 - *size * 2);
13676 - if (r)
13677 -- return r;
13678 -+ goto out;
13679 - break;
13680 -
13681 - case 0x05000004: // video bitstream buffer
13682 -@@ -601,36 +621,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
13683 - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
13684 - tmp);
13685 - if (r)
13686 -- return r;
13687 -+ goto out;
13688 - break;
13689 -
13690 - case 0x05000005: // feedback buffer
13691 - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
13692 - 4096);
13693 - if (r)
13694 -- return r;
13695 -+ goto out;
13696 - break;
13697 -
13698 - default:
13699 - DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
13700 -- return -EINVAL;
13701 -+ r = -EINVAL;
13702 -+ goto out;
13703 - }
13704 -
13705 - if (session_idx == -1) {
13706 - DRM_ERROR("no session command at start of IB\n");
13707 -- return -EINVAL;
13708 -+ r = -EINVAL;
13709 -+ goto out;
13710 - }
13711 -
13712 - p->idx += len / 4;
13713 - }
13714 -
13715 -- if (destroyed) {
13716 -- /* IB contains a destroy msg, free the handle */
13717 -+ if (allocated && !created) {
13718 -+ DRM_ERROR("New session without create command!\n");
13719 -+ r = -ENOENT;
13720 -+ }
13721 -+
13722 -+out:
13723 -+ if ((!r && destroyed) || (r && allocated)) {
13724 -+ /*
13725 -+ * IB contains a destroy msg or we have allocated a
13726 -+ * handle and got an error; either way, free the handle
13727 -+ */
13728 - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
13729 - atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
13730 - }
13731 -
13732 -- return 0;
13733 -+ return r;
13734 - }
13735 -
13736 - /**
13737 -diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
13738 -index 3cf1e2921545..9ef2064b1c9c 100644
13739 ---- a/drivers/gpu/drm/radeon/rv770d.h
13740 -+++ b/drivers/gpu/drm/radeon/rv770d.h
13741 -@@ -989,6 +989,9 @@
13742 - ((n) & 0x3FFF) << 16)
13743 -
13744 - /* UVD */
13745 -+#define UVD_SEMA_ADDR_LOW 0xef00
13746 -+#define UVD_SEMA_ADDR_HIGH 0xef04
13747 -+#define UVD_SEMA_CMD 0xef08
13748 - #define UVD_GPCOM_VCPU_CMD 0xef0c
13749 - #define UVD_GPCOM_VCPU_DATA0 0xef10
13750 - #define UVD_GPCOM_VCPU_DATA1 0xef14
13751 -diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
13752 -index e72b3cb59358..c6b1cbca47fc 100644
13753 ---- a/drivers/gpu/drm/radeon/uvd_v1_0.c
13754 -+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
13755 -@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
13756 - struct radeon_semaphore *semaphore,
13757 - bool emit_wait)
13758 - {
13759 -- uint64_t addr = semaphore->gpu_addr;
13760 --
13761 -- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
13762 -- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
13763 --
13764 -- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
13765 -- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
13766 --
13767 -- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
13768 -- radeon_ring_write(ring, emit_wait ? 1 : 0);
13769 --
13770 -- return true;
13771 -+ /* disable semaphores for UVD V1 hardware */
13772 -+ return false;
13773 - }
13774 -
13775 - /**
13776 -diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
13777 -index 89193519f8a1..7ed778cec7c6 100644
13778 ---- a/drivers/gpu/drm/radeon/uvd_v2_2.c
13779 -+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
13780 -@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
13781 - }
13782 -
13783 - /**
13784 -+ * uvd_v2_2_semaphore_emit - emit semaphore command
13785 -+ *
13786 -+ * @rdev: radeon_device pointer
13787 -+ * @ring: radeon_ring pointer
13788 -+ * @semaphore: semaphore to emit commands for
13789 -+ * @emit_wait: true if we should emit a wait command
13790 -+ *
13791 -+ * Emit a semaphore command (either wait or signal) to the UVD ring.
13792 -+ */
13793 -+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
13794 -+ struct radeon_ring *ring,
13795 -+ struct radeon_semaphore *semaphore,
13796 -+ bool emit_wait)
13797 -+{
13798 -+ uint64_t addr = semaphore->gpu_addr;
13799 -+
13800 -+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
13801 -+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
13802 -+
13803 -+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
13804 -+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
13805 -+
13806 -+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
13807 -+ radeon_ring_write(ring, emit_wait ? 1 : 0);
13808 -+
13809 -+ return true;
13810 -+}
13811 -+
13812 -+/**
13813 - * uvd_v2_2_resume - memory controller programming
13814 - *
13815 - * @rdev: radeon_device pointer
13816 -diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
13817 -index d570030d899c..06441a43c3aa 100644
13818 ---- a/drivers/infiniband/core/cma.c
13819 -+++ b/drivers/infiniband/core/cma.c
13820 -@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
13821 - memcpy(&ib->sib_addr, &path->dgid, 16);
13822 - }
13823 -
13824 -+static __be16 ss_get_port(const struct sockaddr_storage *ss)
13825 -+{
13826 -+ if (ss->ss_family == AF_INET)
13827 -+ return ((struct sockaddr_in *)ss)->sin_port;
13828 -+ else if (ss->ss_family == AF_INET6)
13829 -+ return ((struct sockaddr_in6 *)ss)->sin6_port;
13830 -+ BUG();
13831 -+}
13832 -+
13833 - static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
13834 - struct cma_hdr *hdr)
13835 - {
13836 -- struct sockaddr_in *listen4, *ip4;
13837 -+ struct sockaddr_in *ip4;
13838 -
13839 -- listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
13840 - ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
13841 -- ip4->sin_family = listen4->sin_family;
13842 -+ ip4->sin_family = AF_INET;
13843 - ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
13844 -- ip4->sin_port = listen4->sin_port;
13845 -+ ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
13846 -
13847 - ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
13848 -- ip4->sin_family = listen4->sin_family;
13849 -+ ip4->sin_family = AF_INET;
13850 - ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
13851 - ip4->sin_port = hdr->port;
13852 - }
13853 -@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
13854 - static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
13855 - struct cma_hdr *hdr)
13856 - {
13857 -- struct sockaddr_in6 *listen6, *ip6;
13858 -+ struct sockaddr_in6 *ip6;
13859 -
13860 -- listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
13861 - ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
13862 -- ip6->sin6_family = listen6->sin6_family;
13863 -+ ip6->sin6_family = AF_INET6;
13864 - ip6->sin6_addr = hdr->dst_addr.ip6;
13865 -- ip6->sin6_port = listen6->sin6_port;
13866 -+ ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
13867 -
13868 - ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
13869 -- ip6->sin6_family = listen6->sin6_family;
13870 -+ ip6->sin6_family = AF_INET6;
13871 - ip6->sin6_addr = hdr->src_addr.ip6;
13872 - ip6->sin6_port = hdr->port;
13873 - }
13874 -diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
13875 -index 414739295d04..713a96237a80 100644
13876 ---- a/drivers/md/dm-crypt.c
13877 -+++ b/drivers/md/dm-crypt.c
13878 -@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
13879 -
13880 - switch (r) {
13881 - /* async */
13882 -- case -EINPROGRESS:
13883 - case -EBUSY:
13884 - wait_for_completion(&ctx->restart);
13885 - reinit_completion(&ctx->restart);
13886 -+ /* fall through */
13887 -+ case -EINPROGRESS:
13888 - ctx->req = NULL;
13889 - ctx->cc_sector++;
13890 - continue;
13891 -@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
13892 - struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
13893 - struct crypt_config *cc = io->cc;
13894 -
13895 -- if (error == -EINPROGRESS)
13896 -+ if (error == -EINPROGRESS) {
13897 -+ complete(&ctx->restart);
13898 - return;
13899 -+ }
13900 -
13901 - if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
13902 - error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
13903 -@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
13904 - crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
13905 -
13906 - if (!atomic_dec_and_test(&ctx->cc_pending))
13907 -- goto done;
13908 -+ return;
13909 -
13910 - if (bio_data_dir(io->base_bio) == READ)
13911 - kcryptd_crypt_read_done(io);
13912 - else
13913 - kcryptd_crypt_write_io_submit(io, 1);
13914 --done:
13915 -- if (!completion_done(&ctx->restart))
13916 -- complete(&ctx->restart);
13917 - }
13918 -
13919 - static void kcryptd_crypt(struct work_struct *work)
13920 -diff --git a/drivers/md/md.c b/drivers/md/md.c
13921 -index e6178787ce3d..e47d1dd046da 100644
13922 ---- a/drivers/md/md.c
13923 -+++ b/drivers/md/md.c
13924 -@@ -4754,12 +4754,12 @@ static void md_free(struct kobject *ko)
13925 - if (mddev->sysfs_state)
13926 - sysfs_put(mddev->sysfs_state);
13927 -
13928 -+ if (mddev->queue)
13929 -+ blk_cleanup_queue(mddev->queue);
13930 - if (mddev->gendisk) {
13931 - del_gendisk(mddev->gendisk);
13932 - put_disk(mddev->gendisk);
13933 - }
13934 -- if (mddev->queue)
13935 -- blk_cleanup_queue(mddev->queue);
13936 -
13937 - kfree(mddev);
13938 - }
13939 -diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
13940 -index dd5b1415f974..f902eb4ee569 100644
13941 ---- a/drivers/media/platform/marvell-ccic/mcam-core.c
13942 -+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
13943 -@@ -116,8 +116,8 @@ static struct mcam_format_struct {
13944 - .planar = false,
13945 - },
13946 - {
13947 -- .desc = "UYVY 4:2:2",
13948 -- .pixelformat = V4L2_PIX_FMT_UYVY,
13949 -+ .desc = "YVYU 4:2:2",
13950 -+ .pixelformat = V4L2_PIX_FMT_YVYU,
13951 - .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
13952 - .bpp = 2,
13953 - .planar = false,
13954 -@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
13955 -
13956 - switch (fmt->pixelformat) {
13957 - case V4L2_PIX_FMT_YUYV:
13958 -- case V4L2_PIX_FMT_UYVY:
13959 -+ case V4L2_PIX_FMT_YVYU:
13960 - widthy = fmt->width * 2;
13961 - widthuv = 0;
13962 - break;
13963 -@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
13964 - case V4L2_PIX_FMT_YUV420:
13965 - case V4L2_PIX_FMT_YVU420:
13966 - mcam_reg_write_mask(cam, REG_CTRL0,
13967 -- C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
13968 -+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
13969 - break;
13970 - case V4L2_PIX_FMT_YUYV:
13971 - mcam_reg_write_mask(cam, REG_CTRL0,
13972 -- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
13973 -+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
13974 - break;
13975 -- case V4L2_PIX_FMT_UYVY:
13976 -+ case V4L2_PIX_FMT_YVYU:
13977 - mcam_reg_write_mask(cam, REG_CTRL0,
13978 -- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
13979 -+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
13980 - break;
13981 - case V4L2_PIX_FMT_JPEG:
13982 - mcam_reg_write_mask(cam, REG_CTRL0,
13983 -diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
13984 -index aa0c6eac254a..7ffdf4dbaf8c 100644
13985 ---- a/drivers/media/platform/marvell-ccic/mcam-core.h
13986 -+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
13987 -@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
13988 - #define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
13989 - #define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
13990 - #define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
13991 --#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
13992 --#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
13993 --#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
13994 --#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
13995 -+#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
13996 -+#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
13997 -+#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
13998 -+#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
13999 - /* Bayer bits 18,19 if needed */
14000 - #define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
14001 - #define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
14002 -diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
14003 -index c69afb5e264e..ed2e71a74a58 100644
14004 ---- a/drivers/mmc/card/block.c
14005 -+++ b/drivers/mmc/card/block.c
14006 -@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
14007 - md->reset_done &= ~type;
14008 - }
14009 -
14010 -+int mmc_access_rpmb(struct mmc_queue *mq)
14011 -+{
14012 -+ struct mmc_blk_data *md = mq->data;
14013 -+ /*
14014 -+ * If this is an RPMB partition access, return true
14015 -+ */
14016 -+ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
14017 -+ return true;
14018 -+
14019 -+ return false;
14020 -+}
14021 -+
14022 - static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
14023 - {
14024 - struct mmc_blk_data *md = mq->data;
14025 -diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
14026 -index 236d194c2883..8efa3684aef8 100644
14027 ---- a/drivers/mmc/card/queue.c
14028 -+++ b/drivers/mmc/card/queue.c
14029 -@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
14030 - return BLKPREP_KILL;
14031 - }
14032 -
14033 -- if (mq && mmc_card_removed(mq->card))
14034 -+ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
14035 - return BLKPREP_KILL;
14036 -
14037 - req->cmd_flags |= REQ_DONTPREP;
14038 -diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
14039 -index 5752d50049a3..99e6521e6169 100644
14040 ---- a/drivers/mmc/card/queue.h
14041 -+++ b/drivers/mmc/card/queue.h
14042 -@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
14043 - extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
14044 - extern void mmc_packed_clean(struct mmc_queue *);
14045 -
14046 -+extern int mmc_access_rpmb(struct mmc_queue *);
14047 -+
14048 - #endif
14049 -diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
14050 -index 23f10f72e5f3..57a8d00672d3 100644
14051 ---- a/drivers/mmc/core/core.c
14052 -+++ b/drivers/mmc/core/core.c
14053 -@@ -2648,6 +2648,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
14054 - switch (mode) {
14055 - case PM_HIBERNATION_PREPARE:
14056 - case PM_SUSPEND_PREPARE:
14057 -+ case PM_RESTORE_PREPARE:
14058 - spin_lock_irqsave(&host->lock, flags);
14059 - host->rescan_disable = 1;
14060 - spin_unlock_irqrestore(&host->lock, flags);
14061 -diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
14062 -index 7d9d6a321521..5165ae75d540 100644
14063 ---- a/drivers/mmc/host/sh_mmcif.c
14064 -+++ b/drivers/mmc/host/sh_mmcif.c
14065 -@@ -1402,7 +1402,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
14066 - host = mmc_priv(mmc);
14067 - host->mmc = mmc;
14068 - host->addr = reg;
14069 -- host->timeout = msecs_to_jiffies(1000);
14070 -+ host->timeout = msecs_to_jiffies(10000);
14071 - host->ccs_enable = !pd || !pd->ccs_unsupported;
14072 - host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
14073 -
14074 -diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
14075 -index 89dca77ca038..18ee2089df4a 100644
14076 ---- a/drivers/pinctrl/core.c
14077 -+++ b/drivers/pinctrl/core.c
14078 -@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
14079 - EXPORT_SYMBOL_GPL(devm_pinctrl_put);
14080 -
14081 - int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
14082 -- bool dup, bool locked)
14083 -+ bool dup)
14084 - {
14085 - int i, ret;
14086 - struct pinctrl_maps *maps_node;
14087 -@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
14088 - maps_node->maps = maps;
14089 - }
14090 -
14091 -- if (!locked)
14092 -- mutex_lock(&pinctrl_maps_mutex);
14093 -+ mutex_lock(&pinctrl_maps_mutex);
14094 - list_add_tail(&maps_node->node, &pinctrl_maps);
14095 -- if (!locked)
14096 -- mutex_unlock(&pinctrl_maps_mutex);
14097 -+ mutex_unlock(&pinctrl_maps_mutex);
14098 -
14099 - return 0;
14100 - }
14101 -@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
14102 - int pinctrl_register_mappings(struct pinctrl_map const *maps,
14103 - unsigned num_maps)
14104 - {
14105 -- return pinctrl_register_map(maps, num_maps, true, false);
14106 -+ return pinctrl_register_map(maps, num_maps, true);
14107 - }
14108 -
14109 - void pinctrl_unregister_map(struct pinctrl_map const *map)
14110 -diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
14111 -index 75476b3d87da..b24ea846c867 100644
14112 ---- a/drivers/pinctrl/core.h
14113 -+++ b/drivers/pinctrl/core.h
14114 -@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
14115 - }
14116 -
14117 - int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
14118 -- bool dup, bool locked);
14119 -+ bool dup);
14120 - void pinctrl_unregister_map(struct pinctrl_map const *map);
14121 -
14122 - extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
14123 -diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
14124 -index eda13de2e7c0..0bbf7d71b281 100644
14125 ---- a/drivers/pinctrl/devicetree.c
14126 -+++ b/drivers/pinctrl/devicetree.c
14127 -@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
14128 - dt_map->num_maps = num_maps;
14129 - list_add_tail(&dt_map->node, &p->dt_maps);
14130 -
14131 -- return pinctrl_register_map(map, num_maps, false, true);
14132 -+ return pinctrl_register_map(map, num_maps, false);
14133 - }
14134 -
14135 - struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
14136 -diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
14137 -index 43e04af39e09..cb70ced7e0db 100644
14138 ---- a/drivers/rtc/rtc-armada38x.c
14139 -+++ b/drivers/rtc/rtc-armada38x.c
14140 -@@ -40,6 +40,13 @@ struct armada38x_rtc {
14141 - void __iomem *regs;
14142 - void __iomem *regs_soc;
14143 - spinlock_t lock;
14144 -+ /*
14145 -+ * While setting the time, the RTC TIME register should not be
14146 -+ * accessed. Setting the RTC time involves sleeping for
14147 -+ * 100ms, so a mutex instead of a spinlock is used to protect
14148 -+ * it.
14149 -+ */
14150 -+ struct mutex mutex_time;
14151 - int irq;
14152 - };
14153 -
14154 -@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
14155 - struct armada38x_rtc *rtc = dev_get_drvdata(dev);
14156 - unsigned long time, time_check, flags;
14157 -
14158 -- spin_lock_irqsave(&rtc->lock, flags);
14159 --
14160 -+ mutex_lock(&rtc->mutex_time);
14161 - time = readl(rtc->regs + RTC_TIME);
14162 - /*
14163 - * WA for failing time set attempts. As stated in HW ERRATA if
14164 -@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
14165 - if ((time_check - time) > 1)
14166 - time_check = readl(rtc->regs + RTC_TIME);
14167 -
14168 -- spin_unlock_irqrestore(&rtc->lock, flags);
14169 -+ mutex_unlock(&rtc->mutex_time);
14170 -
14171 - rtc_time_to_tm(time_check, tm);
14172 -
14173 -@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
14174 - * then wait for 100ms before writing to the time register to be
14175 - * sure that the data will be taken into account.
14176 - */
14177 -- spin_lock_irqsave(&rtc->lock, flags);
14178 --
14179 -+ mutex_lock(&rtc->mutex_time);
14180 - rtc_delayed_write(0, rtc, RTC_STATUS);
14181 --
14182 -- spin_unlock_irqrestore(&rtc->lock, flags);
14183 --
14184 - msleep(100);
14185 --
14186 -- spin_lock_irqsave(&rtc->lock, flags);
14187 --
14188 - rtc_delayed_write(time, rtc, RTC_TIME);
14189 -+ mutex_unlock(&rtc->mutex_time);
14190 -
14191 -- spin_unlock_irqrestore(&rtc->lock, flags);
14192 - out:
14193 - return ret;
14194 - }
14195 -@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
14196 - return -ENOMEM;
14197 -
14198 - spin_lock_init(&rtc->lock);
14199 -+ mutex_init(&rtc->mutex_time);
14200 -
14201 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
14202 - rtc->regs = devm_ioremap_resource(&pdev->dev, res);
14203 -diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
14204 -index f1e57425e39f..5bab1c684bb1 100644
14205 ---- a/drivers/tty/hvc/hvc_xen.c
14206 -+++ b/drivers/tty/hvc/hvc_xen.c
14207 -@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
14208 - return 0;
14209 - }
14210 -
14211 -+static void xen_console_update_evtchn(struct xencons_info *info)
14212 -+{
14213 -+ if (xen_hvm_domain()) {
14214 -+ uint64_t v;
14215 -+ int err;
14216 -+
14217 -+ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
14218 -+ if (!err && v)
14219 -+ info->evtchn = v;
14220 -+ } else
14221 -+ info->evtchn = xen_start_info->console.domU.evtchn;
14222 -+}
14223 -+
14224 - void xen_console_resume(void)
14225 - {
14226 - struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
14227 -- if (info != NULL && info->irq)
14228 -+ if (info != NULL && info->irq) {
14229 -+ if (!xen_initial_domain())
14230 -+ xen_console_update_evtchn(info);
14231 - rebind_evtchn_irq(info->evtchn, info->irq);
14232 -+ }
14233 - }
14234 -
14235 - static void xencons_disconnect_backend(struct xencons_info *info)
14236 -diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
14237 -index 4cde85501444..837d1778970b 100644
14238 ---- a/drivers/vfio/vfio.c
14239 -+++ b/drivers/vfio/vfio.c
14240 -@@ -711,6 +711,8 @@ void *vfio_del_group_dev(struct device *dev)
14241 - void *device_data = device->device_data;
14242 - struct vfio_unbound_dev *unbound;
14243 - unsigned int i = 0;
14244 -+ long ret;
14245 -+ bool interrupted = false;
14246 -
14247 - /*
14248 - * The group exists so long as we have a device reference. Get
14249 -@@ -756,9 +758,22 @@ void *vfio_del_group_dev(struct device *dev)
14250 -
14251 - vfio_device_put(device);
14252 -
14253 -- } while (wait_event_interruptible_timeout(vfio.release_q,
14254 -- !vfio_dev_present(group, dev),
14255 -- HZ * 10) <= 0);
14256 -+ if (interrupted) {
14257 -+ ret = wait_event_timeout(vfio.release_q,
14258 -+ !vfio_dev_present(group, dev), HZ * 10);
14259 -+ } else {
14260 -+ ret = wait_event_interruptible_timeout(vfio.release_q,
14261 -+ !vfio_dev_present(group, dev), HZ * 10);
14262 -+ if (ret == -ERESTARTSYS) {
14263 -+ interrupted = true;
14264 -+ dev_warn(dev,
14265 -+ "Device is currently in use, task"
14266 -+ " \"%s\" (%d) "
14267 -+ "blocked until device is released",
14268 -+ current->comm, task_pid_nr(current));
14269 -+ }
14270 -+ }
14271 -+ } while (ret <= 0);
14272 -
14273 - vfio_group_put(group);
14274 -
14275 -diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
14276 -index 5db43fc100a4..7dd46312c180 100644
14277 ---- a/drivers/xen/events/events_2l.c
14278 -+++ b/drivers/xen/events/events_2l.c
14279 -@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
14280 - return IRQ_HANDLED;
14281 - }
14282 -
14283 -+static void evtchn_2l_resume(void)
14284 -+{
14285 -+ int i;
14286 -+
14287 -+ for_each_online_cpu(i)
14288 -+ memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
14289 -+ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
14290 -+}
14291 -+
14292 - static const struct evtchn_ops evtchn_ops_2l = {
14293 - .max_channels = evtchn_2l_max_channels,
14294 - .nr_channels = evtchn_2l_max_channels,
14295 -@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
14296 - .mask = evtchn_2l_mask,
14297 - .unmask = evtchn_2l_unmask,
14298 - .handle_events = evtchn_2l_handle_events,
14299 -+ .resume = evtchn_2l_resume,
14300 - };
14301 -
14302 - void __init xen_evtchn_2l_init(void)
14303 -diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
14304 -index 70fba973a107..2b8553bd8715 100644
14305 ---- a/drivers/xen/events/events_base.c
14306 -+++ b/drivers/xen/events/events_base.c
14307 -@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
14308 - if (rc)
14309 - goto err;
14310 -
14311 -- bind_evtchn_to_cpu(evtchn, 0);
14312 - info->evtchn = evtchn;
14313 -+ bind_evtchn_to_cpu(evtchn, 0);
14314 -
14315 - rc = xen_evtchn_port_setup(info);
14316 - if (rc)
14317 -@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
14318 -
14319 - mutex_unlock(&irq_mapping_update_lock);
14320 -
14321 -- /* new event channels are always bound to cpu 0 */
14322 -- irq_set_affinity(irq, cpumask_of(0));
14323 -+ bind_evtchn_to_cpu(evtchn, info->cpu);
14324 -+ /* This will be deferred until interrupt is processed */
14325 -+ irq_set_affinity(irq, cpumask_of(info->cpu));
14326 -
14327 - /* Unmask the event channel. */
14328 - enable_irq(irq);
14329 -diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
14330 -index 75fe3d466515..9c234209d8b5 100644
14331 ---- a/drivers/xen/xen-pciback/conf_space.c
14332 -+++ b/drivers/xen/xen-pciback/conf_space.c
14333 -@@ -16,8 +16,8 @@
14334 - #include "conf_space.h"
14335 - #include "conf_space_quirks.h"
14336 -
14337 --bool permissive;
14338 --module_param(permissive, bool, 0644);
14339 -+bool xen_pcibk_permissive;
14340 -+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
14341 -
14342 - /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
14343 - * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
14344 -@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
14345 - * This means that some fields may still be read-only because
14346 - * they have entries in the config_field list that intercept
14347 - * the write and do nothing. */
14348 -- if (dev_data->permissive || permissive) {
14349 -+ if (dev_data->permissive || xen_pcibk_permissive) {
14350 - switch (size) {
14351 - case 1:
14352 - err = pci_write_config_byte(dev, offset,
14353 -diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
14354 -index 2e1d73d1d5d0..62461a8ba1d6 100644
14355 ---- a/drivers/xen/xen-pciback/conf_space.h
14356 -+++ b/drivers/xen/xen-pciback/conf_space.h
14357 -@@ -64,7 +64,7 @@ struct config_field_entry {
14358 - void *data;
14359 - };
14360 -
14361 --extern bool permissive;
14362 -+extern bool xen_pcibk_permissive;
14363 -
14364 - #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
14365 -
14366 -diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
14367 -index 2d7369391472..f8baf463dd35 100644
14368 ---- a/drivers/xen/xen-pciback/conf_space_header.c
14369 -+++ b/drivers/xen/xen-pciback/conf_space_header.c
14370 -@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
14371 -
14372 - cmd->val = value;
14373 -
14374 -- if (!permissive && (!dev_data || !dev_data->permissive))
14375 -+ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
14376 - return 0;
14377 -
14378 - /* Only allow the guest to control certain bits. */
14379 -diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
14380 -index 564b31584860..5390a674b5e3 100644
14381 ---- a/drivers/xen/xenbus/xenbus_probe.c
14382 -+++ b/drivers/xen/xenbus/xenbus_probe.c
14383 -@@ -57,6 +57,7 @@
14384 - #include <xen/xen.h>
14385 - #include <xen/xenbus.h>
14386 - #include <xen/events.h>
14387 -+#include <xen/xen-ops.h>
14388 - #include <xen/page.h>
14389 -
14390 - #include <xen/hvm.h>
14391 -@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
14392 - return err;
14393 - }
14394 -
14395 -+static int xenbus_resume_cb(struct notifier_block *nb,
14396 -+ unsigned long action, void *data)
14397 -+{
14398 -+ int err = 0;
14399 -+
14400 -+ if (xen_hvm_domain()) {
14401 -+ uint64_t v;
14402 -+
14403 -+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
14404 -+ if (!err && v)
14405 -+ xen_store_evtchn = v;
14406 -+ else
14407 -+ pr_warn("Cannot update xenstore event channel: %d\n",
14408 -+ err);
14409 -+ } else
14410 -+ xen_store_evtchn = xen_start_info->store_evtchn;
14411 -+
14412 -+ return err;
14413 -+}
14414 -+
14415 -+static struct notifier_block xenbus_resume_nb = {
14416 -+ .notifier_call = xenbus_resume_cb,
14417 -+};
14418 -+
14419 - static int __init xenbus_init(void)
14420 - {
14421 - int err = 0;
14422 -@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
14423 - goto out_error;
14424 - }
14425 -
14426 -+ if ((xen_store_domain_type != XS_LOCAL) &&
14427 -+ (xen_store_domain_type != XS_UNKNOWN))
14428 -+ xen_resume_notifier_register(&xenbus_resume_nb);
14429 -+
14430 - #ifdef CONFIG_XEN_COMPAT_XENFS
14431 - /*
14432 - * Create xenfs mountpoint in /proc for compatibility with
14433 -diff --git a/fs/coredump.c b/fs/coredump.c
14434 -index f319926ddf8c..bbbe139ab280 100644
14435 ---- a/fs/coredump.c
14436 -+++ b/fs/coredump.c
14437 -@@ -657,7 +657,7 @@ void do_coredump(const siginfo_t *siginfo)
14438 - */
14439 - if (!uid_eq(inode->i_uid, current_fsuid()))
14440 - goto close_fail;
14441 -- if (!cprm.file->f_op->write)
14442 -+ if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
14443 - goto close_fail;
14444 - if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
14445 - goto close_fail;
14446 -diff --git a/fs/namei.c b/fs/namei.c
14447 -index caa38a24e1f7..50a8583e8156 100644
14448 ---- a/fs/namei.c
14449 -+++ b/fs/namei.c
14450 -@@ -3228,7 +3228,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
14451 -
14452 - if (unlikely(file->f_flags & __O_TMPFILE)) {
14453 - error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
14454 -- goto out;
14455 -+ goto out2;
14456 - }
14457 -
14458 - error = path_init(dfd, pathname->name, flags, nd);
14459 -@@ -3258,6 +3258,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
14460 - }
14461 - out:
14462 - path_cleanup(nd);
14463 -+out2:
14464 - if (!(opened & FILE_OPENED)) {
14465 - BUG_ON(!error);
14466 - put_filp(file);
14467 -diff --git a/fs/namespace.c b/fs/namespace.c
14468 -index 4622ee32a5e2..38ed1e1bed41 100644
14469 ---- a/fs/namespace.c
14470 -+++ b/fs/namespace.c
14471 -@@ -3178,6 +3178,12 @@ bool fs_fully_visible(struct file_system_type *type)
14472 - if (mnt->mnt.mnt_sb->s_type != type)
14473 - continue;
14474 -
14475 -+ /* This mount is not fully visible if its root directory
14476 -+ * is not the root directory of the filesystem.
14477 -+ */
14478 -+ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
14479 -+ continue;
14480 -+
14481 - /* This mount is not fully visible if there are any child mounts
14482 - * that cover anything except for empty directories.
14483 - */
14484 -diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
14485 -index ecdbae19a766..090d8ce25bd1 100644
14486 ---- a/fs/nilfs2/btree.c
14487 -+++ b/fs/nilfs2/btree.c
14488 -@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
14489 - nchildren = nilfs_btree_node_get_nchildren(node);
14490 -
14491 - if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
14492 -- level > NILFS_BTREE_LEVEL_MAX ||
14493 -+ level >= NILFS_BTREE_LEVEL_MAX ||
14494 - nchildren < 0 ||
14495 - nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
14496 - pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
14497 -diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
14498 -index a6944b25fd5b..fdf4b41d0609 100644
14499 ---- a/fs/ocfs2/dlm/dlmmaster.c
14500 -+++ b/fs/ocfs2/dlm/dlmmaster.c
14501 -@@ -757,6 +757,19 @@ lookup:
14502 - if (tmpres) {
14503 - spin_unlock(&dlm->spinlock);
14504 - spin_lock(&tmpres->spinlock);
14505 -+
14506 -+ /*
14507 -+ * Right after dlm spinlock was released, dlm_thread could have
14508 -+ * purged the lockres. Check if the lockres got unhashed. If so,
14509 -+ * start over.
14510 -+ */
14511 -+ if (hlist_unhashed(&tmpres->hash_node)) {
14512 -+ spin_unlock(&tmpres->spinlock);
14513 -+ dlm_lockres_put(tmpres);
14514 -+ tmpres = NULL;
14515 -+ goto lookup;
14516 -+ }
14517 -+
14518 - /* Wait on the thread that is mastering the resource */
14519 - if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
14520 - __dlm_wait_on_lockres(tmpres);
14521 -diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
14522 -index d56f5d722138..65aa4fa0ae4e 100644
14523 ---- a/include/acpi/acpixf.h
14524 -+++ b/include/acpi/acpixf.h
14525 -@@ -431,13 +431,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
14526 - ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
14527 -
14528 - ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
14529 -- acpi_find_root_pointer(acpi_size * rsdp_address))
14530 --
14531 -+ acpi_find_root_pointer(acpi_physical_address *
14532 -+ rsdp_address))
14533 - ACPI_EXTERNAL_RETURN_STATUS(acpi_status
14534 -- acpi_get_table_header(acpi_string signature,
14535 -- u32 instance,
14536 -- struct acpi_table_header
14537 -- *out_table_header))
14538 -+ acpi_get_table_header(acpi_string signature,
14539 -+ u32 instance,
14540 -+ struct acpi_table_header
14541 -+ *out_table_header))
14542 - ACPI_EXTERNAL_RETURN_STATUS(acpi_status
14543 - acpi_get_table(acpi_string signature, u32 instance,
14544 - struct acpi_table_header
14545 -diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
14546 -index ff3fea3194c6..9abb763e4b86 100644
14547 ---- a/include/linux/nilfs2_fs.h
14548 -+++ b/include/linux/nilfs2_fs.h
14549 -@@ -460,7 +460,7 @@ struct nilfs_btree_node {
14550 - /* level */
14551 - #define NILFS_BTREE_LEVEL_DATA 0
14552 - #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
14553 --#define NILFS_BTREE_LEVEL_MAX 14
14554 -+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
14555 -
14556 - /**
14557 - * struct nilfs_palloc_group_desc - block group descriptor
14558 -diff --git a/mm/memory-failure.c b/mm/memory-failure.c
14559 -index d487f8dc6d39..72a5224c8084 100644
14560 ---- a/mm/memory-failure.c
14561 -+++ b/mm/memory-failure.c
14562 -@@ -1141,10 +1141,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
14563 - * The check (unnecessarily) ignores LRU pages being isolated and
14564 - * walked by the page reclaim code, however that's not a big loss.
14565 - */
14566 -- if (!PageHuge(p) && !PageTransTail(p)) {
14567 -- if (!PageLRU(p))
14568 -- shake_page(p, 0);
14569 -- if (!PageLRU(p)) {
14570 -+ if (!PageHuge(p)) {
14571 -+ if (!PageLRU(hpage))
14572 -+ shake_page(hpage, 0);
14573 -+ if (!PageLRU(hpage)) {
14574 - /*
14575 - * shake_page could have turned it free.
14576 - */
14577 -@@ -1721,12 +1721,12 @@ int soft_offline_page(struct page *page, int flags)
14578 - } else if (ret == 0) { /* for free pages */
14579 - if (PageHuge(page)) {
14580 - set_page_hwpoison_huge_page(hpage);
14581 -- dequeue_hwpoisoned_huge_page(hpage);
14582 -- atomic_long_add(1 << compound_order(hpage),
14583 -+ if (!dequeue_hwpoisoned_huge_page(hpage))
14584 -+ atomic_long_add(1 << compound_order(hpage),
14585 - &num_poisoned_pages);
14586 - } else {
14587 -- SetPageHWPoison(page);
14588 -- atomic_long_inc(&num_poisoned_pages);
14589 -+ if (!TestSetPageHWPoison(page))
14590 -+ atomic_long_inc(&num_poisoned_pages);
14591 - }
14592 - }
14593 - unset_migratetype_isolate(page, MIGRATE_MOVABLE);
14594 -diff --git a/mm/page-writeback.c b/mm/page-writeback.c
14595 -index 644bcb665773..ad05f2f7bb65 100644
14596 ---- a/mm/page-writeback.c
14597 -+++ b/mm/page-writeback.c
14598 -@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
14599 - long x;
14600 -
14601 - x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
14602 -- limit - setpoint + 1);
14603 -+ (limit - setpoint) | 1);
14604 - pos_ratio = x;
14605 - pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
14606 - pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
14607 -@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
14608 - * scale global setpoint to bdi's:
14609 - * bdi_setpoint = setpoint * bdi_thresh / thresh
14610 - */
14611 -- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
14612 -+ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
14613 - bdi_setpoint = setpoint * (u64)x >> 16;
14614 - /*
14615 - * Use span=(8*write_bw) in single bdi case as indicated by
14616 -@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
14617 -
14618 - if (bdi_dirty < x_intercept - span / 4) {
14619 - pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
14620 -- x_intercept - bdi_setpoint + 1);
14621 -+ (x_intercept - bdi_setpoint) | 1);
14622 - } else
14623 - pos_ratio /= 4;
14624 -
14625 -diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
14626 -index c0eea1dfe90f..f19da4b47c1d 100644
14627 ---- a/sound/oss/sequencer.c
14628 -+++ b/sound/oss/sequencer.c
14629 -@@ -681,13 +681,8 @@ static int seq_timing_event(unsigned char *event_rec)
14630 - break;
14631 -
14632 - case TMR_ECHO:
14633 -- if (seq_mode == SEQ_2)
14634 -- seq_copy_to_input(event_rec, 8);
14635 -- else
14636 -- {
14637 -- parm = (parm << 8 | SEQ_ECHO);
14638 -- seq_copy_to_input((unsigned char *) &parm, 4);
14639 -- }
14640 -+ parm = (parm << 8 | SEQ_ECHO);
14641 -+ seq_copy_to_input((unsigned char *) &parm, 4);
14642 - break;
14643 -
14644 - default:;
14645 -@@ -1324,7 +1319,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
14646 - int mode = translate_mode(file);
14647 - struct synth_info inf;
14648 - struct seq_event_rec event_rec;
14649 -- unsigned long flags;
14650 - int __user *p = arg;
14651 -
14652 - orig_dev = dev = dev >> 4;
14653 -@@ -1479,9 +1473,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
14654 - case SNDCTL_SEQ_OUTOFBAND:
14655 - if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
14656 - return -EFAULT;
14657 -- spin_lock_irqsave(&lock,flags);
14658 - play_event(event_rec.arr);
14659 -- spin_unlock_irqrestore(&lock,flags);
14660 - return 0;
14661 -
14662 - case SNDCTL_MIDI_INFO:
14663
14664 diff --git a/1004_linux-4.0.5.patch b/1004_linux-4.0.5.patch
14665 deleted file mode 100644
14666 index 84509c0..0000000
14667 --- a/1004_linux-4.0.5.patch
14668 +++ /dev/null
14669 @@ -1,4937 +0,0 @@
14670 -diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
14671 -index 8eb88e974055..711f75e189eb 100644
14672 ---- a/Documentation/hwmon/tmp401
14673 -+++ b/Documentation/hwmon/tmp401
14674 -@@ -20,7 +20,7 @@ Supported chips:
14675 - Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
14676 - * Texas Instruments TMP435
14677 - Prefix: 'tmp435'
14678 -- Addresses scanned: I2C 0x37, 0x48 - 0x4f
14679 -+ Addresses scanned: I2C 0x48 - 0x4f
14680 - Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
14681 -
14682 - Authors:
14683 -diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
14684 -index 1e52d67d0abf..dbe6623fed1c 100644
14685 ---- a/Documentation/serial/tty.txt
14686 -+++ b/Documentation/serial/tty.txt
14687 -@@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
14688 -
14689 - TTY_OTHER_CLOSED Device is a pty and the other side has closed.
14690 -
14691 -+TTY_OTHER_DONE Device is a pty and the other side has closed and
14692 -+ all pending input processing has been completed.
14693 -+
14694 - TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
14695 - smaller chunks.
14696 -
14697 -diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
14698 -index 53838d9c6295..c59bd9bc41ef 100644
14699 ---- a/Documentation/virtual/kvm/mmu.txt
14700 -+++ b/Documentation/virtual/kvm/mmu.txt
14701 -@@ -169,6 +169,10 @@ Shadow pages contain the following information:
14702 - Contains the value of cr4.smep && !cr0.wp for which the page is valid
14703 - (pages for which this is true are different from other pages; see the
14704 - treatment of cr0.wp=0 below).
14705 -+ role.smap_andnot_wp:
14706 -+ Contains the value of cr4.smap && !cr0.wp for which the page is valid
14707 -+ (pages for which this is true are different from other pages; see the
14708 -+ treatment of cr0.wp=0 below).
14709 - gfn:
14710 - Either the guest page table containing the translations shadowed by this
14711 - page, or the base page frame for linear translations. See role.direct.
14712 -@@ -344,10 +348,16 @@ on fault type:
14713 -
14714 - (user write faults generate a #PF)
14715 -
14716 --In the first case there is an additional complication if CR4.SMEP is
14717 --enabled: since we've turned the page into a kernel page, the kernel may now
14718 --execute it. We handle this by also setting spte.nx. If we get a user
14719 --fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
14720 -+In the first case there are two additional complications:
14721 -+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
14722 -+ the kernel may now execute it. We handle this by also setting spte.nx.
14723 -+ If we get a user fetch or read fault, we'll change spte.u=1 and
14724 -+ spte.nx=gpte.nx back.
14725 -+- if CR4.SMAP is disabled: since the page has been changed to a kernel
14726 -+ page, it can not be reused when CR4.SMAP is enabled. We set
14727 -+ CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
14728 -+ here we do not care about the case where CR4.SMAP is enabled, since KVM will
14729 -+ directly inject #PF to guest due to failed permission check.
14730 -
14731 - To prevent an spte that was converted into a kernel page with cr0.wp=0
14732 - from being written by the kernel after cr0.wp has changed to 1, we make
14733 -diff --git a/Makefile b/Makefile
14734 -index 3d16bcc87585..1880cf77059b 100644
14735 ---- a/Makefile
14736 -+++ b/Makefile
14737 -@@ -1,6 +1,6 @@
14738 - VERSION = 4
14739 - PATCHLEVEL = 0
14740 --SUBLEVEL = 4
14741 -+SUBLEVEL = 5
14742 - EXTRAVERSION =
14743 - NAME = Hurr durr I'ma sheep
14744 -
14745 -diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
14746 -index 067551b6920a..9917a45fc430 100644
14747 ---- a/arch/arc/include/asm/atomic.h
14748 -+++ b/arch/arc/include/asm/atomic.h
14749 -@@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
14750 - atomic_ops_unlock(flags); \
14751 - }
14752 -
14753 --#define ATOMIC_OP_RETURN(op, c_op) \
14754 -+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
14755 - static inline int atomic_##op##_return(int i, atomic_t *v) \
14756 - { \
14757 - unsigned long flags; \
14758 -diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
14759 -index a1c776b8dcec..992ea0b063d5 100644
14760 ---- a/arch/arm/boot/dts/Makefile
14761 -+++ b/arch/arm/boot/dts/Makefile
14762 -@@ -215,7 +215,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
14763 - imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
14764 - imx25-karo-tx25.dtb \
14765 - imx25-pdk.dtb
14766 --dtb-$(CONFIG_SOC_IMX31) += \
14767 -+dtb-$(CONFIG_SOC_IMX27) += \
14768 - imx27-apf27.dtb \
14769 - imx27-apf27dev.dtb \
14770 - imx27-eukrea-mbimxsd27-baseboard.dtb \
14771 -diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
14772 -index 173ffa479ad3..792394dd0f2a 100644
14773 ---- a/arch/arm/boot/dts/exynos4412-trats2.dts
14774 -+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
14775 -@@ -736,7 +736,7 @@
14776 -
14777 - display-timings {
14778 - timing-0 {
14779 -- clock-frequency = <0>;
14780 -+ clock-frequency = <57153600>;
14781 - hactive = <720>;
14782 - vactive = <1280>;
14783 - hfront-porch = <5>;
14784 -diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
14785 -index 4b063b68db44..9ce1d2128749 100644
14786 ---- a/arch/arm/boot/dts/imx27.dtsi
14787 -+++ b/arch/arm/boot/dts/imx27.dtsi
14788 -@@ -531,7 +531,7 @@
14789 -
14790 - fec: ethernet@1002b000 {
14791 - compatible = "fsl,imx27-fec";
14792 -- reg = <0x1002b000 0x4000>;
14793 -+ reg = <0x1002b000 0x1000>;
14794 - interrupts = <50>;
14795 - clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
14796 - <&clks IMX27_CLK_FEC_AHB_GATE>;
14797 -diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
14798 -index f8ccc21fa032..4e7f40c577e6 100644
14799 ---- a/arch/arm/kernel/entry-common.S
14800 -+++ b/arch/arm/kernel/entry-common.S
14801 -@@ -33,7 +33,9 @@ ret_fast_syscall:
14802 - UNWIND(.fnstart )
14803 - UNWIND(.cantunwind )
14804 - disable_irq @ disable interrupts
14805 -- ldr r1, [tsk, #TI_FLAGS]
14806 -+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
14807 -+ tst r1, #_TIF_SYSCALL_WORK
14808 -+ bne __sys_trace_return
14809 - tst r1, #_TIF_WORK_MASK
14810 - bne fast_work_pending
14811 - asm_trace_hardirqs_on
14812 -diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
14813 -index 37266a826437..1f02bcb350e5 100644
14814 ---- a/arch/arm/mach-exynos/pm_domains.c
14815 -+++ b/arch/arm/mach-exynos/pm_domains.c
14816 -@@ -169,7 +169,7 @@ no_clk:
14817 - args.np = np;
14818 - args.args_count = 0;
14819 - child_domain = of_genpd_get_from_provider(&args);
14820 -- if (!child_domain)
14821 -+ if (IS_ERR(child_domain))
14822 - continue;
14823 -
14824 - if (of_parse_phandle_with_args(np, "power-domains",
14825 -@@ -177,7 +177,7 @@ no_clk:
14826 - continue;
14827 -
14828 - parent_domain = of_genpd_get_from_provider(&args);
14829 -- if (!parent_domain)
14830 -+ if (IS_ERR(parent_domain))
14831 - continue;
14832 -
14833 - if (pm_genpd_add_subdomain(parent_domain, child_domain))
14834 -diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
14835 -index 31d25834b9c4..cf950790fbdc 100644
14836 ---- a/arch/arm/mach-exynos/sleep.S
14837 -+++ b/arch/arm/mach-exynos/sleep.S
14838 -@@ -23,14 +23,7 @@
14839 - #define CPU_MASK 0xff0ffff0
14840 - #define CPU_CORTEX_A9 0x410fc090
14841 -
14842 -- /*
14843 -- * The following code is located into the .data section. This is to
14844 -- * allow l2x0_regs_phys to be accessed with a relative load while we
14845 -- * can't rely on any MMU translation. We could have put l2x0_regs_phys
14846 -- * in the .text section as well, but some setups might insist on it to
14847 -- * be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
14848 -- */
14849 -- .data
14850 -+ .text
14851 - .align
14852 -
14853 - /*
14854 -@@ -69,10 +62,12 @@ ENTRY(exynos_cpu_resume_ns)
14855 - cmp r0, r1
14856 - bne skip_cp15
14857 -
14858 -- adr r0, cp15_save_power
14859 -+ adr r0, _cp15_save_power
14860 - ldr r1, [r0]
14861 -- adr r0, cp15_save_diag
14862 -+ ldr r1, [r0, r1]
14863 -+ adr r0, _cp15_save_diag
14864 - ldr r2, [r0]
14865 -+ ldr r2, [r0, r2]
14866 - mov r0, #SMC_CMD_C15RESUME
14867 - dsb
14868 - smc #0
14869 -@@ -118,14 +113,20 @@ skip_l2x0:
14870 - skip_cp15:
14871 - b cpu_resume
14872 - ENDPROC(exynos_cpu_resume_ns)
14873 -+
14874 -+ .align
14875 -+_cp15_save_power:
14876 -+ .long cp15_save_power - .
14877 -+_cp15_save_diag:
14878 -+ .long cp15_save_diag - .
14879 -+#ifdef CONFIG_CACHE_L2X0
14880 -+1: .long l2x0_saved_regs - .
14881 -+#endif /* CONFIG_CACHE_L2X0 */
14882 -+
14883 -+ .data
14884 - .globl cp15_save_diag
14885 - cp15_save_diag:
14886 - .long 0 @ cp15 diagnostic
14887 - .globl cp15_save_power
14888 - cp15_save_power:
14889 - .long 0 @ cp15 power control
14890 --
14891 --#ifdef CONFIG_CACHE_L2X0
14892 -- .align
14893 --1: .long l2x0_saved_regs - .
14894 --#endif /* CONFIG_CACHE_L2X0 */
14895 -diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
14896 -index 4e6ef896c619..7186382672b5 100644
14897 ---- a/arch/arm/mm/mmu.c
14898 -+++ b/arch/arm/mm/mmu.c
14899 -@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
14900 - }
14901 -
14902 - /*
14903 -- * Find the first non-section-aligned page, and point
14904 -+ * Find the first non-pmd-aligned page, and point
14905 - * memblock_limit at it. This relies on rounding the
14906 -- * limit down to be section-aligned, which happens at
14907 -- * the end of this function.
14908 -+ * limit down to be pmd-aligned, which happens at the
14909 -+ * end of this function.
14910 - *
14911 - * With this algorithm, the start or end of almost any
14912 -- * bank can be non-section-aligned. The only exception
14913 -- * is that the start of the bank 0 must be section-
14914 -+ * bank can be non-pmd-aligned. The only exception is
14915 -+ * that the start of the bank 0 must be section-
14916 - * aligned, since otherwise memory would need to be
14917 - * allocated when mapping the start of bank 0, which
14918 - * occurs before any free memory is mapped.
14919 - */
14920 - if (!memblock_limit) {
14921 -- if (!IS_ALIGNED(block_start, SECTION_SIZE))
14922 -+ if (!IS_ALIGNED(block_start, PMD_SIZE))
14923 - memblock_limit = block_start;
14924 -- else if (!IS_ALIGNED(block_end, SECTION_SIZE))
14925 -+ else if (!IS_ALIGNED(block_end, PMD_SIZE))
14926 - memblock_limit = arm_lowmem_limit;
14927 - }
14928 -
14929 -@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
14930 - high_memory = __va(arm_lowmem_limit - 1) + 1;
14931 -
14932 - /*
14933 -- * Round the memblock limit down to a section size. This
14934 -+ * Round the memblock limit down to a pmd size. This
14935 - * helps to ensure that we will allocate memory from the
14936 -- * last full section, which should be mapped.
14937 -+ * last full pmd, which should be mapped.
14938 - */
14939 - if (memblock_limit)
14940 -- memblock_limit = round_down(memblock_limit, SECTION_SIZE);
14941 -+ memblock_limit = round_down(memblock_limit, PMD_SIZE);
14942 - if (!memblock_limit)
14943 - memblock_limit = arm_lowmem_limit;
14944 -
14945 -diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
14946 -index edba042b2325..dc6a4842683a 100644
14947 ---- a/arch/arm64/net/bpf_jit_comp.c
14948 -+++ b/arch/arm64/net/bpf_jit_comp.c
14949 -@@ -487,7 +487,7 @@ emit_cond_jmp:
14950 - return -EINVAL;
14951 - }
14952 -
14953 -- imm64 = (u64)insn1.imm << 32 | imm;
14954 -+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
14955 - emit_a64_mov_i64(dst, imm64, ctx);
14956 -
14957 - return 1;
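The one-character bpf_jit_comp.c fix above is a classic C integer-promotion pitfall: OR-ing a signed 32-bit immediate into a u64 sign-extends it first. A minimal stand-alone demonstration, with made-up immediate values rather than real JIT input:

	/* Stand-alone demo of the promotion bug fixed above (values are
	 * made up): OR-ing a negative 32-bit immediate into a u64 without
	 * first casting it to u32 sign-extends 1-bits over the high word. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int32_t  imm = -2;		/* low half of a 64-bit immediate */
		int32_t  imm_hi = 1;		/* high half (insn1.imm) */
		uint64_t buggy = (uint64_t)imm_hi << 32 | imm;
		uint64_t fixed = (uint64_t)imm_hi << 32 | (uint32_t)imm;

		printf("buggy: 0x%016llx\n", (unsigned long long)buggy);
		/* -> 0xfffffffffffffffe: the high half was destroyed */
		printf("fixed: 0x%016llx\n", (unsigned long long)fixed);
		/* -> 0x00000001fffffffe: both halves intact */
		return 0;
	}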
14958 -diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
14959 -index d2c09f6475c5..f20cedcb50f1 100644
14960 ---- a/arch/mips/kernel/elf.c
14961 -+++ b/arch/mips/kernel/elf.c
14962 -@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
14963 -
14964 - /* Lets see if this is an O32 ELF */
14965 - if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
14966 -- /* FR = 1 for N32 */
14967 -- if (ehdr32->e_flags & EF_MIPS_ABI2)
14968 -- state->overall_fp_mode = FP_FR1;
14969 -- else
14970 -- /* Set a good default FPU mode for O32 */
14971 -- state->overall_fp_mode = cpu_has_mips_r6 ?
14972 -- FP_FRE : FP_FR0;
14973 --
14974 - if (ehdr32->e_flags & EF_MIPS_FP64) {
14975 - /*
14976 - * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
14977 -@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
14978 - (char *)&abiflags,
14979 - sizeof(abiflags));
14980 - } else {
14981 -- /* FR=1 is really the only option for 64-bit */
14982 -- state->overall_fp_mode = FP_FR1;
14983 --
14984 - if (phdr64->p_type != PT_MIPS_ABIFLAGS)
14985 - return 0;
14986 - if (phdr64->p_filesz < sizeof(abiflags))
14987 -@@ -147,6 +136,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
14988 - struct elf32_hdr *ehdr = _ehdr;
14989 - struct mode_req prog_req, interp_req;
14990 - int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
14991 -+ bool is_mips64;
14992 -
14993 - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
14994 - return 0;
14995 -@@ -162,10 +152,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
14996 - abi0 = abi1 = fp_abi;
14997 - }
14998 -
14999 -- /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
15000 -- max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
15001 -- (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
15002 -- MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
15003 -+ is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
15004 -+ (ehdr->e_flags & EF_MIPS_ABI2);
15005 -+
15006 -+ if (is_mips64) {
15007 -+ /* MIPS64 code always uses FR=1, thus the default is easy */
15008 -+ state->overall_fp_mode = FP_FR1;
15009 -+
15010 -+ /* Disallow access to the various FPXX & FP64 ABIs */
15011 -+ max_abi = MIPS_ABI_FP_SOFT;
15012 -+ } else {
15013 -+ /* Default to a mode capable of running code expecting FR=0 */
15014 -+ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
15015 -+
15016 -+ /* Allow all ABIs we know about */
15017 -+ max_abi = MIPS_ABI_FP_64A;
15018 -+ }
15019 -
15020 - if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
15021 - (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
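A stand-alone sketch of the classification this hunk introduces, using the standard ELF constants (illustrative only, not the kernel helper itself): an object counts as "MIPS64" if it is ELFCLASS64 or carries the N32 flag EF_MIPS_ABI2, and only those default to FR=1:

	#include <stdio.h>

	#define ELFCLASS32	1
	#define ELFCLASS64	2
	#define EF_MIPS_ABI2	0x00000020

	static int is_mips64(unsigned char ei_class, unsigned int e_flags)
	{
		return ei_class == ELFCLASS64 || (e_flags & EF_MIPS_ABI2);
	}

	int main(void)
	{
		printf("o32: %d\n", is_mips64(ELFCLASS32, 0));			/* 0 */
		printf("n32: %d\n", is_mips64(ELFCLASS32, EF_MIPS_ABI2));	/* 1 */
		printf("n64: %d\n", is_mips64(ELFCLASS64, 0));			/* 1 */
		return 0;
	}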
15022 -diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
15023 -index 3391d061eccc..78c9fd32c554 100644
15024 ---- a/arch/parisc/include/asm/elf.h
15025 -+++ b/arch/parisc/include/asm/elf.h
15026 -@@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */
15027 -
15028 - #define ELF_HWCAP 0
15029 -
15030 -+#define STACK_RND_MASK (is_32bit_task() ? \
15031 -+ 0x7ff >> (PAGE_SHIFT - 12) : \
15032 -+ 0x3ffff >> (PAGE_SHIFT - 12))
15033 -+
15034 - struct mm_struct;
15035 - extern unsigned long arch_randomize_brk(struct mm_struct *);
15036 - #define arch_randomize_brk arch_randomize_brk
15037 -diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
15038 -index e1ffea2f9a0b..5aba01ac457f 100644
15039 ---- a/arch/parisc/kernel/sys_parisc.c
15040 -+++ b/arch/parisc/kernel/sys_parisc.c
15041 -@@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
15042 - if (stack_base > STACK_SIZE_MAX)
15043 - stack_base = STACK_SIZE_MAX;
15044 -
15045 -+ /* Add space for stack randomization. */
15046 -+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
15047 -+
15048 - return PAGE_ALIGN(STACK_TOP - stack_base);
15049 - }
15050 -
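A small stand-alone sketch of the head-room the STACK_RND_MASK definition above reserves, assuming PAGE_SHIFT = 12 (4 KiB pages); the printed numbers are illustrative. The randomized stack offset is at most mask << PAGE_SHIFT bytes, which is exactly what mmap_upper_limit() now leaves free:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define STACK_RND_MASK_32 (0x7ff   >> (PAGE_SHIFT - 12))	/* 32-bit task */
	#define STACK_RND_MASK_64 (0x3ffff >> (PAGE_SHIFT - 12))	/* 64-bit task */

	int main(void)
	{
		unsigned long r32 = (unsigned long)STACK_RND_MASK_32 << PAGE_SHIFT;
		unsigned long r64 = (unsigned long)STACK_RND_MASK_64 << PAGE_SHIFT;

		printf("32-bit task: up to %lu bytes (~8 MiB) of stack ASLR\n", r32);
		printf("64-bit task: up to %lu bytes (~1 GiB) of stack ASLR\n", r64);
		return 0;
	}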
15051 -diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
15052 -index 15c99b649b04..b2eb4686bd8f 100644
15053 ---- a/arch/powerpc/kernel/mce.c
15054 -+++ b/arch/powerpc/kernel/mce.c
15055 -@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
15056 - uint64_t nip, uint64_t addr)
15057 - {
15058 - uint64_t srr1;
15059 -- int index = __this_cpu_inc_return(mce_nest_count);
15060 -+ int index = __this_cpu_inc_return(mce_nest_count) - 1;
15061 - struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
15062 -
15063 - /*
15064 -@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
15065 - if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
15066 - return;
15067 -
15068 -- index = __this_cpu_inc_return(mce_queue_count);
15069 -+ index = __this_cpu_inc_return(mce_queue_count) - 1;
15070 - /* If queue is full, just return for now. */
15071 - if (index >= MAX_MC_EVT) {
15072 - __this_cpu_dec(mce_queue_count);
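Both mce.c hunks above fix the same off-by-one: __this_cpu_inc_return() yields the post-increment value, so using it directly as an array index skips slot 0 and can run one past the end. A stand-alone illustration, with a plain counter standing in for the per-CPU primitive:

	#include <stdio.h>

	#define MAX_MC_EVT 4

	static int mce_queue_count;
	static int queue[MAX_MC_EVT];

	static int inc_return(void)
	{
		return ++mce_queue_count;	/* returns the *new* value */
	}

	int main(void)
	{
		for (int evt = 100; evt < 106; evt++) {
			int index = inc_return() - 1;	/* was: inc_return() */

			if (index >= MAX_MC_EVT) {	/* queue full: drop event */
				mce_queue_count--;
				continue;
			}
			queue[index] = evt;
		}
		for (int i = 0; i < MAX_MC_EVT; i++)
			printf("queue[%d] = %d\n", i, queue[i]);
		return 0;
	}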
15073 -diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
15074 -index f096e72262f4..1db685104ffc 100644
15075 ---- a/arch/powerpc/kernel/vmlinux.lds.S
15076 -+++ b/arch/powerpc/kernel/vmlinux.lds.S
15077 -@@ -213,6 +213,7 @@ SECTIONS
15078 - *(.opd)
15079 - }
15080 -
15081 -+ . = ALIGN(256);
15082 - .got : AT(ADDR(.got) - LOAD_OFFSET) {
15083 - __toc_start = .;
15084 - #ifndef CONFIG_RELOCATABLE
15085 -diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
15086 -index 7940dc90e80b..b258110da952 100644
15087 ---- a/arch/s390/crypto/ghash_s390.c
15088 -+++ b/arch/s390/crypto/ghash_s390.c
15089 -@@ -16,11 +16,12 @@
15090 - #define GHASH_DIGEST_SIZE 16
15091 -
15092 - struct ghash_ctx {
15093 -- u8 icv[16];
15094 -- u8 key[16];
15095 -+ u8 key[GHASH_BLOCK_SIZE];
15096 - };
15097 -
15098 - struct ghash_desc_ctx {
15099 -+ u8 icv[GHASH_BLOCK_SIZE];
15100 -+ u8 key[GHASH_BLOCK_SIZE];
15101 - u8 buffer[GHASH_BLOCK_SIZE];
15102 - u32 bytes;
15103 - };
15104 -@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
15105 - static int ghash_init(struct shash_desc *desc)
15106 - {
15107 - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
15108 -+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
15109 -
15110 - memset(dctx, 0, sizeof(*dctx));
15111 -+ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
15112 -
15113 - return 0;
15114 - }
15115 -@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
15116 - }
15117 -
15118 - memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
15119 -- memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
15120 -
15121 - return 0;
15122 - }
15123 -@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
15124 - const u8 *src, unsigned int srclen)
15125 - {
15126 - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
15127 -- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
15128 - unsigned int n;
15129 - u8 *buf = dctx->buffer;
15130 - int ret;
15131 -@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
15132 - src += n;
15133 -
15134 - if (!dctx->bytes) {
15135 -- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
15136 -+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
15137 - GHASH_BLOCK_SIZE);
15138 - if (ret != GHASH_BLOCK_SIZE)
15139 - return -EIO;
15140 -@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
15141 -
15142 - n = srclen & ~(GHASH_BLOCK_SIZE - 1);
15143 - if (n) {
15144 -- ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
15145 -+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
15146 - if (ret != n)
15147 - return -EIO;
15148 - src += n;
15149 -@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
15150 - return 0;
15151 - }
15152 -
15153 --static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
15154 -+static int ghash_flush(struct ghash_desc_ctx *dctx)
15155 - {
15156 - u8 *buf = dctx->buffer;
15157 - int ret;
15158 -@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
15159 -
15160 - memset(pos, 0, dctx->bytes);
15161 -
15162 -- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
15163 -+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
15164 - if (ret != GHASH_BLOCK_SIZE)
15165 - return -EIO;
15166 -+
15167 -+ dctx->bytes = 0;
15168 - }
15169 -
15170 -- dctx->bytes = 0;
15171 - return 0;
15172 - }
15173 -
15174 - static int ghash_final(struct shash_desc *desc, u8 *dst)
15175 - {
15176 - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
15177 -- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
15178 - int ret;
15179 -
15180 -- ret = ghash_flush(ctx, dctx);
15181 -+ ret = ghash_flush(dctx);
15182 - if (!ret)
15183 -- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
15184 -+ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
15185 - return ret;
15186 - }
15187 -
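The ghash change above moves all mutable hashing state (the running icv and a working copy of the key) out of the shared transform context into the per-request context, seeded from the long-lived key at init time, so concurrent requests on one tfm can no longer corrupt each other's running digest. A plain-C sketch of that ownership split, with no crypto API involved:

	#include <stdio.h>
	#include <string.h>

	#define GHASH_BLOCK_SIZE 16

	struct tfm_ctx {			/* long-lived, set by setkey() */
		unsigned char key[GHASH_BLOCK_SIZE];
	};

	struct req_ctx {			/* per request, like ghash_desc_ctx */
		unsigned char icv[GHASH_BLOCK_SIZE];	/* running digest */
		unsigned char key[GHASH_BLOCK_SIZE];	/* private working copy */
	};

	static void req_init(struct req_ctx *r, const struct tfm_ctx *t)
	{
		memset(r, 0, sizeof(*r));
		memcpy(r->key, t->key, GHASH_BLOCK_SIZE); /* as ghash_init() now does */
	}

	int main(void)
	{
		struct tfm_ctx tfm = { .key = "0123456789abcde" };
		struct req_ctx a, b;

		req_init(&a, &tfm);
		req_init(&b, &tfm);
		a.icv[0] = 0xaa;		/* progress on request a ... */
		printf("b.icv[0] = %#x (unaffected)\n", b.icv[0]);
		return 0;
	}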
15188 -diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
15189 -index e08ec38f8c6e..e10112da008d 100644
15190 ---- a/arch/s390/include/asm/pgtable.h
15191 -+++ b/arch/s390/include/asm/pgtable.h
15192 -@@ -600,7 +600,7 @@ static inline int pmd_large(pmd_t pmd)
15193 - return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
15194 - }
15195 -
15196 --static inline int pmd_pfn(pmd_t pmd)
15197 -+static inline unsigned long pmd_pfn(pmd_t pmd)
15198 - {
15199 - unsigned long origin_mask;
15200 -
15201 -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
15202 -index a236e39cc385..1c0fb570b5c2 100644
15203 ---- a/arch/x86/include/asm/kvm_host.h
15204 -+++ b/arch/x86/include/asm/kvm_host.h
15205 -@@ -212,6 +212,7 @@ union kvm_mmu_page_role {
15206 - unsigned nxe:1;
15207 - unsigned cr0_wp:1;
15208 - unsigned smep_andnot_wp:1;
15209 -+ unsigned smap_andnot_wp:1;
15210 - };
15211 - };
15212 -
15213 -@@ -404,6 +405,7 @@ struct kvm_vcpu_arch {
15214 - struct kvm_mmu_memory_cache mmu_page_header_cache;
15215 -
15216 - struct fpu guest_fpu;
15217 -+ bool eager_fpu;
15218 - u64 xcr0;
15219 - u64 guest_supported_xcr0;
15220 - u32 guest_xstate_size;
15221 -@@ -735,6 +737,7 @@ struct kvm_x86_ops {
15222 - void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
15223 - unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
15224 - void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
15225 -+ void (*fpu_activate)(struct kvm_vcpu *vcpu);
15226 - void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
15227 -
15228 - void (*tlb_flush)(struct kvm_vcpu *vcpu);
15229 -diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15230 -index 3c036cb4a370..11dd8f23fcea 100644
15231 ---- a/arch/x86/kernel/cpu/mcheck/mce.c
15232 -+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15233 -@@ -705,6 +705,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
15234 - struct pt_regs *regs)
15235 - {
15236 - int i, ret = 0;
15237 -+ char *tmp;
15238 -
15239 - for (i = 0; i < mca_cfg.banks; i++) {
15240 - m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
15241 -@@ -713,9 +714,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
15242 - if (quirk_no_way_out)
15243 - quirk_no_way_out(i, m, regs);
15244 - }
15245 -- if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
15246 -- MCE_PANIC_SEVERITY)
15247 -+
15248 -+ if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
15249 -+ *msg = tmp;
15250 - ret = 1;
15251 -+ }
15252 - }
15253 - return ret;
15254 - }
15255 -diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
15256 -index c4bb8b8e5017..76d8cbe5a10f 100644
15257 ---- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
15258 -+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
15259 -@@ -680,6 +680,7 @@ static int __init rapl_pmu_init(void)
15260 - break;
15261 - case 60: /* Haswell */
15262 - case 69: /* Haswell-Celeron */
15263 -+ case 61: /* Broadwell */
15264 - rapl_cntr_mask = RAPL_IDX_HSW;
15265 - rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
15266 - break;
15267 -diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
15268 -index d5651fce0b71..f341d56b7883 100644
15269 ---- a/arch/x86/kernel/i387.c
15270 -+++ b/arch/x86/kernel/i387.c
15271 -@@ -169,6 +169,21 @@ static void init_thread_xstate(void)
15272 - xstate_size = sizeof(struct i387_fxsave_struct);
15273 - else
15274 - xstate_size = sizeof(struct i387_fsave_struct);
15275 -+
15276 -+ /*
15277 -+ * Quirk: we don't yet handle the XSAVES* instructions
15278 -+ * correctly, as we don't correctly convert between
15279 -+ * standard and compacted format when interfacing
15280 -+ * with user-space - so disable it for now.
15281 -+ *
15282 -+ * The difference is small: with recent CPUs the
15283 -+ * compacted format is only marginally smaller than
15284 -+ * the standard FPU state format.
15285 -+ *
15286 -+ * ( This is easy to backport while we are fixing
15287 -+ * XSAVES* support. )
15288 -+ */
15289 -+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
15290 - }
15291 -
15292 - /*
15293 -diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
15294 -index 8a80737ee6e6..307f9ec28e08 100644
15295 ---- a/arch/x86/kvm/cpuid.c
15296 -+++ b/arch/x86/kvm/cpuid.c
15297 -@@ -16,6 +16,8 @@
15298 - #include <linux/module.h>
15299 - #include <linux/vmalloc.h>
15300 - #include <linux/uaccess.h>
15301 -+#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
15302 -+#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
15303 - #include <asm/user.h>
15304 - #include <asm/xsave.h>
15305 - #include "cpuid.h"
15306 -@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
15307 - if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
15308 - best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
15309 -
15310 -+ vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
15311 -+
15312 - /*
15313 - * The existing code assumes virtual address is 48-bit in the canonical
15314 - * address checks; exit if it is ever changed.
15315 -diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
15316 -index 4452eedfaedd..9bec2b8cdced 100644
15317 ---- a/arch/x86/kvm/cpuid.h
15318 -+++ b/arch/x86/kvm/cpuid.h
15319 -@@ -111,4 +111,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
15320 - best = kvm_find_cpuid_entry(vcpu, 7, 0);
15321 - return best && (best->ebx & bit(X86_FEATURE_RTM));
15322 - }
15323 -+
15324 -+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
15325 -+{
15326 -+ struct kvm_cpuid_entry2 *best;
15327 -+
15328 -+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
15329 -+ return best && (best->ebx & bit(X86_FEATURE_MPX));
15330 -+}
15331 - #endif
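guest_cpuid_has_mpx() is used later in the patch to decide that the guest needs the FPU kept eagerly loaded, since the MPX bounds registers live in XSAVE state. A stand-alone sketch of the underlying bit test; bit 14 of CPUID.(EAX=7,ECX=0):EBX is the architectural MPX bit, but the leaf contents below are made up:

	#include <stdio.h>
	#include <stdint.h>

	#define X86_FEATURE_MPX_BIT 14		/* CPUID.(7,0):EBX.MPX */
	#define bit(n) (1u << (n))

	struct cpuid_entry {
		uint32_t eax, ebx, ecx, edx;
	};

	static int guest_has_mpx(const struct cpuid_entry *leaf7)
	{
		return leaf7 && (leaf7->ebx & bit(X86_FEATURE_MPX_BIT));
	}

	int main(void)
	{
		struct cpuid_entry leaf7 = { .ebx = bit(X86_FEATURE_MPX_BIT) };

		printf("guest has MPX: %s\n", guest_has_mpx(&leaf7) ? "yes" : "no");
		return 0;
	}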
15332 -diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
15333 -index cee759299a35..88ee9282a57e 100644
15334 ---- a/arch/x86/kvm/mmu.c
15335 -+++ b/arch/x86/kvm/mmu.c
15336 -@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
15337 - }
15338 - }
15339 -
15340 --void update_permission_bitmask(struct kvm_vcpu *vcpu,
15341 -- struct kvm_mmu *mmu, bool ept)
15342 -+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
15343 -+ struct kvm_mmu *mmu, bool ept)
15344 - {
15345 - unsigned bit, byte, pfec;
15346 - u8 map;
15347 -@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
15348 - void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
15349 - {
15350 - bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
15351 -+ bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
15352 - struct kvm_mmu *context = &vcpu->arch.mmu;
15353 -
15354 - MMU_WARN_ON(VALID_PAGE(context->root_hpa));
15355 -@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
15356 - context->base_role.cr0_wp = is_write_protection(vcpu);
15357 - context->base_role.smep_andnot_wp
15358 - = smep && !is_write_protection(vcpu);
15359 -+ context->base_role.smap_andnot_wp
15360 -+ = smap && !is_write_protection(vcpu);
15361 - }
15362 - EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
15363 -
15364 -@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
15365 - const u8 *new, int bytes)
15366 - {
15367 - gfn_t gfn = gpa >> PAGE_SHIFT;
15368 -- union kvm_mmu_page_role mask = { .word = 0 };
15369 - struct kvm_mmu_page *sp;
15370 - LIST_HEAD(invalid_list);
15371 - u64 entry, gentry, *spte;
15372 - int npte;
15373 - bool remote_flush, local_flush, zap_page;
15374 -+ union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
15375 -+ .cr0_wp = 1,
15376 -+ .cr4_pae = 1,
15377 -+ .nxe = 1,
15378 -+ .smep_andnot_wp = 1,
15379 -+ .smap_andnot_wp = 1,
15380 -+ };
15381 -
15382 - /*
15383 - * If we don't have indirect shadow pages, it means no page is
15384 -@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
15385 - ++vcpu->kvm->stat.mmu_pte_write;
15386 - kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
15387 -
15388 -- mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
15389 - for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
15390 - if (detect_write_misaligned(sp, gpa, bytes) ||
15391 - detect_write_flooding(sp)) {
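The replacement above swaps piecemeal mask.cr0_wp = ... assignments for a designated initializer over the union's bitfield view, so the overlaid .word can serve as a one-shot comparison mask. A stand-alone sketch of the pattern, with the field set trimmed for brevity:

	#include <stdio.h>
	#include <stdint.h>

	union role {
		uint32_t word;
		struct {
			uint32_t cr0_wp:1;
			uint32_t cr4_pae:1;
			uint32_t nxe:1;
			uint32_t smep_andnot_wp:1;
			uint32_t smap_andnot_wp:1;
		};
	};

	int main(void)
	{
		union role mask = {		/* one initializer, five bits */
			.cr0_wp = 1, .cr4_pae = 1, .nxe = 1,
			.smep_andnot_wp = 1, .smap_andnot_wp = 1,
		};
		union role a = { .word = 0 }, b = { .word = 0 };

		a.cr0_wp = 1; a.nxe = 1;
		b.cr0_wp = 1; b.nxe = 1; b.smap_andnot_wp = 1;

		/* roles are compatible only if they agree on every masked bit */
		printf("compatible: %s\n",
		       ((a.word ^ b.word) & mask.word) == 0 ? "yes" : "no");
		return 0;
	}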
15392 -diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
15393 -index c7d65637c851..0ada65ecddcf 100644
15394 ---- a/arch/x86/kvm/mmu.h
15395 -+++ b/arch/x86/kvm/mmu.h
15396 -@@ -71,8 +71,6 @@ enum {
15397 - int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
15398 - void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
15399 - void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
15400 --void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
15401 -- bool ept);
15402 -
15403 - static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
15404 - {
15405 -@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
15406 - int index = (pfec >> 1) +
15407 - (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
15408 -
15409 -+ WARN_ON(pfec & PFERR_RSVD_MASK);
15410 -+
15411 - return (mmu->permissions[index] >> pte_access) & 1;
15412 - }
15413 -
15414 -diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
15415 -index fd49c867b25a..6e6d115fe9b5 100644
15416 ---- a/arch/x86/kvm/paging_tmpl.h
15417 -+++ b/arch/x86/kvm/paging_tmpl.h
15418 -@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
15419 - mmu_is_nested(vcpu));
15420 - if (likely(r != RET_MMIO_PF_INVALID))
15421 - return r;
15422 -+
15423 -+ /*
14724 -+ * A page fault with PFEC.RSVD = 1 is caused by a shadow
14725 -+ * page fault and should not be used to walk the guest
14726 -+ * page table.
15427 -+ */
15428 -+ error_code &= ~PFERR_RSVD_MASK;
15429 - };
15430 -
15431 - r = mmu_topup_memory_caches(vcpu);
15432 -diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
15433 -index cc618c882f90..a4e62fcfabcb 100644
15434 ---- a/arch/x86/kvm/svm.c
15435 -+++ b/arch/x86/kvm/svm.c
15436 -@@ -4374,6 +4374,7 @@ static struct kvm_x86_ops svm_x86_ops = {
15437 - .cache_reg = svm_cache_reg,
15438 - .get_rflags = svm_get_rflags,
15439 - .set_rflags = svm_set_rflags,
15440 -+ .fpu_activate = svm_fpu_activate,
15441 - .fpu_deactivate = svm_fpu_deactivate,
15442 -
15443 - .tlb_flush = svm_flush_tlb,
15444 -diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
15445 -index a60bd3aa0965..5318d64674b0 100644
15446 ---- a/arch/x86/kvm/vmx.c
15447 -+++ b/arch/x86/kvm/vmx.c
15448 -@@ -10179,6 +10179,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
15449 - .cache_reg = vmx_cache_reg,
15450 - .get_rflags = vmx_get_rflags,
15451 - .set_rflags = vmx_set_rflags,
15452 -+ .fpu_activate = vmx_fpu_activate,
15453 - .fpu_deactivate = vmx_fpu_deactivate,
15454 -
15455 - .tlb_flush = vmx_flush_tlb,
15456 -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
15457 -index e222ba5d2beb..8838057da9c3 100644
15458 ---- a/arch/x86/kvm/x86.c
15459 -+++ b/arch/x86/kvm/x86.c
15460 -@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
15461 - int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
15462 - {
15463 - unsigned long old_cr4 = kvm_read_cr4(vcpu);
15464 -- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
15465 -- X86_CR4_PAE | X86_CR4_SMEP;
15466 -+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
15467 -+ X86_CR4_SMEP | X86_CR4_SMAP;
15468 -+
15469 - if (cr4 & CR4_RESERVED_BITS)
15470 - return 1;
15471 -
15472 -@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
15473 - (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
15474 - kvm_mmu_reset_context(vcpu);
15475 -
15476 -- if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
15477 -- update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
15478 --
15479 - if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
15480 - kvm_update_cpuid(vcpu);
15481 -
15482 -@@ -6141,6 +6139,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
15483 - return;
15484 -
15485 - page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
15486 -+ if (is_error_page(page))
15487 -+ return;
15488 - kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
15489 -
15490 - /*
15491 -@@ -6996,7 +6996,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
15492 - fpu_save_init(&vcpu->arch.guest_fpu);
15493 - __kernel_fpu_end();
15494 - ++vcpu->stat.fpu_reload;
15495 -- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
15496 -+ if (!vcpu->arch.eager_fpu)
15497 -+ kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
15498 -+
15499 - trace_kvm_fpu(0);
15500 - }
15501 -
15502 -@@ -7012,11 +7014,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
15503 - struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
15504 - unsigned int id)
15505 - {
15506 -+ struct kvm_vcpu *vcpu;
15507 -+
15508 - if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
15509 - printk_once(KERN_WARNING
15510 - "kvm: SMP vm created on host with unstable TSC; "
15511 - "guest TSC will not be reliable\n");
15512 -- return kvm_x86_ops->vcpu_create(kvm, id);
15513 -+
15514 -+ vcpu = kvm_x86_ops->vcpu_create(kvm, id);
15515 -+
15516 -+ /*
15517 -+ * Activate fpu unconditionally in case the guest needs eager FPU. It will be
15518 -+ * deactivated soon if it doesn't.
15519 -+ */
15520 -+ kvm_x86_ops->fpu_activate(vcpu);
15521 -+ return vcpu;
15522 - }
15523 -
15524 - int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
15525 -diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
15526 -index f9eeae871593..5aa1f6e281d2 100644
15527 ---- a/drivers/acpi/osl.c
15528 -+++ b/drivers/acpi/osl.c
15529 -@@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
15530 - request_mem_region(addr, length, desc);
15531 - }
15532 -
15533 --static int __init acpi_reserve_resources(void)
15534 -+static void __init acpi_reserve_resources(void)
15535 - {
15536 - acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
15537 - "ACPI PM1a_EVT_BLK");
15538 -@@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
15539 - if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
15540 - acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
15541 - acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
15542 --
15543 -- return 0;
15544 - }
15545 --device_initcall(acpi_reserve_resources);
15546 -
15547 - void acpi_os_printf(const char *fmt, ...)
15548 - {
15549 -@@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
15550 -
15551 - acpi_status __init acpi_os_initialize1(void)
15552 - {
15553 -+ acpi_reserve_resources();
15554 - kacpid_wq = alloc_workqueue("kacpid", 0, 1);
15555 - kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
15556 - kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
15557 -diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
15558 -index 33bb06e006c9..adce56fa9cef 100644
15559 ---- a/drivers/ata/ahci.c
15560 -+++ b/drivers/ata/ahci.c
15561 -@@ -66,6 +66,7 @@ enum board_ids {
15562 - board_ahci_yes_fbs,
15563 -
15564 - /* board IDs for specific chipsets in alphabetical order */
15565 -+ board_ahci_avn,
15566 - board_ahci_mcp65,
15567 - board_ahci_mcp77,
15568 - board_ahci_mcp89,
15569 -@@ -84,6 +85,8 @@ enum board_ids {
15570 - static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
15571 - static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
15572 - unsigned long deadline);
15573 -+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
15574 -+ unsigned long deadline);
15575 - static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
15576 - static bool is_mcp89_apple(struct pci_dev *pdev);
15577 - static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
15578 -@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
15579 - .hardreset = ahci_p5wdh_hardreset,
15580 - };
15581 -
15582 -+static struct ata_port_operations ahci_avn_ops = {
15583 -+ .inherits = &ahci_ops,
15584 -+ .hardreset = ahci_avn_hardreset,
15585 -+};
15586 -+
15587 - static const struct ata_port_info ahci_port_info[] = {
15588 - /* by features */
15589 - [board_ahci] = {
15590 -@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
15591 - .port_ops = &ahci_ops,
15592 - },
15593 - /* by chipsets */
15594 -+ [board_ahci_avn] = {
15595 -+ .flags = AHCI_FLAG_COMMON,
15596 -+ .pio_mask = ATA_PIO4,
15597 -+ .udma_mask = ATA_UDMA6,
15598 -+ .port_ops = &ahci_avn_ops,
15599 -+ },
15600 - [board_ahci_mcp65] = {
15601 - AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
15602 - AHCI_HFLAG_YES_NCQ),
15603 -@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
15604 - { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
15605 - { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
15606 - { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
15607 -- { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
15608 -- { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
15609 -- { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
15610 -- { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
15611 -- { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
15612 -- { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
15613 -- { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
15614 -- { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
15615 -+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
15616 -+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
15617 -+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
15618 -+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
15619 -+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
15620 -+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
15621 -+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
15622 -+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
15623 - { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
15624 - { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
15625 - { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
15626 -@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
15627 - return rc;
15628 - }
15629 -
15630 -+/*
15631 -+ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
15632 -+ *
15633 -+ * It has been observed with some SSDs that the timing of events in the
14634 -+ * link synchronization phase can leave the port in a state that cannot
14635 -+ * be recovered by a SATA hard reset alone. The failing signature is
15636 -+ * SStatus.DET stuck at 1 ("Device presence detected but Phy
15637 -+ * communication not established"). It was found that unloading and
15638 -+ * reloading the driver when this problem occurs allows the drive
15639 -+ * connection to be recovered (DET advanced to 0x3). The critical
15640 -+ * component of reloading the driver is that the port state machines are
15641 -+ * reset by bouncing "port enable" in the AHCI PCS configuration
15642 -+ * register. So, reproduce that effect by bouncing a port whenever we
15643 -+ * see DET==1 after a reset.
15644 -+ */
15645 -+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
15646 -+ unsigned long deadline)
15647 -+{
15648 -+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
15649 -+ struct ata_port *ap = link->ap;
15650 -+ struct ahci_port_priv *pp = ap->private_data;
15651 -+ struct ahci_host_priv *hpriv = ap->host->private_data;
15652 -+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
15653 -+ unsigned long tmo = deadline - jiffies;
15654 -+ struct ata_taskfile tf;
15655 -+ bool online;
15656 -+ int rc, i;
15657 -+
15658 -+ DPRINTK("ENTER\n");
15659 -+
15660 -+ ahci_stop_engine(ap);
15661 -+
15662 -+ for (i = 0; i < 2; i++) {
15663 -+ u16 val;
15664 -+ u32 sstatus;
15665 -+ int port = ap->port_no;
15666 -+ struct ata_host *host = ap->host;
15667 -+ struct pci_dev *pdev = to_pci_dev(host->dev);
15668 -+
15669 -+ /* clear D2H reception area to properly wait for D2H FIS */
15670 -+ ata_tf_init(link->device, &tf);
15671 -+ tf.command = ATA_BUSY;
15672 -+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
15673 -+
15674 -+ rc = sata_link_hardreset(link, timing, deadline, &online,
15675 -+ ahci_check_ready);
15676 -+
15677 -+ if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
15678 -+ (sstatus & 0xf) != 1)
15679 -+ break;
15680 -+
15681 -+ ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
15682 -+ port);
15683 -+
15684 -+ pci_read_config_word(pdev, 0x92, &val);
15685 -+ val &= ~(1 << port);
15686 -+ pci_write_config_word(pdev, 0x92, val);
15687 -+ ata_msleep(ap, 1000);
15688 -+ val |= 1 << port;
15689 -+ pci_write_config_word(pdev, 0x92, val);
15690 -+ deadline += tmo;
15691 -+ }
15692 -+
15693 -+ hpriv->start_engine(ap);
15694 -+
15695 -+ if (online)
15696 -+ *class = ahci_dev_classify(ap);
15697 -+
15698 -+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
15699 -+ return rc;
15700 -+}
15701 -+
15702 -+
15703 - #ifdef CONFIG_PM
15704 - static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
15705 - {
15706 -diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
15707 -index 61a9c07e0dff..287c4ba0219f 100644
15708 ---- a/drivers/ata/libahci.c
15709 -+++ b/drivers/ata/libahci.c
15710 -@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
15711 - if (unlikely(resetting))
15712 - status &= ~PORT_IRQ_BAD_PMP;
15713 -
15714 -- /* if LPM is enabled, PHYRDY doesn't mean anything */
15715 -- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
15716 -+ if (sata_lpm_ignore_phy_events(&ap->link)) {
15717 - status &= ~PORT_IRQ_PHYRDY;
15718 - ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
15719 - }
15720 -diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
15721 -index 23dac3babfe3..87b4b7f9fdc6 100644
15722 ---- a/drivers/ata/libata-core.c
15723 -+++ b/drivers/ata/libata-core.c
15724 -@@ -4214,7 +4214,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
15725 - ATA_HORKAGE_ZERO_AFTER_TRIM, },
15726 - { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
15727 - ATA_HORKAGE_ZERO_AFTER_TRIM, },
15728 -- { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
15729 -+ { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
15730 - ATA_HORKAGE_ZERO_AFTER_TRIM, },
15731 -
15732 - /*
15733 -@@ -6728,6 +6728,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
15734 - return tmp;
15735 - }
15736 -
15737 -+/**
15738 -+ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
15739 -+ * @link: Link receiving the event
15740 -+ *
15741 -+ * Test whether the received PHY event has to be ignored or not.
15742 -+ *
15743 -+ * LOCKING:
14744 -+ * None.
15745 -+ *
15746 -+ * RETURNS:
15747 -+ * True if the event has to be ignored.
15748 -+ */
15749 -+bool sata_lpm_ignore_phy_events(struct ata_link *link)
15750 -+{
15751 -+ unsigned long lpm_timeout = link->last_lpm_change +
15752 -+ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
15753 -+
15754 -+ /* if LPM is enabled, PHYRDY doesn't mean anything */
15755 -+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
15756 -+ return true;
15757 -+
15758 -+ /* ignore the first PHY event after the LPM policy changed
14759 -+ * as it might be spurious
15760 -+ */
15761 -+ if ((link->flags & ATA_LFLAG_CHANGED) &&
15762 -+ time_before(jiffies, lpm_timeout))
15763 -+ return true;
15764 -+
15765 -+ return false;
15766 -+}
15767 -+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
15768 -+
15769 - /*
15770 - * Dummy port_ops
15771 - */
15772 -diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
15773 -index d2029a462e2c..89c3d83e1ca7 100644
15774 ---- a/drivers/ata/libata-eh.c
15775 -+++ b/drivers/ata/libata-eh.c
15776 -@@ -3489,6 +3489,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
15777 - }
15778 - }
15779 -
15780 -+ link->last_lpm_change = jiffies;
15781 -+ link->flags |= ATA_LFLAG_CHANGED;
15782 -+
15783 - return 0;
15784 -
15785 - fail:
15786 -diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
15787 -index 237f23f68bfc..1daa0ea2f1ac 100644
15788 ---- a/drivers/clk/clk.c
15789 -+++ b/drivers/clk/clk.c
15790 -@@ -1443,8 +1443,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
15791 - */
15792 - if (clk->prepare_count) {
15793 - clk_core_prepare(parent);
15794 -+ flags = clk_enable_lock();
15795 - clk_core_enable(parent);
15796 - clk_core_enable(clk);
15797 -+ clk_enable_unlock(flags);
15798 - }
15799 -
15800 - /* update the clk tree topology */
15801 -@@ -1459,13 +1461,17 @@ static void __clk_set_parent_after(struct clk_core *core,
15802 - struct clk_core *parent,
15803 - struct clk_core *old_parent)
15804 - {
15805 -+ unsigned long flags;
15806 -+
15807 - /*
15808 - * Finish the migration of prepare state and undo the changes done
15809 - * for preventing a race with clk_enable().
15810 - */
15811 - if (core->prepare_count) {
15812 -+ flags = clk_enable_lock();
15813 - clk_core_disable(core);
15814 - clk_core_disable(old_parent);
15815 -+ clk_enable_unlock(flags);
15816 - clk_core_unprepare(old_parent);
15817 - }
15818 - }
15819 -@@ -1489,8 +1495,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
15820 - clk_enable_unlock(flags);
15821 -
15822 - if (clk->prepare_count) {
15823 -+ flags = clk_enable_lock();
15824 - clk_core_disable(clk);
15825 - clk_core_disable(parent);
15826 -+ clk_enable_unlock(flags);
15827 - clk_core_unprepare(parent);
15828 - }
15829 - return ret;
15830 -diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
15831 -index 07d666cc6a29..bea4a173eef5 100644
15832 ---- a/drivers/clk/samsung/clk-exynos5420.c
15833 -+++ b/drivers/clk/samsung/clk-exynos5420.c
15834 -@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
15835 - { .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
15836 - { .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
15837 - { .offset = SRC_MASK_ISP, .value = 0x11111000, },
15838 -+ { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
15839 - { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
15840 - { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
15841 - };
15842 -diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
15843 -index 2eebd28b4c40..ccc20188f00c 100644
15844 ---- a/drivers/firmware/dmi_scan.c
15845 -+++ b/drivers/firmware/dmi_scan.c
15846 -@@ -499,18 +499,19 @@ static int __init dmi_present(const u8 *buf)
15847 - buf += 16;
15848 -
15849 - if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
15850 -+ if (smbios_ver)
15851 -+ dmi_ver = smbios_ver;
15852 -+ else
15853 -+ dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
15854 - dmi_num = get_unaligned_le16(buf + 12);
15855 - dmi_len = get_unaligned_le16(buf + 6);
15856 - dmi_base = get_unaligned_le32(buf + 8);
15857 -
15858 - if (dmi_walk_early(dmi_decode) == 0) {
15859 - if (smbios_ver) {
15860 -- dmi_ver = smbios_ver;
15861 - pr_info("SMBIOS %d.%d present.\n",
15862 - dmi_ver >> 8, dmi_ver & 0xFF);
15863 - } else {
15864 -- dmi_ver = (buf[14] & 0xF0) << 4 |
15865 -- (buf[14] & 0x0F);
15866 - pr_info("Legacy DMI %d.%d present.\n",
15867 - dmi_ver >> 8, dmi_ver & 0xFF);
15868 - }
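The reordered dmi_scan hunk also makes the legacy decode easier to see: buf[14] packs the major version in its high nibble and the minor in its low nibble, and the expression widens that into the same major << 8 | minor layout used for SMBIOS versions. A stand-alone check, with the input byte assumed for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned char b = 0x25;		/* legacy byte for DMI 2.5 */
		unsigned int dmi_ver = (b & 0xF0) << 4 | (b & 0x0F);

		printf("Legacy DMI %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF);
		return 0;
	}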
15869 -diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
15870 -index 443518f63f15..a6b0def4bd7b 100644
15871 ---- a/drivers/gpio/gpio-kempld.c
15872 -+++ b/drivers/gpio/gpio-kempld.c
15873 -@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
15874 - = container_of(chip, struct kempld_gpio_data, chip);
15875 - struct kempld_device_data *pld = gpio->pld;
15876 -
15877 -- return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
15878 -+ return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
15879 - }
15880 -
15881 - static int kempld_gpio_pincount(struct kempld_device_data *pld)
15882 -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
15883 -index 498399323a8c..406624a0b201 100644
15884 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
15885 -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
15886 -@@ -729,7 +729,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
15887 - kfd2kgd->get_max_engine_clock_in_mhz(
15888 - dev->gpu->kgd));
15889 - sysfs_show_64bit_prop(buffer, "local_mem_size",
15890 -- kfd2kgd->get_vmem_size(dev->gpu->kgd));
15891 -+ (unsigned long long int) 0);
15892 -
15893 - sysfs_show_32bit_prop(buffer, "fw_version",
15894 - kfd2kgd->get_fw_version(
15895 -diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
15896 -index 5ba5792bfdba..98b125763ecd 100644
15897 ---- a/drivers/gpu/drm/drm_plane_helper.c
15898 -+++ b/drivers/gpu/drm/drm_plane_helper.c
15899 -@@ -476,6 +476,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
15900 - if (!crtc[i])
15901 - continue;
15902 -
15903 -+ if (crtc[i]->cursor == plane)
15904 -+ continue;
15905 -+
15906 - /* There's no other way to figure out whether the crtc is running. */
15907 - ret = drm_crtc_vblank_get(crtc[i]);
15908 - if (ret == 0) {
15909 -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
15910 -index 1afc0b419da2..965a45619f6b 100644
15911 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c
15912 -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
15913 -@@ -1789,7 +1789,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
15914 - if ((crtc->mode.clock == test_crtc->mode.clock) &&
15915 - (adjusted_clock == test_adjusted_clock) &&
15916 - (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
15917 -- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
15918 -+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
15919 -+ (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
15920 -+ drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
15921 - return test_radeon_crtc->pll_id;
15922 - }
15923 - }
15924 -diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
15925 -index 8d74de82456e..8b2c4c890507 100644
15926 ---- a/drivers/gpu/drm/radeon/atombios_dp.c
15927 -+++ b/drivers/gpu/drm/radeon/atombios_dp.c
15928 -@@ -412,19 +412,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
15929 - {
15930 - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
15931 - u8 msg[DP_DPCD_SIZE];
15932 -- int ret;
15933 -+ int ret, i;
15934 -
15935 -- ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
15936 -- DP_DPCD_SIZE);
15937 -- if (ret > 0) {
15938 -- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
15939 -+ for (i = 0; i < 7; i++) {
15940 -+ ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
15941 -+ DP_DPCD_SIZE);
15942 -+ if (ret == DP_DPCD_SIZE) {
15943 -+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
15944 -
15945 -- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
15946 -- dig_connector->dpcd);
15947 -+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
15948 -+ dig_connector->dpcd);
15949 -
15950 -- radeon_dp_probe_oui(radeon_connector);
15951 -+ radeon_dp_probe_oui(radeon_connector);
15952 -
15953 -- return true;
15954 -+ return true;
15955 -+ }
15956 - }
15957 - dig_connector->dpcd[0] = 0;
15958 - return false;
15959 -diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
15960 -index 3e670d344a20..19aafb71fd8e 100644
15961 ---- a/drivers/gpu/drm/radeon/cik.c
15962 -+++ b/drivers/gpu/drm/radeon/cik.c
15963 -@@ -5804,7 +5804,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
15964 - /* restore context1-15 */
15965 - /* set vm size, must be a multiple of 4 */
15966 - WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
15967 -- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
15968 -+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
15969 - for (i = 1; i < 16; i++) {
15970 - if (i < 8)
15971 - WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
15972 -diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
15973 -index 0926739c9fa7..9953356fe263 100644
15974 ---- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
15975 -+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
15976 -@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
15977 - if (enable) {
15978 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
15979 -
15980 -- if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
15981 -+ if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
15982 - WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
15983 - HDMI_AVI_INFO_SEND | /* enable AVI info frames */
15984 - HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
15985 -@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
15986 - if (!dig || !dig->afmt)
15987 - return;
15988 -
15989 -- if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
15990 -+ if (enable && connector &&
15991 -+ drm_detect_monitor_audio(radeon_connector_edid(connector))) {
15992 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
15993 - struct radeon_connector *radeon_connector = to_radeon_connector(connector);
15994 - struct radeon_connector_atom_dig *dig_connector;
15995 -diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
15996 -index dab00812abaa..02d585455f49 100644
15997 ---- a/drivers/gpu/drm/radeon/ni.c
15998 -+++ b/drivers/gpu/drm/radeon/ni.c
15999 -@@ -1272,7 +1272,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
16000 - */
16001 - for (i = 1; i < 8; i++) {
16002 - WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
16003 -- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
16004 -+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
16005 -+ rdev->vm_manager.max_pfn - 1);
16006 - WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
16007 - rdev->vm_manager.saved_table_addr[i]);
16008 - }
16009 -diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
16010 -index b7c6bb69f3c7..88c04bc0a7f6 100644
16011 ---- a/drivers/gpu/drm/radeon/radeon_audio.c
16012 -+++ b/drivers/gpu/drm/radeon/radeon_audio.c
16013 -@@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector,
16014 - if (!connector || !connector->encoder)
16015 - return;
16016 -
16017 -- if (!radeon_encoder_is_digital(connector->encoder))
16018 -- return;
16019 --
16020 - rdev = connector->encoder->dev->dev_private;
16021 -
16022 - if (!radeon_audio_chipset_supported(rdev))
16023 -@@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector,
16024 - radeon_encoder = to_radeon_encoder(connector->encoder);
16025 - dig = radeon_encoder->enc_priv;
16026 -
16027 -- if (!dig->afmt)
16028 -- return;
16029 --
16030 - if (status == connector_status_connected) {
16031 -- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
16032 -+ struct radeon_connector *radeon_connector;
16033 -+ int sink_type;
16034 -+
16035 -+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
16036 -+ radeon_encoder->audio = NULL;
16037 -+ return;
16038 -+ }
16039 -+
16040 -+ radeon_connector = to_radeon_connector(connector);
16041 -+ sink_type = radeon_dp_getsinktype(radeon_connector);
16042 -
16043 - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
16044 -- radeon_dp_getsinktype(radeon_connector) ==
16045 -- CONNECTOR_OBJECT_ID_DISPLAYPORT)
16046 -+ sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
16047 - radeon_encoder->audio = rdev->audio.dp_funcs;
16048 - else
16049 - radeon_encoder->audio = rdev->audio.hdmi_funcs;
16050 -
16051 - dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
16052 -- if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
16053 -- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
16054 -- } else {
16055 -- radeon_audio_enable(rdev, dig->afmt->pin, 0);
16056 -- dig->afmt->pin = NULL;
16057 -- }
16058 -+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
16059 - } else {
16060 - radeon_audio_enable(rdev, dig->afmt->pin, 0);
16061 - dig->afmt->pin = NULL;
16062 -diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
16063 -index 27973e3faf0e..27def67cb6be 100644
16064 ---- a/drivers/gpu/drm/radeon/radeon_connectors.c
16065 -+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
16066 -@@ -1333,10 +1333,8 @@ out:
16067 - /* updated in get modes as well since we need to know if it's analog or digital */
16068 - radeon_connector_update_scratch_regs(connector, ret);
16069 -
16070 -- if (radeon_audio != 0) {
16071 -- radeon_connector_get_edid(connector);
16072 -+ if (radeon_audio != 0)
16073 - radeon_audio_detect(connector, ret);
16074 -- }
16075 -
16076 - exit:
16077 - pm_runtime_mark_last_busy(connector->dev->dev);
16078 -@@ -1661,10 +1659,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
16079 -
16080 - radeon_connector_update_scratch_regs(connector, ret);
16081 -
16082 -- if (radeon_audio != 0) {
16083 -- radeon_connector_get_edid(connector);
16084 -+ if (radeon_audio != 0)
16085 - radeon_audio_detect(connector, ret);
16086 -- }
16087 -
16088 - out:
16089 - pm_runtime_mark_last_busy(connector->dev->dev);
16090 -diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
16091 -index a7fb2735d4a9..f433491fab6f 100644
16092 ---- a/drivers/gpu/drm/radeon/si.c
16093 -+++ b/drivers/gpu/drm/radeon/si.c
16094 -@@ -4288,7 +4288,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
16095 - /* empty context1-15 */
16096 - /* set vm size, must be a multiple of 4 */
16097 - WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
16098 -- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
16099 -+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
16100 - /* Assign the pt base to something valid for now; the pts used for
16101 - * the VMs are determined by the application and setup and assigned
16102 - * on the fly in the vm part of radeon_gart.c
16103 -diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
16104 -index e77658cd037c..2caf5b2f3446 100644
16105 ---- a/drivers/hid/hid-logitech-hidpp.c
16106 -+++ b/drivers/hid/hid-logitech-hidpp.c
16107 -@@ -39,7 +39,6 @@ MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@××××××××.com>");
16108 - /* bits 1..20 are reserved for classes */
16109 - #define HIDPP_QUIRK_DELAYED_INIT BIT(21)
16110 - #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
16111 --#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
16112 -
16113 - /*
16114 - * There are two hidpp protocols in use, the first version hidpp10 is known
16115 -@@ -701,12 +700,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
16116 - struct hid_field *field, struct hid_usage *usage,
16117 - unsigned long **bit, int *max)
16118 - {
16119 -- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
16120 --
16121 -- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
16122 -- (field->application == HID_GD_KEYBOARD))
16123 -- return 0;
16124 --
16125 - return -1;
16126 - }
16127 -
16128 -@@ -715,10 +708,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
16129 - {
16130 - struct wtp_data *wd = hidpp->private_data;
16131 -
16132 -- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
16133 -- /* this is the generic hid-input call */
16134 -- return;
16135 --
16136 - __set_bit(EV_ABS, input_dev->evbit);
16137 - __set_bit(EV_KEY, input_dev->evbit);
16138 - __clear_bit(EV_REL, input_dev->evbit);
16139 -@@ -1234,10 +1223,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
16140 - if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
16141 - connect_mask &= ~HID_CONNECT_HIDINPUT;
16142 -
16143 -- /* Re-enable hidinput for multi-input devices */
16144 -- if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
16145 -- connect_mask |= HID_CONNECT_HIDINPUT;
16146 --
16147 - ret = hid_hw_start(hdev, connect_mask);
16148 - if (ret) {
16149 - hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
16150 -@@ -1285,11 +1270,6 @@ static const struct hid_device_id hidpp_devices[] = {
16151 - HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
16152 - USB_DEVICE_ID_LOGITECH_T651),
16153 - .driver_data = HIDPP_QUIRK_CLASS_WTP },
16154 -- { /* Keyboard TK820 */
16155 -- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
16156 -- USB_VENDOR_ID_LOGITECH, 0x4102),
16157 -- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
16158 -- HIDPP_QUIRK_CLASS_WTP },
16159 -
16160 - { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
16161 - USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
16162 -diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
16163 -index f3830db02d46..37f01702d081 100644
16164 ---- a/drivers/hwmon/nct6683.c
16165 -+++ b/drivers/hwmon/nct6683.c
16166 -@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
16167 - (*t)->dev_attr.attr.name, tg->base + i);
16168 - if ((*t)->s2) {
16169 - a2 = &su->u.a2;
16170 -+ sysfs_attr_init(&a2->dev_attr.attr);
16171 - a2->dev_attr.attr.name = su->name;
16172 - a2->nr = (*t)->u.s.nr + i;
16173 - a2->index = (*t)->u.s.index;
16174 -@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
16175 - *attrs = &a2->dev_attr.attr;
16176 - } else {
16177 - a = &su->u.a1;
16178 -+ sysfs_attr_init(&a->dev_attr.attr);
16179 - a->dev_attr.attr.name = su->name;
16180 - a->index = (*t)->u.index + i;
16181 - a->dev_attr.attr.mode =
16182 -diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
16183 -index 1be41177b620..0773930c110e 100644
16184 ---- a/drivers/hwmon/nct6775.c
16185 -+++ b/drivers/hwmon/nct6775.c
16186 -@@ -994,6 +994,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
16187 - (*t)->dev_attr.attr.name, tg->base + i);
16188 - if ((*t)->s2) {
16189 - a2 = &su->u.a2;
16190 -+ sysfs_attr_init(&a2->dev_attr.attr);
16191 - a2->dev_attr.attr.name = su->name;
16192 - a2->nr = (*t)->u.s.nr + i;
16193 - a2->index = (*t)->u.s.index;
16194 -@@ -1004,6 +1005,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
16195 - *attrs = &a2->dev_attr.attr;
16196 - } else {
16197 - a = &su->u.a1;
16198 -+ sysfs_attr_init(&a->dev_attr.attr);
16199 - a->dev_attr.attr.name = su->name;
16200 - a->index = (*t)->u.index + i;
16201 - a->dev_attr.attr.mode =
16202 -diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
16203 -index 112e4d45e4a0..68800115876b 100644
16204 ---- a/drivers/hwmon/ntc_thermistor.c
16205 -+++ b/drivers/hwmon/ntc_thermistor.c
16206 -@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
16207 - ntc_thermistor_parse_dt(struct platform_device *pdev)
16208 - {
16209 - struct iio_channel *chan;
16210 -+ enum iio_chan_type type;
16211 - struct device_node *np = pdev->dev.of_node;
16212 - struct ntc_thermistor_platform_data *pdata;
16213 -+ int ret;
16214 -
16215 - if (!np)
16216 - return NULL;
16217 -@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
16218 - if (IS_ERR(chan))
16219 - return ERR_CAST(chan);
16220 -
16221 -+ ret = iio_get_channel_type(chan, &type);
16222 -+ if (ret < 0)
16223 -+ return ERR_PTR(ret);
16224 -+
16225 -+ if (type != IIO_VOLTAGE)
16226 -+ return ERR_PTR(-EINVAL);
16227 -+
16228 - if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
16229 - return ERR_PTR(-ENODEV);
16230 - if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
16231 -diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
16232 -index 99664ebc738d..ccf4cffe0ee1 100644
16233 ---- a/drivers/hwmon/tmp401.c
16234 -+++ b/drivers/hwmon/tmp401.c
16235 -@@ -44,7 +44,7 @@
16236 - #include <linux/sysfs.h>
16237 -
16238 - /* Addresses to scan */
16239 --static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
16240 -+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
16241 - 0x4e, 0x4f, I2C_CLIENT_END };
16242 -
16243 - enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
16244 -diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
16245 -index 53f32629283a..6805db0e4f07 100644
16246 ---- a/drivers/iio/accel/st_accel_core.c
16247 -+++ b/drivers/iio/accel/st_accel_core.c
16248 -@@ -465,6 +465,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
16249 -
16250 - indio_dev->modes = INDIO_DIRECT_MODE;
16251 - indio_dev->info = &accel_info;
16252 -+ mutex_init(&adata->tb.buf_lock);
16253 -
16254 - st_sensors_power_enable(indio_dev);
16255 -
16256 -diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
16257 -index 08bcfb061ca5..56008a86b78f 100644
16258 ---- a/drivers/iio/adc/axp288_adc.c
16259 -+++ b/drivers/iio/adc/axp288_adc.c
16260 -@@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = {
16261 - .channel = 0,
16262 - .address = AXP288_TS_ADC_H,
16263 - .datasheet_name = "TS_PIN",
16264 -+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16265 - }, {
16266 - .indexed = 1,
16267 - .type = IIO_TEMP,
16268 - .channel = 1,
16269 - .address = AXP288_PMIC_ADC_H,
16270 - .datasheet_name = "PMIC_TEMP",
16271 -+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16272 - }, {
16273 - .indexed = 1,
16274 - .type = IIO_TEMP,
16275 - .channel = 2,
16276 - .address = AXP288_GP_ADC_H,
16277 - .datasheet_name = "GPADC",
16278 -+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16279 - }, {
16280 - .indexed = 1,
16281 - .type = IIO_CURRENT,
16282 - .channel = 3,
16283 - .address = AXP20X_BATT_CHRG_I_H,
16284 - .datasheet_name = "BATT_CHG_I",
16285 -- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
16286 -+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16287 - }, {
16288 - .indexed = 1,
16289 - .type = IIO_CURRENT,
16290 - .channel = 4,
16291 - .address = AXP20X_BATT_DISCHRG_I_H,
16292 - .datasheet_name = "BATT_DISCHRG_I",
16293 -- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
16294 -+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16295 - }, {
16296 - .indexed = 1,
16297 - .type = IIO_VOLTAGE,
16298 - .channel = 5,
16299 - .address = AXP20X_BATT_V_H,
16300 - .datasheet_name = "BATT_V",
16301 -- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
16302 -+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16303 - },
16304 - };
16305 -
16306 -@@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
16307 - chan->address))
16308 - dev_err(&indio_dev->dev, "TS pin restore\n");
16309 - break;
16310 -- case IIO_CHAN_INFO_PROCESSED:
16311 -- ret = axp288_adc_read_channel(val, chan->address, info->regmap);
16312 -- break;
16313 - default:
16314 - ret = -EINVAL;
16315 - }
16316 -diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
16317 -index 51e2a83c9404..115f6e99a7fa 100644
16318 ---- a/drivers/iio/adc/cc10001_adc.c
16319 -+++ b/drivers/iio/adc/cc10001_adc.c
16320 -@@ -35,8 +35,9 @@
16321 - #define CC10001_ADC_EOC_SET BIT(0)
16322 -
16323 - #define CC10001_ADC_CHSEL_SAMPLED 0x0c
16324 --#define CC10001_ADC_POWER_UP 0x10
16325 --#define CC10001_ADC_POWER_UP_SET BIT(0)
16326 -+#define CC10001_ADC_POWER_DOWN 0x10
16327 -+#define CC10001_ADC_POWER_DOWN_SET BIT(0)
16328 -+
16329 - #define CC10001_ADC_DEBUG 0x14
16330 - #define CC10001_ADC_DATA_COUNT 0x20
16331 -
16332 -@@ -62,7 +63,6 @@ struct cc10001_adc_device {
16333 - u16 *buf;
16334 -
16335 - struct mutex lock;
16336 -- unsigned long channel_map;
16337 - unsigned int start_delay_ns;
16338 - unsigned int eoc_delay_ns;
16339 - };
16340 -@@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev,
16341 - return readl(adc_dev->reg_base + reg);
16342 - }
16343 -
16344 -+static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev)
16345 -+{
16346 -+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0);
16347 -+ ndelay(adc_dev->start_delay_ns);
16348 -+}
16349 -+
16350 -+static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev)
16351 -+{
16352 -+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN,
16353 -+ CC10001_ADC_POWER_DOWN_SET);
16354 -+}
16355 -+
16356 - static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
16357 - unsigned int channel)
16358 - {
16359 -@@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
16360 - val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV;
16361 - cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
16362 -
16363 -+ udelay(1);
16364 - val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG);
16365 - val = val | CC10001_ADC_START_CONV;
16366 - cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
16367 -@@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
16368 - struct iio_dev *indio_dev;
16369 - unsigned int delay_ns;
16370 - unsigned int channel;
16371 -+ unsigned int scan_idx;
16372 - bool sample_invalid;
16373 - u16 *data;
16374 - int i;
16375 -@@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
16376 -
16377 - mutex_lock(&adc_dev->lock);
16378 -
16379 -- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
16380 -- CC10001_ADC_POWER_UP_SET);
16381 --
16382 -- /* Wait for 8 (6+2) clock cycles before activating START */
16383 -- ndelay(adc_dev->start_delay_ns);
16384 -+ cc10001_adc_power_up(adc_dev);
16385 -
16386 - /* Calculate delay step for eoc and sampled data */
16387 - delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
16388 -
16389 - i = 0;
16390 - sample_invalid = false;
16391 -- for_each_set_bit(channel, indio_dev->active_scan_mask,
16392 -+ for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
16393 - indio_dev->masklength) {
16394 -
16395 -+ channel = indio_dev->channels[scan_idx].channel;
16396 - cc10001_adc_start(adc_dev, channel);
16397 -
16398 - data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns);
16399 -@@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
16400 - }
16401 -
16402 - done:
16403 -- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
16404 -+ cc10001_adc_power_down(adc_dev);
16405 -
16406 - mutex_unlock(&adc_dev->lock);
16407 -
16408 -@@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
16409 - unsigned int delay_ns;
16410 - u16 val;
16411 -
16412 -- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
16413 -- CC10001_ADC_POWER_UP_SET);
16414 --
16415 -- /* Wait for 8 (6+2) clock cycles before activating START */
16416 -- ndelay(adc_dev->start_delay_ns);
16417 -+ cc10001_adc_power_up(adc_dev);
16418 -
16419 - /* Calculate delay step for eoc and sampled data */
16420 - delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
16421 -@@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
16422 -
16423 - val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
16424 -
16425 -- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
16426 -+ cc10001_adc_power_down(adc_dev);
16427 -
16428 - return val;
16429 - }
16430 -@@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev,
16431 -
16432 - case IIO_CHAN_INFO_SCALE:
16433 - ret = regulator_get_voltage(adc_dev->reg);
16434 -- if (ret)
16435 -+ if (ret < 0)
16436 - return ret;
16437 -
16438 - *val = ret / 1000;
16439 -@@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = {
16440 - .update_scan_mode = &cc10001_update_scan_mode,
16441 - };
16442 -
16443 --static int cc10001_adc_channel_init(struct iio_dev *indio_dev)
16444 -+static int cc10001_adc_channel_init(struct iio_dev *indio_dev,
16445 -+ unsigned long channel_map)
16446 - {
16447 -- struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
16448 - struct iio_chan_spec *chan_array, *timestamp;
16449 - unsigned int bit, idx = 0;
16450 -
16451 -- indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map,
16452 -- CC10001_ADC_NUM_CHANNELS);
16453 -+ indio_dev->num_channels = bitmap_weight(&channel_map,
16454 -+ CC10001_ADC_NUM_CHANNELS) + 1;
16455 -
16456 -- chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1,
16457 -+ chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels,
16458 - sizeof(struct iio_chan_spec),
16459 - GFP_KERNEL);
16460 - if (!chan_array)
16461 - return -ENOMEM;
16462 -
16463 -- for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) {
16464 -+ for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) {
16465 - struct iio_chan_spec *chan = &chan_array[idx];
16466 -
16467 - chan->type = IIO_VOLTAGE;
16468 -@@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
16469 - unsigned long adc_clk_rate;
16470 - struct resource *res;
16471 - struct iio_dev *indio_dev;
16472 -+ unsigned long channel_map;
16473 - int ret;
16474 -
16475 - indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
16476 -@@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev)
16477 -
16478 - adc_dev = iio_priv(indio_dev);
16479 -
16480 -- adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
16481 -+ channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
16482 - if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
16483 -- adc_dev->channel_map &= ~ret;
16484 -+ channel_map &= ~ret;
16485 -
16486 - adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
16487 - if (IS_ERR(adc_dev->reg))
16488 -@@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
16489 - adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
16490 -
16491 - /* Setup the ADC channels available on the device */
16492 -- ret = cc10001_adc_channel_init(indio_dev);
16493 -+ ret = cc10001_adc_channel_init(indio_dev, channel_map);
16494 - if (ret < 0)
16495 - goto err_disable_clk;
16496 -
16497 -diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
16498 -index 3211729bcb0b..0c4618b4d515 100644
16499 ---- a/drivers/iio/adc/qcom-spmi-vadc.c
16500 -+++ b/drivers/iio/adc/qcom-spmi-vadc.c
16501 -@@ -18,6 +18,7 @@
16502 - #include <linux/iio/iio.h>
16503 - #include <linux/interrupt.h>
16504 - #include <linux/kernel.h>
16505 -+#include <linux/math64.h>
16506 - #include <linux/module.h>
16507 - #include <linux/of.h>
16508 - #include <linux/platform_device.h>
16509 -@@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
16510 - const struct vadc_channel_prop *prop, u16 adc_code)
16511 - {
16512 - const struct vadc_prescale_ratio *prescale;
16513 -- s32 voltage;
16514 -+ s64 voltage;
16515 -
16516 - voltage = adc_code - vadc->graph[prop->calibration].gnd;
16517 - voltage *= vadc->graph[prop->calibration].dx;
16518 -- voltage = voltage / vadc->graph[prop->calibration].dy;
16519 -+ voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy);
16520 -
16521 - if (prop->calibration == VADC_CALIB_ABSOLUTE)
16522 - voltage += vadc->graph[prop->calibration].dx;
16523 -@@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
16524 -
16525 - voltage = voltage * prescale->den;
16526 -
16527 -- return voltage / prescale->num;
16528 -+ return div64_s64(voltage, prescale->num);
16529 - }
16530 -
16531 - static int vadc_decimation_from_dt(u32 value)
16532 -diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
16533 -index a221f7329b79..ce93bd8e3f68 100644
16534 ---- a/drivers/iio/adc/xilinx-xadc-core.c
16535 -+++ b/drivers/iio/adc/xilinx-xadc-core.c
16536 -@@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
16537 - switch (chan->address) {
16538 - case XADC_REG_VCCINT:
16539 - case XADC_REG_VCCAUX:
16540 -+ case XADC_REG_VREFP:
16541 - case XADC_REG_VCCBRAM:
16542 - case XADC_REG_VCCPINT:
16543 - case XADC_REG_VCCPAUX:
16544 -@@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
16545 - .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
16546 - .scan_index = (_scan_index), \
16547 - .scan_type = { \
16548 -- .sign = 'u', \
16549 -+ .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
16550 - .realbits = 12, \
16551 - .storagebits = 16, \
16552 - .shift = 4, \
16553 -@@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
16554 - static const struct iio_chan_spec xadc_channels[] = {
16555 - XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
16556 - XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
16557 -- XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true),
16558 -+ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
16559 - XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
16560 - XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
16561 - XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
16562 -diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
16563 -index c7487e8d7f80..54adc5087210 100644
16564 ---- a/drivers/iio/adc/xilinx-xadc.h
16565 -+++ b/drivers/iio/adc/xilinx-xadc.h
16566 -@@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
16567 - #define XADC_REG_MAX_VCCPINT 0x28
16568 - #define XADC_REG_MAX_VCCPAUX 0x29
16569 - #define XADC_REG_MAX_VCCO_DDR 0x2a
16570 --#define XADC_REG_MIN_VCCPINT 0x2b
16571 --#define XADC_REG_MIN_VCCPAUX 0x2c
16572 --#define XADC_REG_MIN_VCCO_DDR 0x2d
16573 -+#define XADC_REG_MIN_VCCPINT 0x2c
16574 -+#define XADC_REG_MIN_VCCPAUX 0x2d
16575 -+#define XADC_REG_MIN_VCCO_DDR 0x2e
16576 -
16577 - #define XADC_REG_CONF0 0x40
16578 - #define XADC_REG_CONF1 0x41
16579 -diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
16580 -index edd13d2b4121..8dd0477e201c 100644
16581 ---- a/drivers/iio/common/st_sensors/st_sensors_core.c
16582 -+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
16583 -@@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
16584 - struct st_sensors_platform_data *of_pdata;
16585 - int err = 0;
16586 -
16587 -- mutex_init(&sdata->tb.buf_lock);
16588 --
16589 - /* If OF/DT pdata exists, it will take precedence of anything else */
16590 - of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
16591 - if (of_pdata)
16592 -diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
16593 -index f07a2336f7dc..566f7d2df031 100644
16594 ---- a/drivers/iio/gyro/st_gyro_core.c
16595 -+++ b/drivers/iio/gyro/st_gyro_core.c
16596 -@@ -317,6 +317,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
16597 -
16598 - indio_dev->modes = INDIO_DIRECT_MODE;
16599 - indio_dev->info = &gyro_info;
16600 -+ mutex_init(&gdata->tb.buf_lock);
16601 -
16602 - st_sensors_power_enable(indio_dev);
16603 -
16604 -diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
16605 -index 3ecf79ed08ac..88f21bbe947c 100644
16606 ---- a/drivers/iio/light/hid-sensor-prox.c
16607 -+++ b/drivers/iio/light/hid-sensor-prox.c
16608 -@@ -43,8 +43,6 @@ struct prox_state {
16609 - static const struct iio_chan_spec prox_channels[] = {
16610 - {
16611 - .type = IIO_PROXIMITY,
16612 -- .modified = 1,
16613 -- .channel2 = IIO_NO_MOD,
16614 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16615 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
16616 - BIT(IIO_CHAN_INFO_SCALE) |
16617 -diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
16618 -index 8ade473f99fe..2e56f812a644 100644
16619 ---- a/drivers/iio/magnetometer/st_magn_core.c
16620 -+++ b/drivers/iio/magnetometer/st_magn_core.c
16621 -@@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
16622 -
16623 - indio_dev->modes = INDIO_DIRECT_MODE;
16624 - indio_dev->info = &magn_info;
16625 -+ mutex_init(&mdata->tb.buf_lock);
16626 -
16627 - st_sensors_power_enable(indio_dev);
16628 -
16629 -diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
16630 -index 1af314926ebd..476a7d03d2ce 100644
16631 ---- a/drivers/iio/pressure/hid-sensor-press.c
16632 -+++ b/drivers/iio/pressure/hid-sensor-press.c
16633 -@@ -47,8 +47,6 @@ struct press_state {
16634 - static const struct iio_chan_spec press_channels[] = {
16635 - {
16636 - .type = IIO_PRESSURE,
16637 -- .modified = 1,
16638 -- .channel2 = IIO_NO_MOD,
16639 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
16640 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
16641 - BIT(IIO_CHAN_INFO_SCALE) |
16642 -diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
16643 -index 97baf40d424b..e881fa6291e9 100644
16644 ---- a/drivers/iio/pressure/st_pressure_core.c
16645 -+++ b/drivers/iio/pressure/st_pressure_core.c
16646 -@@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
16647 -
16648 - indio_dev->modes = INDIO_DIRECT_MODE;
16649 - indio_dev->info = &press_info;
16650 -+ mutex_init(&press_data->tb.buf_lock);
16651 -
16652 - st_sensors_power_enable(indio_dev);
16653 -
16654 -diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
16655 -index b85ddbc979e0..e5558b2660f2 100644
16656 ---- a/drivers/infiniband/core/iwpm_msg.c
16657 -+++ b/drivers/infiniband/core/iwpm_msg.c
16658 -@@ -33,7 +33,7 @@
16659 -
16660 - #include "iwpm_util.h"
16661 -
16662 --static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
16663 -+static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
16664 - static int iwpm_ulib_version = 3;
16665 - static int iwpm_user_pid = IWPM_PID_UNDEFINED;
16666 - static atomic_t echo_nlmsg_seq;
16667 -diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
16668 -index 991dc6b20a58..79363b687195 100644
16669 ---- a/drivers/input/mouse/elantech.c
16670 -+++ b/drivers/input/mouse/elantech.c
16671 -@@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
16672 - unsigned int x2, unsigned int y2)
16673 - {
16674 - elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
16675 -- elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
16676 -+ elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
16677 - }
16678 -
16679 - /*
16680 -diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
16681 -index 6d5a5c44453b..173e70dbf61b 100644
16682 ---- a/drivers/iommu/amd_iommu_v2.c
16683 -+++ b/drivers/iommu/amd_iommu_v2.c
16684 -@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
16685 -
16686 - static void put_pasid_state_wait(struct pasid_state *pasid_state)
16687 - {
16688 -+ atomic_dec(&pasid_state->count);
16689 - wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
16690 - free_pasid_state(pasid_state);
16691 - }
16692 -diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
16693 -index a3adde6519f0..bd6252b01510 100644
16694 ---- a/drivers/iommu/arm-smmu.c
16695 -+++ b/drivers/iommu/arm-smmu.c
16696 -@@ -224,14 +224,7 @@
16697 - #define RESUME_TERMINATE (1 << 0)
16698 -
16699 - #define TTBCR2_SEP_SHIFT 15
16700 --#define TTBCR2_SEP_MASK 0x7
16701 --
16702 --#define TTBCR2_ADDR_32 0
16703 --#define TTBCR2_ADDR_36 1
16704 --#define TTBCR2_ADDR_40 2
16705 --#define TTBCR2_ADDR_42 3
16706 --#define TTBCR2_ADDR_44 4
16707 --#define TTBCR2_ADDR_48 5
16708 -+#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
16709 -
16710 - #define TTBRn_HI_ASID_SHIFT 16
16711 -
16712 -@@ -783,26 +776,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
16713 - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
16714 - if (smmu->version > ARM_SMMU_V1) {
16715 - reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
16716 -- switch (smmu->va_size) {
16717 -- case 32:
16718 -- reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
16719 -- break;
16720 -- case 36:
16721 -- reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
16722 -- break;
16723 -- case 40:
16724 -- reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
16725 -- break;
16726 -- case 42:
16727 -- reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
16728 -- break;
16729 -- case 44:
16730 -- reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
16731 -- break;
16732 -- case 48:
16733 -- reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
16734 -- break;
16735 -- }
16736 -+ reg |= TTBCR2_SEP_UPSTREAM;
16737 - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
16738 - }
16739 - } else {
16740 -diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
16741 -index 7dc93aa004c8..312ffd3d0017 100644
16742 ---- a/drivers/lguest/core.c
16743 -+++ b/drivers/lguest/core.c
16744 -@@ -173,7 +173,7 @@ static void unmap_switcher(void)
16745 - bool lguest_address_ok(const struct lguest *lg,
16746 - unsigned long addr, unsigned long len)
16747 - {
16748 -- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
16749 -+ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
16750 - }
16751 -
16752 - /*
16753 -diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
16754 -index 6554d9148927..757f1ba34c4d 100644
16755 ---- a/drivers/md/dm-table.c
16756 -+++ b/drivers/md/dm-table.c
16757 -@@ -823,6 +823,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
16758 - }
16759 - EXPORT_SYMBOL(dm_consume_args);
16760 -
16761 -+static bool __table_type_request_based(unsigned table_type)
16762 -+{
16763 -+ return (table_type == DM_TYPE_REQUEST_BASED ||
16764 -+ table_type == DM_TYPE_MQ_REQUEST_BASED);
16765 -+}
16766 -+
16767 - static int dm_table_set_type(struct dm_table *t)
16768 - {
16769 - unsigned i;
16770 -@@ -855,8 +861,7 @@ static int dm_table_set_type(struct dm_table *t)
16771 - * Determine the type from the live device.
16772 - * Default to bio-based if device is new.
16773 - */
16774 -- if (live_md_type == DM_TYPE_REQUEST_BASED ||
16775 -- live_md_type == DM_TYPE_MQ_REQUEST_BASED)
16776 -+ if (__table_type_request_based(live_md_type))
16777 - request_based = 1;
16778 - else
16779 - bio_based = 1;
16780 -@@ -906,7 +911,7 @@ static int dm_table_set_type(struct dm_table *t)
16781 - }
16782 - t->type = DM_TYPE_MQ_REQUEST_BASED;
16783 -
16784 -- } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
16785 -+ } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
16786 - /* inherit live MD type */
16787 - t->type = live_md_type;
16788 -
16789 -@@ -928,10 +933,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
16790 -
16791 - bool dm_table_request_based(struct dm_table *t)
16792 - {
16793 -- unsigned table_type = dm_table_get_type(t);
16794 --
16795 -- return (table_type == DM_TYPE_REQUEST_BASED ||
16796 -- table_type == DM_TYPE_MQ_REQUEST_BASED);
16797 -+ return __table_type_request_based(dm_table_get_type(t));
16798 - }
16799 -
16800 - bool dm_table_mq_request_based(struct dm_table *t)
16801 -diff --git a/drivers/md/dm.c b/drivers/md/dm.c
16802 -index 8001fe9e3434..9b4e30a82e4a 100644
16803 ---- a/drivers/md/dm.c
16804 -+++ b/drivers/md/dm.c
16805 -@@ -1642,8 +1642,7 @@ static int dm_merge_bvec(struct request_queue *q,
16806 - struct mapped_device *md = q->queuedata;
16807 - struct dm_table *map = dm_get_live_table_fast(md);
16808 - struct dm_target *ti;
16809 -- sector_t max_sectors;
16810 -- int max_size = 0;
16811 -+ sector_t max_sectors, max_size = 0;
16812 -
16813 - if (unlikely(!map))
16814 - goto out;
16815 -@@ -1658,8 +1657,16 @@ static int dm_merge_bvec(struct request_queue *q,
16816 - max_sectors = min(max_io_len(bvm->bi_sector, ti),
16817 - (sector_t) queue_max_sectors(q));
16818 - max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
16819 -- if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
16820 -- max_size = 0;
16821 -+
16822 -+ /*
16823 -+ * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
16824 -+ * to the targets' merge function since it holds sectors not bytes).
16825 -+ * Just doing this as an interim fix for stable@ because the more
16826 -+ * comprehensive cleanup of switching to sector_t will impact every
16827 -+ * DM target that implements a ->merge hook.
16828 -+ */
16829 -+ if (max_size > INT_MAX)
16830 -+ max_size = INT_MAX;
16831 -
16832 - /*
16833 - * merge_bvec_fn() returns number of bytes
16834 -@@ -1667,7 +1674,7 @@ static int dm_merge_bvec(struct request_queue *q,
16835 - * max is precomputed maximal io size
16836 - */
16837 - if (max_size && ti->type->merge)
16838 -- max_size = ti->type->merge(ti, bvm, biovec, max_size);
16839 -+ max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
16840 - /*
16841 - * If the target doesn't support merge method and some of the devices
16842 - * provided their merge_bvec method (we know this by looking for the
16843 -diff --git a/drivers/md/md.c b/drivers/md/md.c
16844 -index e47d1dd046da..907534b7f40d 100644
16845 ---- a/drivers/md/md.c
16846 -+++ b/drivers/md/md.c
16847 -@@ -4138,12 +4138,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
16848 - if (!mddev->pers || !mddev->pers->sync_request)
16849 - return -EINVAL;
16850 -
16851 -- if (cmd_match(page, "frozen"))
16852 -- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16853 -- else
16854 -- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16855 -
16856 - if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
16857 -+ if (cmd_match(page, "frozen"))
16858 -+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16859 -+ else
16860 -+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16861 - flush_workqueue(md_misc_wq);
16862 - if (mddev->sync_thread) {
16863 - set_bit(MD_RECOVERY_INTR, &mddev->recovery);
16864 -@@ -4156,16 +4156,17 @@ action_store(struct mddev *mddev, const char *page, size_t len)
16865 - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
16866 - return -EBUSY;
16867 - else if (cmd_match(page, "resync"))
16868 -- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
16869 -+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16870 - else if (cmd_match(page, "recover")) {
16871 -+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16872 - set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
16873 -- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
16874 - } else if (cmd_match(page, "reshape")) {
16875 - int err;
16876 - if (mddev->pers->start_reshape == NULL)
16877 - return -EINVAL;
16878 - err = mddev_lock(mddev);
16879 - if (!err) {
16880 -+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16881 - err = mddev->pers->start_reshape(mddev);
16882 - mddev_unlock(mddev);
16883 - }
16884 -@@ -4177,6 +4178,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
16885 - set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
16886 - else if (!cmd_match(page, "repair"))
16887 - return -EINVAL;
16888 -+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
16889 - set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
16890 - set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
16891 - }
16892 -diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
16893 -index 3b5d7f704aa3..903391ce9353 100644
16894 ---- a/drivers/md/raid0.c
16895 -+++ b/drivers/md/raid0.c
16896 -@@ -517,6 +517,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
16897 - ? (sector & (chunk_sects-1))
16898 - : sector_div(sector, chunk_sects));
16899 -
16900 -+ /* Restore due to sector_div */
16901 -+ sector = bio->bi_iter.bi_sector;
16902 -+
16903 - if (sectors < bio_sectors(bio)) {
16904 - split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
16905 - bio_chain(split, bio);
16906 -@@ -524,7 +527,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
16907 - split = bio;
16908 - }
16909 -
16910 -- sector = bio->bi_iter.bi_sector;
16911 - zone = find_zone(mddev->private, &sector);
16912 - tmp_dev = map_sector(mddev, zone, sector, &sector);
16913 - split->bi_bdev = tmp_dev->bdev;
16914 -diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
16915 -index cd2f96b2c572..007ab861eca0 100644
16916 ---- a/drivers/md/raid5.c
16917 -+++ b/drivers/md/raid5.c
16918 -@@ -1933,7 +1933,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
16919 -
16920 - conf->slab_cache = sc;
16921 - conf->active_name = 1-conf->active_name;
16922 -- conf->pool_size = newsize;
16923 -+ if (!err)
16924 -+ conf->pool_size = newsize;
16925 - return err;
16926 - }
16927 -
16928 -diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
16929 -index ae498b53ee40..46e3840c7a37 100644
16930 ---- a/drivers/mfd/da9052-core.c
16931 -+++ b/drivers/mfd/da9052-core.c
16932 -@@ -433,6 +433,10 @@ EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
16933 - static const struct mfd_cell da9052_subdev_info[] = {
16934 - {
16935 - .name = "da9052-regulator",
16936 -+ .id = 0,
16937 -+ },
16938 -+ {
16939 -+ .name = "da9052-regulator",
16940 - .id = 1,
16941 - },
16942 - {
16943 -@@ -484,10 +488,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
16944 - .id = 13,
16945 - },
16946 - {
16947 -- .name = "da9052-regulator",
16948 -- .id = 14,
16949 -- },
16950 -- {
16951 - .name = "da9052-onkey",
16952 - },
16953 - {
16954 -diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
16955 -index 03d7c7521d97..9a39e0b7e583 100644
16956 ---- a/drivers/mmc/host/atmel-mci.c
16957 -+++ b/drivers/mmc/host/atmel-mci.c
16958 -@@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
16959 -
16960 - if (ios->clock) {
16961 - unsigned int clock_min = ~0U;
16962 -- u32 clkdiv;
16963 -+ int clkdiv;
16964 -
16965 - spin_lock_bh(&host->lock);
16966 - if (!host->mode_reg) {
16967 -@@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
16968 - /* Calculate clock divider */
16969 - if (host->caps.has_odd_clk_div) {
16970 - clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
16971 -- if (clkdiv > 511) {
16972 -+ if (clkdiv < 0) {
16973 -+ dev_warn(&mmc->class_dev,
16974 -+ "clock %u too fast; using %lu\n",
16975 -+ clock_min, host->bus_hz / 2);
16976 -+ clkdiv = 0;
16977 -+ } else if (clkdiv > 511) {
16978 - dev_warn(&mmc->class_dev,
16979 - "clock %u too slow; using %lu\n",
16980 - clock_min, host->bus_hz / (511 + 2));
16981 -diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
16982 -index db2c05b6fe7f..c9eb78f10a0d 100644
16983 ---- a/drivers/mtd/ubi/block.c
16984 -+++ b/drivers/mtd/ubi/block.c
16985 -@@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work)
16986 - blk_rq_map_sg(req->q, req, pdu->usgl.sg);
16987 -
16988 - ret = ubiblock_read(pdu);
16989 -+ rq_flush_dcache_pages(req);
16990 -+
16991 - blk_mq_end_request(req, ret);
16992 - }
16993 -
16994 -diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
16995 -index 6262612dec45..7a3231d8b933 100644
16996 ---- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
16997 -+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
16998 -@@ -512,11 +512,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
16999 - msgbuf->rx_pktids,
17000 - msgbuf->ioctl_resp_pktid);
17001 - if (msgbuf->ioctl_resp_ret_len != 0) {
17002 -- if (!skb) {
17003 -- brcmf_err("Invalid packet id idx recv'd %d\n",
17004 -- msgbuf->ioctl_resp_pktid);
17005 -+ if (!skb)
17006 - return -EBADF;
17007 -- }
17008 -+
17009 - memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
17010 - len : msgbuf->ioctl_resp_ret_len);
17011 - }
17012 -@@ -875,10 +873,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
17013 - flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
17014 - skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
17015 - msgbuf->tx_pktids, idx);
17016 -- if (!skb) {
17017 -- brcmf_err("Invalid packet id idx recv'd %d\n", idx);
17018 -+ if (!skb)
17019 - return;
17020 -- }
17021 -
17022 - set_bit(flowid, msgbuf->txstatus_done_map);
17023 - commonring = msgbuf->flowrings[flowid];
17024 -@@ -1157,6 +1153,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
17025 -
17026 - skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
17027 - msgbuf->rx_pktids, idx);
17028 -+ if (!skb)
17029 -+ return;
17030 -
17031 - if (data_offset)
17032 - skb_pull(skb, data_offset);
17033 -diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
17034 -index 14e8fd661889..fd5a0bb1493f 100644
17035 ---- a/drivers/net/wireless/iwlwifi/mvm/d3.c
17036 -+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
17037 -@@ -1742,8 +1742,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
17038 - int i, j, n_matches, ret;
17039 -
17040 - fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
17041 -- if (!IS_ERR_OR_NULL(fw_status))
17042 -+ if (!IS_ERR_OR_NULL(fw_status)) {
17043 - reasons = le32_to_cpu(fw_status->wakeup_reasons);
17044 -+ kfree(fw_status);
17045 -+ }
17046 -
17047 - if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
17048 - wakeup.rfkill_release = true;
17049 -@@ -1860,15 +1862,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
17050 - /* get the BSS vif pointer again */
17051 - vif = iwl_mvm_get_bss_vif(mvm);
17052 - if (IS_ERR_OR_NULL(vif))
17053 -- goto out_unlock;
17054 -+ goto err;
17055 -
17056 - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
17057 - if (ret)
17058 -- goto out_unlock;
17059 -+ goto err;
17060 -
17061 - if (d3_status != IWL_D3_STATUS_ALIVE) {
17062 - IWL_INFO(mvm, "Device was reset during suspend\n");
17063 -- goto out_unlock;
17064 -+ goto err;
17065 - }
17066 -
17067 - /* query SRAM first in case we want event logging */
17068 -@@ -1886,7 +1888,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
17069 - /* has unlocked the mutex, so skip that */
17070 - goto out;
17071 -
17072 -- out_unlock:
17073 -+err:
17074 -+ iwl_mvm_free_nd(mvm);
17075 - mutex_unlock(&mvm->mutex);
17076 -
17077 - out:
17078 -diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
17079 -index 69935aa5a1b3..cb72edb3d16a 100644
17080 ---- a/drivers/net/wireless/iwlwifi/pcie/trans.c
17081 -+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
17082 -@@ -5,8 +5,8 @@
17083 - *
17084 - * GPL LICENSE SUMMARY
17085 - *
17086 -- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
17087 -- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
17088 -+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
17089 -+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
17090 - *
17091 - * This program is free software; you can redistribute it and/or modify
17092 - * it under the terms of version 2 of the GNU General Public License as
17093 -@@ -31,8 +31,8 @@
17094 - *
17095 - * BSD LICENSE
17096 - *
17097 -- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
17098 -- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
17099 -+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
17100 -+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
17101 - * All rights reserved.
17102 - *
17103 - * Redistribution and use in source and binary forms, with or without
17104 -@@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
17105 - static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
17106 - {
17107 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
17108 -- struct page *page;
17109 -+ struct page *page = NULL;
17110 - dma_addr_t phys;
17111 - u32 size;
17112 - u8 power;
17113 -@@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
17114 - DMA_FROM_DEVICE);
17115 - if (dma_mapping_error(trans->dev, phys)) {
17116 - __free_pages(page, order);
17117 -+ page = NULL;
17118 - continue;
17119 - }
17120 - IWL_INFO(trans,
17121 -diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
17122 -index 8444313eabe2..8694dddcce9a 100644
17123 ---- a/drivers/net/wireless/rt2x00/rt2800usb.c
17124 -+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
17125 -@@ -1040,6 +1040,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
17126 - { USB_DEVICE(0x07d1, 0x3c17) },
17127 - { USB_DEVICE(0x2001, 0x3317) },
17128 - { USB_DEVICE(0x2001, 0x3c1b) },
17129 -+ { USB_DEVICE(0x2001, 0x3c25) },
17130 - /* Draytek */
17131 - { USB_DEVICE(0x07fa, 0x7712) },
17132 - /* DVICO */
17133 -diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
17134 -index 46ee956d0235..27cd6cabf6c5 100644
17135 ---- a/drivers/net/wireless/rtlwifi/usb.c
17136 -+++ b/drivers/net/wireless/rtlwifi/usb.c
17137 -@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
17138 -
17139 - do {
17140 - status = usb_control_msg(udev, pipe, request, reqtype, value,
17141 -- index, pdata, len, 0); /*max. timeout*/
17142 -+ index, pdata, len, 1000);
17143 - if (status < 0) {
17144 - /* firmware download is checksumed, don't retry */
17145 - if ((value >= FW_8192C_START_ADDRESS &&
17146 -diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
17147 -index 13584e24736a..4d7d60e593b8 100644
17148 ---- a/drivers/power/reset/at91-reset.c
17149 -+++ b/drivers/power/reset/at91-reset.c
17150 -@@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
17151 - res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
17152 - at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
17153 - resource_size(res));
17154 -- if (IS_ERR(at91_ramc_base[idx])) {
17155 -+ if (!at91_ramc_base[idx]) {
17156 - dev_err(&pdev->dev, "Could not map ram controller address\n");
17157 -- return PTR_ERR(at91_ramc_base[idx]);
17158 -+ return -ENOMEM;
17159 - }
17160 - }
17161 -
17162 -diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
17163 -index 476171a768d6..8a029f9bc18c 100644
17164 ---- a/drivers/pwm/pwm-img.c
17165 -+++ b/drivers/pwm/pwm-img.c
17166 -@@ -16,6 +16,7 @@
17167 - #include <linux/mfd/syscon.h>
17168 - #include <linux/module.h>
17169 - #include <linux/of.h>
17170 -+#include <linux/of_device.h>
17171 - #include <linux/platform_device.h>
17172 - #include <linux/pwm.h>
17173 - #include <linux/regmap.h>
17174 -@@ -38,7 +39,22 @@
17175 - #define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
17176 - #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
17177 -
17178 --#define MAX_TMBASE_STEPS 65536
17179 -+/*
17180 -+ * PWM period is specified with a timebase register,
17181 -+ * in number of step periods. The PWM duty cycle is also
17182 -+ * specified in step periods, in the [0, $timebase] range.
17183 -+ * In other words, the timebase imposes the duty cycle
17184 -+ * resolution. Therefore, let's constraint the timebase to
17185 -+ * a minimum value to allow a sane range of duty cycle values.
17186 -+ * Imposing a minimum timebase, will impose a maximum PWM frequency.
17187 -+ *
17188 -+ * The value chosen is completely arbitrary.
17189 -+ */
17190 -+#define MIN_TMBASE_STEPS 16
17191 -+
17192 -+struct img_pwm_soc_data {
17193 -+ u32 max_timebase;
17194 -+};
17195 -
17196 - struct img_pwm_chip {
17197 - struct device *dev;
17198 -@@ -47,6 +63,9 @@ struct img_pwm_chip {
17199 - struct clk *sys_clk;
17200 - void __iomem *base;
17201 - struct regmap *periph_regs;
17202 -+ int max_period_ns;
17203 -+ int min_period_ns;
17204 -+ const struct img_pwm_soc_data *data;
17205 - };
17206 -
17207 - static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
17208 -@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
17209 - u32 val, div, duty, timebase;
17210 - unsigned long mul, output_clk_hz, input_clk_hz;
17211 - struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
17212 -+ unsigned int max_timebase = pwm_chip->data->max_timebase;
17213 -+
17214 -+ if (period_ns < pwm_chip->min_period_ns ||
17215 -+ period_ns > pwm_chip->max_period_ns) {
17216 -+ dev_err(chip->dev, "configured period not in range\n");
17217 -+ return -ERANGE;
17218 -+ }
17219 -
17220 - input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
17221 - output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
17222 -
17223 - mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
17224 -- if (mul <= MAX_TMBASE_STEPS) {
17225 -+ if (mul <= max_timebase) {
17226 - div = PWM_CTRL_CFG_NO_SUB_DIV;
17227 - timebase = DIV_ROUND_UP(mul, 1);
17228 -- } else if (mul <= MAX_TMBASE_STEPS * 8) {
17229 -+ } else if (mul <= max_timebase * 8) {
17230 - div = PWM_CTRL_CFG_SUB_DIV0;
17231 - timebase = DIV_ROUND_UP(mul, 8);
17232 -- } else if (mul <= MAX_TMBASE_STEPS * 64) {
17233 -+ } else if (mul <= max_timebase * 64) {
17234 - div = PWM_CTRL_CFG_SUB_DIV1;
17235 - timebase = DIV_ROUND_UP(mul, 64);
17236 -- } else if (mul <= MAX_TMBASE_STEPS * 512) {
17237 -+ } else if (mul <= max_timebase * 512) {
17238 - div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
17239 - timebase = DIV_ROUND_UP(mul, 512);
17240 -- } else if (mul > MAX_TMBASE_STEPS * 512) {
17241 -+ } else if (mul > max_timebase * 512) {
17242 - dev_err(chip->dev,
17243 - "failed to configure timebase steps/divider value\n");
17244 - return -EINVAL;
17245 -@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
17246 - .owner = THIS_MODULE,
17247 - };
17248 -
17249 -+static const struct img_pwm_soc_data pistachio_pwm = {
17250 -+ .max_timebase = 255,
17251 -+};
17252 -+
17253 -+static const struct of_device_id img_pwm_of_match[] = {
17254 -+ {
17255 -+ .compatible = "img,pistachio-pwm",
17256 -+ .data = &pistachio_pwm,
17257 -+ },
17258 -+ { }
17259 -+};
17260 -+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
17261 -+
17262 - static int img_pwm_probe(struct platform_device *pdev)
17263 - {
17264 - int ret;
17265 -+ u64 val;
17266 -+ unsigned long clk_rate;
17267 - struct resource *res;
17268 - struct img_pwm_chip *pwm;
17269 -+ const struct of_device_id *of_dev_id;
17270 -
17271 - pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
17272 - if (!pwm)
17273 -@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
17274 - if (IS_ERR(pwm->base))
17275 - return PTR_ERR(pwm->base);
17276 -
17277 -+ of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
17278 -+ if (!of_dev_id)
17279 -+ return -ENODEV;
17280 -+ pwm->data = of_dev_id->data;
17281 -+
17282 - pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
17283 - "img,cr-periph");
17284 - if (IS_ERR(pwm->periph_regs))
17285 -@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
17286 - goto disable_sysclk;
17287 - }
17288 -
17289 -+ clk_rate = clk_get_rate(pwm->pwm_clk);
17290 -+
17291 -+ /* The maximum input clock divider is 512 */
17292 -+ val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
17293 -+ do_div(val, clk_rate);
17294 -+ pwm->max_period_ns = val;
17295 -+
17296 -+ val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
17297 -+ do_div(val, clk_rate);
17298 -+ pwm->min_period_ns = val;
17299 -+
17300 - pwm->chip.dev = &pdev->dev;
17301 - pwm->chip.ops = &img_pwm_ops;
17302 - pwm->chip.base = -1;
17303 -@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
17304 - return pwmchip_remove(&pwm_chip->chip);
17305 - }
17306 -
17307 --static const struct of_device_id img_pwm_of_match[] = {
17308 -- { .compatible = "img,pistachio-pwm", },
17309 -- { }
17310 --};
17311 --MODULE_DEVICE_TABLE(of, img_pwm_of_match);
17312 --
17313 - static struct platform_driver img_pwm_driver = {
17314 - .driver = {
17315 - .name = "img-pwm",
17316 -diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
17317 -index 8a4df7a1f2ee..e628d4c2f2ae 100644
17318 ---- a/drivers/regulator/da9052-regulator.c
17319 -+++ b/drivers/regulator/da9052-regulator.c
17320 -@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
17321 -
17322 - static int da9052_regulator_probe(struct platform_device *pdev)
17323 - {
17324 -+ const struct mfd_cell *cell = mfd_get_cell(pdev);
17325 - struct regulator_config config = { };
17326 - struct da9052_regulator *regulator;
17327 - struct da9052 *da9052;
17328 -@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
17329 - regulator->da9052 = da9052;
17330 -
17331 - regulator->info = find_regulator_info(regulator->da9052->chip_id,
17332 -- pdev->id);
17333 -+ cell->id);
17334 - if (regulator->info == NULL) {
17335 - dev_err(&pdev->dev, "invalid regulator ID specified\n");
17336 - return -EINVAL;
17337 -@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
17338 - config.driver_data = regulator;
17339 - config.regmap = da9052->regmap;
17340 - if (pdata && pdata->regulators) {
17341 -- config.init_data = pdata->regulators[pdev->id];
17342 -+ config.init_data = pdata->regulators[cell->id];
17343 - } else {
17344 - #ifdef CONFIG_OF
17345 - struct device_node *nproot = da9052->dev->of_node;
17346 -diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
17347 -index 3290a3ed5b31..a661d339adf7 100644
17348 ---- a/drivers/scsi/sd.c
17349 -+++ b/drivers/scsi/sd.c
17350 -@@ -1624,6 +1624,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
17351 - {
17352 - u64 start_lba = blk_rq_pos(scmd->request);
17353 - u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
17354 -+ u64 factor = scmd->device->sector_size / 512;
17355 - u64 bad_lba;
17356 - int info_valid;
17357 - /*
17358 -@@ -1645,16 +1646,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
17359 - if (scsi_bufflen(scmd) <= scmd->device->sector_size)
17360 - return 0;
17361 -
17362 -- if (scmd->device->sector_size < 512) {
17363 -- /* only legitimate sector_size here is 256 */
17364 -- start_lba <<= 1;
17365 -- end_lba <<= 1;
17366 -- } else {
17367 -- /* be careful ... don't want any overflows */
17368 -- unsigned int factor = scmd->device->sector_size / 512;
17369 -- do_div(start_lba, factor);
17370 -- do_div(end_lba, factor);
17371 -- }
17372 -+ /* be careful ... don't want any overflows */
17373 -+ do_div(start_lba, factor);
17374 -+ do_div(end_lba, factor);
17375 -
17376 - /* The bad lba was reported incorrectly, we have no idea where
17377 - * the error is.
17378 -@@ -2212,8 +2206,7 @@ got_data:
17379 - if (sector_size != 512 &&
17380 - sector_size != 1024 &&
17381 - sector_size != 2048 &&
17382 -- sector_size != 4096 &&
17383 -- sector_size != 256) {
17384 -+ sector_size != 4096) {
17385 - sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
17386 - sector_size);
17387 - /*
17388 -@@ -2268,8 +2261,6 @@ got_data:
17389 - sdkp->capacity <<= 2;
17390 - else if (sector_size == 1024)
17391 - sdkp->capacity <<= 1;
17392 -- else if (sector_size == 256)
17393 -- sdkp->capacity >>= 1;
17394 -
17395 - blk_queue_physical_block_size(sdp->request_queue,
17396 - sdkp->physical_block_size);
17397 -diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
17398 -index bf8c5c1e254e..75efaaeb0eca 100644
17399 ---- a/drivers/scsi/storvsc_drv.c
17400 -+++ b/drivers/scsi/storvsc_drv.c
17401 -@@ -1565,8 +1565,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
17402 - break;
17403 - default:
17404 - vm_srb->data_in = UNKNOWN_TYPE;
17405 -- vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
17406 -- SRB_FLAGS_DATA_OUT);
17407 -+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
17408 - break;
17409 - }
17410 -
17411 -diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
17412 -index d1ab996b3305..a21a51efaad0 100644
17413 ---- a/drivers/staging/gdm724x/gdm_mux.c
17414 -+++ b/drivers/staging/gdm724x/gdm_mux.c
17415 -@@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r)
17416 - unsigned int start_flag;
17417 - unsigned int payload_size;
17418 - unsigned short packet_type;
17419 -- int dummy_cnt;
17420 -+ int total_len;
17421 - u32 packet_size_sum = r->offset;
17422 - int index;
17423 - int ret = TO_HOST_INVALID_PACKET;
17424 -@@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r)
17425 - break;
17426 - }
17427 -
17428 -- dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
17429 -+ total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
17430 -
17431 - if (len - packet_size_sum <
17432 -- MUX_HEADER_SIZE + payload_size + dummy_cnt) {
17433 -+ total_len) {
17434 - pr_err("invalid payload : %d %d %04x\n",
17435 - payload_size, len, packet_type);
17436 - break;
17437 -@@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r)
17438 - break;
17439 - }
17440 -
17441 -- packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
17442 -+ packet_size_sum += total_len;
17443 - if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
17444 - ret = r->callback(NULL,
17445 - 0,
17446 -@@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
17447 - struct mux_pkt_header *mux_header;
17448 - struct mux_tx *t = NULL;
17449 - static u32 seq_num = 1;
17450 -- int dummy_cnt;
17451 - int total_len;
17452 - int ret;
17453 - unsigned long flags;
17454 -@@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
17455 -
17456 - spin_lock_irqsave(&mux_dev->write_lock, flags);
17457 -
17458 -- dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
17459 --
17460 -- total_len = len + MUX_HEADER_SIZE + dummy_cnt;
17461 -+ total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
17462 -
17463 - t = alloc_mux_tx(total_len);
17464 - if (!t) {
17465 -@@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
17466 - mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
17467 -
17468 - memcpy(t->buf+MUX_HEADER_SIZE, data, len);
17469 -- memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
17470 -+ memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
17471 -+ len);
17472 -
17473 - t->len = total_len;
17474 - t->callback = cb;
17475 -diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
17476 -index 03b2a90b9ac0..992236f605d8 100644
17477 ---- a/drivers/staging/vt6655/device_main.c
17478 -+++ b/drivers/staging/vt6655/device_main.c
17479 -@@ -911,7 +911,11 @@ static int vnt_int_report_rate(struct vnt_private *priv,
17480 -
17481 - if (!(tsr1 & TSR1_TERR)) {
17482 - info->status.rates[0].idx = idx;
17483 -- info->flags |= IEEE80211_TX_STAT_ACK;
17484 -+
17485 -+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
17486 -+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
17487 -+ else
17488 -+ info->flags |= IEEE80211_TX_STAT_ACK;
17489 - }
17490 -
17491 - return 0;
17492 -@@ -936,9 +940,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
17493 - //Only the status of first TD in the chain is correct
17494 - if (pTD->m_td1TD1.byTCR & TCR_STP) {
17495 - if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
17496 --
17497 -- vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
17498 --
17499 - if (!(byTsr1 & TSR1_TERR)) {
17500 - if (byTsr0 != 0) {
17501 - pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
17502 -@@ -957,6 +958,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
17503 - (int)uIdx, byTsr1, byTsr0);
17504 - }
17505 - }
17506 -+
17507 -+ vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
17508 -+
17509 - device_free_tx_buf(pDevice, pTD);
17510 - pDevice->iTDUsed[uIdx]--;
17511 - }
17512 -@@ -988,10 +992,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
17513 - PCI_DMA_TODEVICE);
17514 - }
17515 -
17516 -- if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
17517 -+ if (skb)
17518 - ieee80211_tx_status_irqsafe(pDevice->hw, skb);
17519 -- else
17520 -- dev_kfree_skb_irq(skb);
17521 -
17522 - pTDInfo->skb_dma = 0;
17523 - pTDInfo->skb = NULL;
17524 -@@ -1201,14 +1203,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
17525 - if (dma_idx == TYPE_AC0DMA)
17526 - head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
17527 -
17528 -- priv->iTDUsed[dma_idx]++;
17529 --
17530 -- /* Take ownership */
17531 -- wmb();
17532 -- head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
17533 --
17534 -- /* get Next */
17535 -- wmb();
17536 - priv->apCurrTD[dma_idx] = head_td->next;
17537 -
17538 - spin_unlock_irqrestore(&priv->lock, flags);
17539 -@@ -1229,11 +1223,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
17540 -
17541 - head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
17542 -
17543 -+ /* Poll Transmit the adapter */
17544 -+ wmb();
17545 -+ head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
17546 -+ wmb(); /* second memory barrier */
17547 -+
17548 - if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
17549 - MACvTransmitAC0(priv->PortOffset);
17550 - else
17551 - MACvTransmit0(priv->PortOffset);
17552 -
17553 -+ priv->iTDUsed[dma_idx]++;
17554 -+
17555 - spin_unlock_irqrestore(&priv->lock, flags);
17556 -
17557 - return 0;
17558 -@@ -1413,9 +1414,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
17559 -
17560 - priv->current_aid = conf->aid;
17561 -
17562 -- if (changed & BSS_CHANGED_BSSID)
17563 -+ if (changed & BSS_CHANGED_BSSID) {
17564 -+ unsigned long flags;
17565 -+
17566 -+ spin_lock_irqsave(&priv->lock, flags);
17567 -+
17568 - MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
17569 -
17570 -+ spin_unlock_irqrestore(&priv->lock, flags);
17571 -+ }
17572 -+
17573 - if (changed & BSS_CHANGED_BASIC_RATES) {
17574 - priv->basic_rates = conf->basic_rates;
17575 -
17576 -diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
17577 -index 33baf26de4b5..ee9ce165dcde 100644
17578 ---- a/drivers/staging/vt6656/rxtx.c
17579 -+++ b/drivers/staging/vt6656/rxtx.c
17580 -@@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
17581 - vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
17582 - }
17583 -
17584 -- if (current_rate > RATE_11M)
17585 -- pkt_type = priv->packet_type;
17586 -- else
17587 -+ if (current_rate > RATE_11M) {
17588 -+ if (info->band == IEEE80211_BAND_5GHZ) {
17589 -+ pkt_type = PK_TYPE_11A;
17590 -+ } else {
17591 -+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
17592 -+ pkt_type = PK_TYPE_11GB;
17593 -+ else
17594 -+ pkt_type = PK_TYPE_11GA;
17595 -+ }
17596 -+ } else {
17597 - pkt_type = PK_TYPE_11B;
17598 -+ }
17599 -
17600 - spin_lock_irqsave(&priv->lock, flags);
17601 -
17602 -diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
17603 -index f6c954c4635f..4073869d2090 100644
17604 ---- a/drivers/target/target_core_pscsi.c
17605 -+++ b/drivers/target/target_core_pscsi.c
17606 -@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
17607 - " pdv_host_id: %d\n", pdv->pdv_host_id);
17608 - return -EINVAL;
17609 - }
17610 -+ pdv->pdv_lld_host = sh;
17611 - }
17612 - } else {
17613 - if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
17614 -@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
17615 - if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
17616 - (phv->phv_lld_host != NULL))
17617 - scsi_host_put(phv->phv_lld_host);
17618 -+ else if (pdv->pdv_lld_host)
17619 -+ scsi_host_put(pdv->pdv_lld_host);
17620 -
17621 - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
17622 - scsi_device_put(sd);
17623 -diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
17624 -index 1bd757dff8ee..820d3052b775 100644
17625 ---- a/drivers/target/target_core_pscsi.h
17626 -+++ b/drivers/target/target_core_pscsi.h
17627 -@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
17628 - int pdv_lun_id;
17629 - struct block_device *pdv_bd;
17630 - struct scsi_device *pdv_sd;
17631 -+ struct Scsi_Host *pdv_lld_host;
17632 - } ____cacheline_aligned;
17633 -
17634 - typedef enum phv_modes {
17635 -diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
17636 -index c2556cf5186b..01255fd65135 100644
17637 ---- a/drivers/thermal/armada_thermal.c
17638 -+++ b/drivers/thermal/armada_thermal.c
17639 -@@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = {
17640 - .is_valid_shift = 10,
17641 - .temp_shift = 0,
17642 - .temp_mask = 0x3ff,
17643 -- .coef_b = 1169498786UL,
17644 -- .coef_m = 2000000UL,
17645 -- .coef_div = 4289,
17646 -+ .coef_b = 2931108200UL,
17647 -+ .coef_m = 5000000UL,
17648 -+ .coef_div = 10502,
17649 - .inverted = true,
17650 - };
17651 -
17652 -diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
17653 -index 5bab1c684bb1..7a3d146a5f0e 100644
17654 ---- a/drivers/tty/hvc/hvc_xen.c
17655 -+++ b/drivers/tty/hvc/hvc_xen.c
17656 -@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
17657 - return -ENOMEM;
17658 - }
17659 -
17660 -- info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
17661 -+ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
17662 - info->vtermno = HVC_COOKIE;
17663 -
17664 - spin_lock(&xencons_lock);
17665 -diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
17666 -index c4343764cc5b..bce16e405d59 100644
17667 ---- a/drivers/tty/n_gsm.c
17668 -+++ b/drivers/tty/n_gsm.c
17669 -@@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
17670 - return gsmtty_modem_update(dlci, encode);
17671 - }
17672 -
17673 --static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
17674 -+static void gsmtty_cleanup(struct tty_struct *tty)
17675 - {
17676 - struct gsm_dlci *dlci = tty->driver_data;
17677 - struct gsm_mux *gsm = dlci->gsm;
17678 -@@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
17679 - dlci_put(dlci);
17680 - dlci_put(gsm->dlci[0]);
17681 - mux_put(gsm);
17682 -- driver->ttys[tty->index] = NULL;
17683 - }
17684 -
17685 - /* Virtual ttys for the demux */
17686 -@@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = {
17687 - .tiocmget = gsmtty_tiocmget,
17688 - .tiocmset = gsmtty_tiocmset,
17689 - .break_ctl = gsmtty_break_ctl,
17690 -- .remove = gsmtty_remove,
17691 -+ .cleanup = gsmtty_cleanup,
17692 - };
17693 -
17694 -
17695 -diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
17696 -index 644ddb841d9f..bbc4ce66c2c1 100644
17697 ---- a/drivers/tty/n_hdlc.c
17698 -+++ b/drivers/tty/n_hdlc.c
17699 -@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
17700 - add_wait_queue(&tty->read_wait, &wait);
17701 -
17702 - for (;;) {
17703 -- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
17704 -+ if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
17705 - ret = -EIO;
17706 - break;
17707 - }
17708 -@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
17709 - /* set bits for operations that won't block */
17710 - if (n_hdlc->rx_buf_list.head)
17711 - mask |= POLLIN | POLLRDNORM; /* readable */
17712 -- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
17713 -+ if (test_bit(TTY_OTHER_DONE, &tty->flags))
17714 - mask |= POLLHUP;
17715 - if (tty_hung_up_p(filp))
17716 - mask |= POLLHUP;
17717 -diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
17718 -index cf6e0f2e1331..cc57a3a6b02b 100644
17719 ---- a/drivers/tty/n_tty.c
17720 -+++ b/drivers/tty/n_tty.c
17721 -@@ -1949,6 +1949,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
17722 - return ldata->commit_head - ldata->read_tail >= amt;
17723 - }
17724 -
17725 -+static inline int check_other_done(struct tty_struct *tty)
17726 -+{
17727 -+ int done = test_bit(TTY_OTHER_DONE, &tty->flags);
17728 -+ if (done) {
17729 -+ /* paired with cmpxchg() in check_other_closed(); ensures
17730 -+ * read buffer head index is not stale
17731 -+ */
17732 -+ smp_mb__after_atomic();
17733 -+ }
17734 -+ return done;
17735 -+}
17736 -+
17737 - /**
17738 - * copy_from_read_buf - copy read data directly
17739 - * @tty: terminal device
17740 -@@ -2167,7 +2179,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
17741 - struct n_tty_data *ldata = tty->disc_data;
17742 - unsigned char __user *b = buf;
17743 - DEFINE_WAIT_FUNC(wait, woken_wake_function);
17744 -- int c;
17745 -+ int c, done;
17746 - int minimum, time;
17747 - ssize_t retval = 0;
17748 - long timeout;
17749 -@@ -2235,8 +2247,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
17750 - ((minimum - (b - buf)) >= 1))
17751 - ldata->minimum_to_wake = (minimum - (b - buf));
17752 -
17753 -+ done = check_other_done(tty);
17754 -+
17755 - if (!input_available_p(tty, 0)) {
17756 -- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
17757 -+ if (done) {
17758 - retval = -EIO;
17759 - break;
17760 - }
17761 -@@ -2443,12 +2457,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
17762 -
17763 - poll_wait(file, &tty->read_wait, wait);
17764 - poll_wait(file, &tty->write_wait, wait);
17765 -+ if (check_other_done(tty))
17766 -+ mask |= POLLHUP;
17767 - if (input_available_p(tty, 1))
17768 - mask |= POLLIN | POLLRDNORM;
17769 - if (tty->packet && tty->link->ctrl_status)
17770 - mask |= POLLPRI | POLLIN | POLLRDNORM;
17771 -- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
17772 -- mask |= POLLHUP;
17773 - if (tty_hung_up_p(file))
17774 - mask |= POLLHUP;
17775 - if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
17776 -diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
17777 -index e72ee629cead..4d5e8409769c 100644
17778 ---- a/drivers/tty/pty.c
17779 -+++ b/drivers/tty/pty.c
17780 -@@ -53,9 +53,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
17781 - /* Review - krefs on tty_link ?? */
17782 - if (!tty->link)
17783 - return;
17784 -- tty_flush_to_ldisc(tty->link);
17785 - set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
17786 -- wake_up_interruptible(&tty->link->read_wait);
17787 -+ tty_flip_buffer_push(tty->link->port);
17788 - wake_up_interruptible(&tty->link->write_wait);
17789 - if (tty->driver->subtype == PTY_TYPE_MASTER) {
17790 - set_bit(TTY_OTHER_CLOSED, &tty->flags);
17791 -@@ -243,7 +242,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
17792 - goto out;
17793 -
17794 - clear_bit(TTY_IO_ERROR, &tty->flags);
17795 -+ /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
17796 - clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
17797 -+ clear_bit(TTY_OTHER_DONE, &tty->link->flags);
17798 - set_bit(TTY_THROTTLED, &tty->flags);
17799 - return 0;
17800 -
17801 -diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
17802 -index 75661641f5fe..2f78b77f0f81 100644
17803 ---- a/drivers/tty/tty_buffer.c
17804 -+++ b/drivers/tty/tty_buffer.c
17805 -@@ -37,6 +37,28 @@
17806 -
17807 - #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
17808 -
17809 -+/*
17810 -+ * If all tty flip buffers have been processed by flush_to_ldisc() or
17811 -+ * dropped by tty_buffer_flush(), check if the linked pty has been closed.
17812 -+ * If so, wake the reader/poll to process
17813 -+ */
17814 -+static inline void check_other_closed(struct tty_struct *tty)
17815 -+{
17816 -+ unsigned long flags, old;
17817 -+
17818 -+ /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
17819 -+ for (flags = ACCESS_ONCE(tty->flags);
17820 -+ test_bit(TTY_OTHER_CLOSED, &flags);
17821 -+ ) {
17822 -+ old = flags;
17823 -+ __set_bit(TTY_OTHER_DONE, &flags);
17824 -+ flags = cmpxchg(&tty->flags, old, flags);
17825 -+ if (old == flags) {
17826 -+ wake_up_interruptible(&tty->read_wait);
17827 -+ break;
17828 -+ }
17829 -+ }
17830 -+}
17831 -
17832 - /**
17833 - * tty_buffer_lock_exclusive - gain exclusive access to buffer
17834 -@@ -229,6 +251,8 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
17835 - if (ld && ld->ops->flush_buffer)
17836 - ld->ops->flush_buffer(tty);
17837 -
17838 -+ check_other_closed(tty);
17839 -+
17840 - atomic_dec(&buf->priority);
17841 - mutex_unlock(&buf->lock);
17842 - }
17843 -@@ -471,8 +495,10 @@ static void flush_to_ldisc(struct work_struct *work)
17844 - smp_rmb();
17845 - count = head->commit - head->read;
17846 - if (!count) {
17847 -- if (next == NULL)
17848 -+ if (next == NULL) {
17849 -+ check_other_closed(tty);
17850 - break;
17851 -+ }
17852 - buf->head = next;
17853 - tty_buffer_free(port, head);
17854 - continue;
17855 -@@ -489,19 +515,6 @@ static void flush_to_ldisc(struct work_struct *work)
17856 - }
17857 -
17858 - /**
17859 -- * tty_flush_to_ldisc
17860 -- * @tty: tty to push
17861 -- *
17862 -- * Push the terminal flip buffers to the line discipline.
17863 -- *
17864 -- * Must not be called from IRQ context.
17865 -- */
17866 --void tty_flush_to_ldisc(struct tty_struct *tty)
17867 --{
17868 -- flush_work(&tty->port->buf.work);
17869 --}
17870 --
17871 --/**
17872 - * tty_flip_buffer_push - terminal
17873 - * @port: tty port to push
17874 - *
17875 -diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
17876 -index c42765b3a060..0495c94a23d7 100644
17877 ---- a/drivers/usb/gadget/configfs.c
17878 -+++ b/drivers/usb/gadget/configfs.c
17879 -@@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
17880 - }
17881 - }
17882 - c->next_interface_id = 0;
17883 -+ memset(c->interface, 0, sizeof(c->interface));
17884 - c->superspeed = 0;
17885 - c->highspeed = 0;
17886 - c->fullspeed = 0;
17887 -diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
17888 -index eeedde8c435a..6994c99e58a6 100644
17889 ---- a/drivers/usb/host/xhci-ring.c
17890 -+++ b/drivers/usb/host/xhci-ring.c
17891 -@@ -2026,8 +2026,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
17892 - break;
17893 - case COMP_DEV_ERR:
17894 - case COMP_STALL:
17895 -+ frame->status = -EPROTO;
17896 -+ skip_td = true;
17897 -+ break;
17898 - case COMP_TX_ERR:
17899 - frame->status = -EPROTO;
17900 -+ if (event_trb != td->last_trb)
17901 -+ return 0;
17902 - skip_td = true;
17903 - break;
17904 - case COMP_STOP:
17905 -@@ -2640,7 +2645,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
17906 - xhci_halt(xhci);
17907 - hw_died:
17908 - spin_unlock(&xhci->lock);
17909 -- return -ESHUTDOWN;
17910 -+ return IRQ_HANDLED;
17911 - }
17912 -
17913 - /*
17914 -diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
17915 -index 8e421b89632d..ea75e8ccd3c1 100644
17916 ---- a/drivers/usb/host/xhci.h
17917 -+++ b/drivers/usb/host/xhci.h
17918 -@@ -1267,7 +1267,7 @@ union xhci_trb {
17919 - * since the command ring is 64-byte aligned.
17920 - * It must also be greater than 16.
17921 - */
17922 --#define TRBS_PER_SEGMENT 64
17923 -+#define TRBS_PER_SEGMENT 256
17924 - /* Allow two commands + a link TRB, along with any reserved command TRBs */
17925 - #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
17926 - #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
17927 -diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
17928 -index 84ce2d74894c..9031750e7404 100644
17929 ---- a/drivers/usb/serial/cp210x.c
17930 -+++ b/drivers/usb/serial/cp210x.c
17931 -@@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = {
17932 - { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
17933 - { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
17934 - { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
17935 -+ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
17936 - { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
17937 - { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
17938 - { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
17939 -diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
17940 -index 829604d11f3f..f5257af33ecf 100644
17941 ---- a/drivers/usb/serial/pl2303.c
17942 -+++ b/drivers/usb/serial/pl2303.c
17943 -@@ -61,7 +61,6 @@ static const struct usb_device_id id_table[] = {
17944 - { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
17945 - { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
17946 - { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
17947 -- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
17948 - { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
17949 - .driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
17950 - { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
17951 -diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
17952 -index 71fd9da1d6e7..e3b7af8adfb7 100644
17953 ---- a/drivers/usb/serial/pl2303.h
17954 -+++ b/drivers/usb/serial/pl2303.h
17955 -@@ -62,10 +62,6 @@
17956 - #define ALCATEL_VENDOR_ID 0x11f7
17957 - #define ALCATEL_PRODUCT_ID 0x02df
17958 -
17959 --/* Samsung I330 phone cradle */
17960 --#define SAMSUNG_VENDOR_ID 0x04e8
17961 --#define SAMSUNG_PRODUCT_ID 0x8001
17962 --
17963 - #define SIEMENS_VENDOR_ID 0x11f5
17964 - #define SIEMENS_PRODUCT_ID_SX1 0x0001
17965 - #define SIEMENS_PRODUCT_ID_X65 0x0003
17966 -diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
17967 -index bf2bd40e5f2a..60afb39eb73c 100644
17968 ---- a/drivers/usb/serial/visor.c
17969 -+++ b/drivers/usb/serial/visor.c
17970 -@@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = {
17971 - .driver_info = (kernel_ulong_t)&palm_os_4_probe },
17972 - { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
17973 - .driver_info = (kernel_ulong_t)&palm_os_4_probe },
17974 -- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
17975 -+ { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
17976 - .driver_info = (kernel_ulong_t)&palm_os_4_probe },
17977 - { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
17978 - .driver_info = (kernel_ulong_t)&palm_os_4_probe },
17979 -diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
17980 -index d684b4b8108f..caf188800c67 100644
17981 ---- a/drivers/usb/storage/unusual_devs.h
17982 -+++ b/drivers/usb/storage/unusual_devs.h
17983 -@@ -766,6 +766,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
17984 - USB_SC_DEVICE, USB_PR_DEVICE, NULL,
17985 - US_FL_GO_SLOW ),
17986 -
17987 -+/* Reported by Christian Schaller <cschalle@××××××.com> */
17988 -+UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000,
17989 -+ "LaCie",
17990 -+ "External HDD",
17991 -+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
17992 -+ US_FL_NO_WP_DETECT ),
17993 -+
17994 - /* Submitted by Joel Bourquard <numlock@××××××××.ch>
17995 - * Some versions of this device need the SubClass and Protocol overrides
17996 - * while others don't.
17997 -diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
17998 -index 2b8553bd8715..38387950490e 100644
17999 ---- a/drivers/xen/events/events_base.c
18000 -+++ b/drivers/xen/events/events_base.c
18001 -@@ -957,7 +957,7 @@ unsigned xen_evtchn_nr_channels(void)
18002 - }
18003 - EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
18004 -
18005 --int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
18006 -+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
18007 - {
18008 - struct evtchn_bind_virq bind_virq;
18009 - int evtchn, irq, ret;
18010 -@@ -971,8 +971,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
18011 - if (irq < 0)
18012 - goto out;
18013 -
18014 -- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
18015 -- handle_percpu_irq, "virq");
18016 -+ if (percpu)
18017 -+ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
18018 -+ handle_percpu_irq, "virq");
18019 -+ else
18020 -+ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
18021 -+ handle_edge_irq, "virq");
18022 -
18023 - bind_virq.virq = virq;
18024 - bind_virq.vcpu = cpu;
18025 -@@ -1062,7 +1066,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
18026 - {
18027 - int irq, retval;
18028 -
18029 -- irq = bind_virq_to_irq(virq, cpu);
18030 -+ irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
18031 - if (irq < 0)
18032 - return irq;
18033 - retval = request_irq(irq, handler, irqflags, devname, dev_id);
18034 -diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
18035 -index d925f55e4857..8081aba116a7 100644
18036 ---- a/fs/binfmt_elf.c
18037 -+++ b/fs/binfmt_elf.c
18038 -@@ -928,7 +928,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
18039 - total_size = total_mapping_size(elf_phdata,
18040 - loc->elf_ex.e_phnum);
18041 - if (!total_size) {
18042 -- error = -EINVAL;
18043 -+ retval = -EINVAL;
18044 - goto out_free_dentry;
18045 - }
18046 - }
18047 -diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
18048 -index 0a795c969c78..8b33da6ec3dd 100644
18049 ---- a/fs/btrfs/extent-tree.c
18050 -+++ b/fs/btrfs/extent-tree.c
18051 -@@ -8548,7 +8548,9 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
18052 - out:
18053 - if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
18054 - alloc_flags = update_block_group_flags(root, cache->flags);
18055 -+ lock_chunks(root->fs_info->chunk_root);
18056 - check_system_chunk(trans, root, alloc_flags);
18057 -+ unlock_chunks(root->fs_info->chunk_root);
18058 - }
18059 -
18060 - btrfs_end_transaction(trans, root);
18061 -diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
18062 -index 8222f6f74147..44a7e0398d97 100644
18063 ---- a/fs/btrfs/volumes.c
18064 -+++ b/fs/btrfs/volumes.c
18065 -@@ -4626,6 +4626,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
18066 - {
18067 - u64 chunk_offset;
18068 -
18069 -+ ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
18070 - chunk_offset = find_next_chunk(extent_root->fs_info);
18071 - return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
18072 - }
18073 -diff --git a/fs/dcache.c b/fs/dcache.c
18074 -index c71e3732e53b..922f23ef6041 100644
18075 ---- a/fs/dcache.c
18076 -+++ b/fs/dcache.c
18077 -@@ -1205,13 +1205,13 @@ ascend:
18078 - /* might go back up the wrong parent if we have had a rename. */
18079 - if (need_seqretry(&rename_lock, seq))
18080 - goto rename_retry;
18081 -- next = child->d_child.next;
18082 -- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
18083 -+ /* go into the first sibling still alive */
18084 -+ do {
18085 -+ next = child->d_child.next;
18086 - if (next == &this_parent->d_subdirs)
18087 - goto ascend;
18088 - child = list_entry(next, struct dentry, d_child);
18089 -- next = next->next;
18090 -- }
18091 -+ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
18092 - rcu_read_unlock();
18093 - goto resume;
18094 - }
18095 -diff --git a/fs/exec.c b/fs/exec.c
18096 -index 00400cf522dc..120244523647 100644
18097 ---- a/fs/exec.c
18098 -+++ b/fs/exec.c
18099 -@@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
18100 - if (stack_base > STACK_SIZE_MAX)
18101 - stack_base = STACK_SIZE_MAX;
18102 -
18103 -+ /* Add space for stack randomization. */
18104 -+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
18105 -+
18106 - /* Make sure we didn't let the argument array grow too large. */
18107 - if (vma->vm_end - vma->vm_start > stack_base)
18108 - return -ENOMEM;
18109 -diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
18110 -index 3445035c7e01..d41843181818 100644
18111 ---- a/fs/ext4/ext4_jbd2.c
18112 -+++ b/fs/ext4/ext4_jbd2.c
18113 -@@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
18114 - ext4_put_nojournal(handle);
18115 - return 0;
18116 - }
18117 -+
18118 -+ if (!handle->h_transaction) {
18119 -+ err = jbd2_journal_stop(handle);
18120 -+ return handle->h_err ? handle->h_err : err;
18121 -+ }
18122 -+
18123 - sb = handle->h_transaction->t_journal->j_private;
18124 - err = handle->h_err;
18125 - rc = jbd2_journal_stop(handle);
18126 -diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
18127 -index 16f6365f65e7..ea4ee1732143 100644
18128 ---- a/fs/ext4/extents.c
18129 -+++ b/fs/ext4/extents.c
18130 -@@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
18131 - ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
18132 - ext4_lblk_t last = lblock + len - 1;
18133 -
18134 -- if (lblock > last)
18135 -+ if (len == 0 || lblock > last)
18136 - return 0;
18137 - return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
18138 - }
18139 -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
18140 -index 852cc521f327..1f252b4e0f51 100644
18141 ---- a/fs/ext4/inode.c
18142 -+++ b/fs/ext4/inode.c
18143 -@@ -4233,7 +4233,7 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
18144 - int inode_size = EXT4_INODE_SIZE(sb);
18145 -
18146 - oi.orig_ino = orig_ino;
18147 -- ino = orig_ino & ~(inodes_per_block - 1);
18148 -+ ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
18149 - for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
18150 - if (ino == orig_ino)
18151 - continue;
18152 -diff --git a/fs/fhandle.c b/fs/fhandle.c
18153 -index 999ff5c3cab0..d59712dfa3e7 100644
18154 ---- a/fs/fhandle.c
18155 -+++ b/fs/fhandle.c
18156 -@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
18157 - goto out_err;
18158 - }
18159 - /* copy the full handle */
18160 -- if (copy_from_user(handle, ufh,
18161 -- sizeof(struct file_handle) +
18162 -+ *handle = f_handle;
18163 -+ if (copy_from_user(&handle->f_handle,
18164 -+ &ufh->f_handle,
18165 - f_handle.handle_bytes)) {
18166 - retval = -EFAULT;
18167 - goto out_handle;
18168 -diff --git a/fs/fs_pin.c b/fs/fs_pin.c
18169 -index b06c98796afb..611b5408f6ec 100644
18170 ---- a/fs/fs_pin.c
18171 -+++ b/fs/fs_pin.c
18172 -@@ -9,8 +9,8 @@ static DEFINE_SPINLOCK(pin_lock);
18173 - void pin_remove(struct fs_pin *pin)
18174 - {
18175 - spin_lock(&pin_lock);
18176 -- hlist_del(&pin->m_list);
18177 -- hlist_del(&pin->s_list);
18178 -+ hlist_del_init(&pin->m_list);
18179 -+ hlist_del_init(&pin->s_list);
18180 - spin_unlock(&pin_lock);
18181 - spin_lock_irq(&pin->wait.lock);
18182 - pin->done = 1;
18183 -diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
18184 -index b5128c6e63ad..a9079d035ae5 100644
18185 ---- a/fs/jbd2/recovery.c
18186 -+++ b/fs/jbd2/recovery.c
18187 -@@ -842,15 +842,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
18188 - {
18189 - jbd2_journal_revoke_header_t *header;
18190 - int offset, max;
18191 -+ int csum_size = 0;
18192 -+ __u32 rcount;
18193 - int record_len = 4;
18194 -
18195 - header = (jbd2_journal_revoke_header_t *) bh->b_data;
18196 - offset = sizeof(jbd2_journal_revoke_header_t);
18197 -- max = be32_to_cpu(header->r_count);
18198 -+ rcount = be32_to_cpu(header->r_count);
18199 -
18200 - if (!jbd2_revoke_block_csum_verify(journal, header))
18201 - return -EINVAL;
18202 -
18203 -+ if (jbd2_journal_has_csum_v2or3(journal))
18204 -+ csum_size = sizeof(struct jbd2_journal_revoke_tail);
18205 -+ if (rcount > journal->j_blocksize - csum_size)
18206 -+ return -EINVAL;
18207 -+ max = rcount;
18208 -+
18209 - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
18210 - record_len = 8;
18211 -
18212 -diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
18213 -index c6cbaef2bda1..14214da80eb8 100644
18214 ---- a/fs/jbd2/revoke.c
18215 -+++ b/fs/jbd2/revoke.c
18216 -@@ -577,7 +577,7 @@ static void write_one_revoke_record(journal_t *journal,
18217 - {
18218 - int csum_size = 0;
18219 - struct buffer_head *descriptor;
18220 -- int offset;
18221 -+ int sz, offset;
18222 - journal_header_t *header;
18223 -
18224 - /* If we are already aborting, this all becomes a noop. We
18225 -@@ -594,9 +594,14 @@ static void write_one_revoke_record(journal_t *journal,
18226 - if (jbd2_journal_has_csum_v2or3(journal))
18227 - csum_size = sizeof(struct jbd2_journal_revoke_tail);
18228 -
18229 -+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
18230 -+ sz = 8;
18231 -+ else
18232 -+ sz = 4;
18233 -+
18234 - /* Make sure we have a descriptor with space left for the record */
18235 - if (descriptor) {
18236 -- if (offset >= journal->j_blocksize - csum_size) {
18237 -+ if (offset + sz > journal->j_blocksize - csum_size) {
18238 - flush_descriptor(journal, descriptor, offset, write_op);
18239 - descriptor = NULL;
18240 - }
18241 -@@ -619,16 +624,13 @@ static void write_one_revoke_record(journal_t *journal,
18242 - *descriptorp = descriptor;
18243 - }
18244 -
18245 -- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
18246 -+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
18247 - * ((__be64 *)(&descriptor->b_data[offset])) =
18248 - cpu_to_be64(record->blocknr);
18249 -- offset += 8;
18250 --
18251 -- } else {
18252 -+ else
18253 - * ((__be32 *)(&descriptor->b_data[offset])) =
18254 - cpu_to_be32(record->blocknr);
18255 -- offset += 4;
18256 -- }
18257 -+ offset += sz;
18258 -
18259 - *offsetp = offset;
18260 - }
18261 -diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
18262 -index 5f09370c90a8..ff2f2e6ad311 100644
18263 ---- a/fs/jbd2/transaction.c
18264 -+++ b/fs/jbd2/transaction.c
18265 -@@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
18266 - int result;
18267 - int wanted;
18268 -
18269 -- WARN_ON(!transaction);
18270 - if (is_handle_aborted(handle))
18271 - return -EROFS;
18272 - journal = transaction->t_journal;
18273 -@@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
18274 - tid_t tid;
18275 - int need_to_start, ret;
18276 -
18277 -- WARN_ON(!transaction);
18278 - /* If we've had an abort of any type, don't even think about
18279 - * actually doing the restart! */
18280 - if (is_handle_aborted(handle))
18281 -@@ -785,7 +783,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
18282 - int need_copy = 0;
18283 - unsigned long start_lock, time_lock;
18284 -
18285 -- WARN_ON(!transaction);
18286 - if (is_handle_aborted(handle))
18287 - return -EROFS;
18288 - journal = transaction->t_journal;
18289 -@@ -1051,7 +1048,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
18290 - int err;
18291 -
18292 - jbd_debug(5, "journal_head %p\n", jh);
18293 -- WARN_ON(!transaction);
18294 - err = -EROFS;
18295 - if (is_handle_aborted(handle))
18296 - goto out;
18297 -@@ -1266,7 +1262,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
18298 - struct journal_head *jh;
18299 - int ret = 0;
18300 -
18301 -- WARN_ON(!transaction);
18302 - if (is_handle_aborted(handle))
18303 - return -EROFS;
18304 - journal = transaction->t_journal;
18305 -@@ -1397,7 +1392,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
18306 - int err = 0;
18307 - int was_modified = 0;
18308 -
18309 -- WARN_ON(!transaction);
18310 - if (is_handle_aborted(handle))
18311 - return -EROFS;
18312 - journal = transaction->t_journal;
18313 -@@ -1530,8 +1524,22 @@ int jbd2_journal_stop(handle_t *handle)
18314 - tid_t tid;
18315 - pid_t pid;
18316 -
18317 -- if (!transaction)
18318 -- goto free_and_exit;
18319 -+ if (!transaction) {
18320 -+ /*
18321 -+ * Handle is already detached from the transaction so
18322 -+ * there is nothing to do other than decrease a refcount,
18323 -+ * or free the handle if refcount drops to zero
18324 -+ */
18325 -+ if (--handle->h_ref > 0) {
18326 -+ jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
18327 -+ handle->h_ref);
18328 -+ return err;
18329 -+ } else {
18330 -+ if (handle->h_rsv_handle)
18331 -+ jbd2_free_handle(handle->h_rsv_handle);
18332 -+ goto free_and_exit;
18333 -+ }
18334 -+ }
18335 - journal = transaction->t_journal;
18336 -
18337 - J_ASSERT(journal_current_handle() == handle);
18338 -@@ -2373,7 +2381,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
18339 - transaction_t *transaction = handle->h_transaction;
18340 - journal_t *journal;
18341 -
18342 -- WARN_ON(!transaction);
18343 - if (is_handle_aborted(handle))
18344 - return -EROFS;
18345 - journal = transaction->t_journal;
18346 -diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
18347 -index 6acc9648f986..345b35fd329d 100644
18348 ---- a/fs/kernfs/dir.c
18349 -+++ b/fs/kernfs/dir.c
18350 -@@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
18351 - if (!kn)
18352 - goto err_out1;
18353 -
18354 -- ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
18355 -+ /*
18356 -+ * If the ino of the sysfs entry created for a kmem cache gets
18357 -+ * allocated from an ida layer, which is accounted to the memcg that
18358 -+ * owns the cache, the memcg will get pinned forever. So do not account
18359 -+ * ino ida allocations.
18360 -+ */
18361 -+ ret = ida_simple_get(&root->ino_ida, 1, 0,
18362 -+ GFP_KERNEL | __GFP_NOACCOUNT);
18363 - if (ret < 0)
18364 - goto err_out2;
18365 - kn->ino = ret;
18366 -diff --git a/fs/namespace.c b/fs/namespace.c
18367 -index 38ed1e1bed41..13b0f7bfc096 100644
18368 ---- a/fs/namespace.c
18369 -+++ b/fs/namespace.c
18370 -@@ -1709,8 +1709,11 @@ struct vfsmount *collect_mounts(struct path *path)
18371 - {
18372 - struct mount *tree;
18373 - namespace_lock();
18374 -- tree = copy_tree(real_mount(path->mnt), path->dentry,
18375 -- CL_COPY_ALL | CL_PRIVATE);
18376 -+ if (!check_mnt(real_mount(path->mnt)))
18377 -+ tree = ERR_PTR(-EINVAL);
18378 -+ else
18379 -+ tree = copy_tree(real_mount(path->mnt), path->dentry,
18380 -+ CL_COPY_ALL | CL_PRIVATE);
18381 - namespace_unlock();
18382 - if (IS_ERR(tree))
18383 - return ERR_CAST(tree);
18384 -diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
18385 -index 03d647bf195d..cdefaa331a07 100644
18386 ---- a/fs/nfsd/blocklayout.c
18387 -+++ b/fs/nfsd/blocklayout.c
18388 -@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
18389 - }
18390 -
18391 - const struct nfsd4_layout_ops bl_layout_ops = {
18392 -+ /*
18393 -+ * Pretend that we send notification to the client. This is a blatant
18394 -+ * lie to force recent Linux clients to cache our device IDs.
18395 -+ * We rarely ever change the device ID, so the harm of leaking deviceids
18396 -+ * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
18397 -+ * in this regard, but I filed errata 4119 for this a while ago, and
18398 -+ * hopefully the Linux client will eventually start caching deviceids
18399 -+ * without this again.
18400 -+ */
18401 -+ .notify_types =
18402 -+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
18403 - .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
18404 - .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
18405 - .proc_layoutget = nfsd4_block_proc_layoutget,
18406 -diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
18407 -index ee1cccdb083a..b4541ede7cb8 100644
18408 ---- a/fs/nfsd/nfs4state.c
18409 -+++ b/fs/nfsd/nfs4state.c
18410 -@@ -4386,10 +4386,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
18411 - return nfserr_old_stateid;
18412 - }
18413 -
18414 -+static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
18415 -+{
18416 -+ if (ols->st_stateowner->so_is_open_owner &&
18417 -+ !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
18418 -+ return nfserr_bad_stateid;
18419 -+ return nfs_ok;
18420 -+}
18421 -+
18422 - static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
18423 - {
18424 - struct nfs4_stid *s;
18425 -- struct nfs4_ol_stateid *ols;
18426 - __be32 status = nfserr_bad_stateid;
18427 -
18428 - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
18429 -@@ -4419,13 +4426,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
18430 - break;
18431 - case NFS4_OPEN_STID:
18432 - case NFS4_LOCK_STID:
18433 -- ols = openlockstateid(s);
18434 -- if (ols->st_stateowner->so_is_open_owner
18435 -- && !(openowner(ols->st_stateowner)->oo_flags
18436 -- & NFS4_OO_CONFIRMED))
18437 -- status = nfserr_bad_stateid;
18438 -- else
18439 -- status = nfs_ok;
18440 -+ status = nfsd4_check_openowner_confirmed(openlockstateid(s));
18441 - break;
18442 - default:
18443 - printk("unknown stateid type %x\n", s->sc_type);
18444 -@@ -4517,8 +4518,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
18445 - status = nfs4_check_fh(current_fh, stp);
18446 - if (status)
18447 - goto out;
18448 -- if (stp->st_stateowner->so_is_open_owner
18449 -- && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
18450 -+ status = nfsd4_check_openowner_confirmed(stp);
18451 -+ if (status)
18452 - goto out;
18453 - status = nfs4_check_openmode(stp, flags);
18454 - if (status)
18455 -diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
18456 -index 138321b0c6c2..454111a3308e 100644
18457 ---- a/fs/omfs/inode.c
18458 -+++ b/fs/omfs/inode.c
18459 -@@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = {
18460 - */
18461 - static int omfs_get_imap(struct super_block *sb)
18462 - {
18463 -- unsigned int bitmap_size, count, array_size;
18464 -+ unsigned int bitmap_size, array_size;
18465 -+ int count;
18466 - struct omfs_sb_info *sbi = OMFS_SB(sb);
18467 - struct buffer_head *bh;
18468 - unsigned long **ptr;
18469 -@@ -359,7 +360,7 @@ nomem:
18470 - }
18471 -
18472 - enum {
18473 -- Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
18474 -+ Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
18475 - };
18476 -
18477 - static const match_table_t tokens = {
18478 -@@ -368,6 +369,7 @@ static const match_table_t tokens = {
18479 - {Opt_umask, "umask=%o"},
18480 - {Opt_dmask, "dmask=%o"},
18481 - {Opt_fmask, "fmask=%o"},
18482 -+ {Opt_err, NULL},
18483 - };
18484 -
18485 - static int parse_options(char *options, struct omfs_sb_info *sbi)
18486 -diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
18487 -index 24f640441bd9..84d693d37428 100644
18488 ---- a/fs/overlayfs/copy_up.c
18489 -+++ b/fs/overlayfs/copy_up.c
18490 -@@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
18491 - struct cred *override_cred;
18492 - char *link = NULL;
18493 -
18494 -+ if (WARN_ON(!workdir))
18495 -+ return -EROFS;
18496 -+
18497 - ovl_path_upper(parent, &parentpath);
18498 - upperdir = parentpath.dentry;
18499 -
18500 -diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
18501 -index d139405d2bfa..692ceda3bc21 100644
18502 ---- a/fs/overlayfs/dir.c
18503 -+++ b/fs/overlayfs/dir.c
18504 -@@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
18505 - struct kstat stat;
18506 - int err;
18507 -
18508 -+ if (WARN_ON(!workdir))
18509 -+ return ERR_PTR(-EROFS);
18510 -+
18511 - err = ovl_lock_rename_workdir(workdir, upperdir);
18512 - if (err)
18513 - goto out;
18514 -@@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
18515 - struct dentry *newdentry;
18516 - int err;
18517 -
18518 -+ if (WARN_ON(!workdir))
18519 -+ return -EROFS;
18520 -+
18521 - err = ovl_lock_rename_workdir(workdir, upperdir);
18522 - if (err)
18523 - goto out;
18524 -@@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
18525 - struct dentry *opaquedir = NULL;
18526 - int err;
18527 -
18528 -- if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
18529 -- opaquedir = ovl_check_empty_and_clear(dentry);
18530 -- err = PTR_ERR(opaquedir);
18531 -- if (IS_ERR(opaquedir))
18532 -- goto out;
18533 -+ if (WARN_ON(!workdir))
18534 -+ return -EROFS;
18535 -+
18536 -+ if (is_dir) {
18537 -+ if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
18538 -+ opaquedir = ovl_check_empty_and_clear(dentry);
18539 -+ err = PTR_ERR(opaquedir);
18540 -+ if (IS_ERR(opaquedir))
18541 -+ goto out;
18542 -+ } else {
18543 -+ LIST_HEAD(list);
18544 -+
18545 -+ /*
18546 -+ * When removing an empty opaque directory, then it
18547 -+ * makes no sense to replace it with an exact replica of
18548 -+ * itself. But emptiness still needs to be checked.
18549 -+ */
18550 -+ err = ovl_check_empty_dir(dentry, &list);
18551 -+ ovl_cache_free(&list);
18552 -+ if (err)
18553 -+ goto out;
18554 -+ }
18555 - }
18556 -
18557 - err = ovl_lock_rename_workdir(workdir, upperdir);
18558 -diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
18559 -index 5f0d1993e6e3..bf8537c7f455 100644
18560 ---- a/fs/overlayfs/super.c
18561 -+++ b/fs/overlayfs/super.c
18562 -@@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
18563 - {
18564 - struct ovl_fs *ufs = sb->s_fs_info;
18565 -
18566 -- if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
18567 -+ if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir))
18568 - return -EROFS;
18569 -
18570 - return 0;
18571 -@@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
18572 - ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
18573 - err = PTR_ERR(ufs->workdir);
18574 - if (IS_ERR(ufs->workdir)) {
18575 -- pr_err("overlayfs: failed to create directory %s/%s\n",
18576 -- ufs->config.workdir, OVL_WORKDIR_NAME);
18577 -- goto out_put_upper_mnt;
18578 -+ pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
18579 -+ ufs->config.workdir, OVL_WORKDIR_NAME, -err);
18580 -+ sb->s_flags |= MS_RDONLY;
18581 -+ ufs->workdir = NULL;
18582 - }
18583 - }
18584 -
18585 -@@ -997,7 +998,6 @@ out_put_lower_mnt:
18586 - kfree(ufs->lower_mnt);
18587 - out_put_workdir:
18588 - dput(ufs->workdir);
18589 --out_put_upper_mnt:
18590 - mntput(ufs->upper_mnt);
18591 - out_put_lowerpath:
18592 - for (i = 0; i < numlower; i++)
18593 -diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
18594 -index 15105dbc9e28..0166e7e829a7 100644
18595 ---- a/fs/xfs/libxfs/xfs_attr_leaf.c
18596 -+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
18597 -@@ -498,8 +498,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
18598 - * After the last attribute is removed revert to original inode format,
18599 - * making all literal area available to the data fork once more.
18600 - */
18601 --STATIC void
18602 --xfs_attr_fork_reset(
18603 -+void
18604 -+xfs_attr_fork_remove(
18605 - struct xfs_inode *ip,
18606 - struct xfs_trans *tp)
18607 - {
18608 -@@ -565,7 +565,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
18609 - (mp->m_flags & XFS_MOUNT_ATTR2) &&
18610 - (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
18611 - !(args->op_flags & XFS_DA_OP_ADDNAME)) {
18612 -- xfs_attr_fork_reset(dp, args->trans);
18613 -+ xfs_attr_fork_remove(dp, args->trans);
18614 - } else {
18615 - xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
18616 - dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
18617 -@@ -828,7 +828,7 @@ xfs_attr3_leaf_to_shortform(
18618 - if (forkoff == -1) {
18619 - ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
18620 - ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
18621 -- xfs_attr_fork_reset(dp, args->trans);
18622 -+ xfs_attr_fork_remove(dp, args->trans);
18623 - goto out;
18624 - }
18625 -
18626 -diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
18627 -index e2929da7c3ba..4f3a60aa93d4 100644
18628 ---- a/fs/xfs/libxfs/xfs_attr_leaf.h
18629 -+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
18630 -@@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args);
18631 - int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
18632 - int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
18633 - int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
18634 --
18635 -+void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
18636 -
18637 - /*
18638 - * Internal routines when attribute fork size == XFS_LBSIZE(mp).
18639 -diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
18640 -index 83af4c149635..487c8374a1e0 100644
18641 ---- a/fs/xfs/xfs_attr_inactive.c
18642 -+++ b/fs/xfs/xfs_attr_inactive.c
18643 -@@ -379,23 +379,31 @@ xfs_attr3_root_inactive(
18644 - return error;
18645 - }
18646 -
18647 -+/*
18648 -+ * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
18649 -+ * removes both the on-disk and in-memory inode fork. Note that this also has to
18650 -+ * handle the condition of inodes without attributes but with an attribute fork
18651 -+ * configured, so we can't use xfs_inode_hasattr() here.
18652 -+ *
18653 -+ * The in-memory attribute fork is removed even on error.
18654 -+ */
18655 - int
18656 --xfs_attr_inactive(xfs_inode_t *dp)
18657 -+xfs_attr_inactive(
18658 -+ struct xfs_inode *dp)
18659 - {
18660 -- xfs_trans_t *trans;
18661 -- xfs_mount_t *mp;
18662 -- int error;
18663 -+ struct xfs_trans *trans;
18664 -+ struct xfs_mount *mp;
18665 -+ int cancel_flags = 0;
18666 -+ int lock_mode = XFS_ILOCK_SHARED;
18667 -+ int error = 0;
18668 -
18669 - mp = dp->i_mount;
18670 - ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
18671 -
18672 -- xfs_ilock(dp, XFS_ILOCK_SHARED);
18673 -- if (!xfs_inode_hasattr(dp) ||
18674 -- dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
18675 -- xfs_iunlock(dp, XFS_ILOCK_SHARED);
18676 -- return 0;
18677 -- }
18678 -- xfs_iunlock(dp, XFS_ILOCK_SHARED);
18679 -+ xfs_ilock(dp, lock_mode);
18680 -+ if (!XFS_IFORK_Q(dp))
18681 -+ goto out_destroy_fork;
18682 -+ xfs_iunlock(dp, lock_mode);
18683 -
18684 - /*
18685 - * Start our first transaction of the day.
18686 -@@ -407,13 +415,18 @@ xfs_attr_inactive(xfs_inode_t *dp)
18687 - * the inode in every transaction to let it float upward through
18688 - * the log.
18689 - */
18690 -+ lock_mode = 0;
18691 - trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
18692 - error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
18693 -- if (error) {
18694 -- xfs_trans_cancel(trans, 0);
18695 -- return error;
18696 -- }
18697 -- xfs_ilock(dp, XFS_ILOCK_EXCL);
18698 -+ if (error)
18699 -+ goto out_cancel;
18700 -+
18701 -+ lock_mode = XFS_ILOCK_EXCL;
18702 -+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
18703 -+ xfs_ilock(dp, lock_mode);
18704 -+
18705 -+ if (!XFS_IFORK_Q(dp))
18706 -+ goto out_cancel;
18707 -
18708 - /*
18709 - * No need to make quota reservations here. We expect to release some
18710 -@@ -421,29 +434,31 @@ xfs_attr_inactive(xfs_inode_t *dp)
18711 - */
18712 - xfs_trans_ijoin(trans, dp, 0);
18713 -
18714 -- /*
18715 -- * Decide on what work routines to call based on the inode size.
18716 -- */
18717 -- if (!xfs_inode_hasattr(dp) ||
18718 -- dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
18719 -- error = 0;
18720 -- goto out;
18721 -+ /* invalidate and truncate the attribute fork extents */
18722 -+ if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
18723 -+ error = xfs_attr3_root_inactive(&trans, dp);
18724 -+ if (error)
18725 -+ goto out_cancel;
18726 -+
18727 -+ error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
18728 -+ if (error)
18729 -+ goto out_cancel;
18730 - }
18731 -- error = xfs_attr3_root_inactive(&trans, dp);
18732 -- if (error)
18733 -- goto out;
18734 -
18735 -- error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
18736 -- if (error)
18737 -- goto out;
18738 -+ /* Reset the attribute fork - this also destroys the in-core fork */
18739 -+ xfs_attr_fork_remove(dp, trans);
18740 -
18741 - error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
18742 -- xfs_iunlock(dp, XFS_ILOCK_EXCL);
18743 --
18744 -+ xfs_iunlock(dp, lock_mode);
18745 - return error;
18746 -
18747 --out:
18748 -- xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
18749 -- xfs_iunlock(dp, XFS_ILOCK_EXCL);
18750 -+out_cancel:
18751 -+ xfs_trans_cancel(trans, cancel_flags);
18752 -+out_destroy_fork:
18753 -+ /* kill the in-core attr fork before we drop the inode lock */
18754 -+ if (dp->i_afp)
18755 -+ xfs_idestroy_fork(dp, XFS_ATTR_FORK);
18756 -+ if (lock_mode)
18757 -+ xfs_iunlock(dp, lock_mode);
18758 - return error;
18759 - }
18760 -diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
18761 -index a2e1cb8a568b..f3ba637a8ece 100644
18762 ---- a/fs/xfs/xfs_file.c
18763 -+++ b/fs/xfs/xfs_file.c
18764 -@@ -125,7 +125,7 @@ xfs_iozero(
18765 - status = 0;
18766 - } while (count);
18767 -
18768 -- return (-status);
18769 -+ return status;
18770 - }
18771 -
18772 - int
18773 -diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
18774 -index 6163767aa856..b1edda7890f4 100644
18775 ---- a/fs/xfs/xfs_inode.c
18776 -+++ b/fs/xfs/xfs_inode.c
18777 -@@ -1889,21 +1889,17 @@ xfs_inactive(
18778 - /*
18779 - * If there are attributes associated with the file then blow them away
18780 - * now. The code calls a routine that recursively deconstructs the
18781 -- * attribute fork. We need to just commit the current transaction
18782 -- * because we can't use it for xfs_attr_inactive().
17783 -+ * attribute fork. It also blows away the in-core attribute fork.
18784 - */
18785 -- if (ip->i_d.di_anextents > 0) {
18786 -- ASSERT(ip->i_d.di_forkoff != 0);
18787 --
18788 -+ if (XFS_IFORK_Q(ip)) {
18789 - error = xfs_attr_inactive(ip);
18790 - if (error)
18791 - return;
18792 - }
18793 -
18794 -- if (ip->i_afp)
18795 -- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
18796 --
18797 -+ ASSERT(!ip->i_afp);
18798 - ASSERT(ip->i_d.di_anextents == 0);
18799 -+ ASSERT(ip->i_d.di_forkoff == 0);
18800 -
18801 - /*
18802 - * Free the inode.
18803 -diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
18804 -index 2dd405c9be78..45c39a37f924 100644
18805 ---- a/include/drm/drm_pciids.h
18806 -+++ b/include/drm/drm_pciids.h
18807 -@@ -186,6 +186,7 @@
18808 - {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
18809 - {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
18810 - {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
18811 -+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
18812 - {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
18813 - {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
18814 - {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
18815 -diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
18816 -index 9dc4e0384bfb..3886b3bffd7f 100644
18817 ---- a/include/linux/fs_pin.h
18818 -+++ b/include/linux/fs_pin.h
18819 -@@ -13,6 +13,8 @@ struct vfsmount;
18820 - static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
18821 - {
18822 - init_waitqueue_head(&p->wait);
18823 -+ INIT_HLIST_NODE(&p->s_list);
18824 -+ INIT_HLIST_NODE(&p->m_list);
18825 - p->kill = kill;
18826 - }
18827 -
18828 -diff --git a/include/linux/gfp.h b/include/linux/gfp.h
18829 -index 51bd1e72a917..eb6fafe66bec 100644
18830 ---- a/include/linux/gfp.h
18831 -+++ b/include/linux/gfp.h
18832 -@@ -30,6 +30,7 @@ struct vm_area_struct;
18833 - #define ___GFP_HARDWALL 0x20000u
18834 - #define ___GFP_THISNODE 0x40000u
18835 - #define ___GFP_RECLAIMABLE 0x80000u
18836 -+#define ___GFP_NOACCOUNT 0x100000u
18837 - #define ___GFP_NOTRACK 0x200000u
18838 - #define ___GFP_NO_KSWAPD 0x400000u
18839 - #define ___GFP_OTHER_NODE 0x800000u
18840 -@@ -85,6 +86,7 @@ struct vm_area_struct;
18841 - #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
18842 - #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
18843 - #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
18844 -+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
18845 - #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
18846 -
18847 - #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
18848 -diff --git a/include/linux/ktime.h b/include/linux/ktime.h
18849 -index 5fc3d1083071..2b6a204bd8d4 100644
18850 ---- a/include/linux/ktime.h
18851 -+++ b/include/linux/ktime.h
18852 -@@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
18853 - }
18854 -
18855 - #if BITS_PER_LONG < 64
18856 --extern u64 __ktime_divns(const ktime_t kt, s64 div);
18857 --static inline u64 ktime_divns(const ktime_t kt, s64 div)
18858 -+extern s64 __ktime_divns(const ktime_t kt, s64 div);
18859 -+static inline s64 ktime_divns(const ktime_t kt, s64 div)
18860 - {
18861 -+ /*
18862 -+	 * Negative divisors could cause an infinite loop,
18863 -+ * so bug out here.
18864 -+ */
18865 -+ BUG_ON(div < 0);
18866 - if (__builtin_constant_p(div) && !(div >> 32)) {
18867 -- u64 ns = kt.tv64;
18868 -- do_div(ns, div);
18869 -- return ns;
18870 -+ s64 ns = kt.tv64;
18871 -+ u64 tmp = ns < 0 ? -ns : ns;
18872 -+
18873 -+ do_div(tmp, div);
18874 -+ return ns < 0 ? -tmp : tmp;
18875 - } else {
18876 - return __ktime_divns(kt, div);
18877 - }
18878 - }
18879 - #else /* BITS_PER_LONG < 64 */
18880 --# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
18881 -+static inline s64 ktime_divns(const ktime_t kt, s64 div)
18882 -+{
18883 -+ /*
18884 -+ * 32-bit implementation cannot handle negative divisors,
18885 -+	 * A 32-bit implementation cannot handle negative divisors,
18886 -+	 * so catch them on 64-bit as well.
18887 -+ WARN_ON(div < 0);
18888 -+ return kt.tv64 / div;
18889 -+}
18890 - #endif
18891 -
18892 - static inline s64 ktime_to_us(const ktime_t kt)
18893 -diff --git a/include/linux/libata.h b/include/linux/libata.h
18894 -index 6b08cc106c21..f8994b4b122c 100644
18895 ---- a/include/linux/libata.h
18896 -+++ b/include/linux/libata.h
18897 -@@ -205,6 +205,7 @@ enum {
18898 - ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
18899 - ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
18900 - ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
18901 -+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
18902 -
18903 - /* struct ata_port flags */
18904 - ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
18905 -@@ -310,6 +311,12 @@ enum {
18906 - */
18907 - ATA_TMOUT_PMP_SRST_WAIT = 5000,
18908 -
18909 -+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
18910 -+ * be a spurious PHY event, so ignore the first PHY event that
18911 -+ * occurs within 10s after the policy change.
18912 -+ */
18913 -+ ATA_TMOUT_SPURIOUS_PHY = 10000,
18914 -+
18915 - /* ATA bus states */
18916 - BUS_UNKNOWN = 0,
18917 - BUS_DMA = 1,
18918 -@@ -789,6 +796,8 @@ struct ata_link {
18919 - struct ata_eh_context eh_context;
18920 -
18921 - struct ata_device device[ATA_MAX_DEVICES];
18922 -+
18923 -+ unsigned long last_lpm_change; /* when last LPM change happened */
18924 - };
18925 - #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
18926 - #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
18927 -@@ -1202,6 +1211,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
18928 - extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
18929 - extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
18930 - extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
18931 -+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
18932 -
18933 - extern int ata_cable_40wire(struct ata_port *ap);
18934 - extern int ata_cable_80wire(struct ata_port *ap);
18935 -diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
18936 -index 72dff5fb0d0c..6c8918114804 100644
18937 ---- a/include/linux/memcontrol.h
18938 -+++ b/include/linux/memcontrol.h
18939 -@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
18940 - if (!memcg_kmem_enabled())
18941 - return true;
18942 -
18943 -+ if (gfp & __GFP_NOACCOUNT)
18944 -+ return true;
18945 - /*
18946 - * __GFP_NOFAIL allocations will move on even if charging is not
18947 - * possible. Therefore we don't even try, and have this allocation
18948 -@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
18949 - {
18950 - if (!memcg_kmem_enabled())
18951 - return cachep;
18952 -+ if (gfp & __GFP_NOACCOUNT)
18953 -+ return cachep;
18954 - if (gfp & __GFP_NOFAIL)
18955 - return cachep;
18956 - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
18957 -diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
18958 -index 6341f5be6e24..a30b172df6e1 100644
18959 ---- a/include/linux/sched/rt.h
18960 -+++ b/include/linux/sched/rt.h
18961 -@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
18962 - #ifdef CONFIG_RT_MUTEXES
18963 - extern int rt_mutex_getprio(struct task_struct *p);
18964 - extern void rt_mutex_setprio(struct task_struct *p, int prio);
18965 --extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
18966 -+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
18967 - extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
18968 - extern void rt_mutex_adjust_pi(struct task_struct *p);
18969 - static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
18970 -@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
18971 - return p->normal_prio;
18972 - }
18973 -
18974 --static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
18975 -+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
18976 -+ int newprio)
18977 - {
18978 -- return 0;
18979 -+ return newprio;
18980 - }
18981 -
18982 - static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
18983 -diff --git a/include/linux/tty.h b/include/linux/tty.h
18984 -index 358a337af598..790752ac074a 100644
18985 ---- a/include/linux/tty.h
18986 -+++ b/include/linux/tty.h
18987 -@@ -339,6 +339,7 @@ struct tty_file_private {
18988 - #define TTY_EXCLUSIVE 3 /* Exclusive open mode */
18989 - #define TTY_DEBUG 4 /* Debugging */
18990 - #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
18991 -+#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
18992 - #define TTY_LDISC_OPEN 11 /* Line discipline is open */
18993 - #define TTY_PTY_LOCK 16 /* pty private */
18994 - #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
18995 -@@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp);
18996 - extern void do_SAK(struct tty_struct *tty);
18997 - extern void __do_SAK(struct tty_struct *tty);
18998 - extern void no_tty(void);
18999 --extern void tty_flush_to_ldisc(struct tty_struct *tty);
19000 - extern void tty_buffer_free_all(struct tty_port *port);
19001 - extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
19002 - extern void tty_buffer_init(struct tty_port *port);
19003 -diff --git a/include/xen/events.h b/include/xen/events.h
19004 -index 5321cd9636e6..7d95fdf9cf3e 100644
19005 ---- a/include/xen/events.h
19006 -+++ b/include/xen/events.h
19007 -@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
19008 - irq_handler_t handler,
19009 - unsigned long irqflags, const char *devname,
19010 - void *dev_id);
19011 --int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
19012 -+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
19013 - int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
19014 - irq_handler_t handler,
19015 - unsigned long irqflags, const char *devname,
19016 -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
19017 -index 6357265a31ad..ce9108c059fb 100644
19018 ---- a/kernel/locking/rtmutex.c
19019 -+++ b/kernel/locking/rtmutex.c
19020 -@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
19021 - }
19022 -
19023 - /*
19024 -- * Called by sched_setscheduler() to check whether the priority change
19025 -- * is overruled by a possible priority boosting.
19026 -+ * Called by sched_setscheduler() to get the priority which will be
19027 -+ * effective after the change.
19028 - */
19029 --int rt_mutex_check_prio(struct task_struct *task, int newprio)
19030 -+int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
19031 - {
19032 - if (!task_has_pi_waiters(task))
19033 -- return 0;
19034 -+ return newprio;
19035 -
19036 -- return task_top_pi_waiter(task)->task->prio <= newprio;
19037 -+ if (task_top_pi_waiter(task)->task->prio <= newprio)
19038 -+ return task_top_pi_waiter(task)->task->prio;
19039 -+ return newprio;
19040 - }
19041 -
19042 - /*
19043 -diff --git a/kernel/module.c b/kernel/module.c
19044 -index ec53f594e9c9..538794ce3cc7 100644
19045 ---- a/kernel/module.c
19046 -+++ b/kernel/module.c
19047 -@@ -3366,6 +3366,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
19048 - module_bug_cleanup(mod);
19049 - mutex_unlock(&module_mutex);
19050 -
19051 -+ blocking_notifier_call_chain(&module_notify_list,
19052 -+ MODULE_STATE_GOING, mod);
19053 -+
19054 - /* we can't deallocate the module until we clear memory protection */
19055 - unset_module_init_ro_nx(mod);
19056 - unset_module_core_ro_nx(mod);
19057 -diff --git a/kernel/sched/core.c b/kernel/sched/core.c
19058 -index 3d5f6f6d14c2..f4da2cbbfd7f 100644
19059 ---- a/kernel/sched/core.c
19060 -+++ b/kernel/sched/core.c
19061 -@@ -3295,15 +3295,18 @@ static void __setscheduler_params(struct task_struct *p,
19062 -
19063 - /* Actually do priority change: must hold pi & rq lock. */
19064 - static void __setscheduler(struct rq *rq, struct task_struct *p,
19065 -- const struct sched_attr *attr)
19066 -+ const struct sched_attr *attr, bool keep_boost)
19067 - {
19068 - __setscheduler_params(p, attr);
19069 -
19070 - /*
19071 -- * If we get here, there was no pi waiters boosting the
19072 -- * task. It is safe to use the normal prio.
19073 -+ * Keep a potential priority boosting if called from
19074 -+ * sched_setscheduler().
19075 - */
19076 -- p->prio = normal_prio(p);
19077 -+ if (keep_boost)
19078 -+ p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
19079 -+ else
19080 -+ p->prio = normal_prio(p);
19081 -
19082 - if (dl_prio(p->prio))
19083 - p->sched_class = &dl_sched_class;
19084 -@@ -3403,7 +3406,7 @@ static int __sched_setscheduler(struct task_struct *p,
19085 - int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
19086 - MAX_RT_PRIO - 1 - attr->sched_priority;
19087 - int retval, oldprio, oldpolicy = -1, queued, running;
19088 -- int policy = attr->sched_policy;
19089 -+ int new_effective_prio, policy = attr->sched_policy;
19090 - unsigned long flags;
19091 - const struct sched_class *prev_class;
19092 - struct rq *rq;
19093 -@@ -3585,15 +3588,14 @@ change:
19094 - oldprio = p->prio;
19095 -
19096 - /*
19097 -- * Special case for priority boosted tasks.
19098 -- *
19099 -- * If the new priority is lower or equal (user space view)
19100 -- * than the current (boosted) priority, we just store the new
19101 -+ * Take priority boosted tasks into account. If the new
19102 -+ * effective priority is unchanged, we just store the new
19103 - * normal parameters and do not touch the scheduler class and
19104 - * the runqueue. This will be done when the task deboosts
19105 - * itself.
19106 - */
19107 -- if (rt_mutex_check_prio(p, newprio)) {
19108 -+ new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
19109 -+ if (new_effective_prio == oldprio) {
19110 - __setscheduler_params(p, attr);
19111 - task_rq_unlock(rq, p, &flags);
19112 - return 0;
19113 -@@ -3607,7 +3609,7 @@ change:
19114 - put_prev_task(rq, p);
19115 -
19116 - prev_class = p->sched_class;
19117 -- __setscheduler(rq, p, attr);
19118 -+ __setscheduler(rq, p, attr, true);
19119 -
19120 - if (running)
19121 - p->sched_class->set_curr_task(rq);
19122 -@@ -4382,10 +4384,7 @@ long __sched io_schedule_timeout(long timeout)
19123 - long ret;
19124 -
19125 - current->in_iowait = 1;
19126 -- if (old_iowait)
19127 -- blk_schedule_flush_plug(current);
19128 -- else
19129 -- blk_flush_plug(current);
19130 -+ blk_schedule_flush_plug(current);
19131 -
19132 - delayacct_blkio_start();
19133 - rq = raw_rq();
19134 -@@ -7357,7 +7356,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
19135 - queued = task_on_rq_queued(p);
19136 - if (queued)
19137 - dequeue_task(rq, p, 0);
19138 -- __setscheduler(rq, p, &attr);
19139 -+ __setscheduler(rq, p, &attr, false);
19140 - if (queued) {
19141 - enqueue_task(rq, p, 0);
19142 - resched_curr(rq);
19143 -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
19144 -index bee0c1f78091..38f586c076fe 100644
19145 ---- a/kernel/time/hrtimer.c
19146 -+++ b/kernel/time/hrtimer.c
19147 -@@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
19148 - /*
19149 - * Divide a ktime value by a nanosecond value
19150 - */
19151 --u64 __ktime_divns(const ktime_t kt, s64 div)
19152 -+s64 __ktime_divns(const ktime_t kt, s64 div)
19153 - {
19154 -- u64 dclc;
19155 - int sft = 0;
19156 -+ s64 dclc;
19157 -+ u64 tmp;
19158 -
19159 - dclc = ktime_to_ns(kt);
19160 -+ tmp = dclc < 0 ? -dclc : dclc;
19161 -+
19162 - /* Make sure the divisor is less than 2^32: */
19163 - while (div >> 32) {
19164 - sft++;
19165 - div >>= 1;
19166 - }
19167 -- dclc >>= sft;
19168 -- do_div(dclc, (unsigned long) div);
19169 --
19170 -- return dclc;
19171 -+ tmp >>= sft;
19172 -+ do_div(tmp, (unsigned long) div);
19173 -+ return dclc < 0 ? -tmp : tmp;
19174 - }
19175 - EXPORT_SYMBOL_GPL(__ktime_divns);
19176 - #endif /* BITS_PER_LONG >= 64 */
19177 -diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
19178 -index a28df5206d95..11649615c505 100644
19179 ---- a/lib/strnlen_user.c
19180 -+++ b/lib/strnlen_user.c
19181 -@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
19182 - return res + find_zero(data) + 1 - align;
19183 - }
19184 - res += sizeof(unsigned long);
19185 -- if (unlikely(max < sizeof(unsigned long)))
19186 -+	/* We already handled 'unsigned long' bytes. Did we do it all? */
19187 -+ if (unlikely(max <= sizeof(unsigned long)))
19188 - break;
19189 - max -= sizeof(unsigned long);
19190 - if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
19191 -diff --git a/mm/kmemleak.c b/mm/kmemleak.c
19192 -index 5405aff5a590..f0fe4f2c1fa7 100644
19193 ---- a/mm/kmemleak.c
19194 -+++ b/mm/kmemleak.c
19195 -@@ -115,7 +115,8 @@
19196 - #define BYTES_PER_POINTER sizeof(void *)
19197 -
19198 - /* GFP bitmask for kmemleak internal allocations */
19199 --#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
19200 -+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
19201 -+ __GFP_NOACCOUNT)) | \
19202 - __GFP_NORETRY | __GFP_NOMEMALLOC | \
19203 - __GFP_NOWARN)
19204 -
19205 -diff --git a/mm/mempolicy.c b/mm/mempolicy.c
19206 -index de5dc5e12691..0f7d73b3e4b1 100644
19207 ---- a/mm/mempolicy.c
19208 -+++ b/mm/mempolicy.c
19209 -@@ -2517,7 +2517,7 @@ static void __init check_numabalancing_enable(void)
19210 - if (numabalancing_override)
19211 - set_numabalancing_state(numabalancing_override == 1);
19212 -
19213 -- if (nr_node_ids > 1 && !numabalancing_override) {
19214 -+ if (num_online_nodes() > 1 && !numabalancing_override) {
19215 - pr_info("%s automatic NUMA balancing. "
19216 - "Configure with numa_balancing= or the "
19217 - "kernel.numa_balancing sysctl",
19218 -diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
19219 -index 41a4abc7e98e..c4ec9239249a 100644
19220 ---- a/net/ceph/osd_client.c
19221 -+++ b/net/ceph/osd_client.c
19222 -@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
19223 - if (list_empty(&req->r_osd_item))
19224 - req->r_osd = NULL;
19225 - }
19226 --
19227 -- list_del_init(&req->r_req_lru_item); /* can be on notarget */
19228 - ceph_osdc_put_request(req);
19229 - }
19230 -
19231 -@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
19232 - err = __map_request(osdc, req,
19233 - force_resend || force_resend_writes);
19234 - dout("__map_request returned %d\n", err);
19235 -- if (err == 0)
19236 -- continue; /* no change and no osd was specified */
19237 - if (err < 0)
19238 - continue; /* hrm! */
19239 -- if (req->r_osd == NULL) {
19240 -- dout("tid %llu maps to no valid osd\n", req->r_tid);
19241 -- needmap++; /* request a newer map */
19242 -- continue;
19243 -- }
19244 -+ if (req->r_osd == NULL || err > 0) {
19245 -+ if (req->r_osd == NULL) {
19246 -+ dout("lingering %p tid %llu maps to no osd\n",
19247 -+ req, req->r_tid);
19248 -+ /*
19249 -+ * A homeless lingering request makes
19250 -+			 * no sense, as its job is to keep
19251 -+ * a particular OSD connection open.
19252 -+ * Request a newer map and kick the
19253 -+ * request, knowing that it won't be
19254 -+ * resent until we actually get a map
19255 -+ * that can tell us where to send it.
19256 -+ */
19257 -+ needmap++;
19258 -+ }
19259 -
19260 -- dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
19261 -- req->r_osd ? req->r_osd->o_osd : -1);
19262 -- __register_request(osdc, req);
19263 -- __unregister_linger_request(osdc, req);
19264 -+ dout("kicking lingering %p tid %llu osd%d\n", req,
19265 -+ req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
19266 -+ __register_request(osdc, req);
19267 -+ __unregister_linger_request(osdc, req);
19268 -+ }
19269 - }
19270 - reset_changed_osds(osdc);
19271 - mutex_unlock(&osdc->request_mutex);
19272 -diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
19273 -index 8d53d65bd2ab..81e8dc5cb7f9 100644
19274 ---- a/net/mac80211/ieee80211_i.h
19275 -+++ b/net/mac80211/ieee80211_i.h
19276 -@@ -204,6 +204,8 @@ enum ieee80211_packet_rx_flags {
19277 - * @IEEE80211_RX_CMNTR: received on cooked monitor already
19278 - * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
19279 - * to cfg80211_report_obss_beacon().
19280 -+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
19281 -+ * reorder buffer timeout timer, not the normal RX path
19282 - *
19283 - * These flags are used across handling multiple interfaces
19284 - * for a single frame.
19285 -@@ -211,6 +213,7 @@ enum ieee80211_packet_rx_flags {
19286 - enum ieee80211_rx_flags {
19287 - IEEE80211_RX_CMNTR = BIT(0),
19288 - IEEE80211_RX_BEACON_REPORTED = BIT(1),
19289 -+ IEEE80211_RX_REORDER_TIMER = BIT(2),
19290 - };
19291 -
19292 - struct ieee80211_rx_data {
19293 -diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
19294 -index 1eb730bf8752..4c887d053333 100644
19295 ---- a/net/mac80211/rx.c
19296 -+++ b/net/mac80211/rx.c
19297 -@@ -2106,7 +2106,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
19298 - /* deliver to local stack */
19299 - skb->protocol = eth_type_trans(skb, dev);
19300 - memset(skb->cb, 0, sizeof(skb->cb));
19301 -- if (rx->local->napi)
19302 -+ if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
19303 -+ rx->local->napi)
19304 - napi_gro_receive(rx->local->napi, skb);
19305 - else
19306 - netif_receive_skb(skb);
19307 -@@ -3215,7 +3216,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
19308 - /* This is OK -- must be QoS data frame */
19309 - .security_idx = tid,
19310 - .seqno_idx = tid,
19311 -- .flags = 0,
19312 -+ .flags = IEEE80211_RX_REORDER_TIMER,
19313 - };
19314 - struct tid_ampdu_rx *tid_agg_rx;
19315 -
19316 -diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
19317 -index a4220e92f0cc..efa3f48f1ec5 100644
19318 ---- a/net/mac80211/wep.c
19319 -+++ b/net/mac80211/wep.c
19320 -@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
19321 -
19322 - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
19323 -
19324 -- if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
19325 -- skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
19326 -+ if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
19327 - return NULL;
19328 -
19329 - hdrlen = ieee80211_hdrlen(hdr->frame_control);
19330 -@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
19331 - size_t len;
19332 - u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
19333 -
19334 -+ if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
19335 -+ return -1;
19336 -+
19337 - iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
19338 - if (!iv)
19339 - return -1;
19340 -diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
19341 -index 1ec19f6f0c2b..eeeba5adee6d 100644
19342 ---- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
19343 -+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
19344 -@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
19345 - {
19346 - u32 value_follows;
19347 - int err;
19348 -+ struct page *scratch;
19349 -+
19350 -+ scratch = alloc_page(GFP_KERNEL);
19351 -+ if (!scratch)
19352 -+ return -ENOMEM;
19353 -+ xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
19354 -
19355 - /* res->status */
19356 - err = gssx_dec_status(xdr, &res->status);
19357 - if (err)
19358 -- return err;
19359 -+ goto out_free;
19360 -
19361 - /* res->context_handle */
19362 - err = gssx_dec_bool(xdr, &value_follows);
19363 - if (err)
19364 -- return err;
19365 -+ goto out_free;
19366 - if (value_follows) {
19367 - err = gssx_dec_ctx(xdr, res->context_handle);
19368 - if (err)
19369 -- return err;
19370 -+ goto out_free;
19371 - } else {
19372 - res->context_handle = NULL;
19373 - }
19374 -@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
19375 - /* res->output_token */
19376 - err = gssx_dec_bool(xdr, &value_follows);
19377 - if (err)
19378 -- return err;
19379 -+ goto out_free;
19380 - if (value_follows) {
19381 - err = gssx_dec_buffer(xdr, res->output_token);
19382 - if (err)
19383 -- return err;
19384 -+ goto out_free;
19385 - } else {
19386 - res->output_token = NULL;
19387 - }
19388 -@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
19389 - /* res->delegated_cred_handle */
19390 - err = gssx_dec_bool(xdr, &value_follows);
19391 - if (err)
19392 -- return err;
19393 -+ goto out_free;
19394 - if (value_follows) {
19395 - /* we do not support upcall servers sending this data. */
19396 -- return -EINVAL;
19397 -+ err = -EINVAL;
19398 -+ goto out_free;
19399 - }
19400 -
19401 - /* res->options */
19402 - err = gssx_dec_option_array(xdr, &res->options);
19403 -
19404 -+out_free:
19405 -+ __free_page(scratch);
19406 - return err;
19407 - }
19408 -diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
19409 -index a8a1e14272a1..a002a6d1e6da 100644
19410 ---- a/sound/pci/hda/hda_intel.c
19411 -+++ b/sound/pci/hda/hda_intel.c
19412 -@@ -2108,6 +2108,8 @@ static const struct pci_device_id azx_ids[] = {
19413 - .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
19414 - { PCI_DEVICE(0x1002, 0xaab0),
19415 - .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
19416 -+ { PCI_DEVICE(0x1002, 0xaac8),
19417 -+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
19418 - /* VIA VT8251/VT8237A */
19419 - { PCI_DEVICE(0x1106, 0x3288),
19420 - .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
19421 -diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
19422 -index da67ea8645a6..e27298bdcd6d 100644
19423 ---- a/sound/pci/hda/patch_conexant.c
19424 -+++ b/sound/pci/hda/patch_conexant.c
19425 -@@ -973,6 +973,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
19426 - .patch = patch_conexant_auto },
19427 - { .id = 0x14f150b9, .name = "CX20665",
19428 - .patch = patch_conexant_auto },
19429 -+ { .id = 0x14f150f1, .name = "CX20721",
19430 -+ .patch = patch_conexant_auto },
19431 -+ { .id = 0x14f150f2, .name = "CX20722",
19432 -+ .patch = patch_conexant_auto },
19433 -+ { .id = 0x14f150f3, .name = "CX20723",
19434 -+ .patch = patch_conexant_auto },
19435 -+ { .id = 0x14f150f4, .name = "CX20724",
19436 -+ .patch = patch_conexant_auto },
19437 - { .id = 0x14f1510f, .name = "CX20751/2",
19438 - .patch = patch_conexant_auto },
19439 - { .id = 0x14f15110, .name = "CX20751/2",
19440 -@@ -1007,6 +1015,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
19441 - MODULE_ALIAS("snd-hda-codec-id:14f150ac");
19442 - MODULE_ALIAS("snd-hda-codec-id:14f150b8");
19443 - MODULE_ALIAS("snd-hda-codec-id:14f150b9");
19444 -+MODULE_ALIAS("snd-hda-codec-id:14f150f1");
19445 -+MODULE_ALIAS("snd-hda-codec-id:14f150f2");
19446 -+MODULE_ALIAS("snd-hda-codec-id:14f150f3");
19447 -+MODULE_ALIAS("snd-hda-codec-id:14f150f4");
19448 - MODULE_ALIAS("snd-hda-codec-id:14f1510f");
19449 - MODULE_ALIAS("snd-hda-codec-id:14f15110");
19450 - MODULE_ALIAS("snd-hda-codec-id:14f15111");
19451 -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
19452 -index 2fd490b1764b..93c78c3c4b95 100644
19453 ---- a/sound/pci/hda/patch_realtek.c
19454 -+++ b/sound/pci/hda/patch_realtek.c
19455 -@@ -5027,6 +5027,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19456 - SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
19457 - SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
19458 - SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
19459 -+ SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
19460 - SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
19461 - SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
19462 - SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
19463 -@@ -5056,6 +5057,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19464 - SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
19465 - SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
19466 - SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
19467 -+ SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
19468 - SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
19469 - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
19470 - SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
19471 -@@ -5246,6 +5248,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
19472 - {0x17, 0x40000000},
19473 - {0x1d, 0x40700001},
19474 - {0x21, 0x02211050}),
19475 -+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
19476 -+ ALC255_STANDARD_PINS,
19477 -+ {0x12, 0x90a60180},
19478 -+ {0x14, 0x90170130},
19479 -+ {0x17, 0x40000000},
19480 -+ {0x1d, 0x40700001},
19481 -+ {0x21, 0x02211040}),
19482 - SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
19483 - ALC256_STANDARD_PINS,
19484 - {0x13, 0x40000000}),
19485 -diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
19486 -index 2341fc334163..6ba0b5517c40 100644
19487 ---- a/sound/pci/hda/thinkpad_helper.c
19488 -+++ b/sound/pci/hda/thinkpad_helper.c
19489 -@@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
19490 - if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
19491 - old_vmaster_hook = spec->vmaster_mute.hook;
19492 - spec->vmaster_mute.hook = update_tpacpi_mute_led;
19493 -- spec->vmaster_mute_enum = 1;
19494 - removefunc = false;
19495 - }
19496 - if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
19497 -diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
19498 -index 2ffb9a0570dc..3d44fc50e4d0 100644
19499 ---- a/sound/soc/codecs/mc13783.c
19500 -+++ b/sound/soc/codecs/mc13783.c
19501 -@@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
19502 - AUDIO_SSI_SEL, 0);
19503 - else
19504 - mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
19505 -- 0, AUDIO_SSI_SEL);
19506 -+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
19507 -
19508 - if (priv->dac_ssi_port == MC13783_SSI1_PORT)
19509 - mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
19510 - AUDIO_SSI_SEL, 0);
19511 - else
19512 - mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
19513 -- 0, AUDIO_SSI_SEL);
19514 -+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
19515 -
19516 - return 0;
19517 - }
19518 -diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
19519 -index dc7778b6dd7f..c3c33bd0df1c 100644
19520 ---- a/sound/soc/codecs/uda1380.c
19521 -+++ b/sound/soc/codecs/uda1380.c
19522 -@@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai,
19523 - if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
19524 - return -EINVAL;
19525 -
19526 -- uda1380_write(codec, UDA1380_IFACE, iface);
19527 -+ uda1380_write_reg_cache(codec, UDA1380_IFACE, iface);
19528 -
19529 - return 0;
19530 - }
19531 -diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
19532 -index 3035d9856415..e97a7615df85 100644
19533 ---- a/sound/soc/codecs/wm8960.c
19534 -+++ b/sound/soc/codecs/wm8960.c
19535 -@@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
19536 - { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
19537 - { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
19538 - { "Right Input Mixer", NULL, "RINPUT2" },
19539 -- { "Right Input Mixer", NULL, "LINPUT3" },
19540 -+ { "Right Input Mixer", NULL, "RINPUT3" },
19541 -
19542 - { "Left ADC", NULL, "Left Input Mixer" },
19543 - { "Right ADC", NULL, "Right Input Mixer" },
19544 -diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
19545 -index 4fbc7689339a..a1c04dab6684 100644
19546 ---- a/sound/soc/codecs/wm8994.c
19547 -+++ b/sound/soc/codecs/wm8994.c
19548 -@@ -2754,7 +2754,7 @@ static struct {
19549 - };
19550 -
19551 - static int fs_ratios[] = {
19552 -- 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
19553 -+ 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
19554 - };
19555 -
19556 - static int bclk_divs[] = {
19557 -diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
19558 -index b6f88202b8c9..e19a6765bd8a 100644
19559 ---- a/sound/soc/soc-dapm.c
19560 -+++ b/sound/soc/soc-dapm.c
19561 -@@ -3074,11 +3074,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
19562 - }
19563 -
19564 - prefix = soc_dapm_prefix(dapm);
19565 -- if (prefix)
19566 -+ if (prefix) {
19567 - w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
19568 -- else
19569 -+ if (widget->sname)
19570 -+ w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
19571 -+ widget->sname);
19572 -+ } else {
19573 - w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
19574 --
19575 -+ if (widget->sname)
19576 -+ w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
19577 -+ }
19578 - if (w->name == NULL) {
19579 - kfree(w);
19580 - return NULL;
19581 -diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
19582 -index 32631a86078b..e21ec5abcc3a 100644
19583 ---- a/sound/usb/quirks.c
19584 -+++ b/sound/usb/quirks.c
19585 -@@ -1117,6 +1117,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
19586 - switch (chip->usb_id) {
19587 - case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
19588 - case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
19589 -+ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
19590 -+ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
19591 - case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
19592 - return true;
19593 - }
19594 -diff --git a/tools/vm/Makefile b/tools/vm/Makefile
19595 -index ac884b65a072..93aadaf7ff63 100644
19596 ---- a/tools/vm/Makefile
19597 -+++ b/tools/vm/Makefile
19598 -@@ -3,7 +3,7 @@
19599 - TARGETS=page-types slabinfo page_owner_sort
19600 -
19601 - LIB_DIR = ../lib/api
19602 --LIBS = $(LIB_DIR)/libapikfs.a
19603 -+LIBS = $(LIB_DIR)/libapi.a
19604 -
19605 - CC = $(CROSS_COMPILE)gcc
19606 - CFLAGS = -Wall -Wextra -I../lib/
19607
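Of the 4.0.1 hunks above, the rtmutex/scheduler change is the subtlest: rt_mutex_check_prio(), a boolean test, becomes rt_mutex_get_effective_prio(), which computes the priority a task will actually have after sched_setscheduler(). Below is a minimal standalone sketch of that logic; the function name and the test harness are illustrative, not kernel code, and as in the kernel a lower numeric value means a higher priority.

#include <stdio.h>

/*
 * Illustrative model of rt_mutex_get_effective_prio() from the hunk
 * above: a PI-boosted task keeps its top waiter's priority whenever
 * that priority outranks (is numerically <=) the requested one.
 */
static int effective_prio(int has_pi_waiters, int top_waiter_prio, int newprio)
{
	if (!has_pi_waiters)
		return newprio;
	if (top_waiter_prio <= newprio)
		return top_waiter_prio;
	return newprio;
}

int main(void)
{
	/* Boosted by a waiter at prio 10, user asks for 50: boost is kept. */
	printf("%d\n", effective_prio(1, 10, 50));	/* prints 10 */
	/* User asks for 5, which outranks the booster: request wins. */
	printf("%d\n", effective_prio(1, 10, 5));	/* prints 5 */
	return 0;
}

__sched_setscheduler() then compares this effective priority with the old one and skips dequeue/requeue when nothing would change, which is exactly what the new_effective_prio == oldprio test in the hunk does.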
19608 diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
19609 deleted file mode 100644
19610 index cc15cd5..0000000
19611 --- a/1500_XATTR_USER_PREFIX.patch
19612 +++ /dev/null
19613 @@ -1,54 +0,0 @@
19614 -From: Anthony G. Basile <blueness@g.o>
19615 -
19616 -This patch adds support for a restricted user-controlled namespace on
19617 -the tmpfs filesystem, used to house PaX flags. The namespace must be of
19618 -the form user.pax.* and its value cannot exceed 8 bytes.
19619 -
19620 -This is needed on all Gentoo systems, including non-hardened ones, so
19621 -that XATTR_PAX flags are preserved for users who build packages with
19622 -portage on a tmpfs filesystem under a non-hardened kernel and then
19623 -switch to a hardened kernel with XATTR_PAX enabled.
19624 -
19625 -The namespace is available to any user with Extended Attribute support
19626 -enabled for tmpfs. Users who do not enable xattrs will not have their
19627 -XATTR_PAX flags preserved. (A minimal usage sketch follows this patch.)
19628 -
19629 -diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
19630 -index e4629b9..6958086 100644
19631 ---- a/include/uapi/linux/xattr.h
19632 -+++ b/include/uapi/linux/xattr.h
19633 -@@ -63,5 +63,9 @@
19634 - #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
19635 - #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
19636 -
19637 -+/* User namespace */
19638 -+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
19639 -+#define XATTR_PAX_FLAGS_SUFFIX "flags"
19640 -+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
19641 -
19642 - #endif /* _UAPI_LINUX_XATTR_H */
19643 -diff --git a/mm/shmem.c b/mm/shmem.c
19644 -index 1c44af7..f23bb1b 100644
19645 ---- a/mm/shmem.c
19646 -+++ b/mm/shmem.c
19647 -@@ -2201,6 +2201,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
19648 - static int shmem_xattr_validate(const char *name)
19649 - {
19650 - struct { const char *prefix; size_t len; } arr[] = {
19651 -+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
19652 - { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
19653 - { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
19654 - };
19655 -@@ -2256,6 +2257,12 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
19656 - if (err)
19657 - return err;
19658 -
19659 -+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
19660 -+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
19661 -+ return -EOPNOTSUPP;
19662 -+ if (size > 8)
19663 -+ return -EINVAL;
19664 -+ }
19665 - return simple_xattr_set(&info->xattrs, name, value, size, flags);
19666 - }
19667 -
19668
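For reference, a minimal userspace sketch of what the patch above allows: setting and reading a user.pax.* attribute of at most 8 bytes on a tmpfs file. setxattr(2) and getxattr(2) are the standard Linux calls; the path and flag string here are made up for illustration.

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	/* Example path on a tmpfs mount; "pm" is an example PaX flag string. */
	const char *path = "/tmp/testbin";
	const char *flags = "pm";	/* value must stay within 8 bytes */
	char buf[9] = { 0 };

	if (setxattr(path, "user.pax.flags", flags, strlen(flags), 0) != 0) {
		perror("setxattr");	/* other user.* names get EOPNOTSUPP */
		return 1;
	}
	if (getxattr(path, "user.pax.flags", buf, sizeof(buf) - 1) < 0) {
		perror("getxattr");
		return 1;
	}
	printf("user.pax.flags = %s\n", buf);
	return 0;
}

Per the patch, only the exact name user.pax.flags is accepted under the user.* prefix on tmpfs, and values longer than 8 bytes are rejected with EINVAL.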
19669 diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
19670 deleted file mode 100644
19671 index 639fb3c..0000000
19672 --- a/1510_fs-enable-link-security-restrictions-by-default.patch
19673 +++ /dev/null
19674 @@ -1,22 +0,0 @@
19675 -From: Ben Hutchings <ben@××××××××××××.uk>
19676 -Subject: fs: Enable link security restrictions by default
19677 -Date: Fri, 02 Nov 2012 05:32:06 +0000
19678 -Bug-Debian: https://bugs.debian.org/609455
19679 -Forwarded: not-needed
19680 -
19681 -This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
19682 -('VFS: don't do protected {sym,hard}links by default').
19683 -
19684 ---- a/fs/namei.c
19685 -+++ b/fs/namei.c
19686 -@@ -651,8 +651,8 @@ static inline void put_link(struct namei
19687 - path_put(link);
19688 - }
19689 -
19690 --int sysctl_protected_symlinks __read_mostly = 0;
19691 --int sysctl_protected_hardlinks __read_mostly = 0;
19692 -+int sysctl_protected_symlinks __read_mostly = 1;
19693 -+int sysctl_protected_hardlinks __read_mostly = 1;
19694 -
19695 - /**
19696 - * may_follow_link - Check symlink following for unsafe situations
19697
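Both knobs remain runtime-tunable after this change. A small sketch that reads the effective default back from procfs (the /proc/sys/fs paths are the standard sysctl mirrors; error handling is kept minimal):

#include <stdio.h>

int main(void)
{
	/* Reads the default that the reverted commit above turns back on. */
	FILE *f = fopen("/proc/sys/fs/protected_symlinks", "r");
	int val = -1;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("fs.protected_symlinks = %d\n", val);
	fclose(f);
	return 0;
}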
19698 diff --git a/2600_select-REGMAP_IRQ-for-rt5033.patch b/2600_select-REGMAP_IRQ-for-rt5033.patch
19699 deleted file mode 100644
19700 index 92fb2e0..0000000
19701 --- a/2600_select-REGMAP_IRQ-for-rt5033.patch
19702 +++ /dev/null
19703 @@ -1,30 +0,0 @@
19704 -From 23a2a22a3f3f17de094f386a893f7047c10e44a0 Mon Sep 17 00:00:00 2001
19705 -From: Artem Savkov <asavkov@××××××.com>
19706 -Date: Thu, 5 Mar 2015 12:42:27 +0100
19707 -Subject: mfd: rt5033: MFD_RT5033 needs to select REGMAP_IRQ
19708 -
19709 -Since commit 0b2712585 (linux-next.git), this driver uses regmap_irq and so
19710 -needs to select REGMAP_IRQ (see the sketch after this patch).
19711 -
19712 -This fixes the following compilation errors:
19713 -ERROR: "regmap_irq_get_domain" [drivers/mfd/rt5033.ko] undefined!
19714 -ERROR: "regmap_add_irq_chip" [drivers/mfd/rt5033.ko] undefined!
19715 -
19716 -Signed-off-by: Artem Savkov <asavkov@××××××.com>
19717 -Signed-off-by: Lee Jones <lee.jones@××××××.org>
19718 -
19719 -diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
19720 -index f8ef77d9a..f49f404 100644
19721 ---- a/drivers/mfd/Kconfig
19722 -+++ b/drivers/mfd/Kconfig
19723 -@@ -680,6 +680,7 @@ config MFD_RT5033
19724 - depends on I2C=y
19725 - select MFD_CORE
19726 - select REGMAP_I2C
19727 -+ select REGMAP_IRQ
19728 - help
19729 - This driver provides for the Richtek RT5033 Power Management IC,
19730 - which includes the I2C driver and the Core APIs. This driver provides
19731 ---
19732 -cgit v0.10.2
19733 -
19734
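For context on the dependency itself, here is a hedged sketch of the kind of regmap_irq setup that pulls in the two undefined symbols named above. The chip description and callers are elided, and the signatures reflect the 3.19-era regmap API as best we recall it, so treat this as an illustration rather than rt5033 code:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

/*
 * Calling these two helpers is what makes a module link against the
 * REGMAP_IRQ code, hence the 'select REGMAP_IRQ' above. 'chip' would
 * normally describe the device's status/mask registers.
 */
static int example_setup_irqs(struct regmap *map, int irq,
			      const struct regmap_irq_chip *chip)
{
	struct regmap_irq_chip_data *irq_data;
	int ret;

	ret = regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0, chip, &irq_data);
	if (ret)
		return ret;

	/* Child devices map their interrupts through this domain. */
	if (!regmap_irq_get_domain(irq_data))
		return -ENODEV;

	return 0;
}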
19735 diff --git a/2700_ThinkPad-30-brightness-control-fix.patch b/2700_ThinkPad-30-brightness-control-fix.patch
19736 deleted file mode 100644
19737 index b548c6d..0000000
19738 --- a/2700_ThinkPad-30-brightness-control-fix.patch
19739 +++ /dev/null
19740 @@ -1,67 +0,0 @@
19741 -diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
19742 -index cb96296..6c242ed 100644
19743 ---- a/drivers/acpi/blacklist.c
19744 -+++ b/drivers/acpi/blacklist.c
19745 -@@ -269,6 +276,61 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
19746 - },
19747 -
19748 - /*
19749 -+ * The following Lenovo models have a broken workaround in the
19750 -+ * acpi_video backlight implementation to meet the Windows 8
19751 -+ * requirement of 101 backlight levels. Reverting to pre-Win8
19752 -+ * behavior fixes the problem.
19753 -+ */
19754 -+ {
19755 -+ .callback = dmi_disable_osi_win8,
19756 -+ .ident = "Lenovo ThinkPad L430",
19757 -+ .matches = {
19758 -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19759 -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
19760 -+ },
19761 -+ },
19762 -+ {
19763 -+ .callback = dmi_disable_osi_win8,
19764 -+ .ident = "Lenovo ThinkPad T430s",
19765 -+ .matches = {
19766 -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19767 -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
19768 -+ },
19769 -+ },
19770 -+ {
19771 -+ .callback = dmi_disable_osi_win8,
19772 -+ .ident = "Lenovo ThinkPad T530",
19773 -+ .matches = {
19774 -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19775 -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
19776 -+ },
19777 -+ },
19778 -+ {
19779 -+ .callback = dmi_disable_osi_win8,
19780 -+ .ident = "Lenovo ThinkPad W530",
19781 -+ .matches = {
19782 -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19783 -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
19784 -+ },
19785 -+ },
19786 -+ {
19787 -+ .callback = dmi_disable_osi_win8,
19788 -+ .ident = "Lenovo ThinkPad X1 Carbon",
19789 -+ .matches = {
19790 -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19791 -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
19792 -+ },
19793 -+ },
19794 -+ {
19795 -+ .callback = dmi_disable_osi_win8,
19796 -+ .ident = "Lenovo ThinkPad X230",
19797 -+ .matches = {
19798 -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19799 -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
19800 -+ },
19801 -+ },
19802 -+
19803 -+ /*
19804 - * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
19805 - * Linux ignores it, except for the machines enumerated below.
19806 - */
19807 -
19808
19809 diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
19810 deleted file mode 100644
19811 index 4cd558e..0000000
19812 --- a/2900_dev-root-proc-mount-fix.patch
19813 +++ /dev/null
19814 @@ -1,30 +0,0 @@
19815 ---- a/init/do_mounts.c 2014-08-26 08:03:30.000013100 -0400
19816 -+++ b/init/do_mounts.c 2014-08-26 08:11:19.720014712 -0400
19817 -@@ -484,7 +484,10 @@ void __init change_floppy(char *fmt, ...
19818 - va_start(args, fmt);
19819 - vsprintf(buf, fmt, args);
19820 - va_end(args);
19821 -- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
19822 -+ if (saved_root_name[0])
19823 -+ fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
19824 -+ else
19825 -+ fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
19826 - if (fd >= 0) {
19827 - sys_ioctl(fd, FDEJECT, 0);
19828 - sys_close(fd);
19829 -@@ -527,8 +530,13 @@ void __init mount_root(void)
19830 - }
19831 - #endif
19832 - #ifdef CONFIG_BLOCK
19833 -- create_dev("/dev/root", ROOT_DEV);
19834 -- mount_block_root("/dev/root", root_mountflags);
19835 -+ if (saved_root_name[0] == '/') {
19836 -+ create_dev(saved_root_name, ROOT_DEV);
19837 -+ mount_block_root(saved_root_name, root_mountflags);
19838 -+ } else {
19839 -+ create_dev("/dev/root", ROOT_DEV);
19840 -+ mount_block_root("/dev/root", root_mountflags);
19841 -+ }
19842 - #endif
19843 - }
19844 -
19845
19846 diff --git a/2905_2disk-resume-image-fix.patch b/2905_2disk-resume-image-fix.patch
19847 deleted file mode 100644
19848 index 7e95d29..0000000
19849 --- a/2905_2disk-resume-image-fix.patch
19850 +++ /dev/null
19851 @@ -1,24 +0,0 @@
19852 -diff --git a/kernel/kmod.c b/kernel/kmod.c
19853 -index fb32636..d968882 100644
19854 ---- a/kernel/kmod.c
19855 -+++ b/kernel/kmod.c
19856 -@@ -575,7 +575,8 @@
19857 - call_usermodehelper_freeinfo(sub_info);
19858 - return -EINVAL;
19859 - }
19860 -- helper_lock();
19861 -+ if (!(current->flags & PF_FREEZER_SKIP))
19862 -+ helper_lock();
19863 - if (!khelper_wq || usermodehelper_disabled) {
19864 - retval = -EBUSY;
19865 - goto out;
19866 -@@ -611,7 +612,8 @@ wait_done:
19867 - out:
19868 - call_usermodehelper_freeinfo(sub_info);
19869 - unlock:
19870 -- helper_unlock();
19871 -+ if (!(current->flags & PF_FREEZER_SKIP))
19872 -+ helper_unlock();
19873 - return retval;
19874 - }
19875 - EXPORT_SYMBOL(call_usermodehelper_exec);
19876
19877 diff --git a/2910_lz4-compression-fix.patch b/2910_lz4-compression-fix.patch
19878 deleted file mode 100644
19879 index 1c55f32..0000000
19880 --- a/2910_lz4-compression-fix.patch
19881 +++ /dev/null
19882 @@ -1,30 +0,0 @@
19883 ---- a/lib/lz4/lz4_decompress.c 2015-04-13 16:20:04.896315560 +0800
19884 -+++ b/lib/lz4/lz4_decompress.c 2015-04-13 16:27:08.929317053 +0800
19885 -@@ -139,8 +139,12 @@
19886 - /* Error: request to write beyond destination buffer */
19887 - if (cpy > oend)
19888 - goto _output_error;
19889 -+#if LZ4_ARCH64
19890 -+ if ((ref + COPYLENGTH) > oend)
19891 -+#else
19892 - if ((ref + COPYLENGTH) > oend ||
19893 - (op + COPYLENGTH) > oend)
19894 -+#endif
19895 - goto _output_error;
19896 - LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
19897 - while (op < cpy)
19898 -@@ -270,7 +274,13 @@
19899 - if (cpy > oend - COPYLENGTH) {
19900 - if (cpy > oend)
19901 - goto _output_error; /* write outside of buf */
19902 --
19903 -+#if LZ4_ARCH64
19904 -+ if ((ref + COPYLENGTH) > oend)
19905 -+#else
19906 -+ if ((ref + COPYLENGTH) > oend ||
19907 -+ (op + COPYLENGTH) > oend)
19908 -+#endif
19909 -+ goto _output_error;
19910 - LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
19911 - while (op < cpy)
19912 - *op++ = *ref++;
19913
19914 diff --git a/4200_fbcondecor-3.19.patch b/4200_fbcondecor-3.19.patch
19915 deleted file mode 100644
19916 index 29c379f..0000000
19917 --- a/4200_fbcondecor-3.19.patch
19918 +++ /dev/null
19919 @@ -1,2119 +0,0 @@
19920 -diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
19921 -index fe85e7c..2230930 100644
19922 ---- a/Documentation/fb/00-INDEX
19923 -+++ b/Documentation/fb/00-INDEX
19924 -@@ -23,6 +23,8 @@ ep93xx-fb.txt
19925 - - info on the driver for EP93xx LCD controller.
19926 - fbcon.txt
19927 - - intro to and usage guide for the framebuffer console (fbcon).
19928 -+fbcondecor.txt
19929 -+ - info on the Framebuffer Console Decoration
19930 - framebuffer.txt
19931 - - introduction to frame buffer devices.
19932 - gxfb.txt
19933 -diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
19934 -new file mode 100644
19935 -index 0000000..3388c61
19936 ---- /dev/null
19937 -+++ b/Documentation/fb/fbcondecor.txt
19938 -@@ -0,0 +1,207 @@
19939 -+What is it?
19940 -+-----------
19941 -+
19942 -+The framebuffer decorations are a kernel feature which allows displaying a
19943 -+background picture on selected consoles.
19944 -+
19945 -+What do I need to get it to work?
19946 -+---------------------------------
19947 -+
19948 -+To get fbcondecor up-and-running you will have to:
19949 -+ 1) get a copy of splashutils [1] or a similar program
19950 -+ 2) get some fbcondecor themes
19951 -+ 3) build the kernel helper program
19952 -+ 4) build your kernel with the FB_CON_DECOR option enabled.
19953 -+
19954 -+To get fbcondecor operational right after fbcon initialization is finished, you
19955 -+will have to include a theme and the kernel helper into your initramfs image.
19956 -+Please refer to splashutils documentation for instructions on how to do that.
19957 -+
19958 -+[1] The splashutils package can be downloaded from:
19959 -+ http://github.com/alanhaggai/fbsplash
19960 -+
19961 -+The userspace helper
19962 -+--------------------
19963 -+
19964 -+The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
19965 -+kernel whenever an important event occurs and the kernel needs some kind of
19966 -+job to be carried out. Important events include console switches and video
19967 -+mode switches (the kernel requests background images and configuration
19968 -+parameters for the current console). The fbcondecor helper must be accessible at
19969 -+all times. If it's not, fbcondecor will be switched off automatically.
19970 -+
19971 -+It's possible to set the path to the fbcondecor helper by writing it to
19972 -+/proc/sys/kernel/fbcondecor.
19973 -+
19974 -+*****************************************************************************
19975 -+
19976 -+The information below is mostly technical stuff. There's probably no need to
19977 -+read it unless you plan to develop a userspace helper.
19978 -+
19979 -+The fbcondecor protocol
19980 -+-----------------------
19981 -+
19982 -+The fbcondecor protocol defines a communication interface between the kernel and
19983 -+the userspace fbcondecor helper.
19984 -+
19985 -+The kernel side is responsible for:
19986 -+
19987 -+ * rendering console text, using an image as a background (instead of
19988 -+   the standard solid color that fbcon uses),
19989 -+ * accepting commands from the user via ioctls on the fbcondecor device,
19990 -+ * calling the userspace helper to set things up as soon as the fb subsystem
19991 -+ is initialized.
19992 -+
19993 -+The userspace helper is responsible for everything else, including parsing
19994 -+configuration files, decompressing the image files whenever the kernel needs
19995 -+them, and communicating with the kernel if necessary.
19996 -+
19997 -+The fbcondecor protocol specifies how communication is done in both
19998 -+directions: kernel->userspace and userspace->kernel.
19999 -+
20000 -+Kernel -> Userspace
20001 -+-------------------
20002 -+
20003 -+The kernel communicates with the userspace helper by calling it and specifying
20004 -+the task to be done in a series of arguments.
20005 -+
20006 -+The arguments follow the pattern:
20007 -+<fbcondecor protocol version> <command> <parameters>
20008 -+
20009 -+All commands defined in fbcondecor protocol v2 have the following parameters:
20010 -+ virtual console
20011 -+ framebuffer number
20012 -+ theme
20013 -+
20014 -+Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
20015 -+framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
20016 -+
20017 -+Fbcondecor protocol v2 specifies the following commands:
20018 -+
20019 -+getpic
20020 -+------
20021 -+ The kernel issues this command to request image data. It's up to the
20022 -+ userspace helper to find a background image appropriate for the specified
20023 -+ theme and the current resolution. The userspace helper should respond by
20024 -+ issuing the FBIOCONDECOR_SETPIC ioctl.
20025 -+
20026 -+init
20027 -+----
20028 -+ The kernel issues this command after the fbcondecor device is created and
20029 -+ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
20030 -+ helper should parse the kernel command line (/proc/cmdline) or otherwise
20031 -+ decide whether fbcondecor is to be activated.
20032 -+
20033 -+ To activate fbcondecor on the first console the helper should issue the
20034 -+ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
20035 -+ in the above-mentioned order.
20036 -+
20037 -+ When the userspace helper is called in an early phase of the boot process
20038 -+ (right after the initialization of fbcon), no filesystems will be mounted.
20039 -+ The helper program should mount sysfs and then create the appropriate
20040 -+ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
20041 -+ current display settings and to be able to communicate with the kernel side.
20042 -+ It should probably also mount the procfs to be able to parse the kernel
20043 -+ command line parameters.
20044 -+
20045 -+ Note that the console sem is not held when the kernel calls fbcondecor_helper
20046 -+ with the 'init' command. The fbcondecor helper should perform all ioctls with
20047 -+ origin set to FBCON_DECOR_IO_ORIG_USER.
20048 -+
20049 -+modechange
20050 -+----------
20051 -+ The kernel issues this command on a mode change. The helper's response should
20052 -+ be similar to the response to the 'init' command. Note that this time the
20053 -+ console sem is held and all ioctls must be performed with origin set to
20054 -+ FBCON_DECOR_IO_ORIG_KERNEL.
20055 -+
20056 -+
20057 -+Userspace -> Kernel
20058 -+-------------------
20059 -+
20060 -+Userspace programs can communicate with fbcondecor via ioctls on the
20061 -+fbcondecor device. These ioctls are to be used by both the userspace helper
20062 -+(called only by the kernel) and userspace configuration tools (run by the users).
20063 -+
20064 -+The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
20065 -+when doing the appropriate ioctls. All userspace configuration tools should
20066 -+use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
20067 -+field when performing ioctls from the kernel helper will most likely result
20068 -+in a console deadlock.
20069 -+
20070 -+FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
20071 -+semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
20072 -+the console sem.
20073 -+
20074 -+The framebuffer console decoration provides the following ioctls (all defined in
20075 -+linux/fb.h):
20076 -+
20077 -+FBIOCONDECOR_SETPIC
20078 -+description: loads a background picture for a virtual console
20079 -+argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
20080 -+notes:
20081 -+If called for consoles other than the current foreground one, the picture data
20082 -+will be ignored.
20083 -+
20084 -+If the current virtual console is running in an 8-bpp mode, the cmap substruct
20085 -+of fb_image has to be filled appropriately: start should be set to 16 (first
20086 -+16 colors are reserved for fbcon), len to a value <= 240 and red, green and
20087 -+blue should point to valid cmap data. The transp field is ignored. The fields
20088 -+dx, dy, bg_color, fg_color in fb_image are ignored as well.
20089 -+
20090 -+FBIOCONDECOR_SETCFG
20091 -+description: sets the fbcondecor config for a virtual console
20092 -+argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
20093 -+notes: The structure has to be filled with valid data.
20094 -+
20095 -+FBIOCONDECOR_GETCFG
20096 -+description: gets the fbcondecor config for a virtual console
20097 -+argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
20098 -+
20099 -+FBIOCONDECOR_SETSTATE
20100 -+description: sets the fbcondecor state for a virtual console
20101 -+argument: struct fbcon_decor_iowrapper*; data: unsigned int*
20102 -+ values: 0 = disabled, 1 = enabled.
20103 -+
20104 -+FBIOCONDECOR_GETSTATE
20105 -+description: gets the fbcondecor state for a virtual console
20106 -+argument: struct fbcon_decor_iowrapper*; data: unsigned int*
20107 -+ values: as in FBIOCONDECOR_SETSTATE
20108 -+
20109 -+Info on used structures:
20110 -+
20111 -+Definition of struct vc_decor can be found in linux/console_decor.h. It's
20112 -+heavily commented. Note that the 'theme' field should point to a string
20113 -+no longer than FBCON_DECOR_THEME_LEN. When FBIOCONDECOR_GETCFG call is
20114 -+performed, the theme field should point to a char buffer of length
20115 -+FBCON_DECOR_THEME_LEN.
20116 -+
20117 -+Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
20118 -+The fields in this struct have the following meaning:
20119 -+
20120 -+vc:
20121 -+Virtual console number.
20122 -+
20123 -+origin:
20124 -+Specifies whether the ioctl is performed in response to a kernel request. The
20125 -+fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
20126 -+programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
20127 -+avoid console semaphore deadlocks.
20128 -+
20129 -+data:
20130 -+Pointer to a data structure appropriate for the performed ioctl. Type of
20131 -+the data struct is specified in the ioctls description.
20132 -+
20133 -+*****************************************************************************
20134 -+
20135 -+Credit
20136 -+------
20137 -+
20138 -+Original 'bootsplash' project & implementation by:
20139 -+ Volker Poplawski <volker@×××××××××.de>, Stefan Reinauer <stepan@××××.de>,
20140 -+ Steffen Winterfeldt <snwint@××××.de>, Michael Schroeder <mls@××××.de>,
20141 -+ Ken Wimer <wimer@××××.de>.
20142 -+
20143 -+Fbcondecor, fbcondecor protocol design, current implementation & docs by:
20144 -+ Michal Januszewski <michalj+fbcondecor@×××××.com>
20145 -+
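To make the ioctl interface documented above concrete, here is a hedged userspace sketch of FBIOCONDECOR_SETSTATE. The device node name, the struct layout (vc, origin, data) and the constants come from this deleted patch's additions to linux/fb.h, so outside a patched tree every identifier here is an assumption. The patch continues with its code changes below.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>	/* FBIOCONDECOR_* exist only in a patched tree */

int main(void)
{
	unsigned int state = 1;		/* 0 = disabled, 1 = enabled */
	struct fbcon_decor_iowrapper wrapper = {
		.vc = 0,				/* first virtual console */
		.origin = FBCON_DECOR_IO_ORIG_USER,	/* not the kernel helper */
		.data = &state,
	};
	int fd = open("/dev/fbcondecor", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FBIOCONDECOR_SETSTATE, &wrapper) != 0)
		perror("ioctl");
	close(fd);
	return 0;
}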
20146 -diff --git a/drivers/Makefile b/drivers/Makefile
20147 -index 7183b6a..d576148 100644
20148 ---- a/drivers/Makefile
20149 -+++ b/drivers/Makefile
20150 -@@ -17,6 +17,10 @@ obj-y += pwm/
20151 - obj-$(CONFIG_PCI) += pci/
20152 - obj-$(CONFIG_PARISC) += parisc/
20153 - obj-$(CONFIG_RAPIDIO) += rapidio/
20154 -+# tty/ comes before char/ so that the VT console is the boot-time
20155 -+# default.
20156 -+obj-y += tty/
20157 -+obj-y += char/
20158 - obj-y += video/
20159 - obj-y += idle/
20160 -
20161 -@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR) += regulator/
20162 - # reset controllers early, since gpu drivers might rely on them to initialize
20163 - obj-$(CONFIG_RESET_CONTROLLER) += reset/
20164 -
20165 --# tty/ comes before char/ so that the VT console is the boot-time
20166 --# default.
20167 --obj-y += tty/
20168 --obj-y += char/
20169 --
20170 - # iommu/ comes before gpu as gpu are using iommu controllers
20171 - obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
20172 -
20173 -diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
20174 -index fe1cd01..6d2e87a 100644
20175 ---- a/drivers/video/console/Kconfig
20176 -+++ b/drivers/video/console/Kconfig
20177 -@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
20178 - such that other users of the framebuffer will remain normally
20179 - oriented.
20180 -
20181 -+config FB_CON_DECOR
20182 -+ bool "Support for the Framebuffer Console Decorations"
20183 -+ depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
20184 -+ default n
20185 -+ ---help---
20186 -+ This option enables support for framebuffer console decorations which
20187 -+ makes it possible to display images in the background of the system
20188 -+ consoles. Note that userspace utilities are necessary in order to take
20189 -+ advantage of these features. Refer to Documentation/fb/fbcondecor.txt
20190 -+ for more information.
20191 -+
20192 -+ If unsure, say N.
20193 -+
20194 - config STI_CONSOLE
20195 - bool "STI text console"
20196 - depends on PARISC
20197 -diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
20198 -index 43bfa48..cc104b6f 100644
20199 ---- a/drivers/video/console/Makefile
20200 -+++ b/drivers/video/console/Makefile
20201 -@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
20202 - fbcon_ccw.o
20203 - endif
20204 -
20205 -+obj-$(CONFIG_FB_CON_DECOR) += fbcondecor.o cfbcondecor.o
20206 - obj-$(CONFIG_FB_STI) += sticore.o
20207 -diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
20208 -index 61b182b..984384b 100644
20209 ---- a/drivers/video/console/bitblit.c
20210 -+++ b/drivers/video/console/bitblit.c
20211 -@@ -18,6 +18,7 @@
20212 - #include <linux/console.h>
20213 - #include <asm/types.h>
20214 - #include "fbcon.h"
20215 -+#include "fbcondecor.h"
20216 -
20217 - /*
20218 - * Accelerated handlers.
20219 -@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
20220 - area.height = height * vc->vc_font.height;
20221 - area.width = width * vc->vc_font.width;
20222 -
20223 -+ if (fbcon_decor_active(info, vc)) {
20224 -+ area.sx += vc->vc_decor.tx;
20225 -+ area.sy += vc->vc_decor.ty;
20226 -+ area.dx += vc->vc_decor.tx;
20227 -+ area.dy += vc->vc_decor.ty;
20228 -+ }
20229 -+
20230 - info->fbops->fb_copyarea(info, &area);
20231 - }
20232 -
20233 -@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
20234 - cursor.image.depth = 1;
20235 - cursor.rop = ROP_XOR;
20236 -
20237 -- if (info->fbops->fb_cursor)
20238 -- err = info->fbops->fb_cursor(info, &cursor);
20239 -+ if (fbcon_decor_active(info, vc)) {
20240 -+ fbcon_decor_cursor(info, &cursor);
20241 -+ } else {
20242 -+ if (info->fbops->fb_cursor)
20243 -+ err = info->fbops->fb_cursor(info, &cursor);
20244 -
20245 -- if (err)
20246 -- soft_cursor(info, &cursor);
20247 -+ if (err)
20248 -+ soft_cursor(info, &cursor);
20249 -+ }
20250 -
20251 - ops->cursor_reset = 0;
20252 - }
20253 -diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
20254 -new file mode 100644
20255 -index 0000000..a2b4497
20256 ---- /dev/null
20257 -+++ b/drivers/video/console/cfbcondecor.c
20258 -@@ -0,0 +1,471 @@
20259 -+/*
20260 -+ * linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
20261 -+ *
20262 -+ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@×××××.com>
20263 -+ *
20264 -+ * Code based upon "Bootdecor" (C) 2001-2003
20265 -+ * Volker Poplawski <volker@×××××××××.de>,
20266 -+ * Stefan Reinauer <stepan@××××.de>,
20267 -+ * Steffen Winterfeldt <snwint@××××.de>,
20268 -+ * Michael Schroeder <mls@××××.de>,
20269 -+ * Ken Wimer <wimer@××××.de>.
20270 -+ *
20271 -+ * This file is subject to the terms and conditions of the GNU General Public
20272 -+ * License. See the file COPYING in the main directory of this archive for
20273 -+ * more details.
20274 -+ */
20275 -+#include <linux/module.h>
20276 -+#include <linux/types.h>
20277 -+#include <linux/fb.h>
20278 -+#include <linux/selection.h>
20279 -+#include <linux/slab.h>
20280 -+#include <linux/vt_kern.h>
20281 -+#include <asm/irq.h>
20282 -+
20283 -+#include "fbcon.h"
20284 -+#include "fbcondecor.h"
20285 -+
20286 -+#define parse_pixel(shift,bpp,type) \
20287 -+ do { \
20288 -+ if (d & (0x80 >> (shift))) \
20289 -+ dd2[(shift)] = fgx; \
20290 -+ else \
20291 -+ dd2[(shift)] = transparent ? *(type *)decor_src : bgx; \
20292 -+ decor_src += (bpp); \
20293 -+ } while (0) \
20294 -+
20295 -+extern int get_color(struct vc_data *vc, struct fb_info *info,
20296 -+ u16 c, int is_fg);
20297 -+
20298 -+void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
20299 -+{
20300 -+ int i, j, k;
20301 -+ int minlen = min(min(info->var.red.length, info->var.green.length),
20302 -+ info->var.blue.length);
20303 -+ u32 col;
20304 -+
20305 -+ for (j = i = 0; i < 16; i++) {
20306 -+ k = color_table[i];
20307 -+
20308 -+ col = ((vc->vc_palette[j++] >> (8-minlen))
20309 -+ << info->var.red.offset);
20310 -+ col |= ((vc->vc_palette[j++] >> (8-minlen))
20311 -+ << info->var.green.offset);
20312 -+ col |= ((vc->vc_palette[j++] >> (8-minlen))
20313 -+ << info->var.blue.offset);
20314 -+ ((u32 *)info->pseudo_palette)[k] = col;
20315 -+ }
20316 -+}
20317 -+
20318 -+void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
20319 -+ int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
20320 -+{
20321 -+ unsigned int x, y;
20322 -+ u32 dd;
20323 -+ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
20324 -+ unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
20325 -+ unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
20326 -+ u16 dd2[4];
20327 -+
20328 -+ u8* decor_src = (u8 *)(info->bgdecor.data + ds);
20329 -+ u8* dst = (u8 *)(info->screen_base + d);
20330 -+
20331 -+ if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
20332 -+ return;
20333 -+
20334 -+ for (y = 0; y < height; y++) {
20335 -+ switch (info->var.bits_per_pixel) {
20336 -+
20337 -+ case 32:
20338 -+ for (x = 0; x < width; x++) {
20339 -+
20340 -+ if ((x & 7) == 0)
20341 -+ d = *src++;
20342 -+ if (d & 0x80)
20343 -+ dd = fgx;
20344 -+ else
20345 -+ dd = transparent ?
20346 -+ *(u32 *)decor_src : bgx;
20347 -+
20348 -+ d <<= 1;
20349 -+ decor_src += 4;
20350 -+ fb_writel(dd, dst);
20351 -+ dst += 4;
20352 -+ }
20353 -+ break;
20354 -+ case 24:
20355 -+ for (x = 0; x < width; x++) {
20356 -+
20357 -+ if ((x & 7) == 0)
20358 -+ d = *src++;
20359 -+ if (d & 0x80)
20360 -+ dd = fgx;
20361 -+ else
20362 -+ dd = transparent ?
20363 -+ (*(u32 *)decor_src & 0xffffff) : bgx;
20364 -+
20365 -+ d <<= 1;
20366 -+ decor_src += 3;
20367 -+#ifdef __LITTLE_ENDIAN
20368 -+ fb_writew(dd & 0xffff, dst);
20369 -+ dst += 2;
20370 -+ fb_writeb((dd >> 16), dst);
20371 -+#else
20372 -+ fb_writew(dd >> 8, dst);
20373 -+ dst += 2;
20374 -+ fb_writeb(dd & 0xff, dst);
20375 -+#endif
20376 -+ dst++;
20377 -+ }
20378 -+ break;
20379 -+ case 16:
20380 -+ for (x = 0; x < width; x += 2) {
20381 -+ if ((x & 7) == 0)
20382 -+ d = *src++;
20383 -+
20384 -+ parse_pixel(0, 2, u16);
20385 -+ parse_pixel(1, 2, u16);
20386 -+#ifdef __LITTLE_ENDIAN
20387 -+ dd = dd2[0] | (dd2[1] << 16);
20388 -+#else
20389 -+ dd = dd2[1] | (dd2[0] << 16);
20390 -+#endif
20391 -+ d <<= 2;
20392 -+ fb_writel(dd, dst);
20393 -+ dst += 4;
20394 -+ }
20395 -+ break;
20396 -+
20397 -+ case 8:
20398 -+ for (x = 0; x < width; x += 4) {
20399 -+ if ((x & 7) == 0)
20400 -+ d = *src++;
20401 -+
20402 -+ parse_pixel(0, 1, u8);
20403 -+ parse_pixel(1, 1, u8);
20404 -+ parse_pixel(2, 1, u8);
20405 -+ parse_pixel(3, 1, u8);
20406 -+
20407 -+#ifdef __LITTLE_ENDIAN
20408 -+ dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
20409 -+#else
20410 -+ dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
20411 -+#endif
20412 -+ d <<= 4;
20413 -+ fb_writel(dd, dst);
20414 -+ dst += 4;
20415 -+ }
20416 -+ }
20417 -+
20418 -+ dst += info->fix.line_length - width * bytespp;
20419 -+ decor_src += (info->var.xres - width) * bytespp;
20420 -+ }
20421 -+}
20422 -+
20423 -+#define cc2cx(a) \
20424 -+ ((info->fix.visual == FB_VISUAL_TRUECOLOR || \
20425 -+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? \
20426 -+ ((u32*)info->pseudo_palette)[a] : a)
20427 -+
20428 -+void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
20429 -+ const unsigned short *s, int count, int yy, int xx)
20430 -+{
20431 -+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
20432 -+ struct fbcon_ops *ops = info->fbcon_par;
20433 -+ int fg_color, bg_color, transparent;
20434 -+ u8 *src;
20435 -+ u32 bgx, fgx;
20436 -+ u16 c = scr_readw(s);
20437 -+
20438 -+ fg_color = get_color(vc, info, c, 1);
20439 -+ bg_color = get_color(vc, info, c, 0);
20440 -+
20441 -+ /* Don't paint the background image if console is blanked */
20442 -+ transparent = ops->blank_state ? 0 :
20443 -+ (vc->vc_decor.bg_color == bg_color);
20444 -+
20445 -+ xx = xx * vc->vc_font.width + vc->vc_decor.tx;
20446 -+ yy = yy * vc->vc_font.height + vc->vc_decor.ty;
20447 -+
20448 -+ fgx = cc2cx(fg_color);
20449 -+ bgx = cc2cx(bg_color);
20450 -+
20451 -+ while (count--) {
20452 -+ c = scr_readw(s++);
20453 -+ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
20454 -+ ((vc->vc_font.width + 7) >> 3);
20455 -+
20456 -+ fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
20457 -+ vc->vc_font.width, src, fgx, bgx, transparent);
20458 -+ xx += vc->vc_font.width;
20459 -+ }
20460 -+}
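
fbcon_decor_putcs() locates each glyph by treating the font data as an array of fixed-size bitmaps: one glyph occupies vc_font.height rows of ceil(width/8) bytes. The offset arithmetic in isolation (a sketch; the 8x16 font is an assumed example):

    #include <stdio.h>

    /* Byte offset of a glyph inside a packed console font bitmap. */
    static unsigned glyph_offset(unsigned code, unsigned charmask,
                                 unsigned width, unsigned height)
    {
        return (code & charmask) * height * ((width + 7) >> 3);
    }

    int main(void)
    {
        /* 'A' (0x41) in an 8x16 font: 65 glyphs of 16 bytes each */
        printf("%u\n", glyph_offset(0x41, 0xff, 8, 16));  /* prints 1040 */
        return 0;
    }
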
20461 -+
20462 -+void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
20463 -+{
20464 -+ int i;
20465 -+ unsigned int dsize, s_pitch;
20466 -+ struct fbcon_ops *ops = info->fbcon_par;
20467 -+ struct vc_data* vc;
20468 -+ u8 *src;
20469 -+
20470 -+ /* we really don't need any cursors while the console is blanked */
20471 -+ if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
20472 -+ return;
20473 -+
20474 -+ vc = vc_cons[ops->currcon].d;
20475 -+
20476 -+ src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
20477 -+ if (!src)
20478 -+ return;
20479 -+
20480 -+ s_pitch = (cursor->image.width + 7) >> 3;
20481 -+ dsize = s_pitch * cursor->image.height;
20482 -+ if (cursor->enable) {
20483 -+ switch (cursor->rop) {
20484 -+ case ROP_XOR:
20485 -+ for (i = 0; i < dsize; i++)
20486 -+ src[i] = cursor->image.data[i] ^ cursor->mask[i];
20487 -+ break;
20488 -+ case ROP_COPY:
20489 -+ default:
20490 -+ for (i = 0; i < dsize; i++)
20491 -+ src[i] = cursor->image.data[i] & cursor->mask[i];
20492 -+ break;
20493 -+ }
20494 -+ } else
20495 -+ memcpy(src, cursor->image.data, dsize);
20496 -+
20497 -+ fbcon_decor_renderc(info,
20498 -+ cursor->image.dy + vc->vc_decor.ty,
20499 -+ cursor->image.dx + vc->vc_decor.tx,
20500 -+ cursor->image.height,
20501 -+ cursor->image.width,
20502 -+ (u8*)src,
20503 -+ cc2cx(cursor->image.fg_color),
20504 -+ cc2cx(cursor->image.bg_color),
20505 -+ cursor->image.bg_color == vc->vc_decor.bg_color);
20506 -+
20507 -+ kfree(src);
20508 -+}
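
The cursor path above first bakes the cursor shape into a temporary bitmap: ROP_XOR inverts the glyph under the mask, ROP_COPY keeps only the masked bits, and a disabled cursor is drawn as the plain image. The combine step as a standalone sketch (names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static void combine(uint8_t *dst, const uint8_t *img,
                        const uint8_t *mask, unsigned n, int rop_xor)
    {
        for (unsigned i = 0; i < n; i++)
            dst[i] = rop_xor ? (uint8_t)(img[i] ^ mask[i])
                             : (uint8_t)(img[i] & mask[i]);
    }

    int main(void)
    {
        const uint8_t img[1] = { 0xf0 }, mask[1] = { 0xff };
        uint8_t out[1];

        combine(out, img, mask, 1, 1);
        printf("%02x\n", (unsigned)out[0]);  /* 0f: XOR inverts under the mask */
        return 0;
    }
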
20509 -+
20510 -+static void decorset(u8 *dst, int height, int width, int dstbytes,
20511 -+ u32 bgx, int bpp)
20512 -+{
20513 -+ int i;
20514 -+
20515 -+ if (bpp == 8)
20516 -+ bgx |= bgx << 8;
20517 -+ if (bpp == 16 || bpp == 8)
20518 -+ bgx |= bgx << 16;
20519 -+
20520 -+ while (height-- > 0) {
20521 -+ u8 *p = dst;
20522 -+
20523 -+ switch (bpp) {
20524 -+
20525 -+ case 32:
20526 -+ for (i=0; i < width; i++) {
20527 -+ fb_writel(bgx, p); p += 4;
20528 -+ }
20529 -+ break;
20530 -+ case 24:
20531 -+ for (i=0; i < width; i++) {
20532 -+#ifdef __LITTLE_ENDIAN
20533 -+ fb_writew((bgx & 0xffff),(u16*)p); p += 2;
20534 -+ fb_writeb((bgx >> 16),p++);
20535 -+#else
20536 -+ fb_writew((bgx >> 8),(u16*)p); p += 2;
20537 -+ fb_writeb((bgx & 0xff),p++);
20538 -+#endif
20539 -+ }
20539 -+ break;
20540 -+ case 16:
20541 -+ for (i=0; i < width/4; i++) {
20542 -+ fb_writel(bgx,p); p += 4;
20543 -+ fb_writel(bgx,p); p += 4;
20544 -+ }
20545 -+ if (width & 2) {
20546 -+ fb_writel(bgx,p); p += 4;
20547 -+ }
20548 -+ if (width & 1)
20549 -+ fb_writew(bgx,(u16*)p);
20550 -+ break;
20551 -+ case 8:
20552 -+ for (i=0; i < width/4; i++) {
20553 -+ fb_writel(bgx,p); p += 4;
20554 -+ }
20555 -+
20556 -+ if (width & 2) {
20557 -+ fb_writew(bgx,p); p += 2;
20558 -+ }
20559 -+ if (width & 1)
20560 -+ fb_writeb(bgx,(u8*)p);
20561 -+ break;
20562 -+
20563 -+ }
20564 -+ dst += dstbytes;
20565 -+ }
20566 -+}
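
decorset() prepares its fill value before the per-row loop: an 8 or 16 bit background color is replicated across all four bytes of a 32-bit word, so the fast paths can paint several pixels with a single fb_writel(). The replication in isolation (a sketch):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t replicate(uint32_t bgx, int bpp)
    {
        if (bpp == 8)
            bgx |= bgx << 8;           /* xx -> xxxx (16 bits) */
        if (bpp == 16 || bpp == 8)
            bgx |= bgx << 16;          /* widen to a full 32-bit word */
        return bgx;
    }

    int main(void)
    {
        printf("%08x\n", (unsigned)replicate(0x1f, 8));     /* 1f1f1f1f */
        printf("%08x\n", (unsigned)replicate(0x07e0, 16));  /* 07e007e0 */
        return 0;
    }
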
20567 -+
20568 -+void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
20569 -+ int srclinebytes, int bpp)
20570 -+{
20571 -+ int i;
20572 -+
20573 -+ while (height-- > 0) {
20574 -+ u32 *p = (u32 *)dst;
20575 -+ u32 *q = (u32 *)src;
20576 -+
20577 -+ switch (bpp) {
20578 -+
20579 -+ case 32:
20580 -+ for (i=0; i < width; i++)
20581 -+ fb_writel(*q++, p++);
20582 -+ break;
20583 -+ case 24:
20584 -+ for (i=0; i < (width*3/4); i++)
20585 -+ fb_writel(*q++, p++);
20586 -+ if ((width*3) % 4) {
20587 -+ if (width & 2) {
20588 -+ fb_writeb(*(u8*)q, (u8*)p);
20589 -+ } else if (width & 1) {
20590 -+ fb_writew(*(u16*)q, (u16*)p);
20591 -+ fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
20592 -+ }
20593 -+ }
20594 -+ break;
20595 -+ case 16:
20596 -+ for (i=0; i < width/4; i++) {
20597 -+ fb_writel(*q++, p++);
20598 -+ fb_writel(*q++, p++);
20599 -+ }
20600 -+ if (width & 2)
20601 -+ fb_writel(*q++, p++);
20602 -+ if (width & 1)
20603 -+ fb_writew(*(u16*)q, (u16*)p);
20604 -+ break;
20605 -+ case 8:
20606 -+ for (i=0; i < width/4; i++)
20607 -+ fb_writel(*q++, p++);
20608 -+
20609 -+ if (width & 2) {
20610 -+ fb_writew(*(u16*)q, (u16*)p);
20611 -+ q = (u32*) ((u16*)q + 1);
20612 -+ p = (u32*) ((u16*)p + 1);
20613 -+ }
20614 -+ if (width & 1)
20615 -+ fb_writeb(*(u8*)q, (u8*)p);
20616 -+ break;
20617 -+ }
20618 -+
20619 -+ dst += linebytes;
20620 -+ src += srclinebytes;
20621 -+ }
20622 -+}
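
fbcon_decor_copy() moves whole 32-bit words wherever it can and mops up the remainder per depth. For 16 bpp that means two longs (four pixels) per iteration, then a two-pixel and a one-pixel tail. A memcpy-based userspace equivalent of that case (a sketch, not the kernel code):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static void copy16(uint16_t *dst, const uint16_t *src, unsigned width)
    {
        unsigned i = 0;

        for (; i + 4 <= width; i += 4)                     /* width/4 long pairs */
            memcpy(&dst[i], &src[i], 8);
        if (width & 2) { memcpy(&dst[i], &src[i], 4); i += 2; }
        if (width & 1)
            dst[i] = src[i];
    }

    int main(void)
    {
        uint16_t s[7] = { 1, 2, 3, 4, 5, 6, 7 }, d[7] = { 0 };

        copy16(d, s, 7);
        printf("%u %u\n", (unsigned)d[0], (unsigned)d[6]);  /* 1 7 */
        return 0;
    }
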
20623 -+
20624 -+static void decorfill(struct fb_info *info, int sy, int sx, int height,
20625 -+ int width)
20626 -+{
20627 -+ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
20628 -+ int d = sy * info->fix.line_length + sx * bytespp;
20629 -+ int ds = (sy * info->var.xres + sx) * bytespp;
20630 -+
20631 -+ fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
20632 -+ height, width, info->fix.line_length, info->var.xres * bytespp,
20633 -+ info->var.bits_per_pixel);
20634 -+}
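
decorfill() computes two different offsets for the same (sy, sx) position because the two buffers have different pitches: the visible framebuffer is addressed through the hardware pitch (fix.line_length, which may include padding), while the background image is tightly packed at xres pixels per row. Worked with assumed values (the pitch of 4352 is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned bpp = 32, bytespp = (bpp + 7) >> 3;
        unsigned xres = 1024, line_length = 4352;  /* padded pitch, assumed */
        unsigned sx = 10, sy = 20;

        unsigned d  = sy * line_length + sx * bytespp;   /* screen offset */
        unsigned ds = (sy * xres + sx) * bytespp;        /* decor offset  */

        printf("screen=%u decor=%u\n", d, ds);
        return 0;
    }
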
20635 -+
20636 -+void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
20637 -+ int height, int width)
20638 -+{
20639 -+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
20640 -+ struct fbcon_ops *ops = info->fbcon_par;
20641 -+ u8 *dst;
20642 -+ int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
20643 -+
20644 -+ transparent = (vc->vc_decor.bg_color == bg_color);
20645 -+ sy = sy * vc->vc_font.height + vc->vc_decor.ty;
20646 -+ sx = sx * vc->vc_font.width + vc->vc_decor.tx;
20647 -+ height *= vc->vc_font.height;
20648 -+ width *= vc->vc_font.width;
20649 -+
20650 -+ /* Don't paint the background image if console is blanked */
20651 -+ if (transparent && !ops->blank_state) {
20652 -+ decorfill(info, sy, sx, height, width);
20653 -+ } else {
20654 -+ dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
20655 -+ sx * ((info->var.bits_per_pixel + 7) >> 3));
20656 -+ decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
20657 -+ info->var.bits_per_pixel);
20658 -+ }
20659 -+}
20660 -+
20661 -+void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
20662 -+ int bottom_only)
20663 -+{
20664 -+ unsigned int tw = vc->vc_cols*vc->vc_font.width;
20665 -+ unsigned int th = vc->vc_rows*vc->vc_font.height;
20666 -+
20667 -+ if (!bottom_only) {
20668 -+ /* top margin */
20669 -+ decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
20670 -+ /* left margin */
20671 -+ decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
20672 -+ /* right margin */
20673 -+ decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
20674 -+ info->var.xres - vc->vc_decor.tx - tw);
20675 -+ }
20676 -+ decorfill(info, vc->vc_decor.ty + th, 0,
20677 -+ info->var.yres - vc->vc_decor.ty - th, info->var.xres);
20678 -+}
20679 -+
20680 -+void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
20681 -+ int sx, int dx, int width)
20682 -+{
20683 -+ u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
20684 -+ u16 *s = d + (dx - sx);
20685 -+ u16 *start = d;
20686 -+ u16 *ls = d;
20687 -+ u16 *le = d + width;
20688 -+ u16 c;
20689 -+ int x = dx;
20690 -+ u16 attr = 1;
20691 -+
20692 -+ do {
20693 -+ c = scr_readw(d);
20694 -+ if (attr != (c & 0xff00)) {
20695 -+ attr = c & 0xff00;
20696 -+ if (d > start) {
20697 -+ fbcon_decor_putcs(vc, info, start, d - start, y, x);
20698 -+ x += d - start;
20699 -+ start = d;
20700 -+ }
20701 -+ }
20702 -+ if (s >= ls && s < le && c == scr_readw(s)) {
20703 -+ if (d > start) {
20704 -+ fbcon_decor_putcs(vc, info, start, d - start, y, x);
20705 -+ x += d - start + 1;
20706 -+ start = d + 1;
20707 -+ } else {
20708 -+ x++;
20709 -+ start++;
20710 -+ }
20711 -+ }
20712 -+ s++;
20713 -+ d++;
20714 -+ } while (d < le);
20715 -+ if (d > start)
20716 -+ fbcon_decor_putcs(vc, info, start, d - start, y, x);
20717 -+}
20718 -+
20719 -+void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
20720 -+{
20721 -+ if (blank) {
20722 -+ decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
20723 -+ info->fix.line_length, 0, info->var.bits_per_pixel);
20724 -+ } else {
20725 -+ update_screen(vc);
20726 -+ fbcon_decor_clear_margins(vc, info, 0);
20727 -+ }
20728 -+}
20729 -+
20730 -diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
20731 -index f447734..da50d61 100644
20732 ---- a/drivers/video/console/fbcon.c
20733 -+++ b/drivers/video/console/fbcon.c
20734 -@@ -79,6 +79,7 @@
20735 - #include <asm/irq.h>
20736 -
20737 - #include "fbcon.h"
20738 -+#include "../console/fbcondecor.h"
20739 -
20740 - #ifdef FBCONDEBUG
20741 - # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
20742 -@@ -94,7 +95,7 @@ enum {
20743 -
20744 - static struct display fb_display[MAX_NR_CONSOLES];
20745 -
20746 --static signed char con2fb_map[MAX_NR_CONSOLES];
20747 -+signed char con2fb_map[MAX_NR_CONSOLES];
20748 - static signed char con2fb_map_boot[MAX_NR_CONSOLES];
20749 -
20750 - static int logo_lines;
20751 -@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
20752 - !vt_force_oops_output(vc);
20753 - }
20754 -
20755 --static int get_color(struct vc_data *vc, struct fb_info *info,
20756 -+int get_color(struct vc_data *vc, struct fb_info *info,
20757 - u16 c, int is_fg)
20758 - {
20759 - int depth = fb_get_color_depth(&info->var, &info->fix);
20760 -@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
20761 - info_idx = -1;
20762 - } else {
20763 - fbcon_has_console_bind = 1;
20764 -+#ifdef CONFIG_FB_CON_DECOR
20765 -+ fbcon_decor_init();
20766 -+#endif
20767 - }
20768 -
20769 - return err;
20770 -@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
20771 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
20772 - cols /= vc->vc_font.width;
20773 - rows /= vc->vc_font.height;
20774 -+
20775 -+ if (fbcon_decor_active(info, vc)) {
20776 -+ cols = vc->vc_decor.twidth / vc->vc_font.width;
20777 -+ rows = vc->vc_decor.theight / vc->vc_font.height;
20778 -+ }
20779 -+
20780 - vc_resize(vc, cols, rows);
20781 -
20782 - DPRINTK("mode: %s\n", info->fix.id);
20783 -@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
20784 - cap = info->flags;
20785 -
20786 - if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
20787 -- (info->fix.type == FB_TYPE_TEXT))
20788 -+ (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
20789 - logo = 0;
20790 -
20791 - if (var_to_display(p, &info->var, info))
20792 -@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
20793 - fbcon_clear_margins(vc, 0);
20794 - }
20795 -
20796 -+ if (fbcon_decor_active(info, vc)) {
20797 -+ fbcon_decor_clear(vc, info, sy, sx, height, width);
20798 -+ return;
20799 -+ }
20800 -+
20801 - /* Split blits that cross physical y_wrap boundary */
20802 -
20803 - y_break = p->vrows - p->yscroll;
20804 -@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
20805 - struct display *p = &fb_display[vc->vc_num];
20806 - struct fbcon_ops *ops = info->fbcon_par;
20807 -
20808 -- if (!fbcon_is_inactive(vc, info))
20809 -- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
20810 -- get_color(vc, info, scr_readw(s), 1),
20811 -- get_color(vc, info, scr_readw(s), 0));
20812 -+ if (!fbcon_is_inactive(vc, info)) {
20813 -+
20814 -+ if (fbcon_decor_active(info, vc))
20815 -+ fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
20816 -+ else
20817 -+ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
20818 -+ get_color(vc, info, scr_readw(s), 1),
20819 -+ get_color(vc, info, scr_readw(s), 0));
20820 -+ }
20821 - }
20822 -
20823 - static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
20824 -@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
20825 - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
20826 - struct fbcon_ops *ops = info->fbcon_par;
20827 -
20828 -- if (!fbcon_is_inactive(vc, info))
20829 -- ops->clear_margins(vc, info, bottom_only);
20830 -+ if (!fbcon_is_inactive(vc, info)) {
20831 -+ if (fbcon_decor_active(info, vc)) {
20832 -+ fbcon_decor_clear_margins(vc, info, bottom_only);
20833 -+ } else {
20834 -+ ops->clear_margins(vc, info, bottom_only);
20835 -+ }
20836 -+ }
20837 - }
20838 -
20839 - static void fbcon_cursor(struct vc_data *vc, int mode)
20840 -@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
20841 - count = vc->vc_rows;
20842 - if (softback_top)
20843 - fbcon_softback_note(vc, t, count);
20844 -- if (logo_shown >= 0)
20845 -+ if (logo_shown >= 0 || fbcon_decor_active(info, vc))
20846 - goto redraw_up;
20847 - switch (p->scrollmode) {
20848 - case SCROLL_MOVE:
20849 -@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
20850 - count = vc->vc_rows;
20851 - if (logo_shown >= 0)
20852 - goto redraw_down;
20853 -+ if (fbcon_decor_active(info, vc))
20854 -+ goto redraw_down;
20855 - switch (p->scrollmode) {
20856 - case SCROLL_MOVE:
20857 - fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
20858 -@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
20859 - }
20860 - return;
20861 - }
20862 -+
20863 -+ if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
20864 -+ /* must use slower redraw bmove to keep background pic intact */
20865 -+ fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
20866 -+ return;
20867 -+ }
20868 -+
20869 - ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
20870 - height, width);
20871 - }
20872 -@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
20873 - var.yres = virt_h * virt_fh;
20874 - x_diff = info->var.xres - var.xres;
20875 - y_diff = info->var.yres - var.yres;
20876 -- if (x_diff < 0 || x_diff > virt_fw ||
20877 -- y_diff < 0 || y_diff > virt_fh) {
20878 -+ if ((x_diff < 0 || x_diff > virt_fw ||
20879 -+ y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
20880 - const struct fb_videomode *mode;
20881 -
20882 - DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
20883 -@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
20884 -
20885 - info = registered_fb[con2fb_map[vc->vc_num]];
20886 - ops = info->fbcon_par;
20887 -+ prev_console = ops->currcon;
20888 -+ if (prev_console != -1)
20889 -+ old_info = registered_fb[con2fb_map[prev_console]];
20890 -+
20891 -+#ifdef CONFIG_FB_CON_DECOR
20892 -+ if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
20893 -+ struct vc_data *vc_curr = vc_cons[prev_console].d;
20894 -+ if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
20895 -+ /* Clear the screen to avoid displaying funky colors during
20896 -+ * palette updates. */
20897 -+ memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
20898 -+ 0, info->var.yres * info->fix.line_length);
20899 -+ }
20900 -+ }
20901 -+#endif
20902 -
20903 - if (softback_top) {
20904 - if (softback_lines)
20905 -@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
20906 - logo_shown = FBCON_LOGO_CANSHOW;
20907 - }
20908 -
20909 -- prev_console = ops->currcon;
20910 -- if (prev_console != -1)
20911 -- old_info = registered_fb[con2fb_map[prev_console]];
20912 - /*
20913 - * FIXME: If we have multiple fbdev's loaded, we need to
20914 - * update all info->currcon. Perhaps, we can place this
20915 -@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
20916 - fbcon_del_cursor_timer(old_info);
20917 - }
20918 -
20919 -+ if (fbcon_decor_active_vc(vc)) {
20920 -+ struct vc_data *vc_curr = vc_cons[prev_console].d;
20921 -+
20922 -+ if (!vc_curr->vc_decor.theme ||
20923 -+ strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
20924 -+ (fbcon_decor_active_nores(info, vc_curr) &&
20925 -+ !fbcon_decor_active(info, vc_curr))) {
20926 -+ fbcon_decor_disable(vc, 0);
20927 -+ fbcon_decor_call_helper("modechange", vc->vc_num);
20928 -+ }
20929 -+ }
20930 -+
20931 - if (fbcon_is_inactive(vc, info) ||
20932 - ops->blank_state != FB_BLANK_UNBLANK)
20933 - fbcon_del_cursor_timer(info);
20934 -@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
20935 - }
20936 - }
20937 -
20938 -- if (!fbcon_is_inactive(vc, info)) {
20939 -+ if (!fbcon_is_inactive(vc, info)) {
20940 - if (ops->blank_state != blank) {
20941 - ops->blank_state = blank;
20942 - fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
20943 - ops->cursor_flash = (!blank);
20944 -
20945 -- if (!(info->flags & FBINFO_MISC_USEREVENT))
20946 -- if (fb_blank(info, blank))
20947 -- fbcon_generic_blank(vc, info, blank);
20948 -+ if (!(info->flags & FBINFO_MISC_USEREVENT)) {
20949 -+ if (fb_blank(info, blank)) {
20950 -+ if (fbcon_decor_active(info, vc))
20951 -+ fbcon_decor_blank(vc, info, blank);
20952 -+ else
20953 -+ fbcon_generic_blank(vc, info, blank);
20954 -+ }
20955 -+ }
20956 - }
20957 -
20958 - if (!blank)
20959 -@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
20960 - }
20961 -
20962 - if (resize) {
20963 -+ /* reset wrap/pan */
20964 - int cols, rows;
20965 -
20966 - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
20967 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
20968 -+
20969 -+ if (fbcon_decor_active(info, vc)) {
20970 -+ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
20971 -+ cols = vc->vc_decor.twidth;
20972 -+ rows = vc->vc_decor.theight;
20973 -+ }
20974 - cols /= w;
20975 - rows /= h;
20976 -+
20977 - vc_resize(vc, cols, rows);
20978 -+
20979 - if (CON_IS_VISIBLE(vc) && softback_buf)
20980 - fbcon_update_softback(vc);
20981 - } else if (CON_IS_VISIBLE(vc)
20982 -@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
20983 - int i, j, k, depth;
20984 - u8 val;
20985 -
20986 -- if (fbcon_is_inactive(vc, info))
20987 -+ if (fbcon_is_inactive(vc, info)
20988 -+#ifdef CONFIG_FB_CON_DECOR
20989 -+ || vc->vc_num != fg_console
20990 -+#endif
20991 -+ )
20992 - return -EINVAL;
20993 -
20994 - if (!CON_IS_VISIBLE(vc))
20995 -@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
20996 - } else
20997 - fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
20998 -
20999 -- return fb_set_cmap(&palette_cmap, info);
21000 -+ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
21001 -+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
21002 -+
21003 -+ u16 *red, *green, *blue;
21004 -+ int minlen = min(min(info->var.red.length, info->var.green.length),
21005 -+ info->var.blue.length);
21006 -+ int h;
21007 -+
21008 -+ struct fb_cmap cmap = {
21009 -+ .start = 0,
21010 -+ .len = (1 << minlen),
21011 -+ .red = NULL,
21012 -+ .green = NULL,
21013 -+ .blue = NULL,
21014 -+ .transp = NULL
21015 -+ };
21016 -+
21017 -+ red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
21018 -+
21019 -+ if (!red)
21020 -+ goto out;
21021 -+
21022 -+ green = red + 256;
21023 -+ blue = green + 256;
21024 -+ cmap.red = red;
21025 -+ cmap.green = green;
21026 -+ cmap.blue = blue;
21027 -+
21028 -+ for (i = 0; i < cmap.len; i++) {
21029 -+ red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
21030 -+ }
21031 -+
21032 -+ h = fb_set_cmap(&cmap, info);
21033 -+ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
21034 -+ kfree(red);
21035 -+
21036 -+ return h;
21037 -+
21038 -+ } else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
21039 -+ info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
21040 -+ fb_set_cmap(&info->bgdecor.cmap, info);
21041 -+
21042 -+out: return fb_set_cmap(&palette_cmap, info);
21043 - }
21044 -
21045 - static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
21046 - {
21047 - unsigned long p;
21048 - int line;
21049 --
21050 -+
21051 - if (vc->vc_num != fg_console || !softback_lines)
21052 - return (u16 *) (vc->vc_origin + offset);
21053 - line = offset / vc->vc_size_row;
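
For DIRECTCOLOR visuals, the fbcon_set_palette() hunk above replaces the hardware palette with a linear ramp: every color channel gets an identical 0..0xffff gradient over 1 << minlen entries, and console colors are then resolved through the pseudo palette instead. The ramp computation on its own (minlen of 6 is an assumed example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned len = 1 << 6;         /* 1 << minlen, minlen assumed 6 */
        uint16_t ramp[64];

        for (unsigned i = 0; i < len; i++)
            ramp[i] = (uint16_t)((0xffffu * i) / (len - 1));

        printf("%u %u\n", (unsigned)ramp[0], (unsigned)ramp[len - 1]);  /* 0 65535 */
        return 0;
    }
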
21054 -@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
21055 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
21056 - cols /= vc->vc_font.width;
21057 - rows /= vc->vc_font.height;
21058 -- vc_resize(vc, cols, rows);
21059 -+
21060 -+ if (!fbcon_decor_active_nores(info, vc)) {
21061 -+ vc_resize(vc, cols, rows);
21062 -+ } else {
21063 -+ fbcon_decor_disable(vc, 0);
21064 -+ fbcon_decor_call_helper("modechange", vc->vc_num);
21065 -+ }
21066 -+
21067 - updatescrollmode(p, info, vc);
21068 - scrollback_max = 0;
21069 - scrollback_current = 0;
21070 -@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
21071 - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
21072 - cols /= vc->vc_font.width;
21073 - rows /= vc->vc_font.height;
21074 -- vc_resize(vc, cols, rows);
21075 -+ if (!fbcon_decor_active_nores(info, vc)) {
21076 -+ vc_resize(vc, cols, rows);
21077 -+ }
21078 - }
21079 -
21080 - if (fg != -1)
21081 -@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
21082 - }
21083 - }
21084 -
21085 -+ fbcon_decor_exit();
21086 - fbcon_has_exited = 1;
21087 - }
21088 -
21089 -diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
21090 -new file mode 100644
21091 -index 0000000..babc8c5
21092 ---- /dev/null
21093 -+++ b/drivers/video/console/fbcondecor.c
21094 -@@ -0,0 +1,555 @@
21095 -+/*
21096 -+ * linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
21097 -+ *
21098 -+ * Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@×××××.com>
21099 -+ *
21100 -+ * Code based upon "Bootsplash" (C) 2001-2003
21101 -+ * Volker Poplawski <volker@×××××××××.de>,
21102 -+ * Stefan Reinauer <stepan@××××.de>,
21103 -+ * Steffen Winterfeldt <snwint@××××.de>,
21104 -+ * Michael Schroeder <mls@××××.de>,
21105 -+ * Ken Wimer <wimer@××××.de>.
21106 -+ *
21107 -+ * Compat ioctl support by Thorsten Klein <TK@××××××××××××××.de>.
21108 -+ *
21109 -+ * This file is subject to the terms and conditions of the GNU General Public
21110 -+ * License. See the file COPYING in the main directory of this archive for
21111 -+ * more details.
21112 -+ *
21113 -+ */
21114 -+#include <linux/module.h>
21115 -+#include <linux/kernel.h>
21116 -+#include <linux/string.h>
21117 -+#include <linux/types.h>
21118 -+#include <linux/fb.h>
21119 -+#include <linux/vt_kern.h>
21120 -+#include <linux/vmalloc.h>
21121 -+#include <linux/unistd.h>
21122 -+#include <linux/syscalls.h>
21123 -+#include <linux/init.h>
21124 -+#include <linux/proc_fs.h>
21125 -+#include <linux/workqueue.h>
21126 -+#include <linux/kmod.h>
21127 -+#include <linux/miscdevice.h>
21128 -+#include <linux/device.h>
21129 -+#include <linux/fs.h>
21130 -+#include <linux/compat.h>
21131 -+#include <linux/console.h>
21132 -+
21133 -+#include <asm/uaccess.h>
21134 -+#include <asm/irq.h>
21135 -+
21136 -+#include "fbcon.h"
21137 -+#include "fbcondecor.h"
21138 -+
21139 -+extern signed char con2fb_map[];
21140 -+static int fbcon_decor_enable(struct vc_data *vc);
21141 -+char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
21142 -+static int initialized = 0;
21143 -+
21144 -+int fbcon_decor_call_helper(char* cmd, unsigned short vc)
21145 -+{
21146 -+ char *envp[] = {
21147 -+ "HOME=/",
21148 -+ "PATH=/sbin:/bin",
21149 -+ NULL
21150 -+ };
21151 -+
21152 -+ char tfb[5];
21153 -+ char tcons[5];
21154 -+ unsigned char fb = (int) con2fb_map[vc];
21155 -+
21156 -+ char *argv[] = {
21157 -+ fbcon_decor_path,
21158 -+ "2",
21159 -+ cmd,
21160 -+ tcons,
21161 -+ tfb,
21162 -+ vc_cons[vc].d->vc_decor.theme,
21163 -+ NULL
21164 -+ };
21165 -+
21166 -+ snprintf(tfb,5,"%d",fb);
21167 -+ snprintf(tcons,5,"%d",vc);
21168 -+
21169 -+ return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
21170 -+}
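
fbcon_decor_call_helper() is the kernel half of the userspace-helper protocol: it formats the console and framebuffer numbers into a fixed argv and execs /sbin/fbcondecor_helper with a minimal environment. A plain userspace analog of the same argv construction (a sketch; fork/execve stands in for call_usermodehelper, and error handling is trimmed):

    #include <stdio.h>
    #include <unistd.h>

    static int call_helper(const char *cmd, unsigned short vc, int fb,
                           const char *theme)
    {
        char tcons[5], tfb[5];

        snprintf(tcons, sizeof(tcons), "%d", vc);
        snprintf(tfb, sizeof(tfb), "%d", fb);

        char *const argv[] = {
            "/sbin/fbcondecor_helper", "2", (char *)cmd,
            tcons, tfb, (char *)theme, NULL
        };
        char *const envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

        if (fork() == 0) {             /* child: exec the helper */
            execve(argv[0], argv, envp);
            _exit(127);                /* exec failed */
        }
        return 0;  /* parent does not wait for exit, much like UMH_WAIT_EXEC */
    }

    int main(void)
    {
        return call_helper("init", 0, 0, "default");
    }
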
21171 -+
21172 -+/* Disables fbcondecor on a virtual console; called with console sem held. */
21173 -+int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
21174 -+{
21175 -+ struct fb_info* info;
21176 -+
21177 -+ if (!vc->vc_decor.state)
21178 -+ return -EINVAL;
21179 -+
21180 -+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
21181 -+
21182 -+ if (info == NULL)
21183 -+ return -EINVAL;
21184 -+
21185 -+ vc->vc_decor.state = 0;
21186 -+ vc_resize(vc, info->var.xres / vc->vc_font.width,
21187 -+ info->var.yres / vc->vc_font.height);
21188 -+
21189 -+ if (fg_console == vc->vc_num && redraw) {
21190 -+ redraw_screen(vc, 0);
21191 -+ update_region(vc, vc->vc_origin +
21192 -+ vc->vc_size_row * vc->vc_top,
21193 -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
21194 -+ }
21195 -+
21196 -+ printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
21197 -+ vc->vc_num);
21198 -+
21199 -+ return 0;
21200 -+}
21201 -+
21202 -+/* Enables fbcondecor on a virtual console; called with console sem held. */
21203 -+static int fbcon_decor_enable(struct vc_data *vc)
21204 -+{
21205 -+ struct fb_info* info;
21206 -+
21207 -+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
21208 -+
21209 -+ if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
21210 -+ info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
21211 -+ vc->vc_num == fg_console))
21212 -+ return -EINVAL;
21213 -+
21214 -+ vc->vc_decor.state = 1;
21215 -+ vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
21216 -+ vc->vc_decor.theight / vc->vc_font.height);
21217 -+
21218 -+ if (fg_console == vc->vc_num) {
21219 -+ redraw_screen(vc, 0);
21220 -+ update_region(vc, vc->vc_origin +
21221 -+ vc->vc_size_row * vc->vc_top,
21222 -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
21223 -+ fbcon_decor_clear_margins(vc, info, 0);
21224 -+ }
21225 -+
21226 -+ printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
21227 -+ vc->vc_num);
21228 -+
21229 -+ return 0;
21230 -+}
21231 -+
21232 -+static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
21233 -+{
21234 -+ int ret;
21235 -+
21236 -+// if (origin == FBCON_DECOR_IO_ORIG_USER)
21237 -+ console_lock();
21238 -+ if (!state)
21239 -+ ret = fbcon_decor_disable(vc, 1);
21240 -+ else
21241 -+ ret = fbcon_decor_enable(vc);
21242 -+// if (origin == FBCON_DECOR_IO_ORIG_USER)
21243 -+ console_unlock();
21244 -+
21245 -+ return ret;
21246 -+}
21247 -+
21248 -+static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
21249 -+{
21250 -+ *state = vc->vc_decor.state;
21251 -+}
21252 -+
21253 -+static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
21254 -+{
21255 -+ struct fb_info *info;
21256 -+ int len;
21257 -+ char *tmp;
21258 -+
21259 -+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
21260 -+
21261 -+ if (info == NULL || !cfg->twidth || !cfg->theight ||
21262 -+ cfg->tx + cfg->twidth > info->var.xres ||
21263 -+ cfg->ty + cfg->theight > info->var.yres)
21264 -+ return -EINVAL;
21265 -+
21266 -+ len = strlen_user(cfg->theme);
21267 -+ if (!len || len > FBCON_DECOR_THEME_LEN)
21268 -+ return -EINVAL;
21269 -+ tmp = kmalloc(len, GFP_KERNEL);
21270 -+ if (!tmp)
21271 -+ return -ENOMEM;
21272 -+ if (copy_from_user(tmp, (void __user *)cfg->theme, len)) {
21272 -+ kfree(tmp);
21273 -+ return -EFAULT;
21273 -+ }
21274 -+ cfg->theme = tmp;
21275 -+ cfg->state = 0;
21276 -+
21277 -+ /* If this ioctl is a response to a request from the kernel, the console sem
21278 -+ * is already held; we also don't need to disable the decor, because either
21279 -+ * the new config and background picture will load successfully and the
21280 -+ * decor will stay on, or, on failure, it will be turned off in fbcon. */
21281 -+// if (origin == FBCON_DECOR_IO_ORIG_USER) {
21282 -+ console_lock();
21283 -+ if (vc->vc_decor.state)
21284 -+ fbcon_decor_disable(vc, 1);
21285 -+// }
21286 -+
21287 -+ if (vc->vc_decor.theme)
21288 -+ kfree(vc->vc_decor.theme);
21289 -+
21290 -+ vc->vc_decor = *cfg;
21291 -+
21292 -+// if (origin == FBCON_DECOR_IO_ORIG_USER)
21293 -+ console_unlock();
21294 -+
21295 -+ printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
21296 -+ vc->vc_num, vc->vc_decor.theme);
21297 -+ return 0;
21298 -+}
21299 -+
21300 -+static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
21301 -+{
21302 -+ char __user *tmp;
21303 -+
21304 -+ tmp = decor->theme;
21305 -+ *decor = vc->vc_decor;
21306 -+ decor->theme = tmp;
21307 -+
21308 -+ if (vc->vc_decor.theme) {
21309 -+ if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
21310 -+ return -EFAULT;
21311 -+ } else
21312 -+ if (put_user(0, tmp))
21313 -+ return -EFAULT;
21314 -+
21315 -+ return 0;
21316 -+}
21317 -+
21318 -+static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
21319 -+{
21320 -+ struct fb_info *info;
21321 -+ int len;
21322 -+ u8 *tmp;
21323 -+
21324 -+ if (vc->vc_num != fg_console)
21325 -+ return -EINVAL;
21326 -+
21327 -+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
21328 -+
21329 -+ if (info == NULL)
21330 -+ return -EINVAL;
21331 -+
21332 -+ if (img->width != info->var.xres || img->height != info->var.yres) {
21333 -+ printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
21334 -+ printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
21335 -+ return -EINVAL;
21336 -+ }
21337 -+
21338 -+ if (img->depth != info->var.bits_per_pixel) {
21339 -+ printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
21340 -+ return -EINVAL;
21341 -+ }
21342 -+
21343 -+ if (img->depth == 8) {
21344 -+ if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
21345 -+ !img->cmap.blue)
21346 -+ return -EINVAL;
21347 -+
21348 -+ tmp = vmalloc(img->cmap.len * 3 * 2);
21349 -+ if (!tmp)
21350 -+ return -ENOMEM;
21351 -+
21352 -+ if (copy_from_user(tmp,
21353 -+ (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
21354 -+ copy_from_user(tmp + (img->cmap.len << 1),
21355 -+ (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
21356 -+ copy_from_user(tmp + (img->cmap.len << 2),
21357 -+ (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
21358 -+ vfree(tmp);
21359 -+ return -EFAULT;
21360 -+ }
21361 -+
21362 -+ img->cmap.transp = NULL;
21363 -+ img->cmap.red = (u16*)tmp;
21364 -+ img->cmap.green = img->cmap.red + img->cmap.len;
21365 -+ img->cmap.blue = img->cmap.green + img->cmap.len;
21366 -+ } else {
21367 -+ img->cmap.red = NULL;
21368 -+ }
21369 -+
21370 -+ len = ((img->depth + 7) >> 3) * img->width * img->height;
21371 -+
21372 -+ /*
21373 -+ * Allocate an additional byte so that we never go outside of the
21374 -+ * buffer boundaries in the rendering functions in a 24 bpp mode.
21375 -+ */
21376 -+ tmp = vmalloc(len + 1);
21377 -+
21378 -+ if (!tmp)
21379 -+ goto out;
21380 -+
21381 -+ if (copy_from_user(tmp, (void __user*)img->data, len))
21382 -+ goto out;
21383 -+
21384 -+ img->data = tmp;
21385 -+
21386 -+ /* If this ioctl is a response to a request from the kernel, the console sem
21387 -+ * is already held. */
21388 -+// if (origin == FBCON_DECOR_IO_ORIG_USER)
21389 -+ console_lock();
21390 -+
21391 -+ if (info->bgdecor.data)
21392 -+ vfree((u8*)info->bgdecor.data);
21393 -+ if (info->bgdecor.cmap.red)
21394 -+ vfree(info->bgdecor.cmap.red);
21395 -+
21396 -+ info->bgdecor = *img;
21397 -+
21398 -+ if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
21399 -+ redraw_screen(vc, 0);
21400 -+ update_region(vc, vc->vc_origin +
21401 -+ vc->vc_size_row * vc->vc_top,
21402 -+ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
21403 -+ fbcon_decor_clear_margins(vc, info, 0);
21404 -+ }
21405 -+
21406 -+// if (origin == FBCON_DECOR_IO_ORIG_USER)
21407 -+ console_unlock();
21408 -+
21409 -+ return 0;
21410 -+
21411 -+out: if (img->cmap.red)
21412 -+ vfree(img->cmap.red);
21413 -+
21414 -+ if (tmp)
21415 -+ vfree(tmp);
21416 -+ return -ENOMEM;
21417 -+}
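
The sizing comment inside fbcon_decor_ioctl_dosetpic() is worth pulling out: the backing buffer is bytes per pixel (rounded up to whole bytes) times the pixel count, plus one guard byte, because the 24 bpp render paths may read a full 32-bit word at the last pixel. As arithmetic (a sketch):

    #include <stdio.h>

    static unsigned long decor_len(unsigned depth, unsigned w, unsigned h)
    {
        return ((depth + 7) >> 3) * (unsigned long)w * h + 1;  /* +1 guard byte */
    }

    int main(void)
    {
        printf("%lu\n", decor_len(24, 1024, 768));  /* 3*786432 + 1 = 2359297 */
        return 0;
    }
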
21418 -+
21419 -+static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
21420 -+{
21421 -+ struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
21422 -+ struct vc_data *vc = NULL;
21423 -+ unsigned short vc_num = 0;
21424 -+ unsigned char origin = 0;
21425 -+ void __user *data = NULL;
21426 -+
21427 -+ if (!access_ok(VERIFY_READ, wrapper,
21428 -+ sizeof(struct fbcon_decor_iowrapper)))
21429 -+ return -EFAULT;
21430 -+
21431 -+ __get_user(vc_num, &wrapper->vc);
21432 -+ __get_user(origin, &wrapper->origin);
21433 -+ __get_user(data, &wrapper->data);
21434 -+
21435 -+ if (!vc_cons_allocated(vc_num))
21436 -+ return -EINVAL;
21437 -+
21438 -+ vc = vc_cons[vc_num].d;
21439 -+
21440 -+ switch (cmd) {
21441 -+ case FBIOCONDECOR_SETPIC:
21442 -+ {
21443 -+ struct fb_image img;
21444 -+ if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
21445 -+ return -EFAULT;
21446 -+
21447 -+ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
21448 -+ }
21449 -+ case FBIOCONDECOR_SETCFG:
21450 -+ {
21451 -+ struct vc_decor cfg;
21452 -+ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
21453 -+ return -EFAULT;
21454 -+
21455 -+ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
21456 -+ }
21457 -+ case FBIOCONDECOR_GETCFG:
21458 -+ {
21459 -+ int rval;
21460 -+ struct vc_decor cfg;
21461 -+
21462 -+ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
21463 -+ return -EFAULT;
21464 -+
21465 -+ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
21466 -+
21467 -+ if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
21468 -+ return -EFAULT;
21469 -+ return rval;
21470 -+ }
21471 -+ case FBIOCONDECOR_SETSTATE:
21472 -+ {
21473 -+ unsigned int state = 0;
21474 -+ if (get_user(state, (unsigned int __user *)data))
21475 -+ return -EFAULT;
21476 -+ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
21477 -+ }
21478 -+ case FBIOCONDECOR_GETSTATE:
21479 -+ {
21480 -+ unsigned int state = 0;
21481 -+ fbcon_decor_ioctl_dogetstate(vc, &state);
21482 -+ return put_user(state, (unsigned int __user *)data);
21483 -+ }
21484 -+
21485 -+ default:
21486 -+ return -ENOIOCTLCMD;
21487 -+ }
21488 -+}
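
From userspace, every fbcondecor ioctl goes through the wrapper struct dispatched above: the caller fills in the target console, the request origin, and a pointer to the real payload, then issues the ioctl on the misc device. A sketch of enabling the decor on console 0, assuming the device node takes the miscdevice name (/dev/fbcondecor); the struct layout and ioctl number are copied from the uapi hunk further below:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    struct fbcon_decor_iowrapper {
        unsigned short vc;        /* virtual console */
        unsigned char origin;     /* point of origin of the request */
        void *data;
    };

    #define FBCON_DECOR_IO_ORIG_USER 1
    #define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)

    int main(void)
    {
        unsigned int state = 1;   /* 1 = decor on */
        struct fbcon_decor_iowrapper w = {
            .vc = 0, .origin = FBCON_DECOR_IO_ORIG_USER, .data = &state,
        };
        int fd = open("/dev/fbcondecor", O_RDWR);

        if (fd < 0 || ioctl(fd, FBIOCONDECOR_SETSTATE, &w) < 0) {
            perror("fbcondecor");
            return 1;
        }
        close(fd);
        return 0;
    }
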
21489 -+
21490 -+#ifdef CONFIG_COMPAT
21491 -+
21492 -+static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
21493 -+
21494 -+ struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
21495 -+ struct vc_data *vc = NULL;
21496 -+ unsigned short vc_num = 0;
21497 -+ unsigned char origin = 0;
21498 -+ compat_uptr_t data_compat = 0;
21499 -+ void __user *data = NULL;
21500 -+
21501 -+ if (!access_ok(VERIFY_READ, wrapper,
21502 -+ sizeof(struct fbcon_decor_iowrapper32)))
21503 -+ return -EFAULT;
21504 -+
21505 -+ __get_user(vc_num, &wrapper->vc);
21506 -+ __get_user(origin, &wrapper->origin);
21507 -+ __get_user(data_compat, &wrapper->data);
21508 -+ data = compat_ptr(data_compat);
21509 -+
21510 -+ if (!vc_cons_allocated(vc_num))
21511 -+ return -EINVAL;
21512 -+
21513 -+ vc = vc_cons[vc_num].d;
21514 -+
21515 -+ switch (cmd) {
21516 -+ case FBIOCONDECOR_SETPIC32:
21517 -+ {
21518 -+ struct fb_image32 img_compat;
21519 -+ struct fb_image img;
21520 -+
21521 -+ if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
21522 -+ return -EFAULT;
21523 -+
21524 -+ fb_image_from_compat(img, img_compat);
21525 -+
21526 -+ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
21527 -+ }
21528 -+
21529 -+ case FBIOCONDECOR_SETCFG32:
21530 -+ {
21531 -+ struct vc_decor32 cfg_compat;
21532 -+ struct vc_decor cfg;
21533 -+
21534 -+ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
21535 -+ return -EFAULT;
21536 -+
21537 -+ vc_decor_from_compat(cfg, cfg_compat);
21538 -+
21539 -+ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
21540 -+ }
21541 -+
21542 -+ case FBIOCONDECOR_GETCFG32:
21543 -+ {
21544 -+ int rval;
21545 -+ struct vc_decor32 cfg_compat;
21546 -+ struct vc_decor cfg;
21547 -+
21548 -+ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
21549 -+ return -EFAULT;
21550 -+ cfg.theme = compat_ptr(cfg_compat.theme);
21551 -+
21552 -+ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
21553 -+
21554 -+ vc_decor_to_compat(cfg_compat, cfg);
21555 -+
21556 -+ if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
21557 -+ return -EFAULT;
21558 -+ return rval;
21559 -+ }
21560 -+
21561 -+ case FBIOCONDECOR_SETSTATE32:
21562 -+ {
21563 -+ compat_uint_t state_compat = 0;
21564 -+ unsigned int state = 0;
21565 -+
21566 -+ if (get_user(state_compat, (compat_uint_t __user *)data))
21567 -+ return -EFAULT;
21568 -+
21569 -+ state = (unsigned int)state_compat;
21570 -+
21571 -+ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
21572 -+ }
21573 -+
21574 -+ case FBIOCONDECOR_GETSTATE32:
21575 -+ {
21576 -+ compat_uint_t state_compat = 0;
21577 -+ unsigned int state = 0;
21578 -+
21579 -+ fbcon_decor_ioctl_dogetstate(vc, &state);
21580 -+ state_compat = (compat_uint_t)state;
21581 -+
21582 -+ return put_user(state_compat, (compat_uint_t __user *)data);
21583 -+ }
21584 -+
21585 -+ default:
21586 -+ return -ENOIOCTLCMD;
21587 -+ }
21588 -+}
21589 -+#else
21590 -+ #define fbcon_decor_compat_ioctl NULL
21591 -+#endif
21592 -+
21593 -+static struct file_operations fbcon_decor_ops = {
21594 -+ .owner = THIS_MODULE,
21595 -+ .unlocked_ioctl = fbcon_decor_ioctl,
21596 -+ .compat_ioctl = fbcon_decor_compat_ioctl
21597 -+};
21598 -+
21599 -+static struct miscdevice fbcon_decor_dev = {
21600 -+ .minor = MISC_DYNAMIC_MINOR,
21601 -+ .name = "fbcondecor",
21602 -+ .fops = &fbcon_decor_ops
21603 -+};
21604 -+
21605 -+void fbcon_decor_reset(void)
21606 -+{
21607 -+ int i;
21608 -+
21609 -+ for (i = 0; i < num_registered_fb; i++) {
21610 -+ registered_fb[i]->bgdecor.data = NULL;
21611 -+ registered_fb[i]->bgdecor.cmap.red = NULL;
21612 -+ }
21613 -+
21614 -+ for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
21615 -+ vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
21616 -+ vc_cons[i].d->vc_decor.theight = 0;
21617 -+ vc_cons[i].d->vc_decor.theme = NULL;
21618 -+ }
21619 -+
21620 -+ return;
21621 -+}
21622 -+
21623 -+int fbcon_decor_init(void)
21624 -+{
21625 -+ int i;
21626 -+
21627 -+ fbcon_decor_reset();
21628 -+
21629 -+ if (initialized)
21630 -+ return 0;
21631 -+
21632 -+ i = misc_register(&fbcon_decor_dev);
21633 -+ if (i) {
21634 -+ printk(KERN_ERR "fbcondecor: failed to register device\n");
21635 -+ return i;
21636 -+ }
21637 -+
21638 -+ fbcon_decor_call_helper("init", 0);
21639 -+ initialized = 1;
21640 -+ return 0;
21641 -+}
21642 -+
21643 -+int fbcon_decor_exit(void)
21644 -+{
21645 -+ fbcon_decor_reset();
21646 -+ return 0;
21647 -+}
21648 -+
21649 -+EXPORT_SYMBOL(fbcon_decor_path);
21650 -diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
21651 -new file mode 100644
21652 -index 0000000..3b3724b
21653 ---- /dev/null
21654 -+++ b/drivers/video/console/fbcondecor.h
21655 -@@ -0,0 +1,78 @@
21656 -+/*
21657 -+ * linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
21658 -+ *
21659 -+ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@×××××.com>
21660 -+ *
21661 -+ */
21662 -+
21663 -+#ifndef __FBCON_DECOR_H
21664 -+#define __FBCON_DECOR_H
21665 -+
21666 -+#ifndef _LINUX_FB_H
21667 -+#include <linux/fb.h>
21668 -+#endif
21669 -+
21670 -+/* This is needed for vc_cons in fbcmap.c */
21671 -+#include <linux/vt_kern.h>
21672 -+
21673 -+struct fb_cursor;
21674 -+struct fb_info;
21675 -+struct vc_data;
21676 -+
21677 -+#ifdef CONFIG_FB_CON_DECOR
21678 -+/* fbcondecor.c */
21679 -+int fbcon_decor_init(void);
21680 -+int fbcon_decor_exit(void);
21681 -+int fbcon_decor_call_helper(char* cmd, unsigned short cons);
21682 -+int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
21683 -+
21684 -+/* cfbcondecor.c */
21685 -+void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
21686 -+void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
21687 -+void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
21688 -+void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
21689 -+void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
21690 -+void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
21691 -+void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
21692 -+void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
21693 -+
21694 -+/* vt.c */
21695 -+void acquire_console_sem(void);
21696 -+void release_console_sem(void);
21697 -+void do_unblank_screen(int entering_gfx);
21698 -+
21699 -+/* struct vc_data *y */
21700 -+#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
21701 -+
21702 -+/* struct fb_info *x, struct vc_data *y */
21703 -+#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
21704 -+
21705 -+/* struct fb_info *x, struct vc_data *y */
21706 -+#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) && \
21707 -+ x->bgdecor.width == x->var.xres && \
21708 -+ x->bgdecor.height == x->var.yres && \
21709 -+ x->bgdecor.depth == x->var.bits_per_pixel)
21710 -+
21711 -+
21712 -+#else /* CONFIG_FB_CON_DECOR */
21713 -+
21714 -+static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
21715 -+static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
21716 -+static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
21717 -+static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
21718 -+static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
21719 -+static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
21720 -+static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
21721 -+static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
21722 -+static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
21723 -+static inline int fbcon_decor_init(void) { return 0; }
21724 -+static inline int fbcon_decor_exit(void) { return 0; }
21725 -+static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
21726 -+
21727 -+#define fbcon_decor_active_vc(y) (0)
21728 -+#define fbcon_decor_active_nores(x,y) (0)
21729 -+#define fbcon_decor_active(x,y) (0)
21730 -+
21731 -+#endif /* CONFIG_FB_CON_DECOR */
21732 -+
21733 -+#endif /* __FBCON_DECOR_H */
21734 -diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
21735 -index e1f4727..2952e33 100644
21736 ---- a/drivers/video/fbdev/Kconfig
21737 -+++ b/drivers/video/fbdev/Kconfig
21738 -@@ -1204,7 +1204,6 @@ config FB_MATROX
21739 - select FB_CFB_FILLRECT
21740 - select FB_CFB_COPYAREA
21741 - select FB_CFB_IMAGEBLIT
21742 -- select FB_TILEBLITTING
21743 - select FB_MACMODES if PPC_PMAC
21744 - ---help---
21745 - Say Y here if you have a Matrox Millennium, Matrox Millennium II,
21746 -diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
21747 -index f89245b..05e036c 100644
21748 ---- a/drivers/video/fbdev/core/fbcmap.c
21749 -+++ b/drivers/video/fbdev/core/fbcmap.c
21750 -@@ -17,6 +17,8 @@
21751 - #include <linux/slab.h>
21752 - #include <linux/uaccess.h>
21753 -
21754 -+#include "../../console/fbcondecor.h"
21755 -+
21756 - static u16 red2[] __read_mostly = {
21757 - 0x0000, 0xaaaa
21758 - };
21759 -@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
21760 - if (transp)
21761 - htransp = *transp++;
21762 - if (info->fbops->fb_setcolreg(start++,
21763 -- hred, hgreen, hblue,
21764 -+ hred, hgreen, hblue,
21765 - htransp, info))
21766 - break;
21767 - }
21768 - }
21769 -- if (rc == 0)
21770 -+ if (rc == 0) {
21771 - fb_copy_cmap(cmap, &info->cmap);
21772 --
21773 -+ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
21774 -+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
21775 -+ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
21776 -+ }
21777 - return rc;
21778 - }
21779 -
21780 -diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
21781 -index b6d5008..d6703f2 100644
21782 ---- a/drivers/video/fbdev/core/fbmem.c
21783 -+++ b/drivers/video/fbdev/core/fbmem.c
21784 -@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
21785 - u16 reserved[3];
21786 - };
21787 -
21788 --struct fb_cmap32 {
21789 -- u32 start;
21790 -- u32 len;
21791 -- compat_caddr_t red;
21792 -- compat_caddr_t green;
21793 -- compat_caddr_t blue;
21794 -- compat_caddr_t transp;
21795 --};
21796 --
21797 - static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
21798 - unsigned long arg)
21799 - {
21800 -diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
21801 -new file mode 100644
21802 -index 0000000..04b8d80
21803 ---- /dev/null
21804 -+++ b/include/linux/console_decor.h
21805 -@@ -0,0 +1,46 @@
21806 -+#ifndef _LINUX_CONSOLE_DECOR_H_
21807 -+#define _LINUX_CONSOLE_DECOR_H_ 1
21808 -+
21809 -+/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
21810 -+struct vc_decor {
21811 -+ __u8 bg_color; /* The color that is to be treated as transparent */
21812 -+ __u8 state; /* Current decor state: 0 = off, 1 = on */
21813 -+ __u16 tx, ty; /* Top left corner coordinates of the text field */
21814 -+ __u16 twidth, theight; /* Width and height of the text field */
21815 -+ char* theme;
21816 -+};
21817 -+
21818 -+#ifdef __KERNEL__
21819 -+#ifdef CONFIG_COMPAT
21820 -+#include <linux/compat.h>
21821 -+
21822 -+struct vc_decor32 {
21823 -+ __u8 bg_color; /* The color that is to be treated as transparent */
21824 -+ __u8 state; /* Current decor state: 0 = off, 1 = on */
21825 -+ __u16 tx, ty; /* Top left corner coordinates of the text field */
21826 -+ __u16 twidth, theight; /* Width and height of the text field */
21827 -+ compat_uptr_t theme;
21828 -+};
21829 -+
21830 -+#define vc_decor_from_compat(to, from) \
21831 -+ (to).bg_color = (from).bg_color; \
21832 -+ (to).state = (from).state; \
21833 -+ (to).tx = (from).tx; \
21834 -+ (to).ty = (from).ty; \
21835 -+ (to).twidth = (from).twidth; \
21836 -+ (to).theight = (from).theight; \
21837 -+ (to).theme = compat_ptr((from).theme)
21838 -+
21839 -+#define vc_decor_to_compat(to, from) \
21840 -+ (to).bg_color = (from).bg_color; \
21841 -+ (to).state = (from).state; \
21842 -+ (to).tx = (from).tx; \
21843 -+ (to).ty = (from).ty; \
21844 -+ (to).twidth = (from).twidth; \
21845 -+ (to).theight = (from).theight; \
21846 -+ (to).theme = ptr_to_compat((from).theme)
21847 -+
21848 -+#endif /* CONFIG_COMPAT */
21849 -+#endif /* __KERNEL__ */
21850 -+
21851 -+#endif
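
The vc_decor/vc_decor32 pairing above is the standard compat-ioctl recipe: the 32-bit layout replaces the pointer member with compat_uptr_t, and the conversion macros copy the plain fields one-for-one while widening the pointer with compat_ptr(). A userspace-safe sketch of the same shape (a table index stands in for the user pointer, since truncating a real 64-bit address would not survive the round trip; all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct cfg32 { uint8_t state; uint16_t tx, ty; uint32_t theme; };
    struct cfg   { uint8_t state; uint16_t tx, ty; const char *theme; };

    static const char *theme_table[] = { "none", "mytheme" };

    /* Mirrors vc_decor_from_compat(): plain fields copied, the 32-bit
     * 'pointer' widened back to a native one. */
    #define cfg_from_compat(to, from)                  \
        (to).state = (from).state;                     \
        (to).tx = (from).tx;                           \
        (to).ty = (from).ty;                           \
        (to).theme = theme_table[(from).theme]

    int main(void)
    {
        struct cfg32 c32 = { .state = 1, .tx = 16, .ty = 16, .theme = 1 };
        struct cfg c;

        cfg_from_compat(c, c32);
        printf("state=%u theme=%s\n", (unsigned)c.state, c.theme);
        return 0;
    }
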
21852 -diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
21853 -index 7f0c329..98f5d60 100644
21854 ---- a/include/linux/console_struct.h
21855 -+++ b/include/linux/console_struct.h
21856 -@@ -19,6 +19,7 @@
21857 - struct vt_struct;
21858 -
21859 - #define NPAR 16
21860 -+#include <linux/console_decor.h>
21861 -
21862 - struct vc_data {
21863 - struct tty_port port; /* Upper level data */
21864 -@@ -107,6 +108,8 @@ struct vc_data {
21865 - unsigned long vc_uni_pagedir;
21866 - unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
21867 - bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
21868 -+
21869 -+ struct vc_decor vc_decor;
21870 - /* additional information is in vt_kern.h */
21871 - };
21872 -
21873 -diff --git a/include/linux/fb.h b/include/linux/fb.h
21874 -index fe6ac95..1e36b03 100644
21875 ---- a/include/linux/fb.h
21876 -+++ b/include/linux/fb.h
21877 -@@ -219,6 +219,34 @@ struct fb_deferred_io {
21878 - };
21879 - #endif
21880 -
21881 -+#ifdef __KERNEL__
21882 -+#ifdef CONFIG_COMPAT
21883 -+struct fb_image32 {
21884 -+ __u32 dx; /* Where to place image */
21885 -+ __u32 dy;
21886 -+ __u32 width; /* Size of image */
21887 -+ __u32 height;
21888 -+ __u32 fg_color; /* Only used when a mono bitmap */
21889 -+ __u32 bg_color;
21890 -+ __u8 depth; /* Depth of the image */
21891 -+ const compat_uptr_t data; /* Pointer to image data */
21892 -+ struct fb_cmap32 cmap; /* color map info */
21893 -+};
21894 -+
21895 -+#define fb_image_from_compat(to, from) \
21896 -+ (to).dx = (from).dx; \
21897 -+ (to).dy = (from).dy; \
21898 -+ (to).width = (from).width; \
21899 -+ (to).height = (from).height; \
21900 -+ (to).fg_color = (from).fg_color; \
21901 -+ (to).bg_color = (from).bg_color; \
21902 -+ (to).depth = (from).depth; \
21903 -+ (to).data = compat_ptr((from).data); \
21904 -+ fb_cmap_from_compat((to).cmap, (from).cmap)
21905 -+
21906 -+#endif /* CONFIG_COMPAT */
21907 -+#endif /* __KERNEL__ */
21908 -+
21909 - /*
21910 - * Frame buffer operations
21911 - *
21912 -@@ -489,6 +517,9 @@ struct fb_info {
21913 - #define FBINFO_STATE_SUSPENDED 1
21914 - u32 state; /* Hardware state i.e suspend */
21915 - void *fbcon_par; /* fbcon use-only private area */
21916 -+
21917 -+ struct fb_image bgdecor;
21918 -+
21919 - /* From here on everything is device dependent */
21920 - void *par;
21921 - /* we need the PCI or similar aperture base/size not
21922 -diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
21923 -index fb795c3..dc77a03 100644
21924 ---- a/include/uapi/linux/fb.h
21925 -+++ b/include/uapi/linux/fb.h
21926 -@@ -8,6 +8,25 @@
21927 -
21928 - #define FB_MAX 32 /* sufficient for now */
21929 -
21930 -+struct fbcon_decor_iowrapper
21931 -+{
21932 -+ unsigned short vc; /* Virtual console */
21933 -+ unsigned char origin; /* Point of origin of the request */
21934 -+ void *data;
21935 -+};
21936 -+
21937 -+#ifdef __KERNEL__
21938 -+#ifdef CONFIG_COMPAT
21939 -+#include <linux/compat.h>
21940 -+struct fbcon_decor_iowrapper32
21941 -+{
21942 -+ unsigned short vc; /* Virtual console */
21943 -+ unsigned char origin; /* Point of origin of the request */
21944 -+ compat_uptr_t data;
21945 -+};
21946 -+#endif /* CONFIG_COMPAT */
21947 -+#endif /* __KERNEL__ */
21948 -+
21949 - /* ioctls
21950 - 0x46 is 'F' */
21951 - #define FBIOGET_VSCREENINFO 0x4600
21952 -@@ -35,6 +54,25 @@
21953 - #define FBIOGET_DISPINFO 0x4618
21954 - #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
21955 -
21956 -+#define FBIOCONDECOR_SETCFG _IOWR('F', 0x19, struct fbcon_decor_iowrapper)
21957 -+#define FBIOCONDECOR_GETCFG _IOR('F', 0x1A, struct fbcon_decor_iowrapper)
21958 -+#define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
21959 -+#define FBIOCONDECOR_GETSTATE _IOR('F', 0x1C, struct fbcon_decor_iowrapper)
21960 -+#define FBIOCONDECOR_SETPIC _IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
21961 -+#ifdef __KERNEL__
21962 -+#ifdef CONFIG_COMPAT
21963 -+#define FBIOCONDECOR_SETCFG32 _IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
21964 -+#define FBIOCONDECOR_GETCFG32 _IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
21965 -+#define FBIOCONDECOR_SETSTATE32 _IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
21966 -+#define FBIOCONDECOR_GETSTATE32 _IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
21967 -+#define FBIOCONDECOR_SETPIC32 _IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
21968 -+#endif /* CONFIG_COMPAT */
21969 -+#endif /* __KERNEL__ */
21970 -+
21971 -+#define FBCON_DECOR_THEME_LEN 128 /* Maximum length of a theme name */
21972 -+#define FBCON_DECOR_IO_ORIG_KERNEL 0 /* Kernel ioctl origin */
21973 -+#define FBCON_DECOR_IO_ORIG_USER 1 /* User ioctl origin */
21974 -+
21975 - #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
21976 - #define FB_TYPE_PLANES 1 /* Non interleaved planes */
21977 - #define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */
21978 -@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
21979 - __u32 reserved[4]; /* Reserved for future compatibility */
21980 - };
21981 -
21982 -+#ifdef __KERNEL__
21983 -+#ifdef CONFIG_COMPAT
21984 -+struct fb_cmap32 {
21985 -+ __u32 start;
21986 -+ __u32 len; /* Number of entries */
21987 -+ compat_uptr_t red; /* Red values */
21988 -+ compat_uptr_t green;
21989 -+ compat_uptr_t blue;
21990 -+ compat_uptr_t transp; /* transparency, can be NULL */
21991 -+};
21992 -+
21993 -+#define fb_cmap_from_compat(to, from) \
21994 -+ (to).start = (from).start; \
21995 -+ (to).len = (from).len; \
21996 -+ (to).red = compat_ptr((from).red); \
21997 -+ (to).green = compat_ptr((from).green); \
21998 -+ (to).blue = compat_ptr((from).blue); \
21999 -+ (to).transp = compat_ptr((from).transp)
22000 -+
22001 -+#endif /* CONFIG_COMPAT */
22002 -+#endif /* __KERNEL__ */
22003 -+
22004 -+
22005 - struct fb_cmap {
22006 - __u32 start; /* First entry */
22007 - __u32 len; /* Number of entries */
22008 -diff --git a/kernel/sysctl.c b/kernel/sysctl.c
22009 -index 74f5b58..6386ab0 100644
22010 ---- a/kernel/sysctl.c
22011 -+++ b/kernel/sysctl.c
22012 -@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
22013 - static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
22014 - #endif
22015 -
22016 -+#ifdef CONFIG_FB_CON_DECOR
22017 -+extern char fbcon_decor_path[];
22018 -+#endif
22019 -+
22020 - #ifdef CONFIG_INOTIFY_USER
22021 - #include <linux/inotify.h>
22022 - #endif
22023 -@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
22024 - .mode = 0555,
22025 - .child = dev_table,
22026 - },
22027 -+#ifdef CONFIG_FB_CON_DECOR
22028 -+ {
22029 -+ .procname = "fbcondecor",
22030 -+ .data = &fbcon_decor_path,
22031 -+ .maxlen = KMOD_PATH_LEN,
22032 -+ .mode = 0644,
22033 -+ .proc_handler = &proc_dostring,
22034 -+ },
22035 -+#endif
22036 - { }
22037 - };
22038 -
22039
22040 diff --git a/5000_enable-additional-cpu-optimizations-for-gcc.patch b/5000_enable-additional-cpu-optimizations-for-gcc.patch
22041 deleted file mode 100644
22042 index f7ab6f0..0000000
22043 --- a/5000_enable-additional-cpu-optimizations-for-gcc.patch
22044 +++ /dev/null
22045 @@ -1,327 +0,0 @@
22046 -This patch has been tested on and known to work with kernel versions from 3.2
22047 -up to the latest git version (pulled on 12/14/2013).
22048 -
22049 -This patch will expand the number of microarchitectures to include new
22050 -processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
22051 -14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
22052 -Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 2nd Gen Core
22053 -i3/i5/i7 (Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), and Intel 4th
22054 -Gen Core i3/i5/i7 (Haswell). It also offers the compiler the 'native' flag.
22055 -
22056 -Small but real speed increases are measurable using a kernel-build ("make")
22057 -benchmark comparing a generic kernel to one built for a specific microarch.
22058 -
22059 -See the following experimental evidence supporting this statement:
22060 -https://github.com/graysky2/kernel_gcc_patch
22061 -
22062 -REQUIREMENTS
22063 -linux version >=3.15
22064 -gcc version <4.9
22065 -
22066 ----
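
One way to check which of the new Kconfig choices actually reached the compiler is to look at the per-arch macros gcc predefines for each -march value. A small sketch; the macro names follow gcc's usual x86 conventions but are assumed here, not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
    #if defined(__btver2__)
        puts("-march=btver2 (AMD Jaguar)");
    #elif defined(__bdver2__)
        puts("-march=bdver2 (AMD Piledriver)");
    #elif defined(__corei7__)
        puts("-march=corei7 (Intel Nehalem-class)");
    #else
        puts("generic / other -march");
    #endif
        return 0;
    }
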
22067 -diff -uprN a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
22068 ---- a/arch/x86/include/asm/module.h 2013-11-03 18:41:51.000000000 -0500
22069 -+++ b/arch/x86/include/asm/module.h 2013-12-15 06:21:24.351122516 -0500
22070 -@@ -15,6 +15,16 @@
22071 - #define MODULE_PROC_FAMILY "586MMX "
22072 - #elif defined CONFIG_MCORE2
22073 - #define MODULE_PROC_FAMILY "CORE2 "
22074 -+#elif defined CONFIG_MNATIVE
22075 -+#define MODULE_PROC_FAMILY "NATIVE "
22076 -+#elif defined CONFIG_MCOREI7
22077 -+#define MODULE_PROC_FAMILY "COREI7 "
22078 -+#elif defined CONFIG_MCOREI7AVX
22079 -+#define MODULE_PROC_FAMILY "COREI7AVX "
22080 -+#elif defined CONFIG_MCOREAVXI
22081 -+#define MODULE_PROC_FAMILY "COREAVXI "
22082 -+#elif defined CONFIG_MCOREAVX2
22083 -+#define MODULE_PROC_FAMILY "COREAVX2 "
22084 - #elif defined CONFIG_MATOM
22085 - #define MODULE_PROC_FAMILY "ATOM "
22086 - #elif defined CONFIG_M686
22087 -@@ -33,6 +43,18 @@
22088 - #define MODULE_PROC_FAMILY "K7 "
22089 - #elif defined CONFIG_MK8
22090 - #define MODULE_PROC_FAMILY "K8 "
22091 -+#elif defined CONFIG_MK10
22092 -+#define MODULE_PROC_FAMILY "K10 "
22093 -+#elif defined CONFIG_MBARCELONA
22094 -+#define MODULE_PROC_FAMILY "BARCELONA "
22095 -+#elif defined CONFIG_MBOBCAT
22096 -+#define MODULE_PROC_FAMILY "BOBCAT "
22097 -+#elif defined CONFIG_MBULLDOZER
22098 -+#define MODULE_PROC_FAMILY "BULLDOZER "
22099 -+#elif defined CONFIG_MPILEDRIVER
22100 -+#define MODULE_PROC_FAMILY "PILEDRIVER "
22101 -+#elif defined CONFIG_MJAGUAR
22102 -+#define MODULE_PROC_FAMILY "JAGUAR "
22103 - #elif defined CONFIG_MELAN
22104 - #define MODULE_PROC_FAMILY "ELAN "
22105 - #elif defined CONFIG_MCRUSOE
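Each MODULE_PROC_FAMILY case added above ends up in the vermagic string that the module loader compares against the running kernel, which is why every new CONFIG_M* symbol needs its own branch. A hedged userspace illustration (the version strings are invented):

    #include <stdio.h>
    #include <string.h>

    #define MODULE_PROC_FAMILY "COREI7 "	/* as if CONFIG_MCOREI7 were set */
    #define VERMAGIC_STRING "4.0.5 SMP mod_unload " MODULE_PROC_FAMILY

    int main(void)
    {
    	const char *kernel_side = "4.0.5 SMP mod_unload K10 ";	/* hypothetical kernel */
    	printf("module vermagic: \"%s\"\n", VERMAGIC_STRING);
    	/* A mismatch here is what makes the loader reject the module. */
    	return strcmp(kernel_side, VERMAGIC_STRING) != 0;
    }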
22106 -diff -uprN a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
22107 ---- a/arch/x86/Kconfig.cpu 2013-11-03 18:41:51.000000000 -0500
22108 -+++ b/arch/x86/Kconfig.cpu 2013-12-15 06:21:24.351122516 -0500
22109 -@@ -139,7 +139,7 @@ config MPENTIUM4
22110 -
22111 -
22112 - config MK6
22113 -- bool "K6/K6-II/K6-III"
22114 -+ bool "AMD K6/K6-II/K6-III"
22115 - depends on X86_32
22116 - ---help---
22117 - Select this for an AMD K6-family processor. Enables use of
22118 -@@ -147,7 +147,7 @@ config MK6
22119 - flags to GCC.
22120 -
22121 - config MK7
22122 -- bool "Athlon/Duron/K7"
22123 -+ bool "AMD Athlon/Duron/K7"
22124 - depends on X86_32
22125 - ---help---
22126 - Select this for an AMD Athlon K7-family processor. Enables use of
22127 -@@ -155,12 +155,55 @@ config MK7
22128 - flags to GCC.
22129 -
22130 - config MK8
22131 -- bool "Opteron/Athlon64/Hammer/K8"
22132 -+ bool "AMD Opteron/Athlon64/Hammer/K8"
22133 - ---help---
22134 - Select this for an AMD Opteron or Athlon64 Hammer-family processor.
22135 - Enables use of some extended instructions, and passes appropriate
22136 - optimization flags to GCC.
22137 -
22138 -+config MK10
22139 -+ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
22140 -+ ---help---
22141 -+ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
22142 -+ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
22143 -+ Enables use of some extended instructions, and passes appropriate
22144 -+ optimization flags to GCC.
22145 -+
22146 -+config MBARCELONA
22147 -+ bool "AMD Barcelona"
22148 -+ ---help---
22149 -+ Select this for AMD Barcelona and newer processors.
22150 -+
22151 -+ Enables -march=barcelona
22152 -+
22153 -+config MBOBCAT
22154 -+ bool "AMD Bobcat"
22155 -+ ---help---
22156 -+ Select this for AMD Bobcat processors.
22157 -+
22158 -+ Enables -march=btver1
22159 -+
22160 -+config MBULLDOZER
22161 -+ bool "AMD Bulldozer"
22162 -+ ---help---
22163 -+ Select this for AMD Bulldozer processors.
22164 -+
22165 -+ Enables -march=bdver1
22166 -+
22167 -+config MPILEDRIVER
22168 -+ bool "AMD Piledriver"
22169 -+ ---help---
22170 -+ Select this for AMD Piledriver processors.
22171 -+
22172 -+ Enables -march=bdver2
22173 -+
22174 -+config MJAGUAR
22175 -+ bool "AMD Jaguar"
22176 -+ ---help---
22177 -+ Select this for AMD Jaguar processors.
22178 -+
22179 -+ Enables -march=btver2
22180 -+
22181 - config MCRUSOE
22182 - bool "Crusoe"
22183 - depends on X86_32
22184 -@@ -251,8 +294,17 @@ config MPSC
22185 - using the cpu family field
22186 - in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
22187 -
22188 -+config MATOM
22189 -+ bool "Intel Atom"
22190 -+ ---help---
22191 -+
22192 -+ Select this for the Intel Atom platform. Intel Atom CPUs have an
22193 -+ in-order pipelining architecture and thus can benefit from
22194 -+ accordingly optimized code. Use a recent GCC with specific Atom
22195 -+ support in order to fully benefit from selecting this option.
22196 -+
22197 - config MCORE2
22198 -- bool "Core 2/newer Xeon"
22199 -+ bool "Intel Core 2"
22200 - ---help---
22201 -
22202 - Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
22203 -@@ -260,14 +312,40 @@ config MCORE2
22204 - family in /proc/cpuinfo. Newer ones have 6 and older ones 15
22205 - (not a typo)
22206 -
22207 --config MATOM
22208 -- bool "Intel Atom"
22209 -+ Enables -march=core2
22210 -+
22211 -+config MCOREI7
22212 -+ bool "Intel Core i7"
22213 - ---help---
22214 -
22215 -- Select this for the Intel Atom platform. Intel Atom CPUs have an
22216 -- in-order pipelining architecture and thus can benefit from
22217 -- accordingly optimized code. Use a recent GCC with specific Atom
22218 -- support in order to fully benefit from selecting this option.
22219 -+	  Select this for the Intel Nehalem platform. Intel Nehalem processors
22220 -+ include Core i3, i5, i7, Xeon: 34xx, 35xx, 55xx, 56xx, 75xx processors.
22221 -+
22222 -+ Enables -march=corei7
22223 -+
22224 -+config MCOREI7AVX
22225 -+ bool "Intel Core 2nd Gen AVX"
22226 -+ ---help---
22227 -+
22228 -+ Select this for 2nd Gen Core processors including Sandy Bridge.
22229 -+
22230 -+ Enables -march=corei7-avx
22231 -+
22232 -+config MCOREAVXI
22233 -+ bool "Intel Core 3rd Gen AVX"
22234 -+ ---help---
22235 -+
22236 -+ Select this for 3rd Gen Core processors including Ivy Bridge.
22237 -+
22238 -+ Enables -march=core-avx-i
22239 -+
22240 -+config MCOREAVX2
22241 -+ bool "Intel Core AVX2"
22242 -+ ---help---
22243 -+
22244 -+ Select this for AVX2 enabled processors including Haswell.
22245 -+
22246 -+ Enables -march=core-avx2
22247 -
22248 - config GENERIC_CPU
22249 - bool "Generic-x86-64"
22250 -@@ -276,6 +354,19 @@ config GENERIC_CPU
22251 - Generic x86-64 CPU.
22252 - Run equally well on all x86-64 CPUs.
22253 -
22254 -+config MNATIVE
22255 -+ bool "Native optimizations autodetected by GCC"
22256 -+ ---help---
22257 -+
22258 -+ GCC 4.2 and above support -march=native, which automatically detects
22259 -+ the optimum settings to use based on your processor. -march=native
22260 -+ also detects and applies additional settings beyond -march specific
22261 -+	  to your CPU (e.g. -msse4). Unless you have a specific reason not to
22262 -+ (e.g. distcc cross-compiling), you should probably be using
22263 -+ -march=native rather than anything listed below.
22264 -+
22265 -+ Enables -march=native
22266 -+
22267 - endchoice
22268 -
22269 - config X86_GENERIC
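The MNATIVE help text above points out that -march=native enables ISA extensions beyond the base architecture. One hedged way to observe this from C is to print GCC's predefined target macros and compare a plain build with a -march=native build:

    /* Build twice: `gcc -O2 demo.c` and `gcc -O2 -march=native demo.c`,
     * then compare the output. __SSE4_2__ and __AVX__ are standard GCC
     * target macros. */
    #include <stdio.h>

    int main(void)
    {
    #ifdef __SSE4_2__
    	puts("SSE4.2 enabled at this -march level");
    #else
    	puts("no SSE4.2 at this -march level");
    #endif
    #ifdef __AVX__
    	puts("AVX enabled at this -march level");
    #endif
    	return 0;
    }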
22270 -@@ -300,7 +391,7 @@ config X86_INTERNODE_CACHE_SHIFT
22271 - config X86_L1_CACHE_SHIFT
22272 - int
22273 - default "7" if MPENTIUM4 || MPSC
22274 -- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
22275 -+ default "6" if MK7 || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MVIAC7 || X86_GENERIC || MNATIVE || GENERIC_CPU
22276 - default "4" if MELAN || M486 || MGEODEGX1
22277 - default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
22278 -
22279 -@@ -331,11 +422,11 @@ config X86_ALIGNMENT_16
22280 -
22281 - config X86_INTEL_USERCOPY
22282 - def_bool y
22283 -- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
22284 -+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || MNATIVE || X86_GENERIC || MK8 || MK7 || MK10 || MBARCELONA || MEFFICEON || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2
22285 -
22286 - config X86_USE_PPRO_CHECKSUM
22287 - def_bool y
22288 -- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
22289 -+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MNATIVE
22290 -
22291 - config X86_USE_3DNOW
22292 - def_bool y
22293 -@@ -363,17 +454,17 @@ config X86_P6_NOP
22294 -
22295 - config X86_TSC
22296 - def_bool y
22297 -- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
22298 -+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MATOM) || X86_64 || MNATIVE
22299 -
22300 - config X86_CMPXCHG64
22301 - def_bool y
22302 -- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
22303 -+ depends on X86_PAE || X86_64 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
22304 -
22305 - # this should be set for all -march=.. options where the compiler
22306 - # generates cmov.
22307 - config X86_CMOV
22308 - def_bool y
22309 -- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
22310 -+ depends on (MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
22311 -
22312 - config X86_MINIMUM_CPU_FAMILY
22313 - int
22314 -diff -uprN a/arch/x86/Makefile b/arch/x86/Makefile
22315 ---- a/arch/x86/Makefile 2013-11-03 18:41:51.000000000 -0500
22316 -+++ b/arch/x86/Makefile 2013-12-15 06:21:24.354455723 -0500
22317 -@@ -61,11 +61,26 @@ else
22318 - KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
22319 -
22320 - # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
22321 -+ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
22322 - cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
22323 -+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
22324 -+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
22325 -+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
22326 -+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
22327 -+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
22328 -+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
22329 - cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
22330 -
22331 - cflags-$(CONFIG_MCORE2) += \
22332 -- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
22333 -+ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
22334 -+ cflags-$(CONFIG_MCOREI7) += \
22335 -+ $(call cc-option,-march=corei7,$(call cc-option,-mtune=corei7))
22336 -+ cflags-$(CONFIG_MCOREI7AVX) += \
22337 -+ $(call cc-option,-march=corei7-avx,$(call cc-option,-mtune=corei7-avx))
22338 -+ cflags-$(CONFIG_MCOREAVXI) += \
22339 -+ $(call cc-option,-march=core-avx-i,$(call cc-option,-mtune=core-avx-i))
22340 -+ cflags-$(CONFIG_MCOREAVX2) += \
22341 -+ $(call cc-option,-march=core-avx2,$(call cc-option,-mtune=core-avx2))
22342 - cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
22343 - $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
22344 - cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
22345 -diff -uprN a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
22346 ---- a/arch/x86/Makefile_32.cpu 2013-11-03 18:41:51.000000000 -0500
22347 -+++ b/arch/x86/Makefile_32.cpu 2013-12-15 06:21:24.354455723 -0500
22348 -@@ -23,7 +23,14 @@ cflags-$(CONFIG_MK6) += -march=k6
22349 - # Please note, that patches that add -march=athlon-xp and friends are pointless.
22350 - # They make zero difference whatsoever to performance at this time.
22351 - cflags-$(CONFIG_MK7) += -march=athlon
22352 -+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
22353 - cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
22354 -+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
22355 -+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
22356 -+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
22357 -+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
22358 -+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
22359 -+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
22360 - cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
22361 - cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
22362 - cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
22363 -@@ -32,6 +39,10 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
22364 - cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
22365 - cflags-$(CONFIG_MVIAC7) += -march=i686
22366 - cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
22367 -+cflags-$(CONFIG_MCOREI7) += -march=i686 $(call tune,corei7)
22368 -+cflags-$(CONFIG_MCOREI7AVX) += -march=i686 $(call tune,corei7-avx)
22369 -+cflags-$(CONFIG_MCOREAVXI) += -march=i686 $(call tune,core-avx-i)
22370 -+cflags-$(CONFIG_MCOREAVX2) += -march=i686 $(call tune,core-avx2)
22371 - cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
22372 - $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
22373
22374 diff --git a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch
22375 deleted file mode 100644
22376 index 468d157..0000000
22377 --- a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-4.0.patch
22378 +++ /dev/null
22379 @@ -1,104 +0,0 @@
22380 -From 63e26848e2df36a3c29d2d38ce8b008539d64a5d Mon Sep 17 00:00:00 2001
22381 -From: Paolo Valente <paolo.valente@×××××××.it>
22382 -Date: Tue, 7 Apr 2015 13:39:12 +0200
22383 -Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r7-4.0
22384 -
22385 -Update Kconfig.iosched and do the related Makefile changes to include
22386 -kernel configuration options for BFQ. Also add the bfqio controller
22387 -to the cgroups subsystem.
22388 -
22389 -Signed-off-by: Paolo Valente <paolo.valente@×××××××.it>
22390 -Signed-off-by: Arianna Avanzini <avanzini.arianna@×××××.com>
22391 ----
22392 - block/Kconfig.iosched | 32 ++++++++++++++++++++++++++++++++
22393 - block/Makefile | 1 +
22394 - include/linux/cgroup_subsys.h | 4 ++++
22395 - 3 files changed, 37 insertions(+)
22396 -
22397 -diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
22398 -index 421bef9..0ee5f0f 100644
22399 ---- a/block/Kconfig.iosched
22400 -+++ b/block/Kconfig.iosched
22401 -@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
22402 - ---help---
22403 - Enable group IO scheduling in CFQ.
22404 -
22405 -+config IOSCHED_BFQ
22406 -+ tristate "BFQ I/O scheduler"
22407 -+ default n
22408 -+ ---help---
22409 -+ The BFQ I/O scheduler tries to distribute bandwidth among
22410 -+ all processes according to their weights.
22411 -+ It aims at distributing the bandwidth as desired, independently of
22412 -+ the disk parameters and with any workload. It also tries to
22413 -+ guarantee low latency to interactive and soft real-time
22414 -+ applications. If compiled built-in (saying Y here), BFQ can
22415 -+ be configured to support hierarchical scheduling.
22416 -+
22417 -+config CGROUP_BFQIO
22418 -+ bool "BFQ hierarchical scheduling support"
22419 -+ depends on CGROUPS && IOSCHED_BFQ=y
22420 -+ default n
22421 -+ ---help---
22422 -+ Enable hierarchical scheduling in BFQ, using the cgroups
22423 -+ filesystem interface. The name of the subsystem will be
22424 -+ bfqio.
22425 -+
22426 - choice
22427 - prompt "Default I/O scheduler"
22428 - default DEFAULT_CFQ
22429 -@@ -52,6 +73,16 @@ choice
22430 - config DEFAULT_CFQ
22431 - bool "CFQ" if IOSCHED_CFQ=y
22432 -
22433 -+ config DEFAULT_BFQ
22434 -+ bool "BFQ" if IOSCHED_BFQ=y
22435 -+ help
22436 -+ Selects BFQ as the default I/O scheduler which will be
22437 -+ used by default for all block devices.
22438 -+ The BFQ I/O scheduler aims at distributing the bandwidth
22439 -+ as desired, independently of the disk parameters and with
22440 -+ any workload. It also tries to guarantee low latency to
22441 -+ interactive and soft real-time applications.
22442 -+
22443 - config DEFAULT_NOOP
22444 - bool "No-op"
22445 -
22446 -@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED
22447 - string
22448 - default "deadline" if DEFAULT_DEADLINE
22449 - default "cfq" if DEFAULT_CFQ
22450 -+ default "bfq" if DEFAULT_BFQ
22451 - default "noop" if DEFAULT_NOOP
22452 -
22453 - endmenu
22454 -diff --git a/block/Makefile b/block/Makefile
22455 -index 00ecc97..1ed86d5 100644
22456 ---- a/block/Makefile
22457 -+++ b/block/Makefile
22458 -@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
22459 - obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
22460 - obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
22461 - obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
22462 -+obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
22463 -
22464 - obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
22465 - obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
22466 -diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
22467 -index e4a96fb..267d681 100644
22468 ---- a/include/linux/cgroup_subsys.h
22469 -+++ b/include/linux/cgroup_subsys.h
22470 -@@ -35,6 +35,10 @@ SUBSYS(freezer)
22471 - SUBSYS(net_cls)
22472 - #endif
22473 -
22474 -+#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
22475 -+SUBSYS(bfqio)
22476 -+#endif
22477 -+
22478 - #if IS_ENABLED(CONFIG_CGROUP_PERF)
22479 - SUBSYS(perf_event)
22480 - #endif
22481 ---
22482 -2.1.0
22483 -
22484
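The include/linux/cgroup_subsys.h hunk above works because that header is an x-macro list: the kernel includes it repeatedly with different definitions of SUBSYS(), so one added line produces an enum entry, a name-table entry, and so on. A hedged, self-contained sketch of the pattern (SUBSYS_LIST is a hypothetical stand-in for including the header twice):

    #include <stdio.h>

    /* Stand-in for cgroup_subsys.h: one entry per subsystem. */
    #define SUBSYS_LIST(X) X(cpuset) X(net_cls) X(bfqio) X(perf_event)

    /* First expansion: an enum of subsystem ids (<name>_cgrp_id). */
    #define SUBSYS(name) name##_cgrp_id,
    enum cgroup_subsys_id { SUBSYS_LIST(SUBSYS) CGROUP_SUBSYS_COUNT };
    #undef SUBSYS

    /* Second expansion: a parallel table of subsystem names. */
    #define SUBSYS(name) #name,
    static const char *subsys_name[] = { SUBSYS_LIST(SUBSYS) };
    #undef SUBSYS

    int main(void)
    {
    	printf("%d subsystems; id %d is \"%s\"\n",
    	       CGROUP_SUBSYS_COUNT, bfqio_cgrp_id, subsys_name[bfqio_cgrp_id]);
    	return 0;
    }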
22485 diff --git a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1 b/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1
22486 deleted file mode 100644
22487 index a6cfc58..0000000
22488 --- a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-4.0.patch1
22489 +++ /dev/null
22490 @@ -1,6966 +0,0 @@
22491 -From 8cdf2dae6ee87049c7bb086d34e2ce981b545813 Mon Sep 17 00:00:00 2001
22492 -From: Paolo Valente <paolo.valente@×××××××.it>
22493 -Date: Thu, 9 May 2013 19:10:02 +0200
22494 -Subject: [PATCH 2/3] block: introduce the BFQ-v7r7 I/O sched for 4.0
22495 -
22496 -Add the BFQ-v7r7 I/O scheduler to 4.0.
22497 -The general structure is borrowed from CFQ, as is much of the code for
22498 -handling I/O contexts. Over time, several useful features have been
22499 -ported from CFQ as well (details in the changelog in README.BFQ). A
22500 -(bfq_)queue is associated to each task doing I/O on a device, and each
22501 -time a scheduling decision has to be made a queue is selected and served
22502 -until it expires.
22503 -
22504 - - Slices are given in the service domain: tasks are assigned
22505 -   budgets, measured in number of sectors. Once granted the disk, a task
22506 -   must, however, consume its assigned budget within a configurable
22507 - maximum time (by default, the maximum possible value of the
22508 - budgets is automatically computed to comply with this timeout).
22509 - This allows the desired latency vs "throughput boosting" tradeoff
22510 - to be set.
22511 -
22512 - - Budgets are scheduled according to a variant of WF2Q+, implemented
22513 - using an augmented rb-tree to take eligibility into account while
22514 - preserving an O(log N) overall complexity.
22515 -
22516 - - A low-latency tunable is provided; if enabled, both interactive
22517 - and soft real-time applications are guaranteed a very low latency.
22518 -
22519 - - Latency guarantees are preserved also in the presence of NCQ.
22520 -
22521 - - Also with flash-based devices, a high throughput is achieved
22522 - while still preserving latency guarantees.
22523 -
22524 - - BFQ features Early Queue Merge (EQM), a sort of fusion of the
22525 - cooperating-queue-merging and the preemption mechanisms present
22526 - in CFQ. EQM is in fact a unified mechanism that tries to get a
22527 - sequential read pattern, and hence a high throughput, with any
22528 - set of processes performing interleaved I/O over a contiguous
22529 - sequence of sectors.
22530 -
22531 - - BFQ supports full hierarchical scheduling, exporting a cgroups
22532 - interface. Since each node has a full scheduler, each group can
22533 - be assigned its own weight.
22534 -
22535 - - If the cgroups interface is not used, only I/O priorities can be
22536 - assigned to processes, with ioprio values mapped to weights
22537 - with the relation weight = IOPRIO_BE_NR - ioprio.
22538 -
22539 - - ioprio classes are served in strict priority order, i.e., lower
22540 - priority queues are not served as long as there are higher
22541 - priority queues. Among queues in the same class the bandwidth is
22542 - distributed in proportion to the weight of each queue. A very
22543 - thin extra bandwidth is however guaranteed to the Idle class, to
22544 - prevent it from starving.
22545 -
22546 -Signed-off-by: Paolo Valente <paolo.valente@×××××××.it>
22547 -Signed-off-by: Arianna Avanzini <avanzini.arianna@×××××.com>
22548 ----
22549 - block/bfq-cgroup.c | 936 ++++++++++++
22550 - block/bfq-ioc.c | 36 +
22551 - block/bfq-iosched.c | 3902 +++++++++++++++++++++++++++++++++++++++++++++++++++
22552 - block/bfq-sched.c | 1214 ++++++++++++++++
22553 - block/bfq.h | 775 ++++++++++
22554 - 5 files changed, 6863 insertions(+)
22555 - create mode 100644 block/bfq-cgroup.c
22556 - create mode 100644 block/bfq-ioc.c
22557 - create mode 100644 block/bfq-iosched.c
22558 - create mode 100644 block/bfq-sched.c
22559 - create mode 100644 block/bfq.h
22560 -
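The commit message above notes that, without the cgroups interface, ioprio values are mapped to weights by the relation weight = IOPRIO_BE_NR - ioprio. A hedged sketch of that relation (the helper mirrors, but is not copied from, the patch's bfq_ioprio_to_weight()):

    #include <stdio.h>

    #define IOPRIO_BE_NR 8	/* best-effort priority levels 0..7 */

    /* ioprio 0 (highest priority) -> weight 8; ioprio 7 -> weight 1. */
    static int ioprio_to_weight(int ioprio)
    {
    	return IOPRIO_BE_NR - ioprio;
    }

    int main(void)
    {
    	for (int p = 0; p < IOPRIO_BE_NR; p++)
    		printf("ioprio %d -> weight %d\n", p, ioprio_to_weight(p));
    	return 0;
    }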
22561 -diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
22562 -new file mode 100644
22563 -index 0000000..11e2f1d
22564 ---- /dev/null
22565 -+++ b/block/bfq-cgroup.c
22566 -@@ -0,0 +1,936 @@
22567 -+/*
22568 -+ * BFQ: CGROUPS support.
22569 -+ *
22570 -+ * Based on ideas and code from CFQ:
22571 -+ * Copyright (C) 2003 Jens Axboe <axboe@××××××.dk>
22572 -+ *
22573 -+ * Copyright (C) 2008 Fabio Checconi <fabio@×××××××××××××.it>
22574 -+ * Paolo Valente <paolo.valente@×××××××.it>
22575 -+ *
22576 -+ * Copyright (C) 2010 Paolo Valente <paolo.valente@×××××××.it>
22577 -+ *
22578 -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
22579 -+ * file.
22580 -+ */
22581 -+
22582 -+#ifdef CONFIG_CGROUP_BFQIO
22583 -+
22584 -+static DEFINE_MUTEX(bfqio_mutex);
22585 -+
22586 -+static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
22587 -+{
22588 -+ return bgrp ? !bgrp->online : false;
22589 -+}
22590 -+
22591 -+static struct bfqio_cgroup bfqio_root_cgroup = {
22592 -+ .weight = BFQ_DEFAULT_GRP_WEIGHT,
22593 -+ .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
22594 -+ .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
22595 -+};
22596 -+
22597 -+static inline void bfq_init_entity(struct bfq_entity *entity,
22598 -+ struct bfq_group *bfqg)
22599 -+{
22600 -+ entity->weight = entity->new_weight;
22601 -+ entity->orig_weight = entity->new_weight;
22602 -+ entity->ioprio = entity->new_ioprio;
22603 -+ entity->ioprio_class = entity->new_ioprio_class;
22604 -+ entity->parent = bfqg->my_entity;
22605 -+ entity->sched_data = &bfqg->sched_data;
22606 -+}
22607 -+
22608 -+static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
22609 -+{
22610 -+ return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
22611 -+}
22612 -+
22613 -+/*
22614 -+ * Search the hash table (for now just a list) of bgrp for the bfq_group
22615 -+ * associated with bfqd. Must be called under rcu_read_lock().
22616 -+ */
22617 -+static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
22618 -+ struct bfq_data *bfqd)
22619 -+{
22620 -+ struct bfq_group *bfqg;
22621 -+ void *key;
22622 -+
22623 -+ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
22624 -+ key = rcu_dereference(bfqg->bfqd);
22625 -+ if (key == bfqd)
22626 -+ return bfqg;
22627 -+ }
22628 -+
22629 -+ return NULL;
22630 -+}
22631 -+
22632 -+static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
22633 -+ struct bfq_group *bfqg)
22634 -+{
22635 -+ struct bfq_entity *entity = &bfqg->entity;
22636 -+
22637 -+ /*
22638 -+ * If the weight of the entity has never been set via the sysfs
22639 -+ * interface, then bgrp->weight == 0. In this case we initialize
22640 -+ * the weight from the current ioprio value. Otherwise, the group
22641 -+ * weight, if set, has priority over the ioprio value.
22642 -+ */
22643 -+ if (bgrp->weight == 0) {
22644 -+ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
22645 -+ entity->new_ioprio = bgrp->ioprio;
22646 -+ } else {
22647 -+ if (bgrp->weight < BFQ_MIN_WEIGHT ||
22648 -+ bgrp->weight > BFQ_MAX_WEIGHT) {
22649 -+ printk(KERN_CRIT "bfq_group_init_entity: "
22650 -+ "bgrp->weight %d\n", bgrp->weight);
22651 -+ BUG();
22652 -+ }
22653 -+ entity->new_weight = bgrp->weight;
22654 -+ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
22655 -+ }
22656 -+ entity->orig_weight = entity->weight = entity->new_weight;
22657 -+ entity->ioprio = entity->new_ioprio;
22658 -+ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
22659 -+ entity->my_sched_data = &bfqg->sched_data;
22660 -+ bfqg->active_entities = 0;
22661 -+}
22662 -+
22663 -+static inline void bfq_group_set_parent(struct bfq_group *bfqg,
22664 -+ struct bfq_group *parent)
22665 -+{
22666 -+ struct bfq_entity *entity;
22667 -+
22668 -+ BUG_ON(parent == NULL);
22669 -+ BUG_ON(bfqg == NULL);
22670 -+
22671 -+ entity = &bfqg->entity;
22672 -+ entity->parent = parent->my_entity;
22673 -+ entity->sched_data = &parent->sched_data;
22674 -+}
22675 -+
22676 -+/**
22677 -+ * bfq_group_chain_alloc - allocate a chain of groups.
22678 -+ * @bfqd: queue descriptor.
22679 -+ * @css: the leaf cgroup_subsys_state this chain starts from.
22680 -+ *
22681 -+ * Allocate a chain of groups starting from the one belonging to
22682 -+ * @cgroup up to the root cgroup. Stop if a cgroup on the chain
22683 -+ * to the root already has an allocated group on @bfqd.
22684 -+ */
22685 -+static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
22686 -+ struct cgroup_subsys_state *css)
22687 -+{
22688 -+ struct bfqio_cgroup *bgrp;
22689 -+ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
22690 -+
22691 -+ for (; css != NULL; css = css->parent) {
22692 -+ bgrp = css_to_bfqio(css);
22693 -+
22694 -+ bfqg = bfqio_lookup_group(bgrp, bfqd);
22695 -+ if (bfqg != NULL) {
22696 -+ /*
22697 -+ * All the cgroups in the path from there to the
22698 -+ * root must have a bfq_group for bfqd, so we don't
22699 -+ * need any more allocations.
22700 -+ */
22701 -+ break;
22702 -+ }
22703 -+
22704 -+ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
22705 -+ if (bfqg == NULL)
22706 -+ goto cleanup;
22707 -+
22708 -+ bfq_group_init_entity(bgrp, bfqg);
22709 -+ bfqg->my_entity = &bfqg->entity;
22710 -+
22711 -+ if (leaf == NULL) {
22712 -+ leaf = bfqg;
22713 -+ prev = leaf;
22714 -+ } else {
22715 -+ bfq_group_set_parent(prev, bfqg);
22716 -+ /*
22717 -+ * Build a list of allocated nodes using the bfqd
22718 -+			 * field, which is still unused and will be
22719 -+			 * initialized only after the node is
22720 -+			 * connected.
22721 -+ */
22722 -+ prev->bfqd = bfqg;
22723 -+ prev = bfqg;
22724 -+ }
22725 -+ }
22726 -+
22727 -+ return leaf;
22728 -+
22729 -+cleanup:
22730 -+ while (leaf != NULL) {
22731 -+ prev = leaf;
22732 -+ leaf = leaf->bfqd;
22733 -+ kfree(prev);
22734 -+ }
22735 -+
22736 -+ return NULL;
22737 -+}
22738 -+
22739 -+/**
22740 -+ * bfq_group_chain_link - link an allocated group chain to a cgroup
22741 -+ * hierarchy.
22742 -+ * @bfqd: the queue descriptor.
22743 -+ * @css: the leaf cgroup_subsys_state to start from.
22744 -+ * @leaf: the leaf group (to be associated to @cgroup).
22745 -+ *
22746 -+ * Try to link a chain of groups to a cgroup hierarchy, connecting the
22747 -+ * nodes bottom-up, so we can be sure that when we find a cgroup in the
22748 -+ * hierarchy that already has a group associated to @bfqd, all the nodes
22749 -+ * in the path to the root cgroup have one too.
22750 -+ *
22751 -+ * On locking: the queue lock protects the hierarchy (there is a hierarchy
22752 -+ * per device) while the bfqio_cgroup lock protects the list of groups
22753 -+ * belonging to the same cgroup.
22754 -+ */
22755 -+static void bfq_group_chain_link(struct bfq_data *bfqd,
22756 -+ struct cgroup_subsys_state *css,
22757 -+ struct bfq_group *leaf)
22758 -+{
22759 -+ struct bfqio_cgroup *bgrp;
22760 -+ struct bfq_group *bfqg, *next, *prev = NULL;
22761 -+ unsigned long flags;
22762 -+
22763 -+ assert_spin_locked(bfqd->queue->queue_lock);
22764 -+
22765 -+ for (; css != NULL && leaf != NULL; css = css->parent) {
22766 -+ bgrp = css_to_bfqio(css);
22767 -+ next = leaf->bfqd;
22768 -+
22769 -+ bfqg = bfqio_lookup_group(bgrp, bfqd);
22770 -+ BUG_ON(bfqg != NULL);
22771 -+
22772 -+ spin_lock_irqsave(&bgrp->lock, flags);
22773 -+
22774 -+ rcu_assign_pointer(leaf->bfqd, bfqd);
22775 -+ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
22776 -+ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
22777 -+
22778 -+ spin_unlock_irqrestore(&bgrp->lock, flags);
22779 -+
22780 -+ prev = leaf;
22781 -+ leaf = next;
22782 -+ }
22783 -+
22784 -+ BUG_ON(css == NULL && leaf != NULL);
22785 -+ if (css != NULL && prev != NULL) {
22786 -+ bgrp = css_to_bfqio(css);
22787 -+ bfqg = bfqio_lookup_group(bgrp, bfqd);
22788 -+ bfq_group_set_parent(prev, bfqg);
22789 -+ }
22790 -+}
22791 -+
22792 -+/**
22793 -+ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
22794 -+ * @bfqd: queue descriptor.
22795 -+ * @cgroup: cgroup being searched for.
22796 -+ *
22797 -+ * Return a group associated to @bfqd in @cgroup, allocating one if
22798 -+ * necessary. When a group is returned all the cgroups in the path
22799 -+ * to the root have a group associated to @bfqd.
22800 -+ *
22801 -+ * If the allocation fails, return the root group: this breaks guarantees
22802 -+ * but is a safe fallback. If this loss becomes a problem it can be
22803 -+ * mitigated using the equivalent weight (given by the product of the
22804 -+ * weights of the groups in the path from @group to the root) in the
22805 -+ * root scheduler.
22806 -+ *
22807 -+ * We allocate all the missing nodes in the path from the leaf cgroup
22808 -+ * to the root and we connect the nodes only after all the allocations
22809 -+ * have been successful.
22810 -+ */
22811 -+static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
22812 -+ struct cgroup_subsys_state *css)
22813 -+{
22814 -+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
22815 -+ struct bfq_group *bfqg;
22816 -+
22817 -+ bfqg = bfqio_lookup_group(bgrp, bfqd);
22818 -+ if (bfqg != NULL)
22819 -+ return bfqg;
22820 -+
22821 -+ bfqg = bfq_group_chain_alloc(bfqd, css);
22822 -+ if (bfqg != NULL)
22823 -+ bfq_group_chain_link(bfqd, css, bfqg);
22824 -+ else
22825 -+ bfqg = bfqd->root_group;
22826 -+
22827 -+ return bfqg;
22828 -+}
22829 -+
22830 -+/**
22831 -+ * bfq_bfqq_move - migrate @bfqq to @bfqg.
22832 -+ * @bfqd: queue descriptor.
22833 -+ * @bfqq: the queue to move.
22834 -+ * @entity: @bfqq's entity.
22835 -+ * @bfqg: the group to move to.
22836 -+ *
22837 -+ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
22838 -+ * it on the new one. Avoid putting the entity on the old group idle tree.
22839 -+ *
22840 -+ * Must be called under the queue lock; the cgroup owning @bfqg must
22841 -+ * not disappear (for now this just means that we are called under
22842 -+ * rcu_read_lock()).
22843 -+ */
22844 -+static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
22845 -+ struct bfq_entity *entity, struct bfq_group *bfqg)
22846 -+{
22847 -+ int busy, resume;
22848 -+
22849 -+ busy = bfq_bfqq_busy(bfqq);
22850 -+ resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
22851 -+
22852 -+ BUG_ON(resume && !entity->on_st);
22853 -+ BUG_ON(busy && !resume && entity->on_st &&
22854 -+ bfqq != bfqd->in_service_queue);
22855 -+
22856 -+ if (busy) {
22857 -+ BUG_ON(atomic_read(&bfqq->ref) < 2);
22858 -+
22859 -+ if (!resume)
22860 -+ bfq_del_bfqq_busy(bfqd, bfqq, 0);
22861 -+ else
22862 -+ bfq_deactivate_bfqq(bfqd, bfqq, 0);
22863 -+ } else if (entity->on_st)
22864 -+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
22865 -+
22866 -+ /*
22867 -+ * Here we use a reference to bfqg. We don't need a refcounter
22868 -+ * as the cgroup reference will not be dropped, so that its
22869 -+ * destroy() callback will not be invoked.
22870 -+ */
22871 -+ entity->parent = bfqg->my_entity;
22872 -+ entity->sched_data = &bfqg->sched_data;
22873 -+
22874 -+ if (busy && resume)
22875 -+ bfq_activate_bfqq(bfqd, bfqq);
22876 -+
22877 -+ if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
22878 -+ bfq_schedule_dispatch(bfqd);
22879 -+}
22880 -+
22881 -+/**
22882 -+ * __bfq_bic_change_cgroup - move @bic to @cgroup.
22883 -+ * @bfqd: the queue descriptor.
22884 -+ * @bic: the bic to move.
22885 -+ * @cgroup: the cgroup to move to.
22886 -+ *
22887 -+ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
22888 -+ * has to make sure that the reference to cgroup is valid across the call.
22889 -+ *
22890 -+ * NOTE: an alternative approach might have been to store the current
22891 -+ * cgroup in bfqq and get a reference to it, reducing the lookup
22892 -+ * time here, at the price of slightly more complex code.
22893 -+ */
22894 -+static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
22895 -+ struct bfq_io_cq *bic,
22896 -+ struct cgroup_subsys_state *css)
22897 -+{
22898 -+ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
22899 -+ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
22900 -+ struct bfq_entity *entity;
22901 -+ struct bfq_group *bfqg;
22902 -+ struct bfqio_cgroup *bgrp;
22903 -+
22904 -+ bgrp = css_to_bfqio(css);
22905 -+
22906 -+ bfqg = bfq_find_alloc_group(bfqd, css);
22907 -+ if (async_bfqq != NULL) {
22908 -+ entity = &async_bfqq->entity;
22909 -+
22910 -+ if (entity->sched_data != &bfqg->sched_data) {
22911 -+ bic_set_bfqq(bic, NULL, 0);
22912 -+ bfq_log_bfqq(bfqd, async_bfqq,
22913 -+ "bic_change_group: %p %d",
22914 -+ async_bfqq, atomic_read(&async_bfqq->ref));
22915 -+ bfq_put_queue(async_bfqq);
22916 -+ }
22917 -+ }
22918 -+
22919 -+ if (sync_bfqq != NULL) {
22920 -+ entity = &sync_bfqq->entity;
22921 -+ if (entity->sched_data != &bfqg->sched_data)
22922 -+ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
22923 -+ }
22924 -+
22925 -+ return bfqg;
22926 -+}
22927 -+
22928 -+/**
22929 -+ * bfq_bic_change_cgroup - move @bic to @cgroup.
22930 -+ * @bic: the bic being migrated.
22931 -+ * @cgroup: the destination cgroup.
22932 -+ *
22933 -+ * When the task owning @bic is moved to @cgroup, @bic is immediately
22934 -+ * moved into its new parent group.
22935 -+ */
22936 -+static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
22937 -+ struct cgroup_subsys_state *css)
22938 -+{
22939 -+ struct bfq_data *bfqd;
22940 -+ unsigned long uninitialized_var(flags);
22941 -+
22942 -+ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
22943 -+ &flags);
22944 -+ if (bfqd != NULL) {
22945 -+ __bfq_bic_change_cgroup(bfqd, bic, css);
22946 -+ bfq_put_bfqd_unlock(bfqd, &flags);
22947 -+ }
22948 -+}
22949 -+
22950 -+/**
22951 -+ * bfq_bic_update_cgroup - update the cgroup of @bic.
22952 -+ * @bic: the @bic to update.
22953 -+ *
22954 -+ * Make sure that @bic is enqueued in the cgroup of the current task.
22955 -+ * We need this in addition to moving bics during the cgroup attach
22956 -+ * phase because the task owning @bic could be at its first disk
22957 -+ * access, or we may end up in the root cgroup as the result of a
22958 -+ * memory allocation failure; here we try to move it to the right
22959 -+ * group.
22960 -+ *
22961 -+ * Must be called under the queue lock. It is safe to use the returned
22962 -+ * value even after the rcu_read_unlock() as the migration/destruction
22963 -+ * paths act under the queue lock too. IOW it is impossible to race with
22964 -+ * group migration/destruction and end up with an invalid group as:
22965 -+ * a) here cgroup has not yet been destroyed, nor its destroy callback
22966 -+ * has started execution, as current holds a reference to it,
22967 -+ * b) if it is destroyed after rcu_read_unlock() [after current is
22968 -+ * migrated to a different cgroup] its attach() callback will have
22969 -+ *    taken care of removing all the references to the old cgroup data.
22970 -+ */
22971 -+static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
22972 -+{
22973 -+ struct bfq_data *bfqd = bic_to_bfqd(bic);
22974 -+ struct bfq_group *bfqg;
22975 -+ struct cgroup_subsys_state *css;
22976 -+
22977 -+ BUG_ON(bfqd == NULL);
22978 -+
22979 -+ rcu_read_lock();
22980 -+ css = task_css(current, bfqio_cgrp_id);
22981 -+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
22982 -+ rcu_read_unlock();
22983 -+
22984 -+ return bfqg;
22985 -+}
22986 -+
22987 -+/**
22988 -+ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
22989 -+ * @st: the service tree being flushed.
22990 -+ */
22991 -+static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
22992 -+{
22993 -+ struct bfq_entity *entity = st->first_idle;
22994 -+
22995 -+ for (; entity != NULL; entity = st->first_idle)
22996 -+ __bfq_deactivate_entity(entity, 0);
22997 -+}
22998 -+
22999 -+/**
23000 -+ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
23001 -+ * @bfqd: the device data structure with the root group.
23002 -+ * @entity: the entity to move.
23003 -+ */
23004 -+static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
23005 -+ struct bfq_entity *entity)
23006 -+{
23007 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
23008 -+
23009 -+ BUG_ON(bfqq == NULL);
23010 -+ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
23011 -+ return;
23012 -+}
23013 -+
23014 -+/**
23015 -+ * bfq_reparent_active_entities - move to the root group all active
23016 -+ * entities.
23017 -+ * @bfqd: the device data structure with the root group.
23018 -+ * @bfqg: the group to move from.
23019 -+ * @st: the service tree with the entities.
23020 -+ *
23021 -+ * Needs queue_lock to be taken and reference to be valid over the call.
23022 -+ */
23023 -+static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
23024 -+ struct bfq_group *bfqg,
23025 -+ struct bfq_service_tree *st)
23026 -+{
23027 -+ struct rb_root *active = &st->active;
23028 -+ struct bfq_entity *entity = NULL;
23029 -+
23030 -+ if (!RB_EMPTY_ROOT(&st->active))
23031 -+ entity = bfq_entity_of(rb_first(active));
23032 -+
23033 -+ for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
23034 -+ bfq_reparent_leaf_entity(bfqd, entity);
23035 -+
23036 -+ if (bfqg->sched_data.in_service_entity != NULL)
23037 -+ bfq_reparent_leaf_entity(bfqd,
23038 -+ bfqg->sched_data.in_service_entity);
23039 -+
23040 -+ return;
23041 -+}
23042 -+
23043 -+/**
23044 -+ * bfq_destroy_group - destroy @bfqg.
23045 -+ * @bgrp: the bfqio_cgroup containing @bfqg.
23046 -+ * @bfqg: the group being destroyed.
23047 -+ *
23048 -+ * Destroy @bfqg, making sure that it is not referenced from its parent.
23049 -+ */
23050 -+static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
23051 -+{
23052 -+ struct bfq_data *bfqd;
23053 -+ struct bfq_service_tree *st;
23054 -+ struct bfq_entity *entity = bfqg->my_entity;
23055 -+ unsigned long uninitialized_var(flags);
23056 -+ int i;
23057 -+
23058 -+ hlist_del(&bfqg->group_node);
23059 -+
23060 -+ /*
23061 -+ * Empty all service_trees belonging to this group before
23062 -+ * deactivating the group itself.
23063 -+ */
23064 -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
23065 -+ st = bfqg->sched_data.service_tree + i;
23066 -+
23067 -+ /*
23068 -+ * The idle tree may still contain bfq_queues belonging
23069 -+		 * to exited tasks because they never migrated to a different
23070 -+ * cgroup from the one being destroyed now. No one else
23071 -+ * can access them so it's safe to act without any lock.
23072 -+ */
23073 -+ bfq_flush_idle_tree(st);
23074 -+
23075 -+ /*
23076 -+ * It may happen that some queues are still active
23077 -+ * (busy) upon group destruction (if the corresponding
23078 -+ * processes have been forced to terminate). We move
23079 -+ * all the leaf entities corresponding to these queues
23080 -+ * to the root_group.
23081 -+ * Also, it may happen that the group has an entity
23082 -+ * in service, which is disconnected from the active
23083 -+ * tree: it must be moved, too.
23084 -+ * There is no need to put the sync queues, as the
23085 -+ * scheduler has taken no reference.
23086 -+ */
23087 -+ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
23088 -+ if (bfqd != NULL) {
23089 -+ bfq_reparent_active_entities(bfqd, bfqg, st);
23090 -+ bfq_put_bfqd_unlock(bfqd, &flags);
23091 -+ }
23092 -+ BUG_ON(!RB_EMPTY_ROOT(&st->active));
23093 -+ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
23094 -+ }
23095 -+ BUG_ON(bfqg->sched_data.next_in_service != NULL);
23096 -+ BUG_ON(bfqg->sched_data.in_service_entity != NULL);
23097 -+
23098 -+ /*
23099 -+ * We may race with device destruction, take extra care when
23100 -+ * dereferencing bfqg->bfqd.
23101 -+ */
23102 -+ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
23103 -+ if (bfqd != NULL) {
23104 -+ hlist_del(&bfqg->bfqd_node);
23105 -+ __bfq_deactivate_entity(entity, 0);
23106 -+ bfq_put_async_queues(bfqd, bfqg);
23107 -+ bfq_put_bfqd_unlock(bfqd, &flags);
23108 -+ }
23109 -+ BUG_ON(entity->tree != NULL);
23110 -+
23111 -+ /*
23112 -+ * No need to defer the kfree() to the end of the RCU grace
23113 -+ * period: we are called from the destroy() callback of our
23114 -+ * cgroup, so we can be sure that no one is a) still using
23115 -+ * this cgroup or b) doing lookups in it.
23116 -+ */
23117 -+ kfree(bfqg);
23118 -+}
23119 -+
23120 -+static void bfq_end_wr_async(struct bfq_data *bfqd)
23121 -+{
23122 -+ struct hlist_node *tmp;
23123 -+ struct bfq_group *bfqg;
23124 -+
23125 -+ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
23126 -+ bfq_end_wr_async_queues(bfqd, bfqg);
23127 -+ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
23128 -+}
23129 -+
23130 -+/**
23131 -+ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
23132 -+ * @bfqd: the device descriptor being exited.
23133 -+ *
23134 -+ * When the device exits we just make sure that no lookup can return
23135 -+ * the now unused group structures. They will be deallocated on cgroup
23136 -+ * destruction.
23137 -+ */
23138 -+static void bfq_disconnect_groups(struct bfq_data *bfqd)
23139 -+{
23140 -+ struct hlist_node *tmp;
23141 -+ struct bfq_group *bfqg;
23142 -+
23143 -+ bfq_log(bfqd, "disconnect_groups beginning");
23144 -+ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
23145 -+ hlist_del(&bfqg->bfqd_node);
23146 -+
23147 -+ __bfq_deactivate_entity(bfqg->my_entity, 0);
23148 -+
23149 -+ /*
23150 -+ * Don't remove from the group hash, just set an
23151 -+ * invalid key. No lookups can race with the
23152 -+ * assignment as bfqd is being destroyed; this
23153 -+ * implies also that new elements cannot be added
23154 -+ * to the list.
23155 -+ */
23156 -+ rcu_assign_pointer(bfqg->bfqd, NULL);
23157 -+
23158 -+ bfq_log(bfqd, "disconnect_groups: put async for group %p",
23159 -+ bfqg);
23160 -+ bfq_put_async_queues(bfqd, bfqg);
23161 -+ }
23162 -+}
23163 -+
23164 -+static inline void bfq_free_root_group(struct bfq_data *bfqd)
23165 -+{
23166 -+ struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
23167 -+ struct bfq_group *bfqg = bfqd->root_group;
23168 -+
23169 -+ bfq_put_async_queues(bfqd, bfqg);
23170 -+
23171 -+ spin_lock_irq(&bgrp->lock);
23172 -+ hlist_del_rcu(&bfqg->group_node);
23173 -+ spin_unlock_irq(&bgrp->lock);
23174 -+
23175 -+ /*
23176 -+ * No need to synchronize_rcu() here: since the device is gone
23177 -+ * there cannot be any read-side access to its root_group.
23178 -+ */
23179 -+ kfree(bfqg);
23180 -+}
23181 -+
23182 -+static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
23183 -+{
23184 -+ struct bfq_group *bfqg;
23185 -+ struct bfqio_cgroup *bgrp;
23186 -+ int i;
23187 -+
23188 -+ bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
23189 -+ if (bfqg == NULL)
23190 -+ return NULL;
23191 -+
23192 -+ bfqg->entity.parent = NULL;
23193 -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
23194 -+ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
23195 -+
23196 -+ bgrp = &bfqio_root_cgroup;
23197 -+ spin_lock_irq(&bgrp->lock);
23198 -+ rcu_assign_pointer(bfqg->bfqd, bfqd);
23199 -+ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
23200 -+ spin_unlock_irq(&bgrp->lock);
23201 -+
23202 -+ return bfqg;
23203 -+}
23204 -+
23205 -+#define SHOW_FUNCTION(__VAR) \
23206 -+static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
23207 -+ struct cftype *cftype) \
23208 -+{ \
23209 -+ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
23210 -+ u64 ret = -ENODEV; \
23211 -+ \
23212 -+ mutex_lock(&bfqio_mutex); \
23213 -+ if (bfqio_is_removed(bgrp)) \
23214 -+ goto out_unlock; \
23215 -+ \
23216 -+ spin_lock_irq(&bgrp->lock); \
23217 -+ ret = bgrp->__VAR; \
23218 -+ spin_unlock_irq(&bgrp->lock); \
23219 -+ \
23220 -+out_unlock: \
23221 -+ mutex_unlock(&bfqio_mutex); \
23222 -+ return ret; \
23223 -+}
23224 -+
23225 -+SHOW_FUNCTION(weight);
23226 -+SHOW_FUNCTION(ioprio);
23227 -+SHOW_FUNCTION(ioprio_class);
23228 -+#undef SHOW_FUNCTION
23229 -+
23230 -+#define STORE_FUNCTION(__VAR, __MIN, __MAX) \
23231 -+static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
23232 -+ struct cftype *cftype, \
23233 -+ u64 val) \
23234 -+{ \
23235 -+ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
23236 -+ struct bfq_group *bfqg; \
23237 -+ int ret = -EINVAL; \
23238 -+ \
23239 -+ if (val < (__MIN) || val > (__MAX)) \
23240 -+ return ret; \
23241 -+ \
23242 -+ ret = -ENODEV; \
23243 -+ mutex_lock(&bfqio_mutex); \
23244 -+ if (bfqio_is_removed(bgrp)) \
23245 -+ goto out_unlock; \
23246 -+ ret = 0; \
23247 -+ \
23248 -+ spin_lock_irq(&bgrp->lock); \
23249 -+ bgrp->__VAR = (unsigned short)val; \
23250 -+ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
23251 -+ /* \
23252 -+ * Setting the ioprio_changed flag of the entity \
23253 -+ * to 1 with new_##__VAR == ##__VAR would re-set \
23254 -+ * the value of the weight to its ioprio mapping. \
23255 -+ * Set the flag only if necessary. \
23256 -+ */ \
23257 -+ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \
23258 -+ bfqg->entity.new_##__VAR = (unsigned short)val; \
23259 -+ /* \
23260 -+ * Make sure that the above new value has been \
23261 -+ * stored in bfqg->entity.new_##__VAR before \
23262 -+ * setting the ioprio_changed flag. In fact, \
23263 -+ * this flag may be read asynchronously (in \
23264 -+ * critical sections protected by a different \
23265 -+ * lock than that held here), and finding this \
23266 -+ * flag set may cause the execution of the code \
23267 -+ * for updating parameters whose value may \
23268 -+ * depend also on bfqg->entity.new_##__VAR (in \
23269 -+ * __bfq_entity_update_weight_prio). \
23270 -+ * This barrier makes sure that the new value \
23271 -+ * of bfqg->entity.new_##__VAR is correctly \
23272 -+ * seen in that code. \
23273 -+ */ \
23274 -+ smp_wmb(); \
23275 -+ bfqg->entity.ioprio_changed = 1; \
23276 -+ } \
23277 -+ } \
23278 -+ spin_unlock_irq(&bgrp->lock); \
23279 -+ \
23280 -+out_unlock: \
23281 -+ mutex_unlock(&bfqio_mutex); \
23282 -+ return ret; \
23283 -+}
23284 -+
23285 -+STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
23286 -+STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
23287 -+STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
23288 -+#undef STORE_FUNCTION
23289 -+
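The barrier comment inside STORE_FUNCTION above describes a classic publish pattern: store the value, issue smp_wmb(), then raise the flag, so that a reader that observes the flag also observes the value. A hedged userspace sketch of the pairing, with a full GCC barrier standing in for the kernel's smp_wmb()/smp_rmb():

    #define smp_wmb() __sync_synchronize()	/* stand-in, stronger than needed */
    #define smp_rmb() __sync_synchronize()

    static int new_weight;		/* plays entity.new_##__VAR */
    static int ioprio_changed;	/* plays the ioprio_changed flag */

    static void writer(int v)
    {
    	new_weight = v;
    	smp_wmb();		/* order the value before the flag */
    	ioprio_changed = 1;
    }

    static int reader(void)
    {
    	if (ioprio_changed) {
    		smp_rmb();	/* pairs with the writer's smp_wmb() */
    		return new_weight;	/* now guaranteed to be the new value */
    	}
    	return -1;
    }

    int main(void) { writer(42); return reader() == 42 ? 0 : 1; }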
23290 -+static struct cftype bfqio_files[] = {
23291 -+ {
23292 -+ .name = "weight",
23293 -+ .read_u64 = bfqio_cgroup_weight_read,
23294 -+ .write_u64 = bfqio_cgroup_weight_write,
23295 -+ },
23296 -+ {
23297 -+ .name = "ioprio",
23298 -+ .read_u64 = bfqio_cgroup_ioprio_read,
23299 -+ .write_u64 = bfqio_cgroup_ioprio_write,
23300 -+ },
23301 -+ {
23302 -+ .name = "ioprio_class",
23303 -+ .read_u64 = bfqio_cgroup_ioprio_class_read,
23304 -+ .write_u64 = bfqio_cgroup_ioprio_class_write,
23305 -+ },
23306 -+ { }, /* terminate */
23307 -+};
23308 -+
23309 -+static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
23310 -+ *parent_css)
23311 -+{
23312 -+ struct bfqio_cgroup *bgrp;
23313 -+
23314 -+ if (parent_css != NULL) {
23315 -+ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
23316 -+ if (bgrp == NULL)
23317 -+ return ERR_PTR(-ENOMEM);
23318 -+ } else
23319 -+ bgrp = &bfqio_root_cgroup;
23320 -+
23321 -+ spin_lock_init(&bgrp->lock);
23322 -+ INIT_HLIST_HEAD(&bgrp->group_data);
23323 -+ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
23324 -+ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
23325 -+
23326 -+ return &bgrp->css;
23327 -+}
23328 -+
23329 -+/*
23330 -+ * We cannot support shared io contexts, as we have no means to support
23331 -+ * two tasks with the same ioc in two different groups without major rework
23332 -+ * of the main bic/bfqq data structures. For now we allow a task to change
23333 -+ * its cgroup only if it's the only owner of its ioc; the drawback of this
23334 -+ * behavior is that a group containing a task that forked using CLONE_IO
23335 -+ * will not be destroyed until the tasks sharing the ioc die.
23336 -+ */
23337 -+static int bfqio_can_attach(struct cgroup_subsys_state *css,
23338 -+ struct cgroup_taskset *tset)
23339 -+{
23340 -+ struct task_struct *task;
23341 -+ struct io_context *ioc;
23342 -+ int ret = 0;
23343 -+
23344 -+ cgroup_taskset_for_each(task, tset) {
23345 -+ /*
23346 -+ * task_lock() is needed to avoid races with
23347 -+ * exit_io_context()
23348 -+ */
23349 -+ task_lock(task);
23350 -+ ioc = task->io_context;
23351 -+ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
23352 -+ /*
23353 -+ * ioc == NULL means that the task is either too
23354 -+			 * young or exiting: if it still has no ioc, the
23355 -+			 * ioc can't be shared; if the task is exiting, the
23356 -+			 * attach will fail anyway, no matter what we
23357 -+ * return here.
23358 -+ */
23359 -+ ret = -EINVAL;
23360 -+ task_unlock(task);
23361 -+ if (ret)
23362 -+ break;
23363 -+ }
23364 -+
23365 -+ return ret;
23366 -+}
23367 -+
23368 -+static void bfqio_attach(struct cgroup_subsys_state *css,
23369 -+ struct cgroup_taskset *tset)
23370 -+{
23371 -+ struct task_struct *task;
23372 -+ struct io_context *ioc;
23373 -+ struct io_cq *icq;
23374 -+
23375 -+ /*
23376 -+ * IMPORTANT NOTE: The move of more than one process at a time to a
23377 -+ * new group has not yet been tested.
23378 -+ */
23379 -+ cgroup_taskset_for_each(task, tset) {
23380 -+ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
23381 -+ if (ioc) {
23382 -+ /*
23383 -+ * Handle cgroup change here.
23384 -+ */
23385 -+ rcu_read_lock();
23386 -+ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
23387 -+ if (!strncmp(
23388 -+ icq->q->elevator->type->elevator_name,
23389 -+ "bfq", ELV_NAME_MAX))
23390 -+ bfq_bic_change_cgroup(icq_to_bic(icq),
23391 -+ css);
23392 -+ rcu_read_unlock();
23393 -+ put_io_context(ioc);
23394 -+ }
23395 -+ }
23396 -+}
23397 -+
23398 -+static void bfqio_destroy(struct cgroup_subsys_state *css)
23399 -+{
23400 -+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
23401 -+ struct hlist_node *tmp;
23402 -+ struct bfq_group *bfqg;
23403 -+
23404 -+ /*
23405 -+ * Since we are destroying the cgroup, there are no more tasks
23406 -+ * referencing it, and all the RCU grace periods that may have
23407 -+ * referenced it are ended (as the destruction of the parent
23408 -+ * cgroup is RCU-safe); bgrp->group_data will not be accessed by
23409 -+ * anything else and we don't need any synchronization.
23410 -+ */
23411 -+ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
23412 -+ bfq_destroy_group(bgrp, bfqg);
23413 -+
23414 -+ BUG_ON(!hlist_empty(&bgrp->group_data));
23415 -+
23416 -+ kfree(bgrp);
23417 -+}
23418 -+
23419 -+static int bfqio_css_online(struct cgroup_subsys_state *css)
23420 -+{
23421 -+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
23422 -+
23423 -+ mutex_lock(&bfqio_mutex);
23424 -+ bgrp->online = true;
23425 -+ mutex_unlock(&bfqio_mutex);
23426 -+
23427 -+ return 0;
23428 -+}
23429 -+
23430 -+static void bfqio_css_offline(struct cgroup_subsys_state *css)
23431 -+{
23432 -+ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
23433 -+
23434 -+ mutex_lock(&bfqio_mutex);
23435 -+ bgrp->online = false;
23436 -+ mutex_unlock(&bfqio_mutex);
23437 -+}
23438 -+
23439 -+struct cgroup_subsys bfqio_cgrp_subsys = {
23440 -+ .css_alloc = bfqio_create,
23441 -+ .css_online = bfqio_css_online,
23442 -+ .css_offline = bfqio_css_offline,
23443 -+ .can_attach = bfqio_can_attach,
23444 -+ .attach = bfqio_attach,
23445 -+ .css_free = bfqio_destroy,
23446 -+ .legacy_cftypes = bfqio_files,
23447 -+};
23448 -+#else
23449 -+static inline void bfq_init_entity(struct bfq_entity *entity,
23450 -+ struct bfq_group *bfqg)
23451 -+{
23452 -+ entity->weight = entity->new_weight;
23453 -+ entity->orig_weight = entity->new_weight;
23454 -+ entity->ioprio = entity->new_ioprio;
23455 -+ entity->ioprio_class = entity->new_ioprio_class;
23456 -+ entity->sched_data = &bfqg->sched_data;
23457 -+}
23458 -+
23459 -+static inline struct bfq_group *
23460 -+bfq_bic_update_cgroup(struct bfq_io_cq *bic)
23461 -+{
23462 -+ struct bfq_data *bfqd = bic_to_bfqd(bic);
23463 -+ return bfqd->root_group;
23464 -+}
23465 -+
23466 -+static inline void bfq_bfqq_move(struct bfq_data *bfqd,
23467 -+ struct bfq_queue *bfqq,
23468 -+ struct bfq_entity *entity,
23469 -+ struct bfq_group *bfqg)
23470 -+{
23471 -+}
23472 -+
23473 -+static void bfq_end_wr_async(struct bfq_data *bfqd)
23474 -+{
23475 -+ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
23476 -+}
23477 -+
23478 -+static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
23479 -+{
23480 -+ bfq_put_async_queues(bfqd, bfqd->root_group);
23481 -+}
23482 -+
23483 -+static inline void bfq_free_root_group(struct bfq_data *bfqd)
23484 -+{
23485 -+ kfree(bfqd->root_group);
23486 -+}
23487 -+
23488 -+static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
23489 -+{
23490 -+ struct bfq_group *bfqg;
23491 -+ int i;
23492 -+
23493 -+ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
23494 -+ if (bfqg == NULL)
23495 -+ return NULL;
23496 -+
23497 -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
23498 -+ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
23499 -+
23500 -+ return bfqg;
23501 -+}
23502 -+#endif
23503 -diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
23504 -new file mode 100644
23505 -index 0000000..7f6b000
23506 ---- /dev/null
23507 -+++ b/block/bfq-ioc.c
23508 -@@ -0,0 +1,36 @@
23509 -+/*
23510 -+ * BFQ: I/O context handling.
23511 -+ *
23512 -+ * Based on ideas and code from CFQ:
23513 -+ * Copyright (C) 2003 Jens Axboe <axboe@××××××.dk>
23514 -+ *
23515 -+ * Copyright (C) 2008 Fabio Checconi <fabio@×××××××××××××.it>
23516 -+ * Paolo Valente <paolo.valente@×××××××.it>
23517 -+ *
23518 -+ * Copyright (C) 2010 Paolo Valente <paolo.valente@×××××××.it>
23519 -+ */
23520 -+
23521 -+/**
23522 -+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
23523 -+ * @icq: the iocontext queue.
23524 -+ */
23525 -+static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
23526 -+{
23527 -+ /* bic->icq is the first member, %NULL will convert to %NULL */
23528 -+ return container_of(icq, struct bfq_io_cq, icq);
23529 -+}
23530 -+
23531 -+/**
23532 -+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
23533 -+ * @bfqd: the lookup key.
23534 -+ * @ioc: the io_context of the process doing I/O.
23535 -+ *
23536 -+ * Queue lock must be held.
23537 -+ */
23538 -+static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
23539 -+ struct io_context *ioc)
23540 -+{
23541 -+ if (ioc)
23542 -+ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
23543 -+ return NULL;
23544 -+}
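icq_to_bic() above relies on icq being the first member of struct bfq_io_cq, so the container_of() conversion is a zero-offset cast (which is also why NULL converts to NULL, as the comment notes). A hedged standalone sketch of the mechanism:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct io_cq { int q; };
    struct bfq_io_cq {
    	struct io_cq icq;	/* first member: offset 0 */
    	int ttime;		/* illustrative extra field */
    };

    int main(void)
    {
    	struct bfq_io_cq bic = { .ttime = 42 };
    	struct io_cq *icq = &bic.icq;
    	struct bfq_io_cq *back = container_of(icq, struct bfq_io_cq, icq);
    	printf("%d\n", back->ttime);	/* prints 42 */
    	return 0;
    }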
23545 -diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
23546 -new file mode 100644
23547 -index 0000000..97ee934
23548 ---- /dev/null
23549 -+++ b/block/bfq-iosched.c
23550 -@@ -0,0 +1,3902 @@
23551 -+/*
23552 -+ * Budget Fair Queueing (BFQ) disk scheduler.
23553 -+ *
23554 -+ * Based on ideas and code from CFQ:
23555 -+ * Copyright (C) 2003 Jens Axboe <axboe@××××××.dk>
23556 -+ *
23557 -+ * Copyright (C) 2008 Fabio Checconi <fabio@×××××××××××××.it>
23558 -+ * Paolo Valente <paolo.valente@×××××××.it>
23559 -+ *
23560 -+ * Copyright (C) 2010 Paolo Valente <paolo.valente@×××××××.it>
23561 -+ *
23562 -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
23563 -+ * file.
23564 -+ *
23565 -+ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
23566 -+ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
23567 -+ * measured in number of sectors, to processes instead of time slices. The
23568 -+ * device is not granted to the in-service process for a given time slice,
23569 -+ * but until it has exhausted its assigned budget. This change from the time
23570 -+ * to the service domain allows BFQ to distribute the device throughput
23571 -+ * among processes as desired, without any distortion due to ZBR, workload
23572 -+ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
23573 -+ * called B-WF2Q+, to schedule processes according to their budgets. More
23574 -+ * precisely, BFQ schedules queues associated to processes. Thanks to the
23575 -+ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
23576 -+ * I/O-bound processes issuing sequential requests (to boost the
23577 -+ * throughput), and yet guarantee a low latency to interactive and soft
23578 -+ * real-time applications.
23579 -+ *
23580 -+ * BFQ is described in [1], which also contains a reference to the
23581 -+ * initial, more theoretical paper on BFQ. The interested reader can find
23582 -+ * in the latter paper full details on the main algorithm, as well as
23583 -+ * formulas of the guarantees and formal proofs of all the properties.
23584 -+ * With respect to the version of BFQ presented in these papers, this
23585 -+ * implementation adds a few more heuristics, such as the one that
23586 -+ * guarantees a low latency to soft real-time applications, and a
23587 -+ * hierarchical extension based on H-WF2Q+.
23588 -+ *
23589 -+ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
23590 -+ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
23591 -+ * complexity derives from the one introduced with EEVDF in [3].
23592 -+ *
23593 -+ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
23594 -+ * with the BFQ Disk I/O Scheduler'',
23595 -+ * Proceedings of the 5th Annual International Systems and Storage
23596 -+ * Conference (SYSTOR '12), June 2012.
23597 -+ *
23598 -+ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
23599 -+ *
23600 -+ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
23601 -+ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
23602 -+ * Oct 1997.
23603 -+ *
23604 -+ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
23605 -+ *
23606 -+ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
23607 -+ * First: A Flexible and Accurate Mechanism for Proportional Share
23608 -+ * Resource Allocation,'' technical report.
23609 -+ *
23610 -+ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
23611 -+ */
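As a rough, self-contained illustration of the time-domain vs. service-domain point made above, consider the following toy user-space model (made-up transfer rates, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical ZBR disk: 100 sectors/ms on outer tracks,
	 * 50 sectors/ms on inner tracks. Two equal-weight readers,
	 * A on the outer zone and B on the inner zone. */
	double slice_ms = 10.0, budget_sectors = 500.0;

	/* Time-domain scheme (CFQ-like): equal time, unequal service. */
	printf("time slices: A=%.0f B=%.0f sectors\n",
	       100.0 * slice_ms, 50.0 * slice_ms);

	/* Service-domain scheme (BFQ-like): equal service by construction;
	 * B simply holds the device longer to use up its budget. */
	printf("budgets:     A=%.0f sectors (%.0f ms), B=%.0f sectors (%.0f ms)\n",
	       budget_sectors, budget_sectors / 100.0,
	       budget_sectors, budget_sectors / 50.0);
	return 0;
}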
23612 -+#include <linux/module.h>
23613 -+#include <linux/slab.h>
23614 -+#include <linux/blkdev.h>
23615 -+#include <linux/cgroup.h>
23616 -+#include <linux/elevator.h>
23617 -+#include <linux/jiffies.h>
23618 -+#include <linux/rbtree.h>
23619 -+#include <linux/ioprio.h>
23620 -+#include "bfq.h"
23621 -+#include "blk.h"
23622 -+
23623 -+/* Max number of dispatches in one round of service. */
23624 -+static const int bfq_quantum = 4;
23625 -+
23626 -+/* Expiration time of sync (0) and async (1) requests, in jiffies. */
23627 -+static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
23628 -+
23629 -+/* Maximum backwards seek, in KiB. */
23630 -+static const int bfq_back_max = 16 * 1024;
23631 -+
23632 -+/* Penalty of a backwards seek, in number of sectors. */
23633 -+static const int bfq_back_penalty = 2;
23634 -+
23635 -+/* Idling period duration, in jiffies. */
23636 -+static int bfq_slice_idle = HZ / 125;
23637 -+
23638 -+/* Default maximum budget values, in sectors and number of requests. */
23639 -+static const int bfq_default_max_budget = 16 * 1024;
23640 -+static const int bfq_max_budget_async_rq = 4;
23641 -+
23642 -+/*
23643 -+ * Async to sync throughput distribution is controlled as follows:
23644 -+ * when an async request is served, the entity is charged the number
23645 -+ * of sectors of the request, multiplied by the factor below
23646 -+ */
23647 -+static const int bfq_async_charge_factor = 10;
23648 -+
23649 -+/* Default timeout values, in jiffies, approximating CFQ defaults. */
23650 -+static const int bfq_timeout_sync = HZ / 8;
23651 -+static int bfq_timeout_async = HZ / 25;
23652 -+
23653 -+struct kmem_cache *bfq_pool;
23654 -+
23655 -+/* Below this threshold (in ms), we consider thinktime immediate. */
23656 -+#define BFQ_MIN_TT 2
23657 -+
23658 -+/* hw_tag detection: parallel requests threshold and min samples needed. */
23659 -+#define BFQ_HW_QUEUE_THRESHOLD 4
23660 -+#define BFQ_HW_QUEUE_SAMPLES 32
23661 -+
23662 -+#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
23663 -+#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
23664 -+
23665 -+/* Min samples used for peak rate estimation (for autotuning). */
23666 -+#define BFQ_PEAK_RATE_SAMPLES 32
23667 -+
23668 -+/* Shift used for peak rate fixed precision calculations. */
23669 -+#define BFQ_RATE_SHIFT 16
23670 -+
23671 -+/*
23672 -+ * By default, BFQ computes the duration of the weight raising for
23673 -+ * interactive applications automatically, using the following formula:
23674 -+ * duration = (R / r) * T, where r is the peak rate of the device, and
23675 -+ * R and T are two reference parameters.
23676 -+ * In particular, R is the peak rate of the reference device (see below),
23677 -+ * and T is a reference time: given the systems that are likely to be
23678 -+ * installed on the reference device according to its speed class, T is
23679 -+ * about the maximum time needed, under BFQ and while reading two files in
23680 -+ * parallel, to load typical large applications on these systems.
23681 -+ * In practice, the slower/faster the device at hand is, the more/less it
23682 -+ * takes to load applications with respect to the reference device.
23683 -+ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
23684 -+ * applications.
23685 -+ *
23686 -+ * BFQ uses four different reference pairs (R, T), depending on:
23687 -+ * . whether the device is rotational or non-rotational;
23688 -+ * . whether the device is slow, such as old or portable HDDs, as well as
23689 -+ * SD cards, or fast, such as newer HDDs and SSDs.
23690 -+ *
23691 -+ * The device's speed class is dynamically (re)detected in
23692 -+ * bfq_update_peak_rate() every time the estimated peak rate is updated.
23693 -+ *
23694 -+ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
23695 -+ * are the reference values for a slow/fast rotational device, whereas
23696 -+ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
23697 -+ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
23698 -+ * thresholds used to switch between speed classes.
23699 -+ * Both the reference peak rates and the thresholds are measured in
23700 -+ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
23701 -+ */
23702 -+static int R_slow[2] = {1536, 10752};
23703 -+static int R_fast[2] = {17415, 34791};
23704 -+/*
23705 -+ * To improve readability, a conversion function is used to initialize the
23706 -+ * following arrays; as a consequence, they can only be initialized
23707 -+ * inside a function.
23708 -+ */
23709 -+static int T_slow[2];
23710 -+static int T_fast[2];
23711 -+static int device_speed_thresh[2];
23712 -+
23713 -+#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
23714 -+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
23715 -+
23716 -+#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
23717 -+#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
23718 -+
23719 -+static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
23720 -+
23721 -+#include "bfq-ioc.c"
23722 -+#include "bfq-sched.c"
23723 -+#include "bfq-cgroup.c"
23724 -+
23725 -+#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
23726 -+ IOPRIO_CLASS_IDLE)
23727 -+#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
23728 -+ IOPRIO_CLASS_RT)
23729 -+
23730 -+#define bfq_sample_valid(samples) ((samples) > 80)
23731 -+
23732 -+/*
23733 -+ * We regard a request as SYNC, if either it's a read or has the SYNC bit
23734 -+ * set (in which case it could also be a direct WRITE).
23735 -+ */
23736 -+static inline int bfq_bio_sync(struct bio *bio)
23737 -+{
23738 -+ if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
23739 -+ return 1;
23740 -+
23741 -+ return 0;
23742 -+}
23743 -+
23744 -+/*
23745 -+ * Scheduler run of queue, if there are requests pending and no one in the
23746 -+ * driver that will restart queueing.
23747 -+ */
23748 -+static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
23749 -+{
23750 -+ if (bfqd->queued != 0) {
23751 -+ bfq_log(bfqd, "schedule dispatch");
23752 -+ kblockd_schedule_work(&bfqd->unplug_work);
23753 -+ }
23754 -+}
23755 -+
23756 -+/*
23757 -+ * Lifted from AS - choose which of rq1 and rq2 is best served now.
23758 -+ * We choose the request that is closest to the head right now. Distance
23759 -+ * behind the head is penalized and only allowed to a certain extent.
23760 -+ */
23761 -+static struct request *bfq_choose_req(struct bfq_data *bfqd,
23762 -+ struct request *rq1,
23763 -+ struct request *rq2,
23764 -+ sector_t last)
23765 -+{
23766 -+ sector_t s1, s2, d1 = 0, d2 = 0;
23767 -+ unsigned long back_max;
23768 -+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
23769 -+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
23770 -+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
23771 -+
23772 -+ if (rq1 == NULL || rq1 == rq2)
23773 -+ return rq2;
23774 -+ if (rq2 == NULL)
23775 -+ return rq1;
23776 -+
23777 -+ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
23778 -+ return rq1;
23779 -+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
23780 -+ return rq2;
23781 -+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
23782 -+ return rq1;
23783 -+ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
23784 -+ return rq2;
23785 -+
23786 -+ s1 = blk_rq_pos(rq1);
23787 -+ s2 = blk_rq_pos(rq2);
23788 -+
23789 -+ /*
23790 -+ * By definition, 1KiB is 2 sectors.
23791 -+ */
23792 -+ back_max = bfqd->bfq_back_max * 2;
23793 -+
23794 -+ /*
23795 -+ * Strict one way elevator _except_ in the case where we allow
23796 -+ * short backward seeks which are biased as twice the cost of a
23797 -+ * similar forward seek.
23798 -+ */
23799 -+ if (s1 >= last)
23800 -+ d1 = s1 - last;
23801 -+ else if (s1 + back_max >= last)
23802 -+ d1 = (last - s1) * bfqd->bfq_back_penalty;
23803 -+ else
23804 -+ wrap |= BFQ_RQ1_WRAP;
23805 -+
23806 -+ if (s2 >= last)
23807 -+ d2 = s2 - last;
23808 -+ else if (s2 + back_max >= last)
23809 -+ d2 = (last - s2) * bfqd->bfq_back_penalty;
23810 -+ else
23811 -+ wrap |= BFQ_RQ2_WRAP;
23812 -+
23813 -+ /* Found required data */
23814 -+
23815 -+ /*
23816 -+ * By doing switch() on the bit mask "wrap" we avoid having to
23817 -+ * check two variables for all permutations: --> faster!
23818 -+ */
23819 -+ switch (wrap) {
23820 -+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
23821 -+ if (d1 < d2)
23822 -+ return rq1;
23823 -+ else if (d2 < d1)
23824 -+ return rq2;
23825 -+ else {
23826 -+ if (s1 >= s2)
23827 -+ return rq1;
23828 -+ else
23829 -+ return rq2;
23830 -+ }
23831 -+
23832 -+ case BFQ_RQ2_WRAP:
23833 -+ return rq1;
23834 -+ case BFQ_RQ1_WRAP:
23835 -+ return rq2;
23836 -+ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
23837 -+ default:
23838 -+ /*
23839 -+ * Since both rqs are wrapped,
23840 -+ * start with the one that's further behind head
23841 -+ * (--> only *one* back seek required),
23842 -+ * since back seek takes more time than forward.
23843 -+ */
23844 -+ if (s1 <= s2)
23845 -+ return rq1;
23846 -+ else
23847 -+ return rq2;
23848 -+ }
23849 -+}
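To make the distance rule above concrete, here is a minimal user-space sketch of the same forward/backward cost computation, with invented sector numbers (with the defaults, back_max = bfq_back_max * 2 = 32768 sectors and bfq_back_penalty = 2):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Same cost rule as in bfq_choose_req(): a forward seek costs its raw
 * distance, a short backward seek costs distance * penalty, and a
 * request farther behind the head than back_max "wraps". */
static sector_t seek_cost(sector_t s, sector_t last, sector_t back_max,
			  unsigned int penalty, int *wrap)
{
	if (s >= last)
		return s - last;
	if (s + back_max >= last)
		return (last - s) * penalty;
	*wrap = 1;
	return 0;
}

int main(void)
{
	sector_t last = 100000, back_max = 16 * 1024 * 2;
	int w1 = 0, w2 = 0;
	sector_t d1 = seek_cost(100200, last, back_max, 2, &w1);
	sector_t d2 = seek_cost(99950, last, back_max, 2, &w2);

	/* 200 sectors forward vs. 50 backward: the backward distance is
	 * doubled to 100, which still beats 200, so rq2 wins. */
	printf("d1=%llu d2=%llu -> pick rq2\n", d1, d2);
	return 0;
}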
23850 -+
23851 -+static struct bfq_queue *
23852 -+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
23853 -+ sector_t sector, struct rb_node **ret_parent,
23854 -+ struct rb_node ***rb_link)
23855 -+{
23856 -+ struct rb_node **p, *parent;
23857 -+ struct bfq_queue *bfqq = NULL;
23858 -+
23859 -+ parent = NULL;
23860 -+ p = &root->rb_node;
23861 -+ while (*p) {
23862 -+ struct rb_node **n;
23863 -+
23864 -+ parent = *p;
23865 -+ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
23866 -+
23867 -+ /*
23868 -+ * Sort strictly based on sector. Smallest to the left,
23869 -+ * largest to the right.
23870 -+ */
23871 -+ if (sector > blk_rq_pos(bfqq->next_rq))
23872 -+ n = &(*p)->rb_right;
23873 -+ else if (sector < blk_rq_pos(bfqq->next_rq))
23874 -+ n = &(*p)->rb_left;
23875 -+ else
23876 -+ break;
23877 -+ p = n;
23878 -+ bfqq = NULL;
23879 -+ }
23880 -+
23881 -+ *ret_parent = parent;
23882 -+ if (rb_link)
23883 -+ *rb_link = p;
23884 -+
23885 -+ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
23886 -+ (long long unsigned)sector,
23887 -+ bfqq != NULL ? bfqq->pid : 0);
23888 -+
23889 -+ return bfqq;
23890 -+}
23891 -+
23892 -+static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
23893 -+{
23894 -+ struct rb_node **p, *parent;
23895 -+ struct bfq_queue *__bfqq;
23896 -+
23897 -+ if (bfqq->pos_root != NULL) {
23898 -+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
23899 -+ bfqq->pos_root = NULL;
23900 -+ }
23901 -+
23902 -+ if (bfq_class_idle(bfqq))
23903 -+ return;
23904 -+ if (!bfqq->next_rq)
23905 -+ return;
23906 -+
23907 -+ bfqq->pos_root = &bfqd->rq_pos_tree;
23908 -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
23909 -+ blk_rq_pos(bfqq->next_rq), &parent, &p);
23910 -+ if (__bfqq == NULL) {
23911 -+ rb_link_node(&bfqq->pos_node, parent, p);
23912 -+ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
23913 -+ } else
23914 -+ bfqq->pos_root = NULL;
23915 -+}
23916 -+
23917 -+/*
23918 -+ * Tell whether there are active queues or groups with differentiated weights.
23919 -+ */
23920 -+static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
23921 -+{
23922 -+ BUG_ON(!bfqd->hw_tag);
23923 -+ /*
23924 -+ * For weights to differ, at least one of the trees must contain
23925 -+ * at least two nodes.
23926 -+ */
23927 -+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
23928 -+ (bfqd->queue_weights_tree.rb_node->rb_left ||
23929 -+ bfqd->queue_weights_tree.rb_node->rb_right)
23930 -+#ifdef CONFIG_CGROUP_BFQIO
23931 -+ ) ||
23932 -+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
23933 -+ (bfqd->group_weights_tree.rb_node->rb_left ||
23934 -+ bfqd->group_weights_tree.rb_node->rb_right)
23935 -+#endif
23936 -+ );
23937 -+}
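Reading the check above as a picture (a toy sketch, not kernel code): each node of a weights tree stands for one distinct weight in use, so a root without children means a single weight, i.e. a fully symmetric scenario, while any second distinct weight gives the root a child:

struct toy_node {
	struct toy_node *left, *right;
};

/* Toy analogue of the test above: weights are differentiated iff the
 * counter tree holds at least two nodes, i.e. the root has a child. */
static int differentiated(const struct toy_node *root)
{
	return root && (root->left || root->right);
}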
23938 -+
23939 -+/*
23940 -+ * If the weight-counter tree passed as input contains no counter for
23941 -+ * the weight of the input entity, then add that counter; otherwise just
23942 -+ * increment the existing counter.
23943 -+ *
23944 -+ * Note that weight-counter trees contain few nodes in mostly symmetric
23945 -+ * scenarios. For example, if all queues have the same weight, then the
23946 -+ * weight-counter tree for the queues may contain at most one node.
23947 -+ * This holds even if low_latency is on, because weight-raised queues
23948 -+ * are not inserted in the tree.
23949 -+ * In most scenarios, the rate at which nodes are created/destroyed
23950 -+ * should be low too.
23951 -+ */
23952 -+static void bfq_weights_tree_add(struct bfq_data *bfqd,
23953 -+ struct bfq_entity *entity,
23954 -+ struct rb_root *root)
23955 -+{
23956 -+ struct rb_node **new = &(root->rb_node), *parent = NULL;
23957 -+
23958 -+ /*
23959 -+ * Do not insert if:
23960 -+ * - the device does not support queueing;
23961 -+ * - the entity is already associated with a counter, which happens if:
23962 -+ * 1) the entity is associated with a queue, 2) a request arrival
23963 -+ * has caused the queue to become both non-weight-raised, and hence
23964 -+ * change its weight, and backlogged; in this respect, each
23965 -+ * of the two events causes an invocation of this function,
23966 -+ * 3) this is the invocation of this function caused by the second
23967 -+ * event. This second invocation is actually useless, and we handle
23968 -+ * this fact by exiting immediately. More efficient or clearer
23969 -+ * solutions might possibly be adopted.
23970 -+ */
23971 -+ if (!bfqd->hw_tag || entity->weight_counter)
23972 -+ return;
23973 -+
23974 -+ while (*new) {
23975 -+ struct bfq_weight_counter *__counter = container_of(*new,
23976 -+ struct bfq_weight_counter,
23977 -+ weights_node);
23978 -+ parent = *new;
23979 -+
23980 -+ if (entity->weight == __counter->weight) {
23981 -+ entity->weight_counter = __counter;
23982 -+ goto inc_counter;
23983 -+ }
23984 -+ if (entity->weight < __counter->weight)
23985 -+ new = &((*new)->rb_left);
23986 -+ else
23987 -+ new = &((*new)->rb_right);
23988 -+ }
23989 -+
23990 -+ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
23991 -+ GFP_ATOMIC);
23992 -+ entity->weight_counter->weight = entity->weight;
23993 -+ rb_link_node(&entity->weight_counter->weights_node, parent, new);
23994 -+ rb_insert_color(&entity->weight_counter->weights_node, root);
23995 -+
23996 -+inc_counter:
23997 -+ entity->weight_counter->num_active++;
23998 -+}
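One detail worth flagging in the allocation above: the kzalloc() return value is dereferenced without a NULL check. A defensive variant of that tail (a sketch, not what the patch actually does) would simply leave the entity uncounted on allocation failure:

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);
	if (!entity->weight_counter)
		return;	/* degrade gracefully: entity stays uncounted */
	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);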
23999 -+
24000 -+/*
24001 -+ * Decrement the weight counter associated with the entity, and, if the
24002 -+ * counter reaches 0, remove the counter from the tree.
24003 -+ * See the comments to the function bfq_weights_tree_add() for considerations
24004 -+ * about overhead.
24005 -+ */
24006 -+static void bfq_weights_tree_remove(struct bfq_data *bfqd,
24007 -+ struct bfq_entity *entity,
24008 -+ struct rb_root *root)
24009 -+{
24010 -+ /*
24011 -+ * Check whether the entity is actually associated with a counter.
24012 -+ * In fact, the device may not be considered NCQ-capable for a while,
24013 -+ * which implies that no insertion in the weight trees is performed,
24014 -+ * after which the device may start to be deemed NCQ-capable, and hence
24015 -+ * this function may start to be invoked. This may cause the function
24016 -+ * to be invoked for entities that are not associated with any counter.
24017 -+ */
24018 -+ if (!entity->weight_counter)
24019 -+ return;
24020 -+
24021 -+ BUG_ON(RB_EMPTY_ROOT(root));
24022 -+ BUG_ON(entity->weight_counter->weight != entity->weight);
24023 -+
24024 -+ BUG_ON(!entity->weight_counter->num_active);
24025 -+ entity->weight_counter->num_active--;
24026 -+ if (entity->weight_counter->num_active > 0)
24027 -+ goto reset_entity_pointer;
24028 -+
24029 -+ rb_erase(&entity->weight_counter->weights_node, root);
24030 -+ kfree(entity->weight_counter);
24031 -+
24032 -+reset_entity_pointer:
24033 -+ entity->weight_counter = NULL;
24034 -+}
24035 -+
24036 -+static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
24037 -+ struct bfq_queue *bfqq,
24038 -+ struct request *last)
24039 -+{
24040 -+ struct rb_node *rbnext = rb_next(&last->rb_node);
24041 -+ struct rb_node *rbprev = rb_prev(&last->rb_node);
24042 -+ struct request *next = NULL, *prev = NULL;
24043 -+
24044 -+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
24045 -+
24046 -+ if (rbprev != NULL)
24047 -+ prev = rb_entry_rq(rbprev);
24048 -+
24049 -+ if (rbnext != NULL)
24050 -+ next = rb_entry_rq(rbnext);
24051 -+ else {
24052 -+ rbnext = rb_first(&bfqq->sort_list);
24053 -+ if (rbnext && rbnext != &last->rb_node)
24054 -+ next = rb_entry_rq(rbnext);
24055 -+ }
24056 -+
24057 -+ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
24058 -+}
24059 -+
24060 -+/* see the definition of bfq_async_charge_factor for details */
24061 -+static inline unsigned long bfq_serv_to_charge(struct request *rq,
24062 -+ struct bfq_queue *bfqq)
24063 -+{
24064 -+ return blk_rq_sectors(rq) *
24065 -+ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
24066 -+ bfq_async_charge_factor));
24067 -+}
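Plugging in the default bfq_async_charge_factor of 10 (request sizes made up): a sync request of 8 sectors is charged 8, while an async request of 8 sectors from a non-weight-raised queue (wr_coeff == 1) is charged 8 * (1 + 10) = 88. A stripped-down user-space version of the formula:

#include <stdio.h>

static unsigned long serv_to_charge(unsigned long sectors, int sync,
				    unsigned int wr_coeff, int factor)
{
	return sectors * (1 + (!sync) * (wr_coeff == 1) * factor);
}

int main(void)
{
	printf("sync:  %lu\n", serv_to_charge(8, 1, 1, 10));	/* 8 */
	printf("async: %lu\n", serv_to_charge(8, 0, 1, 10));	/* 88 */
	return 0;
}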
24068 -+
24069 -+/**
24070 -+ * bfq_updated_next_req - update the queue after a new next_rq selection.
24071 -+ * @bfqd: the device data the queue belongs to.
24072 -+ * @bfqq: the queue to update.
24073 -+ *
24074 -+ * If the first request of a queue changes we make sure that the queue
24075 -+ * has enough budget to serve at least its first request (if the
24076 -+ * request has grown). We do this because if the queue has not enough
24077 -+ * budget for its first request, it has to go through two dispatch
24078 -+ * rounds to actually get it dispatched.
24079 -+ */
24080 -+static void bfq_updated_next_req(struct bfq_data *bfqd,
24081 -+ struct bfq_queue *bfqq)
24082 -+{
24083 -+ struct bfq_entity *entity = &bfqq->entity;
24084 -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
24085 -+ struct request *next_rq = bfqq->next_rq;
24086 -+ unsigned long new_budget;
24087 -+
24088 -+ if (next_rq == NULL)
24089 -+ return;
24090 -+
24091 -+ if (bfqq == bfqd->in_service_queue)
24092 -+ /*
24093 -+ * In order not to break guarantees, budgets cannot be
24094 -+ * changed after an entity has been selected.
24095 -+ */
24096 -+ return;
24097 -+
24098 -+ BUG_ON(entity->tree != &st->active);
24099 -+ BUG_ON(entity == entity->sched_data->in_service_entity);
24100 -+
24101 -+ new_budget = max_t(unsigned long, bfqq->max_budget,
24102 -+ bfq_serv_to_charge(next_rq, bfqq));
24103 -+ if (entity->budget != new_budget) {
24104 -+ entity->budget = new_budget;
24105 -+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
24106 -+ new_budget);
24107 -+ bfq_activate_bfqq(bfqd, bfqq);
24108 -+ }
24109 -+}
24110 -+
24111 -+static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
24112 -+{
24113 -+ u64 dur;
24114 -+
24115 -+ if (bfqd->bfq_wr_max_time > 0)
24116 -+ return bfqd->bfq_wr_max_time;
24117 -+
24118 -+ dur = bfqd->RT_prod;
24119 -+ do_div(dur, bfqd->peak_rate);
24120 -+
24121 -+ return dur;
24122 -+}
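Numerically (all values invented, units and fixed-point shifts omitted): with RT_prod = R * T = 20000 * 6000 and a measured peak_rate of 10000, the division above yields dur = 12000 = 2 * T, i.e. a device measured at half the reference rate weight-raises interactive queues for twice the reference time:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t R = 20000, T = 6000, peak_rate = 10000; /* made up */
	uint64_t dur = R * T / peak_rate;	/* RT_prod / peak_rate */

	printf("wr duration: %llu (= 2 * T for a half-speed device)\n",
	       (unsigned long long)dur);
	return 0;
}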
24123 -+
24124 -+/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
24125 -+static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
24126 -+ struct bfq_queue *bfqq)
24127 -+{
24128 -+ struct bfq_queue *item;
24129 -+ struct hlist_node *n;
24130 -+
24131 -+ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
24132 -+ hlist_del_init(&item->burst_list_node);
24133 -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
24134 -+ bfqd->burst_size = 1;
24135 -+}
24136 -+
24137 -+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
24138 -+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
24139 -+{
24140 -+ /* Increment burst size to take into account also bfqq */
24141 -+ bfqd->burst_size++;
24142 -+
24143 -+ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
24144 -+ struct bfq_queue *pos, *bfqq_item;
24145 -+ struct hlist_node *n;
24146 -+
24147 -+ /*
24148 -+ * Enough queues have been activated shortly after each
24149 -+ * other to consider this burst as large.
24150 -+ */
24151 -+ bfqd->large_burst = true;
24152 -+
24153 -+ /*
24154 -+ * We can now mark all queues in the burst list as
24155 -+ * belonging to a large burst.
24156 -+ */
24157 -+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
24158 -+ burst_list_node)
24159 -+ bfq_mark_bfqq_in_large_burst(bfqq_item);
24160 -+ bfq_mark_bfqq_in_large_burst(bfqq);
24161 -+
24162 -+ /*
24163 -+ * From now on, and until the current burst finishes, any
24164 -+ * new queue being activated shortly after the last queue
24165 -+ * was inserted in the burst can be immediately marked as
24166 -+ * belonging to a large burst. So the burst list is not
24167 -+ * needed any more. Remove it.
24168 -+ */
24169 -+ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
24170 -+ burst_list_node)
24171 -+ hlist_del_init(&pos->burst_list_node);
24172 -+ } else /* burst not yet large: add bfqq to the burst list */
24173 -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
24174 -+}
24175 -+
24176 -+/*
24177 -+ * If many queues happen to become active shortly after each other, then,
24178 -+ * to help the processes associated to these queues get their job done as
24179 -+ * soon as possible, it is usually better to not grant either weight-raising
24180 -+ * or device idling to these queues. In this comment we describe, firstly,
24181 -+ * the reasons why this fact holds, and, secondly, the next function, which
24182 -+ * implements the main steps needed to properly mark these queues so that
24183 -+ * they can then be treated in a different way.
24184 -+ *
24185 -+ * As for the terminology, we say that a queue becomes active, i.e.,
24186 -+ * switches from idle to backlogged, either when it is created (as a
24187 -+ * consequence of the arrival of an I/O request), or, if already existing,
24188 -+ * when a new request for the queue arrives while the queue is idle.
24189 -+ * Bursts of activations, i.e., activations of different queues occurring
24190 -+ * shortly after each other, are typically caused by services or applications
24191 -+ * that spawn or reactivate many parallel threads/processes. Examples are
24192 -+ * systemd during boot or git grep.
24193 -+ *
24194 -+ * These services or applications benefit mostly from a high throughput:
24195 -+ * the quicker the requests of the activated queues are cumulatively served,
24196 -+ * the sooner the target job of these queues gets completed. As a consequence,
24197 -+ * weight-raising any of these queues, which also implies idling the device
24198 -+ * for it, is almost always counterproductive: in most cases it just lowers
24199 -+ * throughput.
24200 -+ *
24201 -+ * On the other hand, a burst of activations may be also caused by the start
24202 -+ * of an application that does not consist of a lot of parallel I/O-bound
24203 -+ * threads. In fact, with a complex application, the burst may be just a
24204 -+ * consequence of the fact that several processes need to be executed to
24205 -+ * start-up the application. To start an application as quickly as possible,
24206 -+ * the best thing to do is to privilege the I/O related to the application
24207 -+ * with respect to all other I/O. Therefore, the best strategy to start an
24208 -+ * application that causes a burst of activations as quickly as possible is
24209 -+ * to weight-raise all the queues activated during the burst. This is the
24210 -+ * exact opposite of the best strategy for the other type of bursts.
24211 -+ *
24212 -+ * In the end, to take the best action for each of the two cases, the two
24213 -+ * types of bursts need to be distinguished. Fortunately, this seems
24214 -+ * relatively easy to do, by looking at the sizes of the bursts. In
24215 -+ * particular, we found a threshold such that bursts with a larger size
24216 -+ * than that threshold are apparently caused only by services or commands
24217 -+ * such as systemd or git grep. For brevity, hereafter we call just 'large'
24218 -+ * these bursts. BFQ *does not* weight-raise queues whose activations occur
24219 -+ * in a large burst. In addition, for each of these queues BFQ performs or
24220 -+ * does not perform idling depending on which choice boosts the throughput
24221 -+ * most. The exact choice depends on the device and request pattern at
24222 -+ * hand.
24223 -+ *
24224 -+ * Turning back to the next function, it implements all the steps needed
24225 -+ * to detect the occurrence of a large burst and to properly mark all the
24226 -+ * queues belonging to it (so that they can then be treated in a different
24227 -+ * way). This goal is achieved by maintaining a special "burst list" that
24228 -+ * holds, temporarily, the queues that belong to the burst in progress. The
24229 -+ * list is then used to mark these queues as belonging to a large burst if
24230 -+ * the burst does become large. The main steps are the following.
24231 -+ *
24232 -+ * . when the very first queue is activated, the queue is inserted into the
24233 -+ * list (as it could be the first queue in a possible burst)
24234 -+ *
24235 -+ * . if the current burst has not yet become large, and a queue Q that does
24236 -+ * not yet belong to the burst is activated shortly after the last time
24237 -+ * at which a new queue entered the burst list, then the function appends
24238 -+ * Q to the burst list
24239 -+ *
24240 -+ * . if, as a consequence of the previous step, the burst size reaches
24241 -+ * the large-burst threshold, then
24242 -+ *
24243 -+ * . all the queues in the burst list are marked as belonging to a
24244 -+ * large burst
24245 -+ *
24246 -+ * . the burst list is deleted; in fact, the burst list already served
24247 -+ * its purpose (keeping temporarily track of the queues in a burst,
24248 -+ * so as to be able to mark them as belonging to a large burst in the
24249 -+ * previous sub-step), and now is not needed any more
24250 -+ *
24251 -+ * . the device enters a large-burst mode
24252 -+ *
24253 -+ * . if a queue Q that does not belong to the burst is activated while
24254 -+ * the device is in large-burst mode and shortly after the last time
24255 -+ * at which a queue either entered the burst list or was marked as
24256 -+ * belonging to the current large burst, then Q is immediately marked
24257 -+ * as belonging to a large burst.
24258 -+ *
24259 -+ * . if a queue Q that does not belong to the burst is activated a while
24260 -+ * later than, i.e., not shortly after, the last time at which a queue
24261 -+ * either entered the burst list or was marked as belonging to the
24262 -+ * current large burst, then the current burst is deemed as finished and:
24263 -+ *
24264 -+ * . the large-burst mode is reset if set
24265 -+ *
24266 -+ * . the burst list is emptied
24267 -+ *
24268 -+ * . Q is inserted in the burst list, as Q may be the first queue
24269 -+ * in a possible new burst (then the burst list contains just Q
24270 -+ * after this step).
24271 -+ */
24272 -+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
24273 -+ bool idle_for_long_time)
24274 -+{
24275 -+ /*
24276 -+ * If bfqq happened to be activated in a burst, but has been idle
24277 -+ * for at least as long as an interactive queue, then we assume
24278 -+ * that, in the overall I/O initiated in the burst, the I/O
24279 -+ * associated to bfqq is finished. So bfqq does not need to be
24280 -+ * treated as a queue belonging to a burst anymore. Accordingly,
24281 -+ * we reset bfqq's in_large_burst flag if set, and remove bfqq
24282 -+ * from the burst list if it's there. We do not, however, decrement
24283 -+ * burst_size, because the fact that bfqq does not need to belong
24284 -+ * to the burst list any more does not invalidate the fact that
24285 -+ * bfqq may have been activated during the current burst.
24286 -+ */
24287 -+ if (idle_for_long_time) {
24288 -+ hlist_del_init(&bfqq->burst_list_node);
24289 -+ bfq_clear_bfqq_in_large_burst(bfqq);
24290 -+ }
24291 -+
24292 -+ /*
24293 -+ * If bfqq is already in the burst list or is part of a large
24294 -+ * burst, then there is nothing else to do.
24295 -+ */
24296 -+ if (!hlist_unhashed(&bfqq->burst_list_node) ||
24297 -+ bfq_bfqq_in_large_burst(bfqq))
24298 -+ return;
24299 -+
24300 -+ /*
24301 -+ * If bfqq's activation happens late enough, then the current
24302 -+ * burst is finished, and related data structures must be reset.
24303 -+ *
24304 -+ * In this respect, consider the special case where bfqq is the very
24305 -+ * first queue being activated. In this case, last_ins_in_burst is
24306 -+ * not yet significant when we get here. But it is easy to verify
24307 -+ * that, whether or not the following condition is true, bfqq will
24308 -+ * end up being inserted into the burst list. In particular the
24309 -+ * list will happen to contain only bfqq. And this is exactly what
24310 -+ * has to happen, as bfqq may be the first queue in a possible
24311 -+ * burst.
24312 -+ */
24313 -+ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
24314 -+ bfqd->bfq_burst_interval)) {
24315 -+ bfqd->large_burst = false;
24316 -+ bfq_reset_burst_list(bfqd, bfqq);
24317 -+ return;
24318 -+ }
24319 -+
24320 -+ /*
24321 -+ * If we get here, then bfqq is being activated shortly after the
24322 -+ * last queue. So, if the current burst is also large, we can mark
24323 -+ * bfqq as belonging to this large burst immediately.
24324 -+ */
24325 -+ if (bfqd->large_burst) {
24326 -+ bfq_mark_bfqq_in_large_burst(bfqq);
24327 -+ return;
24328 -+ }
24329 -+
24330 -+ /*
24331 -+ * If we get here, then a large-burst state has not yet been
24332 -+ * reached, but bfqq is being activated shortly after the last
24333 -+ * queue. Then we add bfqq to the burst.
24334 -+ */
24335 -+ bfq_add_to_burst(bfqd, bfqq);
24336 -+}
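Putting the steps above together, a hypothetical trace (threshold and interval values chosen only for the example):

/*
 * Assume bfq_large_burst_thresh = 3 and bfq_burst_interval = 13 ms.
 *
 *   t =  0 ms  Q1 activates -> burst list {Q1}, burst_size 1
 *   t =  5 ms  Q2 activates -> burst list {Q1, Q2}, burst_size 2
 *   t =  9 ms  Q3 activates -> burst_size hits 3: Q1..Q3 marked
 *              in_large_burst, the list is emptied, large_burst set
 *   t = 12 ms  Q4 activates -> still within the interval: marked
 *              in_large_burst immediately
 *   t = 40 ms  Q5 activates -> too late: large_burst reset, burst
 *              list reset to {Q5}, which may start a new burst
 */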
24337 -+
24338 -+static void bfq_add_request(struct request *rq)
24339 -+{
24340 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
24341 -+ struct bfq_entity *entity = &bfqq->entity;
24342 -+ struct bfq_data *bfqd = bfqq->bfqd;
24343 -+ struct request *next_rq, *prev;
24344 -+ unsigned long old_wr_coeff = bfqq->wr_coeff;
24345 -+ bool interactive = false;
24346 -+
24347 -+ bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
24348 -+ bfqq->queued[rq_is_sync(rq)]++;
24349 -+ bfqd->queued++;
24350 -+
24351 -+ elv_rb_add(&bfqq->sort_list, rq);
24352 -+
24353 -+ /*
24354 -+ * Check if this request is a better next-serve candidate.
24355 -+ */
24356 -+ prev = bfqq->next_rq;
24357 -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
24358 -+ BUG_ON(next_rq == NULL);
24359 -+ bfqq->next_rq = next_rq;
24360 -+
24361 -+ /*
24362 -+ * Adjust priority tree position, if next_rq changes.
24363 -+ */
24364 -+ if (prev != bfqq->next_rq)
24365 -+ bfq_rq_pos_tree_add(bfqd, bfqq);
24366 -+
24367 -+ if (!bfq_bfqq_busy(bfqq)) {
24368 -+ bool soft_rt,
24369 -+ idle_for_long_time = time_is_before_jiffies(
24370 -+ bfqq->budget_timeout +
24371 -+ bfqd->bfq_wr_min_idle_time);
24372 -+
24373 -+ if (bfq_bfqq_sync(bfqq)) {
24374 -+ bool already_in_burst =
24375 -+ !hlist_unhashed(&bfqq->burst_list_node) ||
24376 -+ bfq_bfqq_in_large_burst(bfqq);
24377 -+ bfq_handle_burst(bfqd, bfqq, idle_for_long_time);
24378 -+ /*
24379 -+ * If bfqq was not already in the current burst,
24380 -+ * then, at this point, bfqq either has been
24381 -+ * added to the current burst or has caused the
24382 -+ * current burst to terminate. In particular, in
24383 -+ * the second case, bfqq has become the first
24384 -+ * queue in a possible new burst.
24385 -+ * In both cases last_ins_in_burst needs to be
24386 -+ * moved forward.
24387 -+ */
24388 -+ if (!already_in_burst)
24389 -+ bfqd->last_ins_in_burst = jiffies;
24390 -+ }
24391 -+
24392 -+ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
24393 -+ !bfq_bfqq_in_large_burst(bfqq) &&
24394 -+ time_is_before_jiffies(bfqq->soft_rt_next_start);
24395 -+ interactive = !bfq_bfqq_in_large_burst(bfqq) &&
24396 -+ idle_for_long_time;
24397 -+ entity->budget = max_t(unsigned long, bfqq->max_budget,
24398 -+ bfq_serv_to_charge(next_rq, bfqq));
24399 -+
24400 -+ if (!bfq_bfqq_IO_bound(bfqq)) {
24401 -+ if (time_before(jiffies,
24402 -+ RQ_BIC(rq)->ttime.last_end_request +
24403 -+ bfqd->bfq_slice_idle)) {
24404 -+ bfqq->requests_within_timer++;
24405 -+ if (bfqq->requests_within_timer >=
24406 -+ bfqd->bfq_requests_within_timer)
24407 -+ bfq_mark_bfqq_IO_bound(bfqq);
24408 -+ } else
24409 -+ bfqq->requests_within_timer = 0;
24410 -+ }
24411 -+
24412 -+ if (!bfqd->low_latency)
24413 -+ goto add_bfqq_busy;
24414 -+
24415 -+ /*
24416 -+ * If the queue is not being boosted and has been idle
24417 -+ * for enough time, start a weight-raising period
24418 -+ */
24419 -+ if (old_wr_coeff == 1 && (interactive || soft_rt)) {
24420 -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
24421 -+ if (interactive)
24422 -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
24423 -+ else
24424 -+ bfqq->wr_cur_max_time =
24425 -+ bfqd->bfq_wr_rt_max_time;
24426 -+ bfq_log_bfqq(bfqd, bfqq,
24427 -+ "wrais starting at %lu, rais_max_time %u",
24428 -+ jiffies,
24429 -+ jiffies_to_msecs(bfqq->wr_cur_max_time));
24430 -+ } else if (old_wr_coeff > 1) {
24431 -+ if (interactive)
24432 -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
24433 -+ else if (bfq_bfqq_in_large_burst(bfqq) ||
24434 -+ (bfqq->wr_cur_max_time ==
24435 -+ bfqd->bfq_wr_rt_max_time &&
24436 -+ !soft_rt)) {
24437 -+ bfqq->wr_coeff = 1;
24438 -+ bfq_log_bfqq(bfqd, bfqq,
24439 -+ "wrais ending at %lu, rais_max_time %u",
24440 -+ jiffies,
24441 -+ jiffies_to_msecs(bfqq->
24442 -+ wr_cur_max_time));
24443 -+ } else if (time_before(
24444 -+ bfqq->last_wr_start_finish +
24445 -+ bfqq->wr_cur_max_time,
24446 -+ jiffies +
24447 -+ bfqd->bfq_wr_rt_max_time) &&
24448 -+ soft_rt) {
24449 -+ /*
24450 -+ *
24451 -+ * The remaining weight-raising time is lower
24452 -+ * than bfqd->bfq_wr_rt_max_time, which
24453 -+ * means that the application is enjoying
24454 -+ * weight raising either because deemed soft-
24455 -+ * rt in the near past, or because deemed
24456 -+ * interactive long ago. In both cases,
24457 -+ * resetting now the current remaining weight-
24458 -+ * raising time for the application to the
24459 -+ * weight-raising duration for soft rt
24460 -+ * applications would not cause any latency
24461 -+ * increase for the application (as the new
24462 -+ * duration would be higher than the remaining
24463 -+ * time).
24464 -+ *
24465 -+ * In addition, the application is now meeting
24466 -+ * the requirements for being deemed soft rt.
24467 -+ * In the end we can correctly and safely
24468 -+ * (re)charge the weight-raising duration for
24469 -+ * the application with the weight-raising
24470 -+ * duration for soft rt applications.
24471 -+ *
24472 -+ * In particular, doing this recharge now, i.e.,
24473 -+ * before the weight-raising period for the
24474 -+ * application finishes, reduces the probability
24475 -+ * of the following negative scenario:
24476 -+ * 1) the weight of a soft rt application is
24477 -+ * raised at startup (as for any newly
24478 -+ * created application),
24479 -+ * 2) since the application is not interactive,
24480 -+ * at a certain time weight-raising is
24481 -+ * stopped for the application,
24482 -+ * 3) at that time the application happens to
24483 -+ * still have pending requests, and hence
24484 -+ * is destined to not have a chance to be
24485 -+ * deemed soft rt before these requests are
24486 -+ * completed (see the comments to the
24487 -+ * function bfq_bfqq_softrt_next_start()
24488 -+ * for details on soft rt detection),
24489 -+ * 4) these pending requests experience a high
24490 -+ * latency because the application is not
24491 -+ * weight-raised while they are pending.
24492 -+ */
24493 -+ bfqq->last_wr_start_finish = jiffies;
24494 -+ bfqq->wr_cur_max_time =
24495 -+ bfqd->bfq_wr_rt_max_time;
24496 -+ }
24497 -+ }
24498 -+ if (old_wr_coeff != bfqq->wr_coeff)
24499 -+ entity->ioprio_changed = 1;
24500 -+add_bfqq_busy:
24501 -+ bfqq->last_idle_bklogged = jiffies;
24502 -+ bfqq->service_from_backlogged = 0;
24503 -+ bfq_clear_bfqq_softrt_update(bfqq);
24504 -+ bfq_add_bfqq_busy(bfqd, bfqq);
24505 -+ } else {
24506 -+ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
24507 -+ time_is_before_jiffies(
24508 -+ bfqq->last_wr_start_finish +
24509 -+ bfqd->bfq_wr_min_inter_arr_async)) {
24510 -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
24511 -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
24512 -+
24513 -+ bfqd->wr_busy_queues++;
24514 -+ entity->ioprio_changed = 1;
24515 -+ bfq_log_bfqq(bfqd, bfqq,
24516 -+ "non-idle wrais starting at %lu, rais_max_time %u",
24517 -+ jiffies,
24518 -+ jiffies_to_msecs(bfqq->wr_cur_max_time));
24519 -+ }
24520 -+ if (prev != bfqq->next_rq)
24521 -+ bfq_updated_next_req(bfqd, bfqq);
24522 -+ }
24523 -+
24524 -+ if (bfqd->low_latency &&
24525 -+ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
24526 -+ bfqq->last_wr_start_finish = jiffies;
24527 -+}
24528 -+
24529 -+static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
24530 -+ struct bio *bio)
24531 -+{
24532 -+ struct task_struct *tsk = current;
24533 -+ struct bfq_io_cq *bic;
24534 -+ struct bfq_queue *bfqq;
24535 -+
24536 -+ bic = bfq_bic_lookup(bfqd, tsk->io_context);
24537 -+ if (bic == NULL)
24538 -+ return NULL;
24539 -+
24540 -+ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
24541 -+ if (bfqq != NULL)
24542 -+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
24543 -+
24544 -+ return NULL;
24545 -+}
24546 -+
24547 -+static void bfq_activate_request(struct request_queue *q, struct request *rq)
24548 -+{
24549 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
24550 -+
24551 -+ bfqd->rq_in_driver++;
24552 -+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
24553 -+ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
24554 -+ (long long unsigned)bfqd->last_position);
24555 -+}
24556 -+
24557 -+static inline void bfq_deactivate_request(struct request_queue *q,
24558 -+ struct request *rq)
24559 -+{
24560 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
24561 -+
24562 -+ BUG_ON(bfqd->rq_in_driver == 0);
24563 -+ bfqd->rq_in_driver--;
24564 -+}
24565 -+
24566 -+static void bfq_remove_request(struct request *rq)
24567 -+{
24568 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
24569 -+ struct bfq_data *bfqd = bfqq->bfqd;
24570 -+ const int sync = rq_is_sync(rq);
24571 -+
24572 -+ if (bfqq->next_rq == rq) {
24573 -+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
24574 -+ bfq_updated_next_req(bfqd, bfqq);
24575 -+ }
24576 -+
24577 -+ list_del_init(&rq->queuelist);
24578 -+ BUG_ON(bfqq->queued[sync] == 0);
24579 -+ bfqq->queued[sync]--;
24580 -+ bfqd->queued--;
24581 -+ elv_rb_del(&bfqq->sort_list, rq);
24582 -+
24583 -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
24584 -+ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
24585 -+ bfq_del_bfqq_busy(bfqd, bfqq, 1);
24586 -+ /*
24587 -+ * Remove queue from request-position tree as it is empty.
24588 -+ */
24589 -+ if (bfqq->pos_root != NULL) {
24590 -+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
24591 -+ bfqq->pos_root = NULL;
24592 -+ }
24593 -+ }
24594 -+
24595 -+ if (rq->cmd_flags & REQ_META) {
24596 -+ BUG_ON(bfqq->meta_pending == 0);
24597 -+ bfqq->meta_pending--;
24598 -+ }
24599 -+}
24600 -+
24601 -+static int bfq_merge(struct request_queue *q, struct request **req,
24602 -+ struct bio *bio)
24603 -+{
24604 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
24605 -+ struct request *__rq;
24606 -+
24607 -+ __rq = bfq_find_rq_fmerge(bfqd, bio);
24608 -+ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
24609 -+ *req = __rq;
24610 -+ return ELEVATOR_FRONT_MERGE;
24611 -+ }
24612 -+
24613 -+ return ELEVATOR_NO_MERGE;
24614 -+}
24615 -+
24616 -+static void bfq_merged_request(struct request_queue *q, struct request *req,
24617 -+ int type)
24618 -+{
24619 -+ if (type == ELEVATOR_FRONT_MERGE &&
24620 -+ rb_prev(&req->rb_node) &&
24621 -+ blk_rq_pos(req) <
24622 -+ blk_rq_pos(container_of(rb_prev(&req->rb_node),
24623 -+ struct request, rb_node))) {
24624 -+ struct bfq_queue *bfqq = RQ_BFQQ(req);
24625 -+ struct bfq_data *bfqd = bfqq->bfqd;
24626 -+ struct request *prev, *next_rq;
24627 -+
24628 -+ /* Reposition request in its sort_list */
24629 -+ elv_rb_del(&bfqq->sort_list, req);
24630 -+ elv_rb_add(&bfqq->sort_list, req);
24631 -+ /* Choose next request to be served for bfqq */
24632 -+ prev = bfqq->next_rq;
24633 -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
24634 -+ bfqd->last_position);
24635 -+ BUG_ON(next_rq == NULL);
24636 -+ bfqq->next_rq = next_rq;
24637 -+ /*
24638 -+ * If next_rq changes, update both the queue's budget to
24639 -+ * fit the new request and the queue's position in its
24640 -+ * rq_pos_tree.
24641 -+ */
24642 -+ if (prev != bfqq->next_rq) {
24643 -+ bfq_updated_next_req(bfqd, bfqq);
24644 -+ bfq_rq_pos_tree_add(bfqd, bfqq);
24645 -+ }
24646 -+ }
24647 -+}
24648 -+
24649 -+static void bfq_merged_requests(struct request_queue *q, struct request *rq,
24650 -+ struct request *next)
24651 -+{
24652 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
24653 -+
24654 -+ /*
24655 -+ * Reposition in fifo if next is older than rq.
24656 -+ */
24657 -+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
24658 -+ time_before(next->fifo_time, rq->fifo_time)) {
24659 -+ list_move(&rq->queuelist, &next->queuelist);
24660 -+ rq->fifo_time = next->fifo_time;
24661 -+ }
24662 -+
24663 -+ if (bfqq->next_rq == next)
24664 -+ bfqq->next_rq = rq;
24665 -+
24666 -+ bfq_remove_request(next);
24667 -+}
24668 -+
24669 -+/* Must be called with bfqq != NULL */
24670 -+static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
24671 -+{
24672 -+ BUG_ON(bfqq == NULL);
24673 -+ if (bfq_bfqq_busy(bfqq))
24674 -+ bfqq->bfqd->wr_busy_queues--;
24675 -+ bfqq->wr_coeff = 1;
24676 -+ bfqq->wr_cur_max_time = 0;
24677 -+ /* Trigger a weight change on the next activation of the queue */
24678 -+ bfqq->entity.ioprio_changed = 1;
24679 -+}
24680 -+
24681 -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
24682 -+ struct bfq_group *bfqg)
24683 -+{
24684 -+ int i, j;
24685 -+
24686 -+ for (i = 0; i < 2; i++)
24687 -+ for (j = 0; j < IOPRIO_BE_NR; j++)
24688 -+ if (bfqg->async_bfqq[i][j] != NULL)
24689 -+ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
24690 -+ if (bfqg->async_idle_bfqq != NULL)
24691 -+ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
24692 -+}
24693 -+
24694 -+static void bfq_end_wr(struct bfq_data *bfqd)
24695 -+{
24696 -+ struct bfq_queue *bfqq;
24697 -+
24698 -+ spin_lock_irq(bfqd->queue->queue_lock);
24699 -+
24700 -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
24701 -+ bfq_bfqq_end_wr(bfqq);
24702 -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
24703 -+ bfq_bfqq_end_wr(bfqq);
24704 -+ bfq_end_wr_async(bfqd);
24705 -+
24706 -+ spin_unlock_irq(bfqd->queue->queue_lock);
24707 -+}
24708 -+
24709 -+static int bfq_allow_merge(struct request_queue *q, struct request *rq,
24710 -+ struct bio *bio)
24711 -+{
24712 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
24713 -+ struct bfq_io_cq *bic;
24714 -+ struct bfq_queue *bfqq;
24715 -+
24716 -+ /*
24717 -+ * Disallow merge of a sync bio into an async request.
24718 -+ */
24719 -+ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
24720 -+ return 0;
24721 -+
24722 -+ /*
24723 -+ * Lookup the bfqq that this bio will be queued with. Allow
24724 -+ * merge only if rq is queued there.
24725 -+ * Queue lock is held here.
24726 -+ */
24727 -+ bic = bfq_bic_lookup(bfqd, current->io_context);
24728 -+ if (bic == NULL)
24729 -+ return 0;
24730 -+
24731 -+ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
24732 -+ return bfqq == RQ_BFQQ(rq);
24733 -+}
24734 -+
24735 -+static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
24736 -+ struct bfq_queue *bfqq)
24737 -+{
24738 -+ if (bfqq != NULL) {
24739 -+ bfq_mark_bfqq_must_alloc(bfqq);
24740 -+ bfq_mark_bfqq_budget_new(bfqq);
24741 -+ bfq_clear_bfqq_fifo_expire(bfqq);
24742 -+
24743 -+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
24744 -+
24745 -+ bfq_log_bfqq(bfqd, bfqq,
24746 -+ "set_in_service_queue, cur-budget = %lu",
24747 -+ bfqq->entity.budget);
24748 -+ }
24749 -+
24750 -+ bfqd->in_service_queue = bfqq;
24751 -+}
24752 -+
24753 -+/*
24754 -+ * Get and set a new queue for service.
24755 -+ */
24756 -+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
24757 -+ struct bfq_queue *bfqq)
24758 -+{
24759 -+ if (!bfqq)
24760 -+ bfqq = bfq_get_next_queue(bfqd);
24761 -+ else
24762 -+ bfq_get_next_queue_forced(bfqd, bfqq);
24763 -+
24764 -+ __bfq_set_in_service_queue(bfqd, bfqq);
24765 -+ return bfqq;
24766 -+}
24767 -+
24768 -+static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
24769 -+ struct request *rq)
24770 -+{
24771 -+ if (blk_rq_pos(rq) >= bfqd->last_position)
24772 -+ return blk_rq_pos(rq) - bfqd->last_position;
24773 -+ else
24774 -+ return bfqd->last_position - blk_rq_pos(rq);
24775 -+}
24776 -+
24777 -+/*
24778 -+ * Return true if bfqq has no request pending and rq is close enough to
24779 -+ * bfqd->last_position, or if rq is closer to bfqd->last_position than
24780 -+ * bfqq->next_rq
24781 -+ */
24782 -+static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
24783 -+{
24784 -+ return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
24785 -+}
24786 -+
24787 -+static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
24788 -+{
24789 -+ struct rb_root *root = &bfqd->rq_pos_tree;
24790 -+ struct rb_node *parent, *node;
24791 -+ struct bfq_queue *__bfqq;
24792 -+ sector_t sector = bfqd->last_position;
24793 -+
24794 -+ if (RB_EMPTY_ROOT(root))
24795 -+ return NULL;
24796 -+
24797 -+ /*
24798 -+ * First, if we find a request starting at the end of the last
24799 -+ * request, choose it.
24800 -+ */
24801 -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
24802 -+ if (__bfqq != NULL)
24803 -+ return __bfqq;
24804 -+
24805 -+ /*
24806 -+ * If the exact sector wasn't found, the parent of the NULL leaf
24807 -+ * will contain the closest sector (rq_pos_tree sorted by
24808 -+ * next_request position).
24809 -+ */
24810 -+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
24811 -+ if (bfq_rq_close(bfqd, __bfqq->next_rq))
24812 -+ return __bfqq;
24813 -+
24814 -+ if (blk_rq_pos(__bfqq->next_rq) < sector)
24815 -+ node = rb_next(&__bfqq->pos_node);
24816 -+ else
24817 -+ node = rb_prev(&__bfqq->pos_node);
24818 -+ if (node == NULL)
24819 -+ return NULL;
24820 -+
24821 -+ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
24822 -+ if (bfq_rq_close(bfqd, __bfqq->next_rq))
24823 -+ return __bfqq;
24824 -+
24825 -+ return NULL;
24826 -+}
24827 -+
24828 -+/*
24829 -+ * bfqd - obvious
24830 -+ * cur_bfqq - passed in so that we don't decide that the current queue
24831 -+ * is closely cooperating with itself.
24832 -+ *
24833 -+ * We are assuming that cur_bfqq has dispatched at least one request,
24834 -+ * and that bfqd->last_position reflects a position on the disk associated
24835 -+ * with the I/O issued by cur_bfqq.
24836 -+ */
24837 -+static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
24838 -+ struct bfq_queue *cur_bfqq)
24839 -+{
24840 -+ struct bfq_queue *bfqq;
24841 -+
24842 -+ if (bfq_class_idle(cur_bfqq))
24843 -+ return NULL;
24844 -+ if (!bfq_bfqq_sync(cur_bfqq))
24845 -+ return NULL;
24846 -+ if (BFQQ_SEEKY(cur_bfqq))
24847 -+ return NULL;
24848 -+
24849 -+ /* If device has only one backlogged bfq_queue, don't search. */
24850 -+ if (bfqd->busy_queues == 1)
24851 -+ return NULL;
24852 -+
24853 -+ /*
24854 -+ * We should notice if some of the queues are cooperating, e.g.
24855 -+ * working closely on the same area of the disk. In that case,
24856 -+ * we can group them together and don't waste time idling.
24857 -+ */
24858 -+ bfqq = bfqq_close(bfqd);
24859 -+ if (bfqq == NULL || bfqq == cur_bfqq)
24860 -+ return NULL;
24861 -+
24862 -+ /*
24863 -+ * Do not merge queues from different bfq_groups.
24864 -+ */
24865 -+ if (bfqq->entity.parent != cur_bfqq->entity.parent)
24866 -+ return NULL;
24867 -+
24868 -+ /*
24869 -+ * It only makes sense to merge sync queues.
24870 -+ */
24871 -+ if (!bfq_bfqq_sync(bfqq))
24872 -+ return NULL;
24873 -+ if (BFQQ_SEEKY(bfqq))
24874 -+ return NULL;
24875 -+
24876 -+ /*
24877 -+ * Do not merge queues of different priority classes.
24878 -+ */
24879 -+ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
24880 -+ return NULL;
24881 -+
24882 -+ return bfqq;
24883 -+}
24884 -+
24885 -+/*
24886 -+ * If enough samples have been computed, return the current max budget
24887 -+ * stored in bfqd, which is dynamically updated according to the
24888 -+ * estimated disk peak rate; otherwise return the default max budget
24889 -+ */
24890 -+static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
24891 -+{
24892 -+ if (bfqd->budgets_assigned < 194)
24893 -+ return bfq_default_max_budget;
24894 -+ else
24895 -+ return bfqd->bfq_max_budget;
24896 -+}
24897 -+
24898 -+/*
24899 -+ * Return min budget, which is a fraction of the current or default
24900 -+ * max budget (trying with 1/32)
24901 -+ */
24902 -+static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
24903 -+{
24904 -+ if (bfqd->budgets_assigned < 194)
24905 -+ return bfq_default_max_budget / 32;
24906 -+ else
24907 -+ return bfqd->bfq_max_budget / 32;
24908 -+}
24909 -+
24910 -+static void bfq_arm_slice_timer(struct bfq_data *bfqd)
24911 -+{
24912 -+ struct bfq_queue *bfqq = bfqd->in_service_queue;
24913 -+ struct bfq_io_cq *bic;
24914 -+ unsigned long sl;
24915 -+
24916 -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
24917 -+
24918 -+ /* Processes have exited, don't wait. */
24919 -+ bic = bfqd->in_service_bic;
24920 -+ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
24921 -+ return;
24922 -+
24923 -+ bfq_mark_bfqq_wait_request(bfqq);
24924 -+
24925 -+ /*
24926 -+ * We don't want to idle for seeks, but we do want to allow
24927 -+ * fair distribution of slice time for a process doing back-to-back
24928 -+ * seeks. So allow a little bit of time for it to submit a new rq.
24929 -+ *
24930 -+ * To prevent processes with (partly) seeky workloads from
24931 -+ * being too ill-treated, grant them a small fraction of the
24932 -+ * assigned budget before reducing the waiting time to
24933 -+ * BFQ_MIN_TT. This happened to help reduce latency.
24934 -+ */
24935 -+ sl = bfqd->bfq_slice_idle;
24936 -+ /*
24937 -+ * Unless the queue is being weight-raised, grant only minimum idle
24938 -+ * time if the queue either has been seeky for long enough or has
24939 -+ * already proved to be constantly seeky.
24940 -+ */
24941 -+ if (bfq_sample_valid(bfqq->seek_samples) &&
24942 -+ ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
24943 -+ bfq_max_budget(bfqq->bfqd) / 8) ||
24944 -+ bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
24945 -+ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
24946 -+ else if (bfqq->wr_coeff > 1)
24947 -+ sl = sl * 3;
24948 -+ bfqd->last_idling_start = ktime_get();
24949 -+ mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
24950 -+ bfq_log(bfqd, "arm idle: %u/%u ms",
24951 -+ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
24952 -+}
24953 -+
24954 -+/*
24955 -+ * Set the maximum time for the in-service queue to consume its
24956 -+ * budget. This prevents seeky processes from lowering the disk
24957 -+ * throughput (always guaranteed with a time slice scheme as in CFQ).
24958 -+ */
24959 -+static void bfq_set_budget_timeout(struct bfq_data *bfqd)
24960 -+{
24961 -+ struct bfq_queue *bfqq = bfqd->in_service_queue;
24962 -+ unsigned int timeout_coeff;
24963 -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
24964 -+ timeout_coeff = 1;
24965 -+ else
24966 -+ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
24967 -+
24968 -+ bfqd->last_budget_start = ktime_get();
24969 -+
24970 -+ bfq_clear_bfqq_budget_new(bfqq);
24971 -+ bfqq->budget_timeout = jiffies +
24972 -+ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
24973 -+
24974 -+ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
24975 -+ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
24976 -+ timeout_coeff));
24977 -+}
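With the defaults above (bfq_timeout_sync = HZ / 8) and invented weights: a queue raised from orig_weight 100 to weight 300 outside the soft-rt period gets timeout_coeff = 3, so its budget timeout lands 3 * HZ / 8 jiffies in the future, while soft-rt queues keep coefficient 1 and their latency bound is not stretched:

#include <stdio.h>

int main(void)
{
	unsigned int hz = 250;			/* hypothetical CONFIG_HZ */
	unsigned int timeout_sync = hz / 8;
	unsigned int weight = 300, orig_weight = 100;	/* made up */
	unsigned int coeff = weight / orig_weight;	/* 3 */

	printf("budget timeout in %u jiffies\n", timeout_sync * coeff);
	return 0;
}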
24978 -+
24979 -+/*
24980 -+ * Move request from internal lists to the request queue dispatch list.
24981 -+ */
24982 -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
24983 -+{
24984 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
24985 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
24986 -+
24987 -+ /*
24988 -+ * For consistency, the next instruction should have been executed
24989 -+ * after removing the request from the queue and dispatching it.
24990 -+ * We instead execute this instruction before bfq_remove_request()
24991 -+ * (and hence introduce a temporary inconsistency), for efficiency.
24992 -+ * In fact, in a forced_dispatch, this prevents the two counters
24993 -+ * related to bfqq->dispatched from being uselessly decremented if
24994 -+ * bfqq is not in service, and then incremented again after
24995 -+ * bfqq->dispatched itself is incremented.
24996 -+ */
24997 -+ bfqq->dispatched++;
24998 -+ bfq_remove_request(rq);
24999 -+ elv_dispatch_sort(q, rq);
25000 -+
25001 -+ if (bfq_bfqq_sync(bfqq))
25002 -+ bfqd->sync_flight++;
25003 -+}
25004 -+
25005 -+/*
25006 -+ * Return expired entry, or NULL to just start from scratch in rbtree.
25007 -+ */
25008 -+static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
25009 -+{
25010 -+ struct request *rq = NULL;
25011 -+
25012 -+ if (bfq_bfqq_fifo_expire(bfqq))
25013 -+ return NULL;
25014 -+
25015 -+ bfq_mark_bfqq_fifo_expire(bfqq);
25016 -+
25017 -+ if (list_empty(&bfqq->fifo))
25018 -+ return NULL;
25019 -+
25020 -+ rq = rq_entry_fifo(bfqq->fifo.next);
25021 -+
25022 -+ if (time_before(jiffies, rq->fifo_time))
25023 -+ return NULL;
25024 -+
25025 -+ return rq;
25026 -+}
25027 -+
25028 -+/* Must be called with the queue_lock held. */
25029 -+static int bfqq_process_refs(struct bfq_queue *bfqq)
25030 -+{
25031 -+ int process_refs, io_refs;
25032 -+
25033 -+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
25034 -+ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
25035 -+ BUG_ON(process_refs < 0);
25036 -+ return process_refs;
25037 -+}
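Put differently: of all the references held on a queue, those owned by in-flight requests and the one possibly held by the service tree are subtracted, and whatever remains belongs to processes. A trivial illustrative helper (names hypothetical):

  /* process refs = total refs - I/O refs - service-tree ref (0 or 1) */
  int process_refs(int total_refs, int read_rqs, int write_rqs, int on_service_tree)
  {
          return total_refs - (read_rqs + write_rqs) - on_service_tree;
  }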
25038 -+
25039 -+static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
25040 -+{
25041 -+ int process_refs, new_process_refs;
25042 -+ struct bfq_queue *__bfqq;
25043 -+
25044 -+ /*
25045 -+ * If there are no process references on the new_bfqq, then it is
25046 -+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
25047 -+ * may have dropped their last reference (not just their last process
25048 -+ * reference).
25049 -+ */
25050 -+ if (!bfqq_process_refs(new_bfqq))
25051 -+ return;
25052 -+
25053 -+ /* Avoid a circular list and skip interim queue merges. */
25054 -+ while ((__bfqq = new_bfqq->new_bfqq)) {
25055 -+ if (__bfqq == bfqq)
25056 -+ return;
25057 -+ new_bfqq = __bfqq;
25058 -+ }
25059 -+
25060 -+ process_refs = bfqq_process_refs(bfqq);
25061 -+ new_process_refs = bfqq_process_refs(new_bfqq);
25062 -+ /*
25063 -+ * If the process for the bfqq has gone away, there is no
25064 -+ * sense in merging the queues.
25065 -+ */
25066 -+ if (process_refs == 0 || new_process_refs == 0)
25067 -+ return;
25068 -+
25069 -+ /*
25070 -+ * Merge in the direction of the lesser amount of work.
25071 -+ */
25072 -+ if (new_process_refs >= process_refs) {
25073 -+ bfqq->new_bfqq = new_bfqq;
25074 -+ atomic_add(process_refs, &new_bfqq->ref);
25075 -+ } else {
25076 -+ new_bfqq->new_bfqq = bfqq;
25077 -+ atomic_add(new_process_refs, &bfqq->ref);
25078 -+ }
25079 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
25080 -+ new_bfqq->pid);
25081 -+}
25082 -+
25083 -+static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
25084 -+{
25085 -+ struct bfq_entity *entity = &bfqq->entity;
25086 -+ return entity->budget - entity->service;
25087 -+}
25088 -+
25089 -+static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
25090 -+{
25091 -+ BUG_ON(bfqq != bfqd->in_service_queue);
25092 -+
25093 -+ __bfq_bfqd_reset_in_service(bfqd);
25094 -+
25095 -+ /*
25096 -+ * If this bfqq is shared between multiple processes, check
25097 -+ * to make sure that those processes are still issuing I/Os
25098 -+ * within the mean seek distance. If not, it may be time to
25099 -+ * break the queues apart again.
25100 -+ */
25101 -+ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
25102 -+ bfq_mark_bfqq_split_coop(bfqq);
25103 -+
25104 -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
25105 -+ /*
25106 -+ * Overloading budget_timeout field to store the time
25107 -+ * at which the queue is left with no backlog; used by
25108 -+ * the weight-raising mechanism.
25109 -+ */
25110 -+ bfqq->budget_timeout = jiffies;
25111 -+ bfq_del_bfqq_busy(bfqd, bfqq, 1);
25112 -+ } else {
25113 -+ bfq_activate_bfqq(bfqd, bfqq);
25114 -+ /*
25115 -+ * Resort priority tree of potential close cooperators.
25116 -+ */
25117 -+ bfq_rq_pos_tree_add(bfqd, bfqq);
25118 -+ }
25119 -+}
25120 -+
25121 -+/**
25122 -+ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
25123 -+ * @bfqd: device data.
25124 -+ * @bfqq: queue to update.
25125 -+ * @reason: reason for expiration.
25126 -+ *
25127 -+ * Handle the feedback on @bfqq budget. See the body for detailed
25128 -+ * comments.
25129 -+ */
25130 -+static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
25131 -+ struct bfq_queue *bfqq,
25132 -+ enum bfqq_expiration reason)
25133 -+{
25134 -+ struct request *next_rq;
25135 -+ unsigned long budget, min_budget;
25136 -+
25137 -+ budget = bfqq->max_budget;
25138 -+ min_budget = bfq_min_budget(bfqd);
25139 -+
25140 -+ BUG_ON(bfqq != bfqd->in_service_queue);
25141 -+
25142 -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
25143 -+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
25144 -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
25145 -+ budget, bfq_min_budget(bfqd));
25146 -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
25147 -+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
25148 -+
25149 -+ if (bfq_bfqq_sync(bfqq)) {
25150 -+ switch (reason) {
25151 -+ /*
25152 -+ * Caveat: in all the following cases we trade latency
25153 -+ * for throughput.
25154 -+ */
25155 -+ case BFQ_BFQQ_TOO_IDLE:
25156 -+ /*
25157 -+ * This is the only case where we may reduce
25158 -+ * the budget: if there is no request of the
25159 -+ * process still waiting for completion, then
25160 -+ * we assume (tentatively) that the timer has
25161 -+ * expired because the batch of requests of
25162 -+ * the process could have been served with a
25163 -+ * smaller budget. Hence, betting that
25164 -+ * process will behave in the same way when it
25165 -+ * becomes backlogged again, we reduce its
25166 -+ * next budget. As long as we guess right,
25167 -+ * this budget cut reduces the latency
25168 -+ * experienced by the process.
25169 -+ *
25170 -+ * However, if there are still outstanding
25171 -+ * requests, then the process may have not yet
25172 -+ * issued its next request just because it is
25173 -+ * still waiting for the completion of some of
25174 -+ * the still outstanding ones. So in this
25175 -+ * subcase we do not reduce its budget, on the
25176 -+ * contrary we increase it to possibly boost
25177 -+ * the throughput, as discussed in the
25178 -+ * comments to the BUDGET_TIMEOUT case.
25179 -+ */
25180 -+ if (bfqq->dispatched > 0) /* still outstanding reqs */
25181 -+ budget = min(budget * 2, bfqd->bfq_max_budget);
25182 -+ else {
25183 -+ if (budget > 5 * min_budget)
25184 -+ budget -= 4 * min_budget;
25185 -+ else
25186 -+ budget = min_budget;
25187 -+ }
25188 -+ break;
25189 -+ case BFQ_BFQQ_BUDGET_TIMEOUT:
25190 -+ /*
25191 -+ * We double the budget here because: 1) it
25192 -+ * gives the chance to boost the throughput if
25193 -+ * this is not a seeky process (which may have
25194 -+ * bumped into this timeout because of, e.g.,
25195 -+ * ZBR), 2) together with charge_full_budget
25196 -+ * it helps give seeky processes higher
25197 -+ * timestamps, and hence to be served less
25198 -+ * frequently.
25199 -+ */
25200 -+ budget = min(budget * 2, bfqd->bfq_max_budget);
25201 -+ break;
25202 -+ case BFQ_BFQQ_BUDGET_EXHAUSTED:
25203 -+ /*
25204 -+ * The process still has backlog, and did not
25205 -+ * let either the budget timeout or the disk
25206 -+ * idling timeout expire. Hence it is not
25207 -+ * seeky, has a short thinktime and may be
25208 -+ * happy with a higher budget too. So
25209 -+ * definitely increase the budget of this good
25210 -+ * candidate to boost the disk throughput.
25211 -+ */
25212 -+ budget = min(budget * 4, bfqd->bfq_max_budget);
25213 -+ break;
25214 -+ case BFQ_BFQQ_NO_MORE_REQUESTS:
25215 -+ /*
25216 -+ * Leave the budget unchanged.
25217 -+ */
25218 -+ default:
25219 -+ return;
25220 -+ }
25221 -+ } else /* async queue */
25222 -+ /* async queues always get the maximum possible budget
25223 -+ * (their ability to dispatch is limited by
25224 -+ * @bfqd->bfq_max_budget_async_rq).
25225 -+ */
25226 -+ budget = bfqd->bfq_max_budget;
25227 -+
25228 -+ bfqq->max_budget = budget;
25229 -+
25230 -+ if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
25231 -+ bfqq->max_budget > bfqd->bfq_max_budget)
25232 -+ bfqq->max_budget = bfqd->bfq_max_budget;
25233 -+
25234 -+ /*
25235 -+ * Make sure that we have enough budget for the next request.
25236 -+ * Since the finish time of the bfqq must be kept in sync with
25237 -+ * the budget, be sure to call __bfq_bfqq_expire() after the
25238 -+ * update.
25239 -+ */
25240 -+ next_rq = bfqq->next_rq;
25241 -+ if (next_rq != NULL)
25242 -+ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
25243 -+ bfq_serv_to_charge(next_rq, bfqq));
25244 -+ else
25245 -+ bfqq->entity.budget = bfqq->max_budget;
25246 -+
25247 -+ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
25248 -+ next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
25249 -+ bfqq->entity.budget);
25250 -+}
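Stripped of logging and bookkeeping, the feedback above reduces to the following sketch; it is illustrative user-space C, not the patch's code, and the example values below are hypothetical.

  enum expire_reason { TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED, NO_MORE_REQUESTS };

  unsigned long next_budget(unsigned long budget, unsigned long min_b,
                            unsigned long max_b, enum expire_reason reason,
                            int outstanding_reqs)
  {
          switch (reason) {
          case TOO_IDLE:
                  if (outstanding_reqs)           /* completions pending: grow */
                          return budget * 2 < max_b ? budget * 2 : max_b;
                  return budget > 5 * min_b ? budget - 4 * min_b : min_b;
          case BUDGET_TIMEOUT:                    /* double, capped at max */
                  return budget * 2 < max_b ? budget * 2 : max_b;
          case BUDGET_EXHAUSTED:                  /* quadruple, capped at max */
                  return budget * 4 < max_b ? budget * 4 : max_b;
          default:                                /* leave unchanged */
                  return budget;
          }
  }

With min_b = 1000 and max_b = 16000 sectors, a sync queue at budget 8000 that hits its budget timeout moves to 16000, while one that expires idle with no outstanding requests drops to 4000.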
25251 -+
25252 -+static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
25253 -+{
25254 -+ unsigned long max_budget;
25255 -+
25256 -+ /*
25257 -+ * The max_budget calculated when autotuning is equal to the
25258 -+ * number of sectors transferred in timeout_sync at the
25259 -+ * estimated peak rate.
25260 -+ */
25261 -+ max_budget = (unsigned long)(peak_rate * 1000 *
25262 -+ timeout >> BFQ_RATE_SHIFT);
25263 -+
25264 -+ return max_budget;
25265 -+}
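Since the peak rate is stored as (sectors/usec) << BFQ_RATE_SHIFT and the timeout is in milliseconds, the multiplication by 1000 converts the timeout to microseconds and the right shift strips the fixed-point scaling, leaving the number of sectors servable in one timeout. A self-contained sketch; the shift value is an assumption for illustration only:

  #define RATE_SHIFT 16   /* assumed fixed-point shift, illustrative */

  unsigned long calc_max_budget(unsigned long long peak_rate_fp,
                                unsigned long long timeout_ms)
  {
          /* peak_rate_fp: (sectors/usec) << RATE_SHIFT */
          return (unsigned long)((peak_rate_fp * 1000 * timeout_ms) >> RATE_SHIFT);
  }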
25266 -+
25267 -+/*
25268 -+ * In addition to updating the peak rate, this function checks whether the process
25269 -+ * is "slow", and returns 1 if so. This slow flag is used, in addition
25270 -+ * to the budget timeout, to reduce the amount of service provided to
25271 -+ * seeky processes, and hence reduce their chances to lower the
25272 -+ * throughput. See the code for more details.
25273 -+ */
25274 -+static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
25275 -+ int compensate, enum bfqq_expiration reason)
25276 -+{
25277 -+ u64 bw, usecs, expected, timeout;
25278 -+ ktime_t delta;
25279 -+ int update = 0;
25280 -+
25281 -+ if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
25282 -+ return 0;
25283 -+
25284 -+ if (compensate)
25285 -+ delta = bfqd->last_idling_start;
25286 -+ else
25287 -+ delta = ktime_get();
25288 -+ delta = ktime_sub(delta, bfqd->last_budget_start);
25289 -+ usecs = ktime_to_us(delta);
25290 -+
25291 -+ /* Don't trust short/unrealistic values. */
25292 -+ if (usecs < 100 || usecs >= LONG_MAX)
25293 -+ return 0;
25294 -+
25295 -+ /*
25296 -+ * Calculate the bandwidth for the last slice. We use a 64 bit
25297 -+ * value to store the peak rate, in sectors per usec in fixed
25298 -+ * point math. We do so to have enough precision in the estimate
25299 -+ * and to avoid overflows.
25300 -+ */
25301 -+ bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
25302 -+ do_div(bw, (unsigned long)usecs);
25303 -+
25304 -+ timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
25305 -+
25306 -+ /*
25307 -+ * Use only long (> 20ms) intervals to filter out spikes for
25308 -+ * the peak rate estimation.
25309 -+ */
25310 -+ if (usecs > 20000) {
25311 -+ if (bw > bfqd->peak_rate ||
25312 -+ (!BFQQ_SEEKY(bfqq) &&
25313 -+ reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
25314 -+ bfq_log(bfqd, "measured bw = %llu", bw);
25315 -+ /*
25316 -+ * To smooth oscillations use a low-pass filter with
25317 -+ * alpha=7/8, i.e.,
25318 -+ * new_rate = (7/8) * old_rate + (1/8) * bw
25319 -+ */
25320 -+ do_div(bw, 8);
25321 -+ if (bw == 0)
25322 -+ return 0;
25323 -+ bfqd->peak_rate *= 7;
25324 -+ do_div(bfqd->peak_rate, 8);
25325 -+ bfqd->peak_rate += bw;
25326 -+ update = 1;
25327 -+ bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
25328 -+ }
25329 -+
25330 -+ update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
25331 -+
25332 -+ if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
25333 -+ bfqd->peak_rate_samples++;
25334 -+
25335 -+ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
25336 -+ update) {
25337 -+ int dev_type = blk_queue_nonrot(bfqd->queue);
25338 -+ if (bfqd->bfq_user_max_budget == 0) {
25339 -+ bfqd->bfq_max_budget =
25340 -+ bfq_calc_max_budget(bfqd->peak_rate,
25341 -+ timeout);
25342 -+ bfq_log(bfqd, "new max_budget=%lu",
25343 -+ bfqd->bfq_max_budget);
25344 -+ }
25345 -+ if (bfqd->device_speed == BFQ_BFQD_FAST &&
25346 -+ bfqd->peak_rate < device_speed_thresh[dev_type]) {
25347 -+ bfqd->device_speed = BFQ_BFQD_SLOW;
25348 -+ bfqd->RT_prod = R_slow[dev_type] *
25349 -+ T_slow[dev_type];
25350 -+ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
25351 -+ bfqd->peak_rate > device_speed_thresh[dev_type]) {
25352 -+ bfqd->device_speed = BFQ_BFQD_FAST;
25353 -+ bfqd->RT_prod = R_fast[dev_type] *
25354 -+ T_fast[dev_type];
25355 -+ }
25356 -+ }
25357 -+ }
25358 -+
25359 -+ /*
25360 -+ * If the process has been served for too short a time
25361 -+ * interval to let its possible sequential accesses prevail over
25362 -+ * the initial seek time needed to move the disk head on the
25363 -+ * first sector it requested, then give the process a chance
25364 -+ * and for the moment return false.
25365 -+ */
25366 -+ if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
25367 -+ return 0;
25368 -+
25369 -+ /*
25370 -+ * A process is considered ``slow'' (i.e., seeky, so that we
25371 -+ * cannot treat it fairly in the service domain, as it would
25372 -+ * slow down too much the other processes) if, when a slice
25373 -+ * ends for whatever reason, it has received service at a
25374 -+ * rate that would not be high enough to complete the budget
25375 -+ * before the budget timeout expiration.
25376 -+ */
25377 -+ expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
25378 -+
25379 -+ /*
25380 -+ * Caveat: processes doing IO in the slower disk zones will
25381 -+ * tend to be slow(er) even if not seeky. And the estimated
25382 -+ * peak rate will actually be an average over the disk
25383 -+ * surface. Hence, to not be too harsh with unlucky processes,
25384 -+ * we keep a budget/3 margin of safety before declaring a
25385 -+ * process slow.
25386 -+ */
25387 -+ return expected > (4 * bfqq->entity.budget) / 3;
25388 -+}
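The smoothing step buried in the function above is a classic integer low-pass filter with alpha = 7/8. Isolated into a sketch (not the patch's code):

  /* new_rate = 7/8 * old_rate + 1/8 * sample, in integer math, exactly
   * as the peak_rate update above (two truncating divisions by 8). */
  unsigned long long lowpass_rate(unsigned long long old_rate,
                                  unsigned long long sample)
  {
          return (old_rate * 7) / 8 + sample / 8;
  }

For example, an old rate of 800 and a sample of 1600 (same fixed-point units) give 700 + 200 = 900: each update moves one eighth of the way toward the new measurement, which damps spikes in individual samples.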
25389 -+
25390 -+/*
25391 -+ * To be deemed as soft real-time, an application must meet two
25392 -+ * requirements. First, the application must not require an average
25393 -+ * bandwidth higher than the approximate bandwidth required to play back or
25394 -+ * record a compressed high-definition video.
25395 -+ * The next function is invoked on the completion of the last request of a
25396 -+ * batch, to compute the next-start time instant, soft_rt_next_start, such
25397 -+ * that, if the next request of the application does not arrive before
25398 -+ * soft_rt_next_start, then the above requirement on the bandwidth is met.
25399 -+ *
25400 -+ * The second requirement is that the request pattern of the application is
25401 -+ * isochronous, i.e., that, after issuing a request or a batch of requests,
25402 -+ * the application stops issuing new requests until all its pending requests
25403 -+ * have been completed. After that, the application may issue a new batch,
25404 -+ * and so on.
25405 -+ * For this reason the next function is invoked to compute
25406 -+ * soft_rt_next_start only for applications that meet this requirement,
25407 -+ * whereas soft_rt_next_start is set to infinity for applications that do
25408 -+ * not.
25409 -+ *
25410 -+ * Unfortunately, even a greedy application may happen to behave in an
25411 -+ * isochronous way if the CPU load is high. In fact, the application may
25412 -+ * stop issuing requests while the CPUs are busy serving other processes,
25413 -+ * then restart, then stop again for a while, and so on. In addition, if
25414 -+ * the disk achieves a low enough throughput with the request pattern
25415 -+ * issued by the application (e.g., because the request pattern is random
25416 -+ * and/or the device is slow), then the application may meet the above
25417 -+ * bandwidth requirement too. To prevent such a greedy application from
25418 -+ * being deemed as soft real-time, a further rule is used in the computation of
25419 -+ * soft_rt_next_start: soft_rt_next_start must be higher than the current
25420 -+ * time plus the maximum time BFQ waits for the arrival of a request
25421 -+ * once a sync queue becomes idle, namely bfqd->bfq_slice_idle.
25422 -+ * This filters out greedy applications, as the latter instead issue their
25423 -+ * next request as soon as possible after the last one has been completed
25424 -+ * (in contrast, when a batch of requests is completed, a soft real-time
25425 -+ * application spends some time processing data).
25426 -+ *
25427 -+ * Unfortunately, the last filter may easily generate false positives if
25428 -+ * only bfqd->bfq_slice_idle is used as a reference time interval and one
25429 -+ * or both the following cases occur:
25430 -+ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
25431 -+ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
25432 -+ * HZ=100.
25433 -+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
25434 -+ * for a while, then suddenly 'jump' by several units to recover the lost
25435 -+ * increments. This seems to happen, e.g., inside virtual machines.
25436 -+ * To address this issue, we do not use as a reference time interval just
25437 -+ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
25438 -+ * particular we add the minimum number of jiffies for which the filter
25439 -+ * seems to be quite precise even in embedded systems and KVM/QEMU virtual
25440 -+ * machines.
25441 -+ */
25442 -+static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
25443 -+ struct bfq_queue *bfqq)
25444 -+{
25445 -+ return max(bfqq->last_idle_bklogged +
25446 -+ HZ * bfqq->service_from_backlogged /
25447 -+ bfqd->bfq_wr_max_softrt_rate,
25448 -+ jiffies + bfqq->bfqd->bfq_slice_idle + 4);
25449 -+}
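Numerically, HZ * service_from_backlogged / bfq_wr_max_softrt_rate is the minimum number of jiffies the application must stay quiet for its average bandwidth since last_idle_bklogged to drop below the soft real-time threshold, and the second operand of max() is the greedy-application filter discussed in the long comment above. A sketch with an assumed tick rate:

  #define TICK_HZ 250UL   /* assumed tick rate, illustrative */

  unsigned long softrt_next_start(unsigned long last_idle_bklogged,
                                  unsigned long service_sectors,
                                  unsigned long max_softrt_rate, /* sectors/sec */
                                  unsigned long now_jiffies,
                                  unsigned long slice_idle_jiffies)
  {
          unsigned long bw_bound = last_idle_bklogged +
                  TICK_HZ * service_sectors / max_softrt_rate;
          unsigned long greedy_bound = now_jiffies + slice_idle_jiffies + 4;

          return bw_bound > greedy_bound ? bw_bound : greedy_bound;
  }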
25450 -+
25451 -+/*
25452 -+ * Return the largest-possible time instant such that, for as long as possible,
25453 -+ * the current time will be lower than this time instant according to the macro
25454 -+ * time_is_before_jiffies().
25455 -+ */
25456 -+static inline unsigned long bfq_infinity_from_now(unsigned long now)
25457 -+{
25458 -+ return now + ULONG_MAX / 2;
25459 -+}
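The ULONG_MAX / 2 offset works because jiffies comparisons use signed wraparound arithmetic: differences up to ULONG_MAX / 2 still compare as "in the future", so this is the farthest instant that the time_is_before_jiffies() test keeps treating as not yet reached. A minimal illustration of that comparison style (user-space sketch):

  /* Wraparound-safe "t is still in the future" test, in the style of
   * the kernel's time_before()/time_after() macros. */
  int in_future(unsigned long now, unsigned long t)
  {
          return (long)(t - now) > 0;   /* true for offsets up to ULONG_MAX/2 */
  }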
25460 -+
25461 -+/**
25462 -+ * bfq_bfqq_expire - expire a queue.
25463 -+ * @bfqd: device owning the queue.
25464 -+ * @bfqq: the queue to expire.
25465 -+ * @compensate: if true, compensate for the time spent idling.
25466 -+ * @reason: the reason causing the expiration.
25467 -+ *
25468 -+ *
25469 -+ * If the process associated to the queue is slow (i.e., seeky), or in
25470 -+ * case of budget timeout, or, finally, if it is async, we
25471 -+ * artificially charge it an entire budget (independently of the
25472 -+ * actual service it received). As a consequence, the queue will get
25473 -+ * higher timestamps than the correct ones upon reactivation, and
25474 -+ * hence it will be rescheduled as if it had received more service
25475 -+ * than what it actually received. In the end, this class of processes
25476 -+ * will receive less service in proportion to how slowly they consume
25477 -+ * their budgets (and hence how seriously they tend to lower the
25478 -+ * throughput).
25479 -+ *
25480 -+ * In contrast, when a queue expires because it has been idling for
25481 -+ * too long or because it exhausted its budget, we do not touch the
25482 -+ * amount of service it has received. Hence when the queue is
25483 -+ * reactivated and its timestamps updated, the latter will be in sync
25484 -+ * with the actual service received by the queue until expiration.
25485 -+ *
25486 -+ * Charging a full budget to the first type of queues and the exact
25487 -+ * service to the others has the effect of using the WF2Q+ policy to
25488 -+ * schedule the former on a timeslice basis, without violating the
25489 -+ * service domain guarantees of the latter.
25490 -+ */
25491 -+static void bfq_bfqq_expire(struct bfq_data *bfqd,
25492 -+ struct bfq_queue *bfqq,
25493 -+ int compensate,
25494 -+ enum bfqq_expiration reason)
25495 -+{
25496 -+ int slow;
25497 -+ BUG_ON(bfqq != bfqd->in_service_queue);
25498 -+
25499 -+ /* Update disk peak rate for autotuning and check whether the
25500 -+ * process is slow (see bfq_update_peak_rate).
25501 -+ */
25502 -+ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
25503 -+
25504 -+ /*
25505 -+ * As explained above, 'punish' slow (i.e., seeky), timed-out
25506 -+ * and async queues, to favor sequential sync workloads.
25507 -+ *
25508 -+ * Processes doing I/O in the slower disk zones will tend to be
25509 -+ * slow(er) even if not seeky. Hence, since the estimated peak
25510 -+ * rate is actually an average over the disk surface, these
25511 -+ * processes may timeout just for bad luck. To avoid punishing
25512 -+ * them we do not charge a full budget to a process that
25513 -+ * succeeded in consuming at least 2/3 of its budget.
25514 -+ */
25515 -+ if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
25516 -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
25517 -+ bfq_bfqq_charge_full_budget(bfqq);
25518 -+
25519 -+ bfqq->service_from_backlogged += bfqq->entity.service;
25520 -+
25521 -+ if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
25522 -+ !bfq_bfqq_constantly_seeky(bfqq)) {
25523 -+ bfq_mark_bfqq_constantly_seeky(bfqq);
25524 -+ if (!blk_queue_nonrot(bfqd->queue))
25525 -+ bfqd->const_seeky_busy_in_flight_queues++;
25526 -+ }
25527 -+
25528 -+ if (reason == BFQ_BFQQ_TOO_IDLE &&
25529 -+ bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
25530 -+ bfq_clear_bfqq_IO_bound(bfqq);
25531 -+
25532 -+ if (bfqd->low_latency && bfqq->wr_coeff == 1)
25533 -+ bfqq->last_wr_start_finish = jiffies;
25534 -+
25535 -+ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
25536 -+ RB_EMPTY_ROOT(&bfqq->sort_list)) {
25537 -+ /*
25538 -+ * If we get here, and there are no outstanding requests,
25539 -+ * then the request pattern is isochronous (see the comments
25540 -+ * to the function bfq_bfqq_softrt_next_start()). Hence we
25541 -+ * can compute soft_rt_next_start. If, instead, the queue
25542 -+ * still has outstanding requests, then we have to wait
25543 -+ * for the completion of all the outstanding requests to
25544 -+ * discover whether the request pattern is actually
25545 -+ * isochronous.
25546 -+ */
25547 -+ if (bfqq->dispatched == 0)
25548 -+ bfqq->soft_rt_next_start =
25549 -+ bfq_bfqq_softrt_next_start(bfqd, bfqq);
25550 -+ else {
25551 -+ /*
25552 -+ * The application is still waiting for the
25553 -+ * completion of one or more requests:
25554 -+ * prevent it from possibly being incorrectly
25555 -+ * deemed as soft real-time by setting its
25556 -+ * soft_rt_next_start to infinity. In fact,
25557 -+ * without this assignment, the application
25558 -+ * would be incorrectly deemed as soft
25559 -+ * real-time if:
25560 -+ * 1) it issued a new request before the
25561 -+ * completion of all its in-flight
25562 -+ * requests, and
25563 -+ * 2) at that time, its soft_rt_next_start
25564 -+ * happened to be in the past.
25565 -+ */
25566 -+ bfqq->soft_rt_next_start =
25567 -+ bfq_infinity_from_now(jiffies);
25568 -+ /*
25569 -+ * Schedule an update of soft_rt_next_start to when
25570 -+ * the task may be discovered to be isochronous.
25571 -+ */
25572 -+ bfq_mark_bfqq_softrt_update(bfqq);
25573 -+ }
25574 -+ }
25575 -+
25576 -+ bfq_log_bfqq(bfqd, bfqq,
25577 -+ "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
25578 -+ slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
25579 -+
25580 -+ /*
25581 -+ * Increase, decrease or leave budget unchanged according to
25582 -+ * reason.
25583 -+ */
25584 -+ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
25585 -+ __bfq_bfqq_expire(bfqd, bfqq);
25586 -+}
25587 -+
25588 -+/*
25589 -+ * Budget timeout is not implemented through a dedicated timer, but
25590 -+ * just checked on request arrivals and completions, as well as on
25591 -+ * idle timer expirations.
25592 -+ */
25593 -+static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
25594 -+{
25595 -+ if (bfq_bfqq_budget_new(bfqq) ||
25596 -+ time_before(jiffies, bfqq->budget_timeout))
25597 -+ return 0;
25598 -+ return 1;
25599 -+}
25600 -+
25601 -+/*
25602 -+ * If we expire a queue that is waiting for the arrival of a new
25603 -+ * request, we may prevent the fictitious timestamp back-shifting that
25604 -+ * allows the guarantees of the queue to be preserved (see [1] for
25605 -+ * this tricky aspect). Hence we return true only if this condition
25606 -+ * does not hold, or if the queue is slow enough to deserve only to be
25607 -+ * kicked off for preserving a high throughput.
25608 -+ */
25609 -+static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
25610 -+{
25611 -+ bfq_log_bfqq(bfqq->bfqd, bfqq,
25612 -+ "may_budget_timeout: wait_request %d left %d timeout %d",
25613 -+ bfq_bfqq_wait_request(bfqq),
25614 -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
25615 -+ bfq_bfqq_budget_timeout(bfqq));
25616 -+
25617 -+ return (!bfq_bfqq_wait_request(bfqq) ||
25618 -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
25619 -+ &&
25620 -+ bfq_bfqq_budget_timeout(bfqq);
25621 -+}
25622 -+
25623 -+/*
25624 -+ * Device idling is allowed only for the queues for which this function
25625 -+ * returns true. For this reason, the return value of this function plays a
25626 -+ * critical role for both throughput boosting and service guarantees. The
25627 -+ * return value is computed through a logical expression. In this rather
25628 -+ * long comment, we try to briefly describe all the details and motivations
25629 -+ * behind the components of this logical expression.
25630 -+ *
25631 -+ * First, the expression is false if bfqq is not sync, or if: bfqq happened
25632 -+ * to become active during a large burst of queue activations, and the
25633 -+ * pattern of requests bfqq contains boosts the throughput if bfqq is
25634 -+ * expired. In fact, queues that became active during a large burst benefit
25635 -+ * only from throughput, as discussed in the comments to bfq_handle_burst.
25636 -+ * In this respect, expiring bfqq certainly boosts the throughput on NCQ-
25637 -+ * capable flash-based devices, whereas, on rotational devices, it boosts
25638 -+ * the throughput only if bfqq contains random requests.
25639 -+ *
25640 -+ * On the opposite end, if (a) bfqq is sync, (b) the above burst-related
25641 -+ * condition does not hold, and (c) bfqq is being weight-raised, then the
25642 -+ * expression always evaluates to true, as device idling is instrumental
25643 -+ * in preserving low-latency guarantees (see [1]). If, instead, conditions
25644 -+ * (a) and (b) do hold, but (c) does not, then the expression evaluates to
25645 -+ * true only if: (1) bfqq is I/O-bound and has a non-null idle window, and
25646 -+ * (2) at least one of the following two conditions holds.
25647 -+ * The first condition is that the device is not performing NCQ, because
25648 -+ * idling the device most certainly boosts the throughput if this condition
25649 -+ * holds and bfqq is I/O-bound and has been granted a non-null idle window.
25650 -+ * The second compound condition is made of the logical AND of two components.
25651 -+ *
25652 -+ * The first component is true only if there is no weight-raised busy
25653 -+ * queue. This guarantees that the device is not idled for a sync non-
25654 -+ * weight-raised queue when there are busy weight-raised queues. The former
25655 -+ * is then expired immediately if empty. Combined with the timestamping
25656 -+ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
25657 -+ * queues to get a lower number of requests served, and hence to ask for a
25658 -+ * lower number of requests from the request pool, before the busy weight-
25659 -+ * raised queues get served again.
25660 -+ *
25661 -+ * This is beneficial for the processes associated with weight-raised
25662 -+ * queues, when the request pool is saturated (e.g., in the presence of
25663 -+ * write hogs). In fact, if the processes associated with the other queues
25664 -+ * ask for requests at a lower rate, then weight-raised processes have a
25665 -+ * higher probability to get a request from the pool immediately (or at
25666 -+ * least soon) when they need one. Hence they have a higher probability to
25667 -+ * actually get a fraction of the disk throughput proportional to their
25668 -+ * high weight. This is especially true with NCQ-capable drives, which
25669 -+ * enqueue several requests in advance and further reorder internally-
25670 -+ * queued requests.
25671 -+ *
25672 -+ * In the end, mistreating non-weight-raised queues when there are busy
25673 -+ * weight-raised queues seems to mitigate starvation problems in the
25674 -+ * presence of heavy write workloads and NCQ, and hence to guarantee a
25675 -+ * higher application and system responsiveness in these hostile scenarios.
25676 -+ *
25677 -+ * If the first component of the compound condition is instead true, i.e.,
25678 -+ * there is no weight-raised busy queue, then the second component of the
25679 -+ * compound condition takes into account service-guarantee and throughput
25680 -+ * issues related to NCQ (recall that the compound condition is evaluated
25681 -+ * only if the device is detected as supporting NCQ).
25682 -+ *
25683 -+ * As for service guarantees, allowing the drive to enqueue more than one
25684 -+ * request at a time, and hence delegating de facto final scheduling
25685 -+ * decisions to the drive's internal scheduler, causes loss of control on
25686 -+ * the actual request service order. In this respect, when the drive is
25687 -+ * allowed to enqueue more than one request at a time, the service
25688 -+ * distribution enforced by the drive's internal scheduler is likely to
25689 -+ * coincide with the desired device-throughput distribution only in the
25690 -+ * following, perfectly symmetric, scenario:
25691 -+ * 1) all active queues have the same weight,
25692 -+ * 2) all active groups at the same level in the groups tree have the same
25693 -+ * weight,
25694 -+ * 3) all active groups at the same level in the groups tree have the same
25695 -+ * number of children.
25696 -+ *
25697 -+ * Even in such a scenario, sequential I/O may still receive a preferential
25698 -+ * treatment, but this is not likely to be a big issue with flash-based
25699 -+ * devices, because of their non-dramatic loss of throughput with random
25700 -+ * I/O. Things do differ with HDDs, for which additional care is taken, as
25701 -+ * explained after completing the discussion for flash-based devices.
25702 -+ *
25703 -+ * Unfortunately, keeping the necessary state for evaluating exactly the
25704 -+ * above symmetry conditions would be quite complex and time-consuming.
25705 -+ * Therefore BFQ evaluates instead the following stronger sub-conditions,
25706 -+ * for which it is much easier to maintain the needed state:
25707 -+ * 1) all active queues have the same weight,
25708 -+ * 2) all active groups have the same weight,
25709 -+ * 3) all active groups have at most one active child each.
25710 -+ * In particular, the last two conditions are always true if hierarchical
25711 -+ * support and the cgroups interface are not enabled, hence no state needs
25712 -+ * to be maintained in this case.
25713 -+ *
25714 -+ * According to the above considerations, the second component of the
25715 -+ * compound condition evaluates to true if any of the above symmetry
25716 -+ * sub-conditions does not hold, or the device is not flash-based. Therefore,
25717 -+ * if also the first component is true, then idling is allowed for a sync
25718 -+ * queue. These are the only sub-conditions considered if the device is
25719 -+ * flash-based, as, for such a device, it is sensible to force idling only
25720 -+ * for service-guarantee issues. In fact, as for throughput, idling
25721 -+ * NCQ-capable flash-based devices would not boost the throughput even
25722 -+ * with sequential I/O; rather it would lower the throughput in proportion
25723 -+ * to how fast the device is. In the end, (only) if all the three
25724 -+ * sub-conditions hold and the device is flash-based, the compound
25725 -+ * condition evaluates to false and therefore no idling is performed.
25726 -+ *
25727 -+ * As already said, things change with a rotational device, where idling
25728 -+ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
25729 -+ * such a device the second component of the compound condition evaluates
25730 -+ * to true also if the following additional sub-condition does not hold:
25731 -+ * the queue is constantly seeky. Unfortunately, this different behavior
25732 -+ * with respect to flash-based devices causes an additional asymmetry: if
25733 -+ * some sync queues enjoy idling and some other sync queues do not, then
25734 -+ * the latter get a low share of the device throughput, simply because the
25735 -+ * former get many requests served after being set as in service, whereas
25736 -+ * the latter do not. As a consequence, to guarantee the desired throughput
25737 -+ * distribution, on HDDs the compound expression evaluates to true (and
25738 -+ * hence device idling is performed) also if the following last symmetry
25739 -+ * condition does not hold: no other queue is benefiting from idling. Also
25740 -+ * this last condition is actually replaced with a simpler-to-maintain and
25741 -+ * stronger condition: there is no busy queue which is not constantly seeky
25742 -+ * (and hence may also benefit from idling).
25743 -+ *
25744 -+ * To sum up, when all the required symmetry and throughput-boosting
25745 -+ * sub-conditions hold, the second component of the compound condition
25746 -+ * evaluates to false, and hence no idling is performed. This helps to
25747 -+ * keep the drives' internal queues full on NCQ-capable devices, and hence
25748 -+ * to boost the throughput, without causing 'almost' any loss of service
25749 -+ * guarantees. The 'almost' follows from the fact that, if the internal
25750 -+ * queue of one such device is filled while all the sub-conditions hold,
25751 -+ * but at some point in time some sub-condition ceases to hold, then it may
25752 -+ * become impossible to let requests be served in the new desired order
25753 -+ * until all the requests already queued in the device have been served.
25754 -+ */
25755 -+static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
25756 -+{
25757 -+ struct bfq_data *bfqd = bfqq->bfqd;
25758 -+#ifdef CONFIG_CGROUP_BFQIO
25759 -+#define symmetric_scenario (!bfqd->active_numerous_groups && \
25760 -+ !bfq_differentiated_weights(bfqd))
25761 -+#else
25762 -+#define symmetric_scenario (!bfq_differentiated_weights(bfqd))
25763 -+#endif
25764 -+#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
25765 -+ bfqd->busy_in_flight_queues == \
25766 -+ bfqd->const_seeky_busy_in_flight_queues)
25767 -+
25768 -+#define cond_for_expiring_in_burst (bfq_bfqq_in_large_burst(bfqq) && \
25769 -+ bfqd->hw_tag && \
25770 -+ (blk_queue_nonrot(bfqd->queue) || \
25771 -+ bfq_bfqq_constantly_seeky(bfqq)))
25772 -+
25773 -+/*
25774 -+ * Condition for expiring a non-weight-raised queue (and hence not idling
25775 -+ * the device).
25776 -+ */
25777 -+#define cond_for_expiring_non_wr (bfqd->hw_tag && \
25778 -+ (bfqd->wr_busy_queues > 0 || \
25779 -+ (symmetric_scenario && \
25780 -+ (blk_queue_nonrot(bfqd->queue) || \
25781 -+ cond_for_seeky_on_ncq_hdd))))
25782 -+
25783 -+ return bfq_bfqq_sync(bfqq) &&
25784 -+ !cond_for_expiring_in_burst &&
25785 -+ (bfqq->wr_coeff > 1 ||
25786 -+ (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
25787 -+ !cond_for_expiring_non_wr)
25788 -+ );
25789 -+}
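Expanded into plain boolean logic, the return statement above reduces to the sketch below; the parameter names paraphrase the macros and flags (cond_for_expiring_in_burst, wr_coeff > 1, and so on) and are hypothetical.

  #include <stdbool.h>

  bool must_not_expire(bool sync, bool expiring_in_burst_pays_off,
                       bool weight_raised, bool io_bound,
                       bool idle_window, bool expire_non_wr)
  {
          return sync && !expiring_in_burst_pays_off &&
                 (weight_raised ||
                  (io_bound && idle_window && !expire_non_wr));
  }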
25790 -+
25791 -+/*
25792 -+ * If the in-service queue is empty but sync, and the function
25793 -+ * bfq_bfqq_must_not_expire returns true, then:
25794 -+ * 1) the queue must remain in service and cannot be expired, and
25795 -+ * 2) the disk must be idled to wait for the possible arrival of a new
25796 -+ * request for the queue.
25797 -+ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
25798 -+ * why performing device idling is the best choice to boost the throughput
25799 -+ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
25800 -+ * returns true.
25801 -+ */
25802 -+static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
25803 -+{
25804 -+ struct bfq_data *bfqd = bfqq->bfqd;
25805 -+
25806 -+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
25807 -+ bfq_bfqq_must_not_expire(bfqq);
25808 -+}
25809 -+
25810 -+/*
25811 -+ * Select a queue for service. If we have a current queue in service,
25812 -+ * check whether to continue servicing it, or retrieve and set a new one.
25813 -+ */
25814 -+static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
25815 -+{
25816 -+ struct bfq_queue *bfqq, *new_bfqq = NULL;
25817 -+ struct request *next_rq;
25818 -+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
25819 -+
25820 -+ bfqq = bfqd->in_service_queue;
25821 -+ if (bfqq == NULL)
25822 -+ goto new_queue;
25823 -+
25824 -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
25825 -+
25826 -+ /*
25827 -+ * If another queue has a request waiting within our mean seek
25828 -+ * distance, let it run. The expire code will check for close
25829 -+ * cooperators and put the close queue at the front of the
25830 -+ * service tree. If possible, merge the expiring queue with the
25831 -+ * new bfqq.
25832 -+ */
25833 -+ new_bfqq = bfq_close_cooperator(bfqd, bfqq);
25834 -+ if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
25835 -+ bfq_setup_merge(bfqq, new_bfqq);
25836 -+
25837 -+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
25838 -+ !timer_pending(&bfqd->idle_slice_timer) &&
25839 -+ !bfq_bfqq_must_idle(bfqq))
25840 -+ goto expire;
25841 -+
25842 -+ next_rq = bfqq->next_rq;
25843 -+ /*
25844 -+ * If bfqq has requests queued and it has enough budget left to
25845 -+ * serve them, keep the queue, otherwise expire it.
25846 -+ */
25847 -+ if (next_rq != NULL) {
25848 -+ if (bfq_serv_to_charge(next_rq, bfqq) >
25849 -+ bfq_bfqq_budget_left(bfqq)) {
25850 -+ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
25851 -+ goto expire;
25852 -+ } else {
25853 -+ /*
25854 -+ * The idle timer may be pending because we may
25855 -+ * not disable disk idling even when a new request
25856 -+ * arrives.
25857 -+ */
25858 -+ if (timer_pending(&bfqd->idle_slice_timer)) {
25859 -+ /*
25860 -+ * If we get here: 1) at least a new request
25861 -+ * has arrived but we have not disabled the
25862 -+ * timer because the request was too small,
25863 -+ * 2) then the block layer has unplugged
25864 -+ * the device, causing the dispatch to be
25865 -+ * invoked.
25866 -+ *
25867 -+ * Since the device is unplugged, now the
25868 -+ * requests are probably large enough to
25869 -+ * provide a reasonable throughput.
25870 -+ * So we disable idling.
25871 -+ */
25872 -+ bfq_clear_bfqq_wait_request(bfqq);
25873 -+ del_timer(&bfqd->idle_slice_timer);
25874 -+ }
25875 -+ if (new_bfqq == NULL)
25876 -+ goto keep_queue;
25877 -+ else
25878 -+ goto expire;
25879 -+ }
25880 -+ }
25881 -+
25882 -+ /*
25883 -+ * No requests pending. If the in-service queue still has requests
25884 -+ * in flight (possibly waiting for a completion) or is idling for a
25885 -+ * new request, then keep it.
25886 -+ */
25887 -+ if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
25888 -+ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
25889 -+ bfqq = NULL;
25890 -+ goto keep_queue;
25891 -+ } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
25892 -+ /*
25893 -+ * Expiring the queue because there is a close cooperator,
25894 -+ * cancel timer.
25895 -+ */
25896 -+ bfq_clear_bfqq_wait_request(bfqq);
25897 -+ del_timer(&bfqd->idle_slice_timer);
25898 -+ }
25899 -+
25900 -+ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
25901 -+expire:
25902 -+ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
25903 -+new_queue:
25904 -+ bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
25905 -+ bfq_log(bfqd, "select_queue: new queue %d returned",
25906 -+ bfqq != NULL ? bfqq->pid : 0);
25907 -+keep_queue:
25908 -+ return bfqq;
25909 -+}
25910 -+
25911 -+static void bfq_update_wr_data(struct bfq_data *bfqd,
25912 -+ struct bfq_queue *bfqq)
25913 -+{
25914 -+ if (bfqq->wr_coeff > 1) { /* queue is being boosted */
25915 -+ struct bfq_entity *entity = &bfqq->entity;
25916 -+
25917 -+ bfq_log_bfqq(bfqd, bfqq,
25918 -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
25919 -+ jiffies_to_msecs(jiffies -
25920 -+ bfqq->last_wr_start_finish),
25921 -+ jiffies_to_msecs(bfqq->wr_cur_max_time),
25922 -+ bfqq->wr_coeff,
25923 -+ bfqq->entity.weight, bfqq->entity.orig_weight);
25924 -+
25925 -+ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
25926 -+ entity->orig_weight * bfqq->wr_coeff);
25927 -+ if (entity->ioprio_changed)
25928 -+ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
25929 -+ /*
25930 -+ * If the queue was activated in a burst, or
25931 -+ * too much time has elapsed from the beginning
25932 -+ * of this weight-raising, then end weight raising.
25933 -+ */
25934 -+ if (bfq_bfqq_in_large_burst(bfqq) ||
25935 -+ time_is_before_jiffies(bfqq->last_wr_start_finish +
25936 -+ bfqq->wr_cur_max_time)) {
25937 -+ bfqq->last_wr_start_finish = jiffies;
25938 -+ bfq_log_bfqq(bfqd, bfqq,
25939 -+ "wrais ending at %lu, rais_max_time %u",
25940 -+ bfqq->last_wr_start_finish,
25941 -+ jiffies_to_msecs(bfqq->wr_cur_max_time));
25942 -+ bfq_bfqq_end_wr(bfqq);
25943 -+ __bfq_entity_update_weight_prio(
25944 -+ bfq_entity_service_tree(entity),
25945 -+ entity);
25946 -+ }
25947 -+ }
25948 -+}
25949 -+
25950 -+/*
25951 -+ * Dispatch one request from bfqq, moving it to the request queue
25952 -+ * dispatch list.
25953 -+ */
25954 -+static int bfq_dispatch_request(struct bfq_data *bfqd,
25955 -+ struct bfq_queue *bfqq)
25956 -+{
25957 -+ int dispatched = 0;
25958 -+ struct request *rq;
25959 -+ unsigned long service_to_charge;
25960 -+
25961 -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
25962 -+
25963 -+ /* Follow expired path, else get first next available. */
25964 -+ rq = bfq_check_fifo(bfqq);
25965 -+ if (rq == NULL)
25966 -+ rq = bfqq->next_rq;
25967 -+ service_to_charge = bfq_serv_to_charge(rq, bfqq);
25968 -+
25969 -+ if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
25970 -+ /*
25971 -+ * This may happen if the next rq is chosen in fifo order
25972 -+ * instead of sector order. The budget is properly
25973 -+ * dimensioned to be always sufficient to serve the next
25974 -+ * request only if it is chosen in sector order. The reason
25975 -+ * is that it would be quite inefficient and of little use
25976 -+ * to always make sure that the budget is large enough to
25977 -+ * serve even the possible next rq in fifo order.
25978 -+ * In fact, requests are seldom served in fifo order.
25979 -+ *
25980 -+ * Expire the queue for budget exhaustion, and make sure
25981 -+ * that the next act_budget is enough to serve the next
25982 -+ * request, even if it comes from the fifo expired path.
25983 -+ */
25984 -+ bfqq->next_rq = rq;
25985 -+ /*
25986 -+ * Since this dispatch failed, make sure that
25987 -+ * a new one will be performed.
25988 -+ */
25989 -+ if (!bfqd->rq_in_driver)
25990 -+ bfq_schedule_dispatch(bfqd);
25991 -+ goto expire;
25992 -+ }
25993 -+
25994 -+ /* Finally, insert request into driver dispatch list. */
25995 -+ bfq_bfqq_served(bfqq, service_to_charge);
25996 -+ bfq_dispatch_insert(bfqd->queue, rq);
25997 -+
25998 -+ bfq_update_wr_data(bfqd, bfqq);
25999 -+
26000 -+ bfq_log_bfqq(bfqd, bfqq,
26001 -+ "dispatched %u sec req (%llu), budg left %lu",
26002 -+ blk_rq_sectors(rq),
26003 -+ (long long unsigned)blk_rq_pos(rq),
26004 -+ bfq_bfqq_budget_left(bfqq));
26005 -+
26006 -+ dispatched++;
26007 -+
26008 -+ if (bfqd->in_service_bic == NULL) {
26009 -+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
26010 -+ bfqd->in_service_bic = RQ_BIC(rq);
26011 -+ }
26012 -+
26013 -+ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
26014 -+ dispatched >= bfqd->bfq_max_budget_async_rq) ||
26015 -+ bfq_class_idle(bfqq)))
26016 -+ goto expire;
26017 -+
26018 -+ return dispatched;
26019 -+
26020 -+expire:
26021 -+ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
26022 -+ return dispatched;
26023 -+}
26024 -+
26025 -+static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
26026 -+{
26027 -+ int dispatched = 0;
26028 -+
26029 -+ while (bfqq->next_rq != NULL) {
26030 -+ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
26031 -+ dispatched++;
26032 -+ }
26033 -+
26034 -+ BUG_ON(!list_empty(&bfqq->fifo));
26035 -+ return dispatched;
26036 -+}
26037 -+
26038 -+/*
26039 -+ * Drain our current requests.
26040 -+ * Used for barriers and when switching io schedulers on-the-fly.
26041 -+ */
26042 -+static int bfq_forced_dispatch(struct bfq_data *bfqd)
26043 -+{
26044 -+ struct bfq_queue *bfqq, *n;
26045 -+ struct bfq_service_tree *st;
26046 -+ int dispatched = 0;
26047 -+
26048 -+ bfqq = bfqd->in_service_queue;
26049 -+ if (bfqq != NULL)
26050 -+ __bfq_bfqq_expire(bfqd, bfqq);
26051 -+
26052 -+ /*
26053 -+ * Loop through classes, and be careful to leave the scheduler
26054 -+ * in a consistent state, as feedback mechanisms and vtime
26055 -+ * updates cannot be disabled during the process.
26056 -+ */
26057 -+ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
26058 -+ st = bfq_entity_service_tree(&bfqq->entity);
26059 -+
26060 -+ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
26061 -+ bfqq->max_budget = bfq_max_budget(bfqd);
26062 -+
26063 -+ bfq_forget_idle(st);
26064 -+ }
26065 -+
26066 -+ BUG_ON(bfqd->busy_queues != 0);
26067 -+
26068 -+ return dispatched;
26069 -+}
26070 -+
26071 -+static int bfq_dispatch_requests(struct request_queue *q, int force)
26072 -+{
26073 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
26074 -+ struct bfq_queue *bfqq;
26075 -+ int max_dispatch;
26076 -+
26077 -+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
26078 -+ if (bfqd->busy_queues == 0)
26079 -+ return 0;
26080 -+
26081 -+ if (unlikely(force))
26082 -+ return bfq_forced_dispatch(bfqd);
26083 -+
26084 -+ bfqq = bfq_select_queue(bfqd);
26085 -+ if (bfqq == NULL)
26086 -+ return 0;
26087 -+
26088 -+ max_dispatch = bfqd->bfq_quantum;
26089 -+ if (bfq_class_idle(bfqq))
26090 -+ max_dispatch = 1;
26091 -+
26092 -+ if (!bfq_bfqq_sync(bfqq))
26093 -+ max_dispatch = bfqd->bfq_max_budget_async_rq;
26094 -+
26095 -+ if (bfqq->dispatched >= max_dispatch) {
26096 -+ if (bfqd->busy_queues > 1)
26097 -+ return 0;
26098 -+ if (bfqq->dispatched >= 4 * max_dispatch)
26099 -+ return 0;
26100 -+ }
26101 -+
26102 -+ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
26103 -+ return 0;
26104 -+
26105 -+ bfq_clear_bfqq_wait_request(bfqq);
26106 -+ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
26107 -+
26108 -+ if (!bfq_dispatch_request(bfqd, bfqq))
26109 -+ return 0;
26110 -+
26111 -+ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
26112 -+ bfqq->pid, max_dispatch);
26113 -+
26114 -+ return 1;
26115 -+}
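The per-round cap chosen above thus depends on the queue type: bfq_quantum for sync queues, bfq_max_budget_async_rq for async ones (this assignment comes last, so it also overrides the idle-class cap for async queues), and a single request for idle-class sync queues. A condensed sketch of just the cap selection (illustrative names):

  unsigned int dispatch_cap(int sync, int idle_class,
                            unsigned int quantum, unsigned int max_async_rq)
  {
          unsigned int max = quantum;

          if (idle_class)
                  max = 1;
          if (!sync)
                  max = max_async_rq;   /* applied last, as in the code above */
          return max;
  }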
26116 -+
26117 -+/*
26118 -+ * Task holds one reference to the queue, dropped when task exits. Each rq
26119 -+ * in-flight on this queue also holds a reference, dropped when rq is freed.
26120 -+ *
26121 -+ * Queue lock must be held here.
26122 -+ */
26123 -+static void bfq_put_queue(struct bfq_queue *bfqq)
26124 -+{
26125 -+ struct bfq_data *bfqd = bfqq->bfqd;
26126 -+
26127 -+ BUG_ON(atomic_read(&bfqq->ref) <= 0);
26128 -+
26129 -+ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
26130 -+ atomic_read(&bfqq->ref));
26131 -+ if (!atomic_dec_and_test(&bfqq->ref))
26132 -+ return;
26133 -+
26134 -+ BUG_ON(rb_first(&bfqq->sort_list) != NULL);
26135 -+ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
26136 -+ BUG_ON(bfqq->entity.tree != NULL);
26137 -+ BUG_ON(bfq_bfqq_busy(bfqq));
26138 -+ BUG_ON(bfqd->in_service_queue == bfqq);
26139 -+
26140 -+ if (bfq_bfqq_sync(bfqq))
26141 -+ /*
26142 -+ * The fact that this queue is being destroyed does not
26143 -+ * invalidate the fact that this queue may have been
26144 -+ * activated during the current burst. As a consequence,
26145 -+ * although the queue does not exist anymore, and hence
26146 -+ * needs to be removed from the burst list if present,
26147 -+ * the burst size must not be decremented.
26148 -+ */
26149 -+ hlist_del_init(&bfqq->burst_list_node);
26150 -+
26151 -+ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
26152 -+
26153 -+ kmem_cache_free(bfq_pool, bfqq);
26154 -+}
26155 -+
26156 -+static void bfq_put_cooperator(struct bfq_queue *bfqq)
26157 -+{
26158 -+ struct bfq_queue *__bfqq, *next;
26159 -+
26160 -+ /*
26161 -+ * If this queue was scheduled to merge with another queue, be
26162 -+ * sure to drop the reference taken on that queue (and others in
26163 -+ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
26164 -+ */
26165 -+ __bfqq = bfqq->new_bfqq;
26166 -+ while (__bfqq) {
26167 -+ if (__bfqq == bfqq)
26168 -+ break;
26169 -+ next = __bfqq->new_bfqq;
26170 -+ bfq_put_queue(__bfqq);
26171 -+ __bfqq = next;
26172 -+ }
26173 -+}
26174 -+
26175 -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
26176 -+{
26177 -+ if (bfqq == bfqd->in_service_queue) {
26178 -+ __bfq_bfqq_expire(bfqd, bfqq);
26179 -+ bfq_schedule_dispatch(bfqd);
26180 -+ }
26181 -+
26182 -+ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
26183 -+ atomic_read(&bfqq->ref));
26184 -+
26185 -+ bfq_put_cooperator(bfqq);
26186 -+
26187 -+ bfq_put_queue(bfqq);
26188 -+}
26189 -+
26190 -+static inline void bfq_init_icq(struct io_cq *icq)
26191 -+{
26192 -+ struct bfq_io_cq *bic = icq_to_bic(icq);
26193 -+
26194 -+ bic->ttime.last_end_request = jiffies;
26195 -+}
26196 -+
26197 -+static void bfq_exit_icq(struct io_cq *icq)
26198 -+{
26199 -+ struct bfq_io_cq *bic = icq_to_bic(icq);
26200 -+ struct bfq_data *bfqd = bic_to_bfqd(bic);
26201 -+
26202 -+ if (bic->bfqq[BLK_RW_ASYNC]) {
26203 -+ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
26204 -+ bic->bfqq[BLK_RW_ASYNC] = NULL;
26205 -+ }
26206 -+
26207 -+ if (bic->bfqq[BLK_RW_SYNC]) {
26208 -+ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
26209 -+ bic->bfqq[BLK_RW_SYNC] = NULL;
26210 -+ }
26211 -+}
26212 -+
26213 -+/*
26214 -+ * Update the entity prio values; note that the new values will not
26215 -+ * be used until the next (re)activation.
26216 -+ */
26217 -+static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
26218 -+{
26219 -+ struct task_struct *tsk = current;
26220 -+ int ioprio_class;
26221 -+
26222 -+ if (!bfq_bfqq_prio_changed(bfqq))
26223 -+ return;
26224 -+
26225 -+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
26226 -+ switch (ioprio_class) {
26227 -+ default:
26228 -+ dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
26229 -+ "bfq: bad prio class %d\n", ioprio_class);
26230 -+ case IOPRIO_CLASS_NONE:
26231 -+ /*
26232 -+ * No prio set, inherit CPU scheduling settings.
26233 -+ */
26234 -+ bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
26235 -+ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
26236 -+ break;
26237 -+ case IOPRIO_CLASS_RT:
26238 -+ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
26239 -+ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
26240 -+ break;
26241 -+ case IOPRIO_CLASS_BE:
26242 -+ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
26243 -+ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
26244 -+ break;
26245 -+ case IOPRIO_CLASS_IDLE:
26246 -+ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
26247 -+ bfqq->entity.new_ioprio = 7;
26248 -+ bfq_clear_bfqq_idle_window(bfqq);
26249 -+ break;
26250 -+ }
26251 -+
26252 -+ if (bfqq->entity.new_ioprio < 0 ||
26253 -+ bfqq->entity.new_ioprio >= IOPRIO_BE_NR) {
26254 -+ printk(KERN_CRIT "bfq_init_prio_data: new_ioprio %d\n",
26255 -+ bfqq->entity.new_ioprio);
26256 -+ BUG();
26257 -+ }
26258 -+
26259 -+ bfqq->entity.ioprio_changed = 1;
26260 -+
26261 -+ bfq_clear_bfqq_prio_changed(bfqq);
26262 -+}
26263 -+
26264 -+static void bfq_changed_ioprio(struct bfq_io_cq *bic)
26265 -+{
26266 -+ struct bfq_data *bfqd;
26267 -+ struct bfq_queue *bfqq, *new_bfqq;
26268 -+ struct bfq_group *bfqg;
26269 -+ unsigned long uninitialized_var(flags);
26270 -+ int ioprio = bic->icq.ioc->ioprio;
26271 -+
26272 -+ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
26273 -+ &flags);
26274 -+ /*
26275 -+ * This condition may trigger on a newly created bic, be sure to
26276 -+ * drop the lock before returning.
26277 -+ */
26278 -+ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
26279 -+ goto out;
26280 -+
26281 -+ bfqq = bic->bfqq[BLK_RW_ASYNC];
26282 -+ if (bfqq != NULL) {
26283 -+ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
26284 -+ sched_data);
26285 -+ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
26286 -+ GFP_ATOMIC);
26287 -+ if (new_bfqq != NULL) {
26288 -+ bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
26289 -+ bfq_log_bfqq(bfqd, bfqq,
26290 -+ "changed_ioprio: bfqq %p %d",
26291 -+ bfqq, atomic_read(&bfqq->ref));
26292 -+ bfq_put_queue(bfqq);
26293 -+ }
26294 -+ }
26295 -+
26296 -+ bfqq = bic->bfqq[BLK_RW_SYNC];
26297 -+ if (bfqq != NULL)
26298 -+ bfq_mark_bfqq_prio_changed(bfqq);
26299 -+
26300 -+ bic->ioprio = ioprio;
26301 -+
26302 -+out:
26303 -+ bfq_put_bfqd_unlock(bfqd, &flags);
26304 -+}
26305 -+
26306 -+static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
26307 -+ pid_t pid, int is_sync)
26308 -+{
26309 -+ RB_CLEAR_NODE(&bfqq->entity.rb_node);
26310 -+ INIT_LIST_HEAD(&bfqq->fifo);
26311 -+ INIT_HLIST_NODE(&bfqq->burst_list_node);
26312 -+
26313 -+ atomic_set(&bfqq->ref, 0);
26314 -+ bfqq->bfqd = bfqd;
26315 -+
26316 -+ bfq_mark_bfqq_prio_changed(bfqq);
26317 -+
26318 -+ if (is_sync) {
26319 -+ if (!bfq_class_idle(bfqq))
26320 -+ bfq_mark_bfqq_idle_window(bfqq);
26321 -+ bfq_mark_bfqq_sync(bfqq);
26322 -+ }
26323 -+ bfq_mark_bfqq_IO_bound(bfqq);
26324 -+
26325 -+ /* Tentative initial value to trade off between throughput and latency */
26326 -+ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
26327 -+ bfqq->pid = pid;
26328 -+
26329 -+ bfqq->wr_coeff = 1;
26330 -+ bfqq->last_wr_start_finish = 0;
26331 -+ /*
26332 -+ * Set to the value for which bfqq will not be deemed as
26333 -+ * soft rt when it becomes backlogged.
26334 -+ */
26335 -+ bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
26336 -+}
26337 -+
26338 -+static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
26339 -+ struct bfq_group *bfqg,
26340 -+ int is_sync,
26341 -+ struct bfq_io_cq *bic,
26342 -+ gfp_t gfp_mask)
26343 -+{
26344 -+ struct bfq_queue *bfqq, *new_bfqq = NULL;
26345 -+
26346 -+retry:
26347 -+ /* bic always exists here */
26348 -+ bfqq = bic_to_bfqq(bic, is_sync);
26349 -+
26350 -+ /*
26351 -+ * Always retry a real allocation if we previously fell back to the
26352 -+ * OOM bfqq, since that should just be a temporary situation.
26353 -+ */
26354 -+ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
26355 -+ bfqq = NULL;
26356 -+ if (new_bfqq != NULL) {
26357 -+ bfqq = new_bfqq;
26358 -+ new_bfqq = NULL;
26359 -+ } else if (gfp_mask & __GFP_WAIT) {
26360 -+ spin_unlock_irq(bfqd->queue->queue_lock);
26361 -+ new_bfqq = kmem_cache_alloc_node(bfq_pool,
26362 -+ gfp_mask | __GFP_ZERO,
26363 -+ bfqd->queue->node);
26364 -+ spin_lock_irq(bfqd->queue->queue_lock);
26365 -+ if (new_bfqq != NULL)
26366 -+ goto retry;
26367 -+ } else {
26368 -+ bfqq = kmem_cache_alloc_node(bfq_pool,
26369 -+ gfp_mask | __GFP_ZERO,
26370 -+ bfqd->queue->node);
26371 -+ }
26372 -+
26373 -+ if (bfqq != NULL) {
26374 -+ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
26375 -+ bfq_init_prio_data(bfqq, bic);
26376 -+ bfq_init_entity(&bfqq->entity, bfqg);
26377 -+ bfq_log_bfqq(bfqd, bfqq, "allocated");
26378 -+ } else {
26379 -+ bfqq = &bfqd->oom_bfqq;
26380 -+ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
26381 -+ }
26382 -+ }
26383 -+
26384 -+ if (new_bfqq != NULL)
26385 -+ kmem_cache_free(bfq_pool, new_bfqq);
26386 -+
26387 -+ return bfqq;
26388 -+}
26389 -+
26390 -+static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
26391 -+ struct bfq_group *bfqg,
26392 -+ int ioprio_class, int ioprio)
26393 -+{
26394 -+ switch (ioprio_class) {
26395 -+ case IOPRIO_CLASS_RT:
26396 -+ return &bfqg->async_bfqq[0][ioprio];
26397 -+ case IOPRIO_CLASS_NONE:
26398 -+ ioprio = IOPRIO_NORM;
26399 -+ /* fall through */
26400 -+ case IOPRIO_CLASS_BE:
26401 -+ return &bfqg->async_bfqq[1][ioprio];
26402 -+ case IOPRIO_CLASS_IDLE:
26403 -+ return &bfqg->async_idle_bfqq;
26404 -+ default:
26405 -+ BUG();
26406 -+ }
26407 -+}
26408 -+
26409 -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
26410 -+ struct bfq_group *bfqg, int is_sync,
26411 -+ struct bfq_io_cq *bic, gfp_t gfp_mask)
26412 -+{
26413 -+ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
26414 -+ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
26415 -+ struct bfq_queue **async_bfqq = NULL;
26416 -+ struct bfq_queue *bfqq = NULL;
26417 -+
26418 -+ if (!is_sync) {
26419 -+ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
26420 -+ ioprio);
26421 -+ bfqq = *async_bfqq;
26422 -+ }
26423 -+
26424 -+ if (bfqq == NULL)
26425 -+ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
26426 -+
26427 -+ /*
26428 -+ * Pin the queue now that it's allocated, scheduler exit will
26429 -+ * prune it.
26430 -+ */
26431 -+ if (!is_sync && *async_bfqq == NULL) {
26432 -+ atomic_inc(&bfqq->ref);
26433 -+ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
26434 -+ bfqq, atomic_read(&bfqq->ref));
26435 -+ *async_bfqq = bfqq;
26436 -+ }
26437 -+
26438 -+ atomic_inc(&bfqq->ref);
26439 -+ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
26440 -+ atomic_read(&bfqq->ref));
26441 -+ return bfqq;
26442 -+}
26443 -+
26444 -+static void bfq_update_io_thinktime(struct bfq_data *bfqd,
26445 -+ struct bfq_io_cq *bic)
26446 -+{
26447 -+ unsigned long elapsed = jiffies - bic->ttime.last_end_request;
26448 -+ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
26449 -+
26450 -+ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
26451 -+ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
26452 -+ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
26453 -+ bic->ttime.ttime_samples;
26454 -+}
26455 -+
26456 -+static void bfq_update_io_seektime(struct bfq_data *bfqd,
26457 -+ struct bfq_queue *bfqq,
26458 -+ struct request *rq)
26459 -+{
26460 -+ sector_t sdist;
26461 -+ u64 total;
26462 -+
26463 -+ if (bfqq->last_request_pos < blk_rq_pos(rq))
26464 -+ sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
26465 -+ else
26466 -+ sdist = bfqq->last_request_pos - blk_rq_pos(rq);
26467 -+
26468 -+ /*
26469 -+ * Don't allow the seek distance to get too large from the
26470 -+ * odd fragment, pagein, etc.
26471 -+ */
26472 -+ if (bfqq->seek_samples == 0) /* first request, not really a seek */
26473 -+ sdist = 0;
26474 -+ else if (bfqq->seek_samples <= 60) /* second & third seek */
26475 -+ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
26476 -+ else
26477 -+ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
26478 -+
26479 -+ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
26480 -+ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
26481 -+ total = bfqq->seek_total + (bfqq->seek_samples/2);
26482 -+ do_div(total, bfqq->seek_samples);
26483 -+ bfqq->seek_mean = (sector_t)total;
26484 -+
26485 -+ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
26486 -+ (u64)bfqq->seek_mean);
26487 -+}
26488 -+
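bfq_update_io_thinktime() and bfq_update_io_seektime() above share one idiom: an exponentially weighted moving average with 7/8 decay, kept in integer fixed point by scaling everything by 256. The sample counter converges toward 256, the total holds 256 times the decayed sum, and the mean falls out as total/samples (the +128, respectively +samples/2, terms round the division). A minimal sketch of that fixed-point EWMA, as an illustration rather than the exact kernel state layout:

    #include <stdio.h>

    struct ewma { unsigned long samples, total; };

    /* Fold one new value in: 7/8 decay, 256x fixed-point scaling. */
    static void ewma_add(struct ewma *e, unsigned long val)
    {
        e->samples = (7 * e->samples + 256) / 8;
        e->total   = (7 * e->total + 256 * val) / 8;
    }

    static unsigned long ewma_mean(const struct ewma *e)
    {
        return (e->total + e->samples / 2) / e->samples;  /* rounded */
    }

    int main(void)
    {
        struct ewma e = { 0, 0 };
        unsigned long v[] = { 10, 10, 10, 40 };   /* a late outlier */

        for (int i = 0; i < 4; i++)
            ewma_add(&e, v[i]);
        /* The outlier moves the mean only partway toward 40. */
        printf("mean = %lu\n", ewma_mean(&e));
        return 0;
    }

The seek-time variant additionally clamps each new distance against four times the current mean plus some slack, so one odd fragment or pagein cannot swing the average.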
26489 -+/*
26490 -+ * Disable idle window if the process thinks too long or seeks so much that
26491 -+ * it doesn't matter.
26492 -+ */
26493 -+static void bfq_update_idle_window(struct bfq_data *bfqd,
26494 -+ struct bfq_queue *bfqq,
26495 -+ struct bfq_io_cq *bic)
26496 -+{
26497 -+ int enable_idle;
26498 -+
26499 -+ /* Don't idle for async or idle io prio class. */
26500 -+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
26501 -+ return;
26502 -+
26503 -+ enable_idle = bfq_bfqq_idle_window(bfqq);
26504 -+
26505 -+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
26506 -+ bfqd->bfq_slice_idle == 0 ||
26507 -+ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
26508 -+ bfqq->wr_coeff == 1))
26509 -+ enable_idle = 0;
26510 -+ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
26511 -+ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
26512 -+ bfqq->wr_coeff == 1)
26513 -+ enable_idle = 0;
26514 -+ else
26515 -+ enable_idle = 1;
26516 -+ }
26517 -+ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
26518 -+ enable_idle);
26519 -+
26520 -+ if (enable_idle)
26521 -+ bfq_mark_bfqq_idle_window(bfqq);
26522 -+ else
26523 -+ bfq_clear_bfqq_idle_window(bfqq);
26524 -+}
26525 -+
26526 -+/*
26527 -+ * Called when a new fs request (rq) is added to bfqq. Check if there's
26528 -+ * something we should do about it.
26529 -+ */
26530 -+static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
26531 -+ struct request *rq)
26532 -+{
26533 -+ struct bfq_io_cq *bic = RQ_BIC(rq);
26534 -+
26535 -+ if (rq->cmd_flags & REQ_META)
26536 -+ bfqq->meta_pending++;
26537 -+
26538 -+ bfq_update_io_thinktime(bfqd, bic);
26539 -+ bfq_update_io_seektime(bfqd, bfqq, rq);
26540 -+ if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
26541 -+ bfq_clear_bfqq_constantly_seeky(bfqq);
26542 -+ if (!blk_queue_nonrot(bfqd->queue)) {
26543 -+ BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
26544 -+ bfqd->const_seeky_busy_in_flight_queues--;
26545 -+ }
26546 -+ }
26547 -+ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
26548 -+ !BFQQ_SEEKY(bfqq))
26549 -+ bfq_update_idle_window(bfqd, bfqq, bic);
26550 -+
26551 -+ bfq_log_bfqq(bfqd, bfqq,
26552 -+ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
26553 -+ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
26554 -+ (long long unsigned)bfqq->seek_mean);
26555 -+
26556 -+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
26557 -+
26558 -+ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
26559 -+ int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
26560 -+ blk_rq_sectors(rq) < 32;
26561 -+ int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
26562 -+
26563 -+ /*
26564 -+ * There is just this request queued: if the request
26565 -+ * is small and the queue is not to be expired, then
26566 -+ * just exit.
26567 -+ *
26568 -+ * In this way, if the disk is being idled to wait for
26569 -+ * a new request from the in-service queue, we avoid
26570 -+ * unplugging the device and committing the disk to serve
26571 -+ * just a small request. On the contrary, we wait for
26572 -+ * the block layer to decide when to unplug the device:
26573 -+ * hopefully, new requests will be merged to this one
26574 -+ * quickly, then the device will be unplugged and
26575 -+ * larger requests will be dispatched.
26576 -+ */
26577 -+ if (small_req && !budget_timeout)
26578 -+ return;
26579 -+
26580 -+ /*
26581 -+ * A large enough request arrived, or the queue is to
26582 -+ * be expired: in both cases disk idling is to be
26583 -+ * stopped, so clear wait_request flag and reset
26584 -+ * timer.
26585 -+ */
26586 -+ bfq_clear_bfqq_wait_request(bfqq);
26587 -+ del_timer(&bfqd->idle_slice_timer);
26588 -+
26589 -+ /*
26590 -+ * The queue is not empty, because a new request just
26591 -+ * arrived. Hence we can safely expire the queue, in
26592 -+ * case of budget timeout, without risking that the
26593 -+ * timestamps of the queue are not updated correctly.
26594 -+ * See [1] for more details.
26595 -+ */
26596 -+ if (budget_timeout)
26597 -+ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
26598 -+
26599 -+ /*
26600 -+ * Let the request rip immediately, or let a new queue be
26601 -+ * selected if bfqq has just been expired.
26602 -+ */
26603 -+ __blk_run_queue(bfqd->queue);
26604 -+ }
26605 -+}
26606 -+
26607 -+static void bfq_insert_request(struct request_queue *q, struct request *rq)
26608 -+{
26609 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
26610 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
26611 -+
26612 -+ assert_spin_locked(bfqd->queue->queue_lock);
26613 -+ bfq_init_prio_data(bfqq, RQ_BIC(rq));
26614 -+
26615 -+ bfq_add_request(rq);
26616 -+
26617 -+ rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
26618 -+ list_add_tail(&rq->queuelist, &bfqq->fifo);
26619 -+
26620 -+ bfq_rq_enqueued(bfqd, bfqq, rq);
26621 -+}
26622 -+
26623 -+static void bfq_update_hw_tag(struct bfq_data *bfqd)
26624 -+{
26625 -+ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
26626 -+ bfqd->rq_in_driver);
26627 -+
26628 -+ if (bfqd->hw_tag == 1)
26629 -+ return;
26630 -+
26631 -+ /*
26632 -+ * This sample is valid if the number of outstanding requests
26633 -+ * is large enough to allow a queueing behavior. Note that the
26634 -+ * sum is not exact, as it's not taking into account deactivated
26635 -+ * requests.
26636 -+ */
26637 -+ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
26638 -+ return;
26639 -+
26640 -+ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
26641 -+ return;
26642 -+
26643 -+ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
26644 -+ bfqd->max_rq_in_driver = 0;
26645 -+ bfqd->hw_tag_samples = 0;
26646 -+}
26647 -+
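bfq_update_hw_tag() is a sample-then-decide detector: a window only counts as valid when enough requests are outstanding for queueing behavior to be observable, and only after a fixed number of valid windows does it compare the observed peak against the threshold; a positive verdict is sticky. The skeleton of that logic, lifted out of the bfq_data state (the two constants are placeholders for the patch's BFQ_HW_QUEUE_* macros, which are defined elsewhere in the file):

    #include <stdio.h>

    #define HW_QUEUE_THRESHOLD 4    /* illustrative; the patch defines its own */
    #define HW_QUEUE_SAMPLES  32

    struct detector {
        int hw_tag;                 /* -1 unknown, then sticky once 1 */
        int samples;
        unsigned max_in_driver;
    };

    static void observe(struct detector *d, unsigned in_driver, unsigned queued)
    {
        if (d->max_in_driver < in_driver)
            d->max_in_driver = in_driver;
        if (d->hw_tag == 1)
            return;                          /* decision already made */
        if (in_driver + queued < HW_QUEUE_THRESHOLD)
            return;                          /* too little load to judge */
        if (++d->samples < HW_QUEUE_SAMPLES)
            return;
        d->hw_tag = d->max_in_driver > HW_QUEUE_THRESHOLD;
        d->max_in_driver = 0;
        d->samples = 0;
    }

    int main(void)
    {
        struct detector d = { -1, 0, 0 };

        for (int i = 0; i < 40; i++)
            observe(&d, 6, 2);               /* sustained deep queue */
        printf("hw_tag = %d\n", d.hw_tag);   /* 1: drive queues internally */
        return 0;
    }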
26648 -+static void bfq_completed_request(struct request_queue *q, struct request *rq)
26649 -+{
26650 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
26651 -+ struct bfq_data *bfqd = bfqq->bfqd;
26652 -+ bool sync = bfq_bfqq_sync(bfqq);
26653 -+
26654 -+ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
26655 -+ blk_rq_sectors(rq), sync);
26656 -+
26657 -+ bfq_update_hw_tag(bfqd);
26658 -+
26659 -+ BUG_ON(!bfqd->rq_in_driver);
26660 -+ BUG_ON(!bfqq->dispatched);
26661 -+ bfqd->rq_in_driver--;
26662 -+ bfqq->dispatched--;
26663 -+
26664 -+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
26665 -+ bfq_weights_tree_remove(bfqd, &bfqq->entity,
26666 -+ &bfqd->queue_weights_tree);
26667 -+ if (!blk_queue_nonrot(bfqd->queue)) {
26668 -+ BUG_ON(!bfqd->busy_in_flight_queues);
26669 -+ bfqd->busy_in_flight_queues--;
26670 -+ if (bfq_bfqq_constantly_seeky(bfqq)) {
26671 -+ BUG_ON(!bfqd->
26672 -+ const_seeky_busy_in_flight_queues);
26673 -+ bfqd->const_seeky_busy_in_flight_queues--;
26674 -+ }
26675 -+ }
26676 -+ }
26677 -+
26678 -+ if (sync) {
26679 -+ bfqd->sync_flight--;
26680 -+ RQ_BIC(rq)->ttime.last_end_request = jiffies;
26681 -+ }
26682 -+
26683 -+ /*
26684 -+ * If we are waiting to discover whether the request pattern of the
26685 -+ * task associated with the queue is actually isochronous, and
26686 -+ * both requisites for this condition to hold are satisfied, then
26687 -+ * compute soft_rt_next_start (see the comments to the function
26688 -+ * bfq_bfqq_softrt_next_start()).
26689 -+ */
26690 -+ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
26691 -+ RB_EMPTY_ROOT(&bfqq->sort_list))
26692 -+ bfqq->soft_rt_next_start =
26693 -+ bfq_bfqq_softrt_next_start(bfqd, bfqq);
26694 -+
26695 -+ /*
26696 -+ * If this is the in-service queue, check if it needs to be expired,
26697 -+ * or if we want to idle in case it has no pending requests.
26698 -+ */
26699 -+ if (bfqd->in_service_queue == bfqq) {
26700 -+ if (bfq_bfqq_budget_new(bfqq))
26701 -+ bfq_set_budget_timeout(bfqd);
26702 -+
26703 -+ if (bfq_bfqq_must_idle(bfqq)) {
26704 -+ bfq_arm_slice_timer(bfqd);
26705 -+ goto out;
26706 -+ } else if (bfq_may_expire_for_budg_timeout(bfqq))
26707 -+ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
26708 -+ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
26709 -+ (bfqq->dispatched == 0 ||
26710 -+ !bfq_bfqq_must_not_expire(bfqq)))
26711 -+ bfq_bfqq_expire(bfqd, bfqq, 0,
26712 -+ BFQ_BFQQ_NO_MORE_REQUESTS);
26713 -+ }
26714 -+
26715 -+ if (!bfqd->rq_in_driver)
26716 -+ bfq_schedule_dispatch(bfqd);
26717 -+
26718 -+out:
26719 -+ return;
26720 -+}
26721 -+
26722 -+static inline int __bfq_may_queue(struct bfq_queue *bfqq)
26723 -+{
26724 -+ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
26725 -+ bfq_clear_bfqq_must_alloc(bfqq);
26726 -+ return ELV_MQUEUE_MUST;
26727 -+ }
26728 -+
26729 -+ return ELV_MQUEUE_MAY;
26730 -+}
26731 -+
26732 -+static int bfq_may_queue(struct request_queue *q, int rw)
26733 -+{
26734 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
26735 -+ struct task_struct *tsk = current;
26736 -+ struct bfq_io_cq *bic;
26737 -+ struct bfq_queue *bfqq;
26738 -+
26739 -+ /*
26740 -+ * Don't force setup of a queue from here, as a call to may_queue
26741 -+ * does not necessarily imply that a request actually will be
26742 -+	 * queued. So just look up a possibly existing queue, or return
26743 -+ * 'may queue' if that fails.
26744 -+ */
26745 -+ bic = bfq_bic_lookup(bfqd, tsk->io_context);
26746 -+ if (bic == NULL)
26747 -+ return ELV_MQUEUE_MAY;
26748 -+
26749 -+ bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
26750 -+ if (bfqq != NULL) {
26751 -+ bfq_init_prio_data(bfqq, bic);
26752 -+
26753 -+ return __bfq_may_queue(bfqq);
26754 -+ }
26755 -+
26756 -+ return ELV_MQUEUE_MAY;
26757 -+}
26758 -+
26759 -+/*
26760 -+ * Queue lock held here.
26761 -+ */
26762 -+static void bfq_put_request(struct request *rq)
26763 -+{
26764 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
26765 -+
26766 -+ if (bfqq != NULL) {
26767 -+ const int rw = rq_data_dir(rq);
26768 -+
26769 -+ BUG_ON(!bfqq->allocated[rw]);
26770 -+ bfqq->allocated[rw]--;
26771 -+
26772 -+ rq->elv.priv[0] = NULL;
26773 -+ rq->elv.priv[1] = NULL;
26774 -+
26775 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
26776 -+ bfqq, atomic_read(&bfqq->ref));
26777 -+ bfq_put_queue(bfqq);
26778 -+ }
26779 -+}
26780 -+
26781 -+static struct bfq_queue *
26782 -+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
26783 -+ struct bfq_queue *bfqq)
26784 -+{
26785 -+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
26786 -+ (long unsigned)bfqq->new_bfqq->pid);
26787 -+ bic_set_bfqq(bic, bfqq->new_bfqq, 1);
26788 -+ bfq_mark_bfqq_coop(bfqq->new_bfqq);
26789 -+ bfq_put_queue(bfqq);
26790 -+ return bic_to_bfqq(bic, 1);
26791 -+}
26792 -+
26793 -+/*
26794 -+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
26795 -+ * was the last process referring to said bfqq.
26796 -+ */
26797 -+static struct bfq_queue *
26798 -+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
26799 -+{
26800 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
26801 -+ if (bfqq_process_refs(bfqq) == 1) {
26802 -+ bfqq->pid = current->pid;
26803 -+ bfq_clear_bfqq_coop(bfqq);
26804 -+ bfq_clear_bfqq_split_coop(bfqq);
26805 -+ return bfqq;
26806 -+ }
26807 -+
26808 -+ bic_set_bfqq(bic, NULL, 1);
26809 -+
26810 -+ bfq_put_cooperator(bfqq);
26811 -+
26812 -+ bfq_put_queue(bfqq);
26813 -+ return NULL;
26814 -+}
26815 -+
26816 -+/*
26817 -+ * Allocate bfq data structures associated with this request.
26818 -+ */
26819 -+static int bfq_set_request(struct request_queue *q, struct request *rq,
26820 -+ struct bio *bio, gfp_t gfp_mask)
26821 -+{
26822 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
26823 -+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
26824 -+ const int rw = rq_data_dir(rq);
26825 -+ const int is_sync = rq_is_sync(rq);
26826 -+ struct bfq_queue *bfqq;
26827 -+ struct bfq_group *bfqg;
26828 -+ unsigned long flags;
26829 -+
26830 -+ might_sleep_if(gfp_mask & __GFP_WAIT);
26831 -+
26832 -+ bfq_changed_ioprio(bic);
26833 -+
26834 -+ spin_lock_irqsave(q->queue_lock, flags);
26835 -+
26836 -+ if (bic == NULL)
26837 -+ goto queue_fail;
26838 -+
26839 -+ bfqg = bfq_bic_update_cgroup(bic);
26840 -+
26841 -+new_queue:
26842 -+ bfqq = bic_to_bfqq(bic, is_sync);
26843 -+ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
26844 -+ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
26845 -+ bic_set_bfqq(bic, bfqq, is_sync);
26846 -+ } else {
26847 -+ /*
26848 -+ * If the queue was seeky for too long, break it apart.
26849 -+ */
26850 -+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
26851 -+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
26852 -+ bfqq = bfq_split_bfqq(bic, bfqq);
26853 -+ if (!bfqq)
26854 -+ goto new_queue;
26855 -+ }
26856 -+
26857 -+ /*
26858 -+ * Check to see if this queue is scheduled to merge with
26859 -+ * another closely cooperating queue. The merging of queues
26860 -+ * happens here as it must be done in process context.
26861 -+ * The reference on new_bfqq was taken in merge_bfqqs.
26862 -+ */
26863 -+ if (bfqq->new_bfqq != NULL)
26864 -+ bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
26865 -+ }
26866 -+
26867 -+ bfqq->allocated[rw]++;
26868 -+ atomic_inc(&bfqq->ref);
26869 -+ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
26870 -+ atomic_read(&bfqq->ref));
26871 -+
26872 -+ rq->elv.priv[0] = bic;
26873 -+ rq->elv.priv[1] = bfqq;
26874 -+
26875 -+ spin_unlock_irqrestore(q->queue_lock, flags);
26876 -+
26877 -+ return 0;
26878 -+
26879 -+queue_fail:
26880 -+ bfq_schedule_dispatch(bfqd);
26881 -+ spin_unlock_irqrestore(q->queue_lock, flags);
26882 -+
26883 -+ return 1;
26884 -+}
26885 -+
26886 -+static void bfq_kick_queue(struct work_struct *work)
26887 -+{
26888 -+ struct bfq_data *bfqd =
26889 -+ container_of(work, struct bfq_data, unplug_work);
26890 -+ struct request_queue *q = bfqd->queue;
26891 -+
26892 -+ spin_lock_irq(q->queue_lock);
26893 -+ __blk_run_queue(q);
26894 -+ spin_unlock_irq(q->queue_lock);
26895 -+}
26896 -+
26897 -+/*
26898 -+ * Handler for the expiration of the timer that runs while the
26899 -+ * in-service queue is idling inside its time slice.
26900 -+ */
26901 -+static void bfq_idle_slice_timer(unsigned long data)
26902 -+{
26903 -+ struct bfq_data *bfqd = (struct bfq_data *)data;
26904 -+ struct bfq_queue *bfqq;
26905 -+ unsigned long flags;
26906 -+ enum bfqq_expiration reason;
26907 -+
26908 -+ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
26909 -+
26910 -+ bfqq = bfqd->in_service_queue;
26911 -+ /*
26912 -+ * Theoretical race here: the in-service queue can be NULL or
26913 -+ * different from the queue that was idling if the timer handler
26914 -+ * spins on the queue_lock and a new request arrives for the
26915 -+ * current queue and there is a full dispatch cycle that changes
26916 -+ * the in-service queue. This can hardly happen, but in the worst
26917 -+ * case we just expire a queue too early.
26918 -+ */
26919 -+ if (bfqq != NULL) {
26920 -+ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
26921 -+ if (bfq_bfqq_budget_timeout(bfqq))
26922 -+ /*
26923 -+			 * Here too the queue can be safely expired
26924 -+			 * on budget timeout without wasting
26925 -+			 * service guarantees.
26926 -+ */
26927 -+ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
26928 -+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
26929 -+ /*
26930 -+ * The queue may not be empty upon timer expiration,
26931 -+ * because we may not disable the timer when the
26932 -+ * first request of the in-service queue arrives
26933 -+ * during disk idling.
26934 -+ */
26935 -+ reason = BFQ_BFQQ_TOO_IDLE;
26936 -+ else
26937 -+ goto schedule_dispatch;
26938 -+
26939 -+ bfq_bfqq_expire(bfqd, bfqq, 1, reason);
26940 -+ }
26941 -+
26942 -+schedule_dispatch:
26943 -+ bfq_schedule_dispatch(bfqd);
26944 -+
26945 -+ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
26946 -+}
26947 -+
26948 -+static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
26949 -+{
26950 -+ del_timer_sync(&bfqd->idle_slice_timer);
26951 -+ cancel_work_sync(&bfqd->unplug_work);
26952 -+}
26953 -+
26954 -+static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
26955 -+ struct bfq_queue **bfqq_ptr)
26956 -+{
26957 -+ struct bfq_group *root_group = bfqd->root_group;
26958 -+ struct bfq_queue *bfqq = *bfqq_ptr;
26959 -+
26960 -+ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
26961 -+ if (bfqq != NULL) {
26962 -+ bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
26963 -+ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
26964 -+ bfqq, atomic_read(&bfqq->ref));
26965 -+ bfq_put_queue(bfqq);
26966 -+ *bfqq_ptr = NULL;
26967 -+ }
26968 -+}
26969 -+
26970 -+/*
26971 -+ * Release all the bfqg references to its async queues. If we are
26972 -+ * deallocating the group these queues may still contain requests, so
26973 -+ * we reparent them to the root cgroup (i.e., the only one that will
26974 -+ * exist for sure until all the requests on a device are gone).
26975 -+ */
26976 -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
26977 -+{
26978 -+ int i, j;
26979 -+
26980 -+ for (i = 0; i < 2; i++)
26981 -+ for (j = 0; j < IOPRIO_BE_NR; j++)
26982 -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
26983 -+
26984 -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
26985 -+}
26986 -+
26987 -+static void bfq_exit_queue(struct elevator_queue *e)
26988 -+{
26989 -+ struct bfq_data *bfqd = e->elevator_data;
26990 -+ struct request_queue *q = bfqd->queue;
26991 -+ struct bfq_queue *bfqq, *n;
26992 -+
26993 -+ bfq_shutdown_timer_wq(bfqd);
26994 -+
26995 -+ spin_lock_irq(q->queue_lock);
26996 -+
26997 -+ BUG_ON(bfqd->in_service_queue != NULL);
26998 -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
26999 -+ bfq_deactivate_bfqq(bfqd, bfqq, 0);
27000 -+
27001 -+ bfq_disconnect_groups(bfqd);
27002 -+ spin_unlock_irq(q->queue_lock);
27003 -+
27004 -+ bfq_shutdown_timer_wq(bfqd);
27005 -+
27006 -+ synchronize_rcu();
27007 -+
27008 -+ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
27009 -+
27010 -+ bfq_free_root_group(bfqd);
27011 -+ kfree(bfqd);
27012 -+}
27013 -+
27014 -+static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
27015 -+{
27016 -+ struct bfq_group *bfqg;
27017 -+ struct bfq_data *bfqd;
27018 -+ struct elevator_queue *eq;
27019 -+
27020 -+ eq = elevator_alloc(q, e);
27021 -+ if (eq == NULL)
27022 -+ return -ENOMEM;
27023 -+
27024 -+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
27025 -+ if (bfqd == NULL) {
27026 -+ kobject_put(&eq->kobj);
27027 -+ return -ENOMEM;
27028 -+ }
27029 -+ eq->elevator_data = bfqd;
27030 -+
27031 -+ /*
27032 -+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
27033 -+ * Grab a permanent reference to it, so that the normal code flow
27034 -+ * will not attempt to free it.
27035 -+ */
27036 -+ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
27037 -+ atomic_inc(&bfqd->oom_bfqq.ref);
27038 -+ bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
27039 -+ bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE;
27040 -+ /*
27041 -+ * Trigger weight initialization, according to ioprio, at the
27042 -+ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
27043 -+ * class won't be changed any more.
27044 -+ */
27045 -+ bfqd->oom_bfqq.entity.ioprio_changed = 1;
27046 -+
27047 -+ bfqd->queue = q;
27048 -+
27049 -+ spin_lock_irq(q->queue_lock);
27050 -+ q->elevator = eq;
27051 -+ spin_unlock_irq(q->queue_lock);
27052 -+
27053 -+ bfqg = bfq_alloc_root_group(bfqd, q->node);
27054 -+ if (bfqg == NULL) {
27055 -+ kfree(bfqd);
27056 -+ kobject_put(&eq->kobj);
27057 -+ return -ENOMEM;
27058 -+ }
27059 -+
27060 -+ bfqd->root_group = bfqg;
27061 -+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
27062 -+#ifdef CONFIG_CGROUP_BFQIO
27063 -+ bfqd->active_numerous_groups = 0;
27064 -+#endif
27065 -+
27066 -+ init_timer(&bfqd->idle_slice_timer);
27067 -+ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
27068 -+ bfqd->idle_slice_timer.data = (unsigned long)bfqd;
27069 -+
27070 -+ bfqd->rq_pos_tree = RB_ROOT;
27071 -+ bfqd->queue_weights_tree = RB_ROOT;
27072 -+ bfqd->group_weights_tree = RB_ROOT;
27073 -+
27074 -+ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
27075 -+
27076 -+ INIT_LIST_HEAD(&bfqd->active_list);
27077 -+ INIT_LIST_HEAD(&bfqd->idle_list);
27078 -+ INIT_HLIST_HEAD(&bfqd->burst_list);
27079 -+
27080 -+ bfqd->hw_tag = -1;
27081 -+
27082 -+ bfqd->bfq_max_budget = bfq_default_max_budget;
27083 -+
27084 -+ bfqd->bfq_quantum = bfq_quantum;
27085 -+ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
27086 -+ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
27087 -+ bfqd->bfq_back_max = bfq_back_max;
27088 -+ bfqd->bfq_back_penalty = bfq_back_penalty;
27089 -+ bfqd->bfq_slice_idle = bfq_slice_idle;
27090 -+ bfqd->bfq_class_idle_last_service = 0;
27091 -+ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
27092 -+ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
27093 -+ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
27094 -+
27095 -+ bfqd->bfq_coop_thresh = 2;
27096 -+ bfqd->bfq_failed_cooperations = 7000;
27097 -+ bfqd->bfq_requests_within_timer = 120;
27098 -+
27099 -+ bfqd->bfq_large_burst_thresh = 11;
27100 -+ bfqd->bfq_burst_interval = msecs_to_jiffies(500);
27101 -+
27102 -+ bfqd->low_latency = true;
27103 -+
27104 -+ bfqd->bfq_wr_coeff = 20;
27105 -+ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
27106 -+ bfqd->bfq_wr_max_time = 0;
27107 -+ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
27108 -+ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
27109 -+ bfqd->bfq_wr_max_softrt_rate = 7000; /*
27110 -+ * Approximate rate required
27111 -+					      * to play back or record a
27112 -+ * high-definition compressed
27113 -+ * video.
27114 -+ */
27115 -+ bfqd->wr_busy_queues = 0;
27116 -+ bfqd->busy_in_flight_queues = 0;
27117 -+ bfqd->const_seeky_busy_in_flight_queues = 0;
27118 -+
27119 -+ /*
27120 -+ * Begin by assuming, optimistically, that the device peak rate is
27121 -+ * equal to the highest reference rate.
27122 -+ */
27123 -+ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
27124 -+ T_fast[blk_queue_nonrot(bfqd->queue)];
27125 -+ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
27126 -+ bfqd->device_speed = BFQ_BFQD_FAST;
27127 -+
27128 -+ return 0;
27129 -+}
27130 -+
27131 -+static void bfq_slab_kill(void)
27132 -+{
27133 -+ if (bfq_pool != NULL)
27134 -+ kmem_cache_destroy(bfq_pool);
27135 -+}
27136 -+
27137 -+static int __init bfq_slab_setup(void)
27138 -+{
27139 -+ bfq_pool = KMEM_CACHE(bfq_queue, 0);
27140 -+ if (bfq_pool == NULL)
27141 -+ return -ENOMEM;
27142 -+ return 0;
27143 -+}
27144 -+
27145 -+static ssize_t bfq_var_show(unsigned int var, char *page)
27146 -+{
27147 -+ return sprintf(page, "%d\n", var);
27148 -+}
27149 -+
27150 -+static ssize_t bfq_var_store(unsigned long *var, const char *page,
27151 -+ size_t count)
27152 -+{
27153 -+ unsigned long new_val;
27154 -+ int ret = kstrtoul(page, 10, &new_val);
27155 -+
27156 -+ if (ret == 0)
27157 -+ *var = new_val;
27158 -+
27159 -+ return count;
27160 -+}
27161 -+
27162 -+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
27163 -+{
27164 -+ struct bfq_data *bfqd = e->elevator_data;
27165 -+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
27166 -+ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
27167 -+ jiffies_to_msecs(bfq_wr_duration(bfqd)));
27168 -+}
27169 -+
27170 -+static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
27171 -+{
27172 -+ struct bfq_queue *bfqq;
27173 -+ struct bfq_data *bfqd = e->elevator_data;
27174 -+ ssize_t num_char = 0;
27175 -+
27176 -+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
27177 -+ bfqd->queued);
27178 -+
27179 -+ spin_lock_irq(bfqd->queue->queue_lock);
27180 -+
27181 -+ num_char += sprintf(page + num_char, "Active:\n");
27182 -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
27183 -+ num_char += sprintf(page + num_char,
27184 -+ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
27185 -+ bfqq->pid,
27186 -+ bfqq->entity.weight,
27187 -+ bfqq->queued[0],
27188 -+ bfqq->queued[1],
27189 -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
27190 -+ jiffies_to_msecs(bfqq->wr_cur_max_time));
27191 -+ }
27192 -+
27193 -+ num_char += sprintf(page + num_char, "Idle:\n");
27194 -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
27195 -+ num_char += sprintf(page + num_char,
27196 -+ "pid%d: weight %hu, dur %d/%u\n",
27197 -+ bfqq->pid,
27198 -+ bfqq->entity.weight,
27199 -+ jiffies_to_msecs(jiffies -
27200 -+ bfqq->last_wr_start_finish),
27201 -+ jiffies_to_msecs(bfqq->wr_cur_max_time));
27202 -+ }
27203 -+
27204 -+ spin_unlock_irq(bfqd->queue->queue_lock);
27205 -+
27206 -+ return num_char;
27207 -+}
27208 -+
27209 -+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
27210 -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
27211 -+{ \
27212 -+ struct bfq_data *bfqd = e->elevator_data; \
27213 -+ unsigned int __data = __VAR; \
27214 -+ if (__CONV) \
27215 -+ __data = jiffies_to_msecs(__data); \
27216 -+ return bfq_var_show(__data, (page)); \
27217 -+}
27218 -+SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
27219 -+SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
27220 -+SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
27221 -+SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
27222 -+SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
27223 -+SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
27224 -+SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
27225 -+SHOW_FUNCTION(bfq_max_budget_async_rq_show,
27226 -+ bfqd->bfq_max_budget_async_rq, 0);
27227 -+SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
27228 -+SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
27229 -+SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
27230 -+SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
27231 -+SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
27232 -+SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
27233 -+SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
27234 -+ 1);
27235 -+SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
27236 -+#undef SHOW_FUNCTION
27237 -+
27238 -+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
27239 -+static ssize_t \
27240 -+__FUNC(struct elevator_queue *e, const char *page, size_t count) \
27241 -+{ \
27242 -+ struct bfq_data *bfqd = e->elevator_data; \
27243 -+ unsigned long uninitialized_var(__data); \
27244 -+ int ret = bfq_var_store(&__data, (page), count); \
27245 -+ if (__data < (MIN)) \
27246 -+ __data = (MIN); \
27247 -+ else if (__data > (MAX)) \
27248 -+ __data = (MAX); \
27249 -+ if (__CONV) \
27250 -+ *(__PTR) = msecs_to_jiffies(__data); \
27251 -+ else \
27252 -+ *(__PTR) = __data; \
27253 -+ return ret; \
27254 -+}
27255 -+STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
27256 -+STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
27257 -+ INT_MAX, 1);
27258 -+STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
27259 -+ INT_MAX, 1);
27260 -+STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
27261 -+STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
27262 -+ INT_MAX, 0);
27263 -+STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
27264 -+STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
27265 -+ 1, INT_MAX, 0);
27266 -+STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
27267 -+ INT_MAX, 1);
27268 -+STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
27269 -+STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
27270 -+STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
27271 -+ 1);
27272 -+STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
27273 -+ INT_MAX, 1);
27274 -+STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
27275 -+ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
27276 -+STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
27277 -+ INT_MAX, 0);
27278 -+#undef STORE_FUNCTION
27279 -+
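Each SHOW_FUNCTION/STORE_FUNCTION invocation above stamps out one sysfs accessor; the trailing __CONV flag marks tunables stored in jiffies, which must cross msecs_to_jiffies()/jiffies_to_msecs() at the user boundary. What a generated store handler boils down to, modeled in user space (HZ and the conversion are rough stand-ins for the kernel's, and the names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define HZ 250                          /* illustrative tick rate */

    static unsigned long msecs_to_jiffies_model(unsigned long ms)
    {
        return (ms * HZ + 999) / 1000;      /* rough model: round up */
    }

    static unsigned long slice_idle_jiffies;    /* tunable kept in jiffies */

    /* Shape of a generated _store handler: parse, clamp, convert, assign. */
    static long slice_idle_store(const char *page, long count)
    {
        unsigned long data = strtoul(page, NULL, 10);  /* like bfq_var_store */

        if (data > INT_MAX)
            data = INT_MAX;                 /* the MAX clamp from the macro */
        slice_idle_jiffies = msecs_to_jiffies_model(data);  /* __CONV == 1 */
        return count;
    }

    int main(void)
    {
        slice_idle_store("8", 2);           /* user writes "8" (milliseconds) */
        printf("slice_idle = %lu jiffies\n", slice_idle_jiffies);   /* 2 */
        return 0;
    }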
27280 -+/* do nothing for the moment */
27281 -+static ssize_t bfq_weights_store(struct elevator_queue *e,
27282 -+ const char *page, size_t count)
27283 -+{
27284 -+ return count;
27285 -+}
27286 -+
27287 -+static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
27288 -+{
27289 -+ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
27290 -+
27291 -+ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
27292 -+ return bfq_calc_max_budget(bfqd->peak_rate, timeout);
27293 -+ else
27294 -+ return bfq_default_max_budget;
27295 -+}
27296 -+
27297 -+static ssize_t bfq_max_budget_store(struct elevator_queue *e,
27298 -+ const char *page, size_t count)
27299 -+{
27300 -+ struct bfq_data *bfqd = e->elevator_data;
27301 -+ unsigned long uninitialized_var(__data);
27302 -+ int ret = bfq_var_store(&__data, (page), count);
27303 -+
27304 -+ if (__data == 0)
27305 -+ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
27306 -+ else {
27307 -+ if (__data > INT_MAX)
27308 -+ __data = INT_MAX;
27309 -+ bfqd->bfq_max_budget = __data;
27310 -+ }
27311 -+
27312 -+ bfqd->bfq_user_max_budget = __data;
27313 -+
27314 -+ return ret;
27315 -+}
27316 -+
27317 -+static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
27318 -+ const char *page, size_t count)
27319 -+{
27320 -+ struct bfq_data *bfqd = e->elevator_data;
27321 -+ unsigned long uninitialized_var(__data);
27322 -+ int ret = bfq_var_store(&__data, (page), count);
27323 -+
27324 -+ if (__data < 1)
27325 -+ __data = 1;
27326 -+ else if (__data > INT_MAX)
27327 -+ __data = INT_MAX;
27328 -+
27329 -+ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
27330 -+ if (bfqd->bfq_user_max_budget == 0)
27331 -+ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
27332 -+
27333 -+ return ret;
27334 -+}
27335 -+
27336 -+static ssize_t bfq_low_latency_store(struct elevator_queue *e,
27337 -+ const char *page, size_t count)
27338 -+{
27339 -+ struct bfq_data *bfqd = e->elevator_data;
27340 -+ unsigned long uninitialized_var(__data);
27341 -+ int ret = bfq_var_store(&__data, (page), count);
27342 -+
27343 -+ if (__data > 1)
27344 -+ __data = 1;
27345 -+ if (__data == 0 && bfqd->low_latency != 0)
27346 -+ bfq_end_wr(bfqd);
27347 -+ bfqd->low_latency = __data;
27348 -+
27349 -+ return ret;
27350 -+}
27351 -+
27352 -+#define BFQ_ATTR(name) \
27353 -+ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
27354 -+
27355 -+static struct elv_fs_entry bfq_attrs[] = {
27356 -+ BFQ_ATTR(quantum),
27357 -+ BFQ_ATTR(fifo_expire_sync),
27358 -+ BFQ_ATTR(fifo_expire_async),
27359 -+ BFQ_ATTR(back_seek_max),
27360 -+ BFQ_ATTR(back_seek_penalty),
27361 -+ BFQ_ATTR(slice_idle),
27362 -+ BFQ_ATTR(max_budget),
27363 -+ BFQ_ATTR(max_budget_async_rq),
27364 -+ BFQ_ATTR(timeout_sync),
27365 -+ BFQ_ATTR(timeout_async),
27366 -+ BFQ_ATTR(low_latency),
27367 -+ BFQ_ATTR(wr_coeff),
27368 -+ BFQ_ATTR(wr_max_time),
27369 -+ BFQ_ATTR(wr_rt_max_time),
27370 -+ BFQ_ATTR(wr_min_idle_time),
27371 -+ BFQ_ATTR(wr_min_inter_arr_async),
27372 -+ BFQ_ATTR(wr_max_softrt_rate),
27373 -+ BFQ_ATTR(weights),
27374 -+ __ATTR_NULL
27375 -+};
27376 -+
27377 -+static struct elevator_type iosched_bfq = {
27378 -+ .ops = {
27379 -+ .elevator_merge_fn = bfq_merge,
27380 -+ .elevator_merged_fn = bfq_merged_request,
27381 -+ .elevator_merge_req_fn = bfq_merged_requests,
27382 -+ .elevator_allow_merge_fn = bfq_allow_merge,
27383 -+ .elevator_dispatch_fn = bfq_dispatch_requests,
27384 -+ .elevator_add_req_fn = bfq_insert_request,
27385 -+ .elevator_activate_req_fn = bfq_activate_request,
27386 -+ .elevator_deactivate_req_fn = bfq_deactivate_request,
27387 -+ .elevator_completed_req_fn = bfq_completed_request,
27388 -+ .elevator_former_req_fn = elv_rb_former_request,
27389 -+ .elevator_latter_req_fn = elv_rb_latter_request,
27390 -+ .elevator_init_icq_fn = bfq_init_icq,
27391 -+ .elevator_exit_icq_fn = bfq_exit_icq,
27392 -+ .elevator_set_req_fn = bfq_set_request,
27393 -+ .elevator_put_req_fn = bfq_put_request,
27394 -+ .elevator_may_queue_fn = bfq_may_queue,
27395 -+ .elevator_init_fn = bfq_init_queue,
27396 -+ .elevator_exit_fn = bfq_exit_queue,
27397 -+ },
27398 -+ .icq_size = sizeof(struct bfq_io_cq),
27399 -+ .icq_align = __alignof__(struct bfq_io_cq),
27400 -+ .elevator_attrs = bfq_attrs,
27401 -+ .elevator_name = "bfq",
27402 -+ .elevator_owner = THIS_MODULE,
27403 -+};
27404 -+
27405 -+static int __init bfq_init(void)
27406 -+{
27407 -+ /*
27408 -+ * Can be 0 on HZ < 1000 setups.
27409 -+ */
27410 -+ if (bfq_slice_idle == 0)
27411 -+ bfq_slice_idle = 1;
27412 -+
27413 -+ if (bfq_timeout_async == 0)
27414 -+ bfq_timeout_async = 1;
27415 -+
27416 -+ if (bfq_slab_setup())
27417 -+ return -ENOMEM;
27418 -+
27419 -+ /*
27420 -+ * Times to load large popular applications for the typical systems
27421 -+ * installed on the reference devices (see the comments before the
27422 -+ * definitions of the two arrays).
27423 -+ */
27424 -+ T_slow[0] = msecs_to_jiffies(2600);
27425 -+ T_slow[1] = msecs_to_jiffies(1000);
27426 -+ T_fast[0] = msecs_to_jiffies(5500);
27427 -+ T_fast[1] = msecs_to_jiffies(2000);
27428 -+
27429 -+ /*
27430 -+ * Thresholds that determine the switch between speed classes (see
27431 -+ * the comments before the definition of the array).
27432 -+ */
27433 -+ device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
27434 -+ device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
27435 -+
27436 -+ elv_register(&iosched_bfq);
27437 -+	pr_info("BFQ I/O-scheduler version: v7r7\n");
27438 -+
27439 -+ return 0;
27440 -+}
27441 -+
27442 -+static void __exit bfq_exit(void)
27443 -+{
27444 -+ elv_unregister(&iosched_bfq);
27445 -+ bfq_slab_kill();
27446 -+}
27447 -+
27448 -+module_init(bfq_init);
27449 -+module_exit(bfq_exit);
27450 -+
27451 -+MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
27452 -+MODULE_LICENSE("GPL");
27453 -diff --git a/block/bfq-sched.c b/block/bfq-sched.c
27454 -new file mode 100644
27455 -index 0000000..2931563
27456 ---- /dev/null
27457 -+++ b/block/bfq-sched.c
27458 -@@ -0,0 +1,1214 @@
27459 -+/*
27460 -+ * BFQ: Hierarchical B-WF2Q+ scheduler.
27461 -+ *
27462 -+ * Based on ideas and code from CFQ:
27463 -+ * Copyright (C) 2003 Jens Axboe <axboe@××××××.dk>
27464 -+ *
27465 -+ * Copyright (C) 2008 Fabio Checconi <fabio@×××××××××××××.it>
27466 -+ * Paolo Valente <paolo.valente@×××××××.it>
27467 -+ *
27468 -+ * Copyright (C) 2010 Paolo Valente <paolo.valente@×××××××.it>
27469 -+ */
27470 -+
27471 -+#ifdef CONFIG_CGROUP_BFQIO
27472 -+#define for_each_entity(entity) \
27473 -+ for (; entity != NULL; entity = entity->parent)
27474 -+
27475 -+#define for_each_entity_safe(entity, parent) \
27476 -+ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
27477 -+
27478 -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
27479 -+ int extract,
27480 -+ struct bfq_data *bfqd);
27481 -+
27482 -+static inline void bfq_update_budget(struct bfq_entity *next_in_service)
27483 -+{
27484 -+ struct bfq_entity *bfqg_entity;
27485 -+ struct bfq_group *bfqg;
27486 -+ struct bfq_sched_data *group_sd;
27487 -+
27488 -+ BUG_ON(next_in_service == NULL);
27489 -+
27490 -+ group_sd = next_in_service->sched_data;
27491 -+
27492 -+ bfqg = container_of(group_sd, struct bfq_group, sched_data);
27493 -+ /*
27494 -+ * bfq_group's my_entity field is not NULL only if the group
27495 -+ * is not the root group. We must not touch the root entity
27496 -+ * as it must never become an in-service entity.
27497 -+ */
27498 -+ bfqg_entity = bfqg->my_entity;
27499 -+ if (bfqg_entity != NULL)
27500 -+ bfqg_entity->budget = next_in_service->budget;
27501 -+}
27502 -+
27503 -+static int bfq_update_next_in_service(struct bfq_sched_data *sd)
27504 -+{
27505 -+ struct bfq_entity *next_in_service;
27506 -+
27507 -+ if (sd->in_service_entity != NULL)
27508 -+ /* will update/requeue at the end of service */
27509 -+ return 0;
27510 -+
27511 -+ /*
27512 -+ * NOTE: this can be improved in many ways, such as returning
27513 -+	 * 1 (and thus propagating the update upwards) only when the
27514 -+ * budget changes, or caching the bfqq that will be scheduled
27515 -+ * next from this subtree. By now we worry more about
27516 -+ * correctness than about performance...
27517 -+ */
27518 -+ next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
27519 -+ sd->next_in_service = next_in_service;
27520 -+
27521 -+ if (next_in_service != NULL)
27522 -+ bfq_update_budget(next_in_service);
27523 -+
27524 -+ return 1;
27525 -+}
27526 -+
27527 -+static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
27528 -+ struct bfq_entity *entity)
27529 -+{
27530 -+ BUG_ON(sd->next_in_service != entity);
27531 -+}
27532 -+#else
27533 -+#define for_each_entity(entity) \
27534 -+ for (; entity != NULL; entity = NULL)
27535 -+
27536 -+#define for_each_entity_safe(entity, parent) \
27537 -+ for (parent = NULL; entity != NULL; entity = parent)
27538 -+
27539 -+static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
27540 -+{
27541 -+ return 0;
27542 -+}
27543 -+
27544 -+static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
27545 -+ struct bfq_entity *entity)
27546 -+{
27547 -+}
27548 -+
27549 -+static inline void bfq_update_budget(struct bfq_entity *next_in_service)
27550 -+{
27551 -+}
27552 -+#endif
27553 -+
27554 -+/*
27555 -+ * Shift for timestamp calculations. This actually limits the maximum
27556 -+ * service allowed in one timestamp delta (small shift values increase it),
27557 -+ * the maximum total weight that can be used for the queues in the system
27558 -+ * (big shift values increase it), and the period of virtual time
27559 -+ * wraparounds.
27560 -+ */
27561 -+#define WFQ_SERVICE_SHIFT 22
27562 -+
27563 -+/**
27564 -+ * bfq_gt - compare two timestamps.
27565 -+ * @a: first ts.
27566 -+ * @b: second ts.
27567 -+ *
27568 -+ * Return @a > @b, dealing with wrapping correctly.
27569 -+ */
27570 -+static inline int bfq_gt(u64 a, u64 b)
27571 -+{
27572 -+ return (s64)(a - b) > 0;
27573 -+}
27574 -+
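bfq_gt() is the standard wrap-safe comparison for free-running counters: subtract in the unsigned domain and reinterpret the difference as signed, which keeps the ordering correct whenever the two timestamps are within half the counter range (2^63 here) of each other, even across a wraparound. A tiny self-check:

    #include <stdio.h>
    #include <stdint.h>

    static int ts_gt(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) > 0;        /* wrap-safe within 2^63 */
    }

    int main(void)
    {
        uint64_t before = UINT64_MAX - 5;   /* just before wraparound */
        uint64_t after  = 10;               /* 16 ticks later, wrapped */

        printf("naive after > before: %d\n", after > before);      /* 0: wrong */
        printf("ts_gt(after, before): %d\n", ts_gt(after, before)); /* 1 */
        return 0;
    }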
27575 -+static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
27576 -+{
27577 -+ struct bfq_queue *bfqq = NULL;
27578 -+
27579 -+ BUG_ON(entity == NULL);
27580 -+
27581 -+ if (entity->my_sched_data == NULL)
27582 -+ bfqq = container_of(entity, struct bfq_queue, entity);
27583 -+
27584 -+ return bfqq;
27585 -+}
27586 -+
27587 -+
27588 -+/**
27589 -+ * bfq_delta - map service into the virtual time domain.
27590 -+ * @service: amount of service.
27591 -+ * @weight: scale factor (weight of an entity or weight sum).
27592 -+ */
27593 -+static inline u64 bfq_delta(unsigned long service,
27594 -+ unsigned long weight)
27595 -+{
27596 -+ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
27597 -+
27598 -+ do_div(d, weight);
27599 -+ return d;
27600 -+}
27601 -+
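bfq_delta() computes service/weight in 64-bit fixed point with WFQ_SERVICE_SHIFT (22) fractional bits: shifting before dividing preserves precision for small services against large weights, which is exactly the trade-off the WFQ_SERVICE_SHIFT comment above describes (do_div is simply the kernel's 64-by-32 division helper). A worked example of the scaling, showing that doubling the weight halves the virtual-time advance:

    #include <stdio.h>
    #include <stdint.h>

    #define WFQ_SERVICE_SHIFT 22

    static uint64_t delta(unsigned long service, unsigned long weight)
    {
        return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
    }

    int main(void)
    {
        printf("delta(8, w=1) = %llu\n",
               (unsigned long long)delta(8, 1));   /* 8 << 22 = 33554432 */
        printf("delta(8, w=2) = %llu\n",
               (unsigned long long)delta(8, 2));   /* 16777216 */
        return 0;
    }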
27602 -+/**
27603 -+ * bfq_calc_finish - assign the finish time to an entity.
27604 -+ * @entity: the entity to act upon.
27605 -+ * @service: the service to be charged to the entity.
27606 -+ */
27607 -+static inline void bfq_calc_finish(struct bfq_entity *entity,
27608 -+ unsigned long service)
27609 -+{
27610 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
27611 -+
27612 -+ BUG_ON(entity->weight == 0);
27613 -+
27614 -+ entity->finish = entity->start +
27615 -+ bfq_delta(service, entity->weight);
27616 -+
27617 -+ if (bfqq != NULL) {
27618 -+ bfq_log_bfqq(bfqq->bfqd, bfqq,
27619 -+ "calc_finish: serv %lu, w %d",
27620 -+ service, entity->weight);
27621 -+ bfq_log_bfqq(bfqq->bfqd, bfqq,
27622 -+ "calc_finish: start %llu, finish %llu, delta %llu",
27623 -+ entity->start, entity->finish,
27624 -+ bfq_delta(service, entity->weight));
27625 -+ }
27626 -+}
27627 -+
27628 -+/**
27629 -+ * bfq_entity_of - get an entity from a node.
27630 -+ * @node: the node field of the entity.
27631 -+ *
27632 -+ * Convert a node pointer to the corresponding entity. This is used only
27633 -+ * to simplify the logic of some functions and not as the generic
27634 -+ * conversion mechanism because, e.g., in the tree walking functions,
27635 -+ * the check for a %NULL value would be redundant.
27636 -+ */
27637 -+static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
27638 -+{
27639 -+ struct bfq_entity *entity = NULL;
27640 -+
27641 -+ if (node != NULL)
27642 -+ entity = rb_entry(node, struct bfq_entity, rb_node);
27643 -+
27644 -+ return entity;
27645 -+}
27646 -+
27647 -+/**
27648 -+ * bfq_extract - remove an entity from a tree.
27649 -+ * @root: the tree root.
27650 -+ * @entity: the entity to remove.
27651 -+ */
27652 -+static inline void bfq_extract(struct rb_root *root,
27653 -+ struct bfq_entity *entity)
27654 -+{
27655 -+ BUG_ON(entity->tree != root);
27656 -+
27657 -+ entity->tree = NULL;
27658 -+ rb_erase(&entity->rb_node, root);
27659 -+}
27660 -+
27661 -+/**
27662 -+ * bfq_idle_extract - extract an entity from the idle tree.
27663 -+ * @st: the service tree of the owning @entity.
27664 -+ * @entity: the entity being removed.
27665 -+ */
27666 -+static void bfq_idle_extract(struct bfq_service_tree *st,
27667 -+ struct bfq_entity *entity)
27668 -+{
27669 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
27670 -+ struct rb_node *next;
27671 -+
27672 -+ BUG_ON(entity->tree != &st->idle);
27673 -+
27674 -+ if (entity == st->first_idle) {
27675 -+ next = rb_next(&entity->rb_node);
27676 -+ st->first_idle = bfq_entity_of(next);
27677 -+ }
27678 -+
27679 -+ if (entity == st->last_idle) {
27680 -+ next = rb_prev(&entity->rb_node);
27681 -+ st->last_idle = bfq_entity_of(next);
27682 -+ }
27683 -+
27684 -+ bfq_extract(&st->idle, entity);
27685 -+
27686 -+ if (bfqq != NULL)
27687 -+ list_del(&bfqq->bfqq_list);
27688 -+}
27689 -+
27690 -+/**
27691 -+ * bfq_insert - generic tree insertion.
27692 -+ * @root: tree root.
27693 -+ * @entity: entity to insert.
27694 -+ *
27695 -+ * This is used for the idle and the active tree, since they are both
27696 -+ * ordered by finish time.
27697 -+ */
27698 -+static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
27699 -+{
27700 -+ struct bfq_entity *entry;
27701 -+ struct rb_node **node = &root->rb_node;
27702 -+ struct rb_node *parent = NULL;
27703 -+
27704 -+ BUG_ON(entity->tree != NULL);
27705 -+
27706 -+ while (*node != NULL) {
27707 -+ parent = *node;
27708 -+ entry = rb_entry(parent, struct bfq_entity, rb_node);
27709 -+
27710 -+ if (bfq_gt(entry->finish, entity->finish))
27711 -+ node = &parent->rb_left;
27712 -+ else
27713 -+ node = &parent->rb_right;
27714 -+ }
27715 -+
27716 -+ rb_link_node(&entity->rb_node, parent, node);
27717 -+ rb_insert_color(&entity->rb_node, root);
27718 -+
27719 -+ entity->tree = root;
27720 -+}
27721 -+
27722 -+/**
27723 -+ * bfq_update_min - update the min_start field of an entity.
27724 -+ * @entity: the entity to update.
27725 -+ * @node: one of its children.
27726 -+ *
27727 -+ * This function is called when @entity may store an invalid value for
27728 -+ * min_start due to updates to the active tree. The function assumes
27729 -+ * that the subtree rooted at @node (which may be its left or its right
27730 -+ * child) has a valid min_start value.
27731 -+ */
27732 -+static inline void bfq_update_min(struct bfq_entity *entity,
27733 -+ struct rb_node *node)
27734 -+{
27735 -+ struct bfq_entity *child;
27736 -+
27737 -+ if (node != NULL) {
27738 -+ child = rb_entry(node, struct bfq_entity, rb_node);
27739 -+ if (bfq_gt(entity->min_start, child->min_start))
27740 -+ entity->min_start = child->min_start;
27741 -+ }
27742 -+}
27743 -+
27744 -+/**
27745 -+ * bfq_update_active_node - recalculate min_start.
27746 -+ * @node: the node to update.
27747 -+ *
27748 -+ * @node may have changed position or one of its children may have moved,
27749 -+ * this function updates its min_start value. The left and right subtrees
27750 -+ * are assumed to hold a correct min_start value.
27751 -+ */
27752 -+static inline void bfq_update_active_node(struct rb_node *node)
27753 -+{
27754 -+ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
27755 -+
27756 -+ entity->min_start = entity->start;
27757 -+ bfq_update_min(entity, node->rb_right);
27758 -+ bfq_update_min(entity, node->rb_left);
27759 -+}
27760 -+
27761 -+/**
27762 -+ * bfq_update_active_tree - update min_start for the whole active tree.
27763 -+ * @node: the starting node.
27764 -+ *
27765 -+ * @node must be the deepest modified node after an update. This function
27766 -+ * updates its min_start using the values held by its children, assuming
27767 -+ * that they did not change, and then updates all the nodes that may have
27768 -+ * changed in the path to the root. The only nodes that may have changed
27769 -+ * are the ones in the path or their siblings.
27770 -+ */
27771 -+static void bfq_update_active_tree(struct rb_node *node)
27772 -+{
27773 -+ struct rb_node *parent;
27774 -+
27775 -+up:
27776 -+ bfq_update_active_node(node);
27777 -+
27778 -+ parent = rb_parent(node);
27779 -+ if (parent == NULL)
27780 -+ return;
27781 -+
27782 -+ if (node == parent->rb_left && parent->rb_right != NULL)
27783 -+ bfq_update_active_node(parent->rb_right);
27784 -+ else if (parent->rb_left != NULL)
27785 -+ bfq_update_active_node(parent->rb_left);
27786 -+
27787 -+ node = parent;
27788 -+ goto up;
27789 -+}
27790 -+
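bfq_update_active_node() and bfq_update_active_tree() maintain the augmentation that lets the active tree answer two questions at once: the tree is keyed by finish time, but every node also caches the minimum start time over its subtree, and after any structural change that cache is rebuilt bottom-up along the affected path (plus, in the rb-tree case, the sibling at each level, since rotations may have moved it). The invariant in isolation, over a plain unbalanced node (rb-tree rebalancing omitted):

    #include <stdio.h>
    #include <stdint.h>

    struct node {
        uint64_t start, min_start;
        struct node *left, *right, *parent;
    };

    /* Recompute one node's cached subtree minimum from its children. */
    static void update_node(struct node *n)
    {
        n->min_start = n->start;
        if (n->left && n->left->min_start < n->min_start)
            n->min_start = n->left->min_start;
        if (n->right && n->right->min_start < n->min_start)
            n->min_start = n->right->min_start;
    }

    /* Propagate from the deepest modified node up to the root. */
    static void update_path(struct node *n)
    {
        for (; n != NULL; n = n->parent)
            update_node(n);
    }

    int main(void)
    {
        struct node a = { 50, 50, 0, 0, 0 }, b = { 20, 20, 0, 0, 0 };

        a.left = &b;
        b.parent = &a;
        update_path(&b);
        printf("root min_start = %llu\n",           /* 20 */
               (unsigned long long)a.min_start);
        return 0;
    }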
27791 -+static void bfq_weights_tree_add(struct bfq_data *bfqd,
27792 -+ struct bfq_entity *entity,
27793 -+ struct rb_root *root);
27794 -+
27795 -+static void bfq_weights_tree_remove(struct bfq_data *bfqd,
27796 -+ struct bfq_entity *entity,
27797 -+ struct rb_root *root);
27798 -+
27799 -+
27800 -+/**
27801 -+ * bfq_active_insert - insert an entity in the active tree of its
27802 -+ * group/device.
27803 -+ * @st: the service tree of the entity.
27804 -+ * @entity: the entity being inserted.
27805 -+ *
27806 -+ * The active tree is ordered by finish time, but an extra key is kept
27807 -+ * in each node, containing the minimum value for the start times of
27808 -+ * its children (and the node itself), so it's possible to search for
27809 -+ * the eligible node with the lowest finish time in logarithmic time.
27810 -+ */
27811 -+static void bfq_active_insert(struct bfq_service_tree *st,
27812 -+ struct bfq_entity *entity)
27813 -+{
27814 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
27815 -+ struct rb_node *node = &entity->rb_node;
27816 -+#ifdef CONFIG_CGROUP_BFQIO
27817 -+ struct bfq_sched_data *sd = NULL;
27818 -+ struct bfq_group *bfqg = NULL;
27819 -+ struct bfq_data *bfqd = NULL;
27820 -+#endif
27821 -+
27822 -+ bfq_insert(&st->active, entity);
27823 -+
27824 -+ if (node->rb_left != NULL)
27825 -+ node = node->rb_left;
27826 -+ else if (node->rb_right != NULL)
27827 -+ node = node->rb_right;
27828 -+
27829 -+ bfq_update_active_tree(node);
27830 -+
27831 -+#ifdef CONFIG_CGROUP_BFQIO
27832 -+ sd = entity->sched_data;
27833 -+ bfqg = container_of(sd, struct bfq_group, sched_data);
27834 -+ BUG_ON(!bfqg);
27835 -+ bfqd = (struct bfq_data *)bfqg->bfqd;
27836 -+#endif
27837 -+ if (bfqq != NULL)
27838 -+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
27839 -+#ifdef CONFIG_CGROUP_BFQIO
27840 -+ else { /* bfq_group */
27841 -+ BUG_ON(!bfqd);
27842 -+ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
27843 -+ }
27844 -+ if (bfqg != bfqd->root_group) {
27845 -+ BUG_ON(!bfqg);
27846 -+ BUG_ON(!bfqd);
27847 -+ bfqg->active_entities++;
27848 -+ if (bfqg->active_entities == 2)
27849 -+ bfqd->active_numerous_groups++;
27850 -+ }
27851 -+#endif
27852 -+}
27853 -+
27854 -+/**
27855 -+ * bfq_ioprio_to_weight - calc a weight from an ioprio.
27856 -+ * @ioprio: the ioprio value to convert.
27857 -+ */
27858 -+static inline unsigned short bfq_ioprio_to_weight(int ioprio)
27859 -+{
27860 -+ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
27861 -+ return IOPRIO_BE_NR - ioprio;
27862 -+}
27863 -+
27864 -+/**
27865 -+ * bfq_weight_to_ioprio - calc an ioprio from a weight.
27866 -+ * @weight: the weight value to convert.
27867 -+ *
27868 -+ * To preserve as much as possible the old ioprio-only user interface,
27869 -+ * 0 is used as an escape ioprio value for weights (numerically) equal
27870 -+ * to or larger than IOPRIO_BE_NR.
27871 -+ */
27872 -+static inline unsigned short bfq_weight_to_ioprio(int weight)
27873 -+{
27874 -+ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
27875 -+ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
27876 -+}
27877 -+
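bfq_ioprio_to_weight() and bfq_weight_to_ioprio() above map the eight BE ioprio levels onto weights by simple reflection, weight = IOPRIO_BE_NR - ioprio, so ioprio 0 (the highest priority) gets the largest such weight; the reverse direction clamps to ioprio 0 for any weight of IOPRIO_BE_NR or more, which the legacy interface cannot represent. The round trip, spelled out:

    #include <stdio.h>

    #define IOPRIO_BE_NR 8

    static unsigned short ioprio_to_weight(int ioprio)
    {
        return IOPRIO_BE_NR - ioprio;       /* ioprio 0..7 -> weight 8..1 */
    }

    static unsigned short weight_to_ioprio(int weight)
    {
        int ioprio = IOPRIO_BE_NR - weight;

        return ioprio < 0 ? 0 : ioprio;     /* weights >= 8 collapse to 0 */
    }

    int main(void)
    {
        for (int p = 0; p < IOPRIO_BE_NR; p++)
            printf("ioprio %d <-> weight %u (back to %u)\n",
                   p, ioprio_to_weight(p),
                   weight_to_ioprio(ioprio_to_weight(p)));
        printf("weight 100 -> ioprio %u\n", weight_to_ioprio(100)); /* 0 */
        return 0;
    }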
27878 -+static inline void bfq_get_entity(struct bfq_entity *entity)
27879 -+{
27880 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
27881 -+
27882 -+ if (bfqq != NULL) {
27883 -+ atomic_inc(&bfqq->ref);
27884 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
27885 -+ bfqq, atomic_read(&bfqq->ref));
27886 -+ }
27887 -+}
27888 -+
27889 -+/**
27890 -+ * bfq_find_deepest - find the deepest node that an extraction can modify.
27891 -+ * @node: the node being removed.
27892 -+ *
27893 -+ * Do the first step of an extraction in an rb tree, looking for the
27894 -+ * node that will replace @node, and returning the deepest node that
27895 -+ * the following modifications to the tree can touch. If @node is the
27896 -+ * last node in the tree, return %NULL.
27897 -+ */
27898 -+static struct rb_node *bfq_find_deepest(struct rb_node *node)
27899 -+{
27900 -+ struct rb_node *deepest;
27901 -+
27902 -+ if (node->rb_right == NULL && node->rb_left == NULL)
27903 -+ deepest = rb_parent(node);
27904 -+ else if (node->rb_right == NULL)
27905 -+ deepest = node->rb_left;
27906 -+ else if (node->rb_left == NULL)
27907 -+ deepest = node->rb_right;
27908 -+ else {
27909 -+ deepest = rb_next(node);
27910 -+ if (deepest->rb_right != NULL)
27911 -+ deepest = deepest->rb_right;
27912 -+ else if (rb_parent(deepest) != node)
27913 -+ deepest = rb_parent(deepest);
27914 -+ }
27915 -+
27916 -+ return deepest;
27917 -+}
27918 -+
27919 -+/**
27920 -+ * bfq_active_extract - remove an entity from the active tree.
27921 -+ * @st: the service_tree containing the tree.
27922 -+ * @entity: the entity being removed.
27923 -+ */
27924 -+static void bfq_active_extract(struct bfq_service_tree *st,
27925 -+ struct bfq_entity *entity)
27926 -+{
27927 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
27928 -+ struct rb_node *node;
27929 -+#ifdef CONFIG_CGROUP_BFQIO
27930 -+ struct bfq_sched_data *sd = NULL;
27931 -+ struct bfq_group *bfqg = NULL;
27932 -+ struct bfq_data *bfqd = NULL;
27933 -+#endif
27934 -+
27935 -+ node = bfq_find_deepest(&entity->rb_node);
27936 -+ bfq_extract(&st->active, entity);
27937 -+
27938 -+ if (node != NULL)
27939 -+ bfq_update_active_tree(node);
27940 -+
27941 -+#ifdef CONFIG_CGROUP_BFQIO
27942 -+ sd = entity->sched_data;
27943 -+ bfqg = container_of(sd, struct bfq_group, sched_data);
27944 -+ BUG_ON(!bfqg);
27945 -+ bfqd = (struct bfq_data *)bfqg->bfqd;
27946 -+#endif
27947 -+ if (bfqq != NULL)
27948 -+ list_del(&bfqq->bfqq_list);
27949 -+#ifdef CONFIG_CGROUP_BFQIO
27950 -+ else { /* bfq_group */
27951 -+ BUG_ON(!bfqd);
27952 -+ bfq_weights_tree_remove(bfqd, entity,
27953 -+ &bfqd->group_weights_tree);
27954 -+ }
27955 -+ if (bfqg != bfqd->root_group) {
27956 -+ BUG_ON(!bfqg);
27957 -+ BUG_ON(!bfqd);
27958 -+ BUG_ON(!bfqg->active_entities);
27959 -+ bfqg->active_entities--;
27960 -+ if (bfqg->active_entities == 1) {
27961 -+ BUG_ON(!bfqd->active_numerous_groups);
27962 -+ bfqd->active_numerous_groups--;
27963 -+ }
27964 -+ }
27965 -+#endif
27966 -+}
27967 -+
27968 -+/**
27969 -+ * bfq_idle_insert - insert an entity into the idle tree.
27970 -+ * @st: the service tree containing the tree.
27971 -+ * @entity: the entity to insert.
27972 -+ */
27973 -+static void bfq_idle_insert(struct bfq_service_tree *st,
27974 -+ struct bfq_entity *entity)
27975 -+{
27976 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
27977 -+ struct bfq_entity *first_idle = st->first_idle;
27978 -+ struct bfq_entity *last_idle = st->last_idle;
27979 -+
27980 -+ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
27981 -+ st->first_idle = entity;
27982 -+ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
27983 -+ st->last_idle = entity;
27984 -+
27985 -+ bfq_insert(&st->idle, entity);
27986 -+
27987 -+ if (bfqq != NULL)
27988 -+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
27989 -+}
27990 -+
27991 -+/**
27992 -+ * bfq_forget_entity - remove an entity from the wfq trees.
27993 -+ * @st: the service tree.
27994 -+ * @entity: the entity being removed.
27995 -+ *
27996 -+ * Update the device status and forget everything about @entity, putting
27997 -+ * the device reference to it, if it is a queue. Entities belonging to
27998 -+ * groups are not refcounted.
27999 -+ */
28000 -+static void bfq_forget_entity(struct bfq_service_tree *st,
28001 -+ struct bfq_entity *entity)
28002 -+{
28003 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
28004 -+ struct bfq_sched_data *sd;
28005 -+
28006 -+ BUG_ON(!entity->on_st);
28007 -+
28008 -+ entity->on_st = 0;
28009 -+ st->wsum -= entity->weight;
28010 -+ if (bfqq != NULL) {
28011 -+ sd = entity->sched_data;
28012 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
28013 -+ bfqq, atomic_read(&bfqq->ref));
28014 -+ bfq_put_queue(bfqq);
28015 -+ }
28016 -+}
28017 -+
28018 -+/**
28019 -+ * bfq_put_idle_entity - release the idle tree ref of an entity.
28020 -+ * @st: service tree for the entity.
28021 -+ * @entity: the entity being released.
28022 -+ */
28023 -+static void bfq_put_idle_entity(struct bfq_service_tree *st,
28024 -+ struct bfq_entity *entity)
28025 -+{
28026 -+ bfq_idle_extract(st, entity);
28027 -+ bfq_forget_entity(st, entity);
28028 -+}
28029 -+
28030 -+/**
28031 -+ * bfq_forget_idle - update the idle tree if necessary.
28032 -+ * @st: the service tree to act upon.
28033 -+ *
28034 -+ * To preserve the global O(log N) complexity we only remove one entry here;
28035 -+ * as the idle tree will not grow indefinitely this can be done safely.
28036 -+ */
28037 -+static void bfq_forget_idle(struct bfq_service_tree *st)
28038 -+{
28039 -+ struct bfq_entity *first_idle = st->first_idle;
28040 -+ struct bfq_entity *last_idle = st->last_idle;
28041 -+
28042 -+ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
28043 -+ !bfq_gt(last_idle->finish, st->vtime)) {
28044 -+ /*
28045 -+ * Forget the whole idle tree, increasing the vtime past
28046 -+ * the last finish time of idle entities.
28047 -+ */
28048 -+ st->vtime = last_idle->finish;
28049 -+ }
28050 -+
28051 -+ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
28052 -+ bfq_put_idle_entity(st, first_idle);
28053 -+}
28054 -+
28055 -+static struct bfq_service_tree *
28056 -+__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
28057 -+ struct bfq_entity *entity)
28058 -+{
28059 -+ struct bfq_service_tree *new_st = old_st;
28060 -+
28061 -+ if (entity->ioprio_changed) {
28062 -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
28063 -+ unsigned short prev_weight, new_weight;
28064 -+ struct bfq_data *bfqd = NULL;
28065 -+ struct rb_root *root;
28066 -+#ifdef CONFIG_CGROUP_BFQIO
28067 -+ struct bfq_sched_data *sd;
28068 -+ struct bfq_group *bfqg;
28069 -+#endif
28070 -+
28071 -+ if (bfqq != NULL)
28072 -+ bfqd = bfqq->bfqd;
28073 -+#ifdef CONFIG_CGROUP_BFQIO
28074 -+ else {
28075 -+ sd = entity->my_sched_data;
28076 -+ bfqg = container_of(sd, struct bfq_group, sched_data);
28077 -+ BUG_ON(!bfqg);
28078 -+ bfqd = (struct bfq_data *)bfqg->bfqd;
28079 -+ BUG_ON(!bfqd);
28080 -+ }
28081 -+#endif
28082 -+
28083 -+ BUG_ON(old_st->wsum < entity->weight);
28084 -+ old_st->wsum -= entity->weight;
28085 -+
28086 -+ if (entity->new_weight != entity->orig_weight) {
28087 -+ if (entity->new_weight < BFQ_MIN_WEIGHT ||
28088 -+ entity->new_weight > BFQ_MAX_WEIGHT) {
28089 -+ printk(KERN_CRIT "update_weight_prio: "
28090 -+ "new_weight %d\n",
28091 -+ entity->new_weight);
28092 -+ BUG();
28093 -+ }
28094 -+ entity->orig_weight = entity->new_weight;
28095 -+ entity->ioprio =
28096 -+ bfq_weight_to_ioprio(entity->orig_weight);
28097 -+ } else if (entity->new_ioprio != entity->ioprio) {
28098 -+ entity->ioprio = entity->new_ioprio;
28099 -+ entity->orig_weight =
28100 -+ bfq_ioprio_to_weight(entity->ioprio);
28101 -+ } else
28102 -+ entity->new_weight = entity->orig_weight =
28103 -+ bfq_ioprio_to_weight(entity->ioprio);
28104 -+
28105 -+ entity->ioprio_class = entity->new_ioprio_class;
28106 -+ entity->ioprio_changed = 0;
28107 -+
28108 -+ /*
28109 -+ * NOTE: here we may be changing the weight too early,
28110 -+ * this will cause unfairness. The correct approach
28111 -+ * would have required additional complexity to defer
28112 -+ * weight changes to the proper time instants (i.e.,
28113 -+ * when entity->finish <= old_st->vtime).
28114 -+ */
28115 -+ new_st = bfq_entity_service_tree(entity);
28116 -+
28117 -+ prev_weight = entity->weight;
28118 -+ new_weight = entity->orig_weight *
28119 -+ (bfqq != NULL ? bfqq->wr_coeff : 1);
28120 -+ /*
28121 -+ * If the weight of the entity changes, remove the entity
28122 -+ * from its old weight counter (if there is a counter
28123 -+ * associated with the entity), and add it to the counter
28124 -+ * associated with its new weight.
28125 -+ */
28126 -+ if (prev_weight != new_weight) {
28127 -+ root = bfqq ? &bfqd->queue_weights_tree :
28128 -+ &bfqd->group_weights_tree;
28129 -+ bfq_weights_tree_remove(bfqd, entity, root);
28130 -+ }
28131 -+ entity->weight = new_weight;
28132 -+ /*
28133 -+ * Add the entity to its weights tree only if it is
28134 -+ * not associated with a weight-raised queue.
28135 -+ */
28136 -+ if (prev_weight != new_weight &&
28137 -+ (bfqq ? bfqq->wr_coeff == 1 : 1))
28138 -+ /* If we get here, root has been initialized. */
28139 -+ bfq_weights_tree_add(bfqd, entity, root);
28140 -+
28141 -+ new_st->wsum += entity->weight;
28142 -+
28143 -+ if (new_st != old_st)
28144 -+ entity->start = new_st->vtime;
28145 -+ }
28146 -+
28147 -+ return new_st;
28148 -+}
28149 -+
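As an aside on the conversions used above: in this BFQ version the
ioprio/weight mappings are simple linear functions of IOPRIO_BE_NR. The
standalone sketch below assumes the usual definitions
bfq_ioprio_to_weight(p) = IOPRIO_BE_NR - p and its clamped inverse; treat
the exact formulas as assumptions rather than a quote of the patch.

    /* Sketch of the ioprio <-> weight round trip performed by
     * __bfq_entity_update_weight_prio() above; mappings are assumed. */
    #include <stdio.h>

    #define IOPRIO_BE_NR 8

    static unsigned short ioprio_to_weight(unsigned short ioprio)
    {
        return IOPRIO_BE_NR - ioprio;   /* higher priority -> larger weight */
    }

    static unsigned short weight_to_ioprio(unsigned short weight)
    {
        return weight < IOPRIO_BE_NR ? IOPRIO_BE_NR - weight : 0;
    }

    int main(void)
    {
        unsigned short p;

        for (p = 0; p < IOPRIO_BE_NR; p++)
            printf("ioprio %d -> weight %d -> ioprio %d\n",
                   p, ioprio_to_weight(p),
                   weight_to_ioprio(ioprio_to_weight(p)));
        return 0;
    }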
28150 -+/**
28151 -+ * bfq_bfqq_served - update the scheduler status after selection for
28152 -+ * service.
28153 -+ * @bfqq: the queue being served.
28154 -+ * @served: bytes to transfer.
28155 -+ *
28156 -+ * NOTE: this can be optimized, as the timestamps of upper level entities
28157 -+ * are synchronized every time a new bfqq is selected for service. For now,
28158 -+ * we keep it this way to better check consistency.
28159 -+ */
28160 -+static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
28161 -+{
28162 -+ struct bfq_entity *entity = &bfqq->entity;
28163 -+ struct bfq_service_tree *st;
28164 -+
28165 -+ for_each_entity(entity) {
28166 -+ st = bfq_entity_service_tree(entity);
28167 -+
28168 -+ entity->service += served;
28169 -+ BUG_ON(entity->service > entity->budget);
28170 -+ BUG_ON(st->wsum == 0);
28171 -+
28172 -+ st->vtime += bfq_delta(served, st->wsum);
28173 -+ bfq_forget_idle(st);
28174 -+ }
28175 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
28176 -+}
28177 -+
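The vtime bump above comes from bfq_delta(), which elsewhere in this patch
series is a fixed-point division, (service << WFQ_SERVICE_SHIFT) / wsum.
The userspace sketch below reproduces that arithmetic (the shift value of
22 and the helper definition are taken as assumptions) to show that the
same service moves the virtual time half as far when the weight sum
doubles.

    /* Userspace model of the vtime advance in bfq_bfqq_served(). */
    #include <stdio.h>
    #include <stdint.h>

    #define WFQ_SERVICE_SHIFT 22   /* assumed, as in the BFQ sources */

    static uint64_t bfq_delta(unsigned long service, unsigned long weight)
    {
        return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
    }

    int main(void)
    {
        unsigned long served = 4096;   /* service received in this round */

        printf("wsum=100: vtime += %llu\n",
               (unsigned long long)bfq_delta(served, 100));
        printf("wsum=200: vtime += %llu\n",
               (unsigned long long)bfq_delta(served, 200));
        return 0;
    }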
28178 -+/**
28179 -+ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
28180 -+ * @bfqq: the queue that needs a service update.
28181 -+ *
28182 -+ * When it's not possible to be fair in the service domain, because
28183 -+ * a queue is not consuming its budget fast enough (the meaning of
28184 -+ * fast depends on the timeout parameter), we charge it a full
28185 -+ * budget. In this way we should obtain a sort of time-domain
28186 -+ * fairness among all the seeky/slow queues.
28187 -+ */
28188 -+static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
28189 -+{
28190 -+ struct bfq_entity *entity = &bfqq->entity;
28191 -+
28192 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
28193 -+
28194 -+ bfq_bfqq_served(bfqq, entity->budget - entity->service);
28195 -+}
28196 -+
28197 -+/**
28198 -+ * __bfq_activate_entity - activate an entity.
28199 -+ * @entity: the entity being activated.
28200 -+ *
28201 -+ * Called whenever an entity is activated, i.e., it is not active and one
28202 -+ * of its children receives a new request, or has to be reactivated due to
28203 -+ * budget exhaustion. It uses the entity's current budget (and the
28204 -+ * service it has received, if it is active) to calculate its
28205 -+ * timestamps.
28206 -+ */
28207 -+static void __bfq_activate_entity(struct bfq_entity *entity)
28208 -+{
28209 -+ struct bfq_sched_data *sd = entity->sched_data;
28210 -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
28211 -+
28212 -+ if (entity == sd->in_service_entity) {
28213 -+ BUG_ON(entity->tree != NULL);
28214 -+ /*
28215 -+ * If we are requeueing the current entity, we have
28216 -+ * to take care not to charge it for service it has
28217 -+ * not received.
28218 -+ */
28219 -+ bfq_calc_finish(entity, entity->service);
28220 -+ entity->start = entity->finish;
28221 -+ sd->in_service_entity = NULL;
28222 -+ } else if (entity->tree == &st->active) {
28223 -+ /*
28224 -+ * Requeueing an entity due to a change of some
28225 -+ * next_in_service entity below it. We reuse the
28226 -+ * old start time.
28227 -+ */
28228 -+ bfq_active_extract(st, entity);
28229 -+ } else if (entity->tree == &st->idle) {
28230 -+ /*
28231 -+ * Must be on the idle tree, bfq_idle_extract() will
28232 -+ * check for that.
28233 -+ */
28234 -+ bfq_idle_extract(st, entity);
28235 -+ entity->start = bfq_gt(st->vtime, entity->finish) ?
28236 -+ st->vtime : entity->finish;
28237 -+ } else {
28238 -+ /*
28239 -+ * The finish time of the entity may be invalid, and
28240 -+ * it is in the past for sure, otherwise the queue
28241 -+ * would have been on the idle tree.
28242 -+ */
28243 -+ entity->start = st->vtime;
28244 -+ st->wsum += entity->weight;
28245 -+ bfq_get_entity(entity);
28246 -+
28247 -+ BUG_ON(entity->on_st);
28248 -+ entity->on_st = 1;
28249 -+ }
28250 -+
28251 -+ st = __bfq_entity_update_weight_prio(st, entity);
28252 -+ bfq_calc_finish(entity, entity->budget);
28253 -+ bfq_active_insert(st, entity);
28254 -+}
28255 -+
28256 -+/**
28257 -+ * bfq_activate_entity - activate an entity and its ancestors if necessary.
28258 -+ * @entity: the entity to activate.
28259 -+ *
28260 -+ * Activate @entity and all the entities on the path from it to the root.
28261 -+ */
28262 -+static void bfq_activate_entity(struct bfq_entity *entity)
28263 -+{
28264 -+ struct bfq_sched_data *sd;
28265 -+
28266 -+ for_each_entity(entity) {
28267 -+ __bfq_activate_entity(entity);
28268 -+
28269 -+ sd = entity->sched_data;
28270 -+ if (!bfq_update_next_in_service(sd))
28271 -+ /*
28272 -+ * No need to propagate the activation to the
28273 -+ * upper entities, as they will be updated when
28274 -+ * the in-service entity is rescheduled.
28275 -+ */
28276 -+ break;
28277 -+ }
28278 -+}
28279 -+
28280 -+/**
28281 -+ * __bfq_deactivate_entity - deactivate an entity from its service tree.
28282 -+ * @entity: the entity to deactivate.
28283 -+ * @requeue: if false, the entity will not be put into the idle tree.
28284 -+ *
28285 -+ * Deactivate an entity, independently from its previous state. If the
28286 -+ * entity was not on a service tree just return, otherwise if it is on
28287 -+ * any scheduler tree, extract it from that tree and, if necessary
28288 -+ * and if the caller specified @requeue, put it on the idle tree.
28289 -+ *
28290 -+ * Return %1 if the caller should update the entity hierarchy, i.e.,
28291 -+ * if the entity was in service or if it was the next_in_service for
28292 -+ * its sched_data; return %0 otherwise.
28293 -+ */
28294 -+static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
28295 -+{
28296 -+ struct bfq_sched_data *sd = entity->sched_data;
28297 -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
28298 -+ int was_in_service = entity == sd->in_service_entity;
28299 -+ int ret = 0;
28300 -+
28301 -+ if (!entity->on_st)
28302 -+ return 0;
28303 -+
28304 -+ BUG_ON(was_in_service && entity->tree != NULL);
28305 -+
28306 -+ if (was_in_service) {
28307 -+ bfq_calc_finish(entity, entity->service);
28308 -+ sd->in_service_entity = NULL;
28309 -+ } else if (entity->tree == &st->active)
28310 -+ bfq_active_extract(st, entity);
28311 -+ else if (entity->tree == &st->idle)
28312 -+ bfq_idle_extract(st, entity);
28313 -+ else if (entity->tree != NULL)
28314 -+ BUG();
28315 -+
28316 -+ if (was_in_service || sd->next_in_service == entity)
28317 -+ ret = bfq_update_next_in_service(sd);
28318 -+
28319 -+ if (!requeue || !bfq_gt(entity->finish, st->vtime))
28320 -+ bfq_forget_entity(st, entity);
28321 -+ else
28322 -+ bfq_idle_insert(st, entity);
28323 -+
28324 -+ BUG_ON(sd->in_service_entity == entity);
28325 -+ BUG_ON(sd->next_in_service == entity);
28326 -+
28327 -+ return ret;
28328 -+}
28329 -+
28330 -+/**
28331 -+ * bfq_deactivate_entity - deactivate an entity.
28332 -+ * @entity: the entity to deactivate.
28333 -+ * @requeue: true if the entity can be put on the idle tree
28334 -+ */
28335 -+static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
28336 -+{
28337 -+ struct bfq_sched_data *sd;
28338 -+ struct bfq_entity *parent;
28339 -+
28340 -+ for_each_entity_safe(entity, parent) {
28341 -+ sd = entity->sched_data;
28342 -+
28343 -+ if (!__bfq_deactivate_entity(entity, requeue))
28344 -+ /*
28345 -+ * The parent entity is still backlogged, and
28346 -+ * we don't need to update it as it is still
28347 -+ * in service.
28348 -+ */
28349 -+ break;
28350 -+
28351 -+ if (sd->next_in_service != NULL)
28352 -+ /*
28353 -+ * The parent entity is still backlogged and
28354 -+ * the budgets on the path towards the root
28355 -+ * need to be updated.
28356 -+ */
28357 -+ goto update;
28358 -+
28359 -+ /*
28360 -+ * If we get here, the parent is no longer backlogged and
28361 -+ * we want to propagate the dequeue upwards.
28362 -+ */
28363 -+ requeue = 1;
28364 -+ }
28365 -+
28366 -+ return;
28367 -+
28368 -+update:
28369 -+ entity = parent;
28370 -+ for_each_entity(entity) {
28371 -+ __bfq_activate_entity(entity);
28372 -+
28373 -+ sd = entity->sched_data;
28374 -+ if (!bfq_update_next_in_service(sd))
28375 -+ break;
28376 -+ }
28377 -+}
28378 -+
28379 -+/**
28380 -+ * bfq_update_vtime - update vtime if necessary.
28381 -+ * @st: the service tree to act upon.
28382 -+ *
28383 -+ * If necessary update the service tree vtime to have at least one
28384 -+ * eligible entity, skipping to its start time. Assumes that the
28385 -+ * active tree of the device is not empty.
28386 -+ *
28387 -+ * NOTE: this hierarchical implementation updates vtimes quite often,
28388 -+ * so we may end up with reactivated processes getting timestamps after a
28389 -+ * vtime skip done because we needed a ->first_active entity on some
28390 -+ * intermediate node.
28391 -+ */
28392 -+static void bfq_update_vtime(struct bfq_service_tree *st)
28393 -+{
28394 -+ struct bfq_entity *entry;
28395 -+ struct rb_node *node = st->active.rb_node;
28396 -+
28397 -+ entry = rb_entry(node, struct bfq_entity, rb_node);
28398 -+ if (bfq_gt(entry->min_start, st->vtime)) {
28399 -+ st->vtime = entry->min_start;
28400 -+ bfq_forget_idle(st);
28401 -+ }
28402 -+}
28403 -+
28404 -+/**
28405 -+ * bfq_first_active_entity - find the eligible entity with
28406 -+ * the smallest finish time
28407 -+ * @st: the service tree to select from.
28408 -+ *
28409 -+ * This function searches for the first schedulable entity, starting from
28410 -+ * the root of the tree and descending left whenever that side contains a
28411 -+ * subtree with at least one eligible (start <= vtime) entity. The path on
28412 -+ * the right is followed only if a) the left subtree contains no eligible
28413 -+ * entities and b) no eligible entity has been found yet.
28414 -+ */
28415 -+static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
28416 -+{
28417 -+ struct bfq_entity *entry, *first = NULL;
28418 -+ struct rb_node *node = st->active.rb_node;
28419 -+
28420 -+ while (node != NULL) {
28421 -+ entry = rb_entry(node, struct bfq_entity, rb_node);
28422 -+left:
28423 -+ if (!bfq_gt(entry->start, st->vtime))
28424 -+ first = entry;
28425 -+
28426 -+ BUG_ON(bfq_gt(entry->min_start, st->vtime));
28427 -+
28428 -+ if (node->rb_left != NULL) {
28429 -+ entry = rb_entry(node->rb_left,
28430 -+ struct bfq_entity, rb_node);
28431 -+ if (!bfq_gt(entry->min_start, st->vtime)) {
28432 -+ node = node->rb_left;
28433 -+ goto left;
28434 -+ }
28435 -+ }
28436 -+ if (first != NULL)
28437 -+ break;
28438 -+ node = node->rb_right;
28439 -+ }
28440 -+
28441 -+ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
28442 -+ return first;
28443 -+}
28444 -+
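Stripped of the rbtree plumbing, the search above relies on min_start as
an augmented field: every node caches the smallest start time in its
subtree, so subtrees that cannot contain an eligible (start <= vtime)
entity are pruned. A toy version over a hypothetical node type, mirroring
the same traversal:

    /* Toy rendition of bfq_first_active_entity(): tree ordered by finish
     * time, pruned via the cached per-subtree minimum start time. */
    #include <stdio.h>
    #include <stddef.h>

    struct toy_entity {
        unsigned long long start;      /* eligibility timestamp (S_i) */
        unsigned long long min_start;  /* min start over this subtree */
        struct toy_entity *left, *right;
    };

    static struct toy_entity *first_eligible(struct toy_entity *node,
                                             unsigned long long vtime)
    {
        struct toy_entity *first = NULL;

        while (node) {
            if (node->start <= vtime)
                first = node;          /* eligible, smallest finish so far */

            if (node->left && node->left->min_start <= vtime)
                node = node->left;     /* a smaller finish may be eligible */
            else if (first)
                break;                 /* right side only has larger finishes */
            else
                node = node->right;
        }
        return first;
    }

    int main(void)
    {
        /* Finish order: b < a < c; min_start values are precomputed. */
        struct toy_entity c = { 90, 90, NULL, NULL };
        struct toy_entity b = { 10, 10, NULL, NULL };
        struct toy_entity a = { 60, 10, &b, &c };

        struct toy_entity *e = first_eligible(&a, 50);
        printf("picked entity with start %llu\n", e ? e->start : 0);
        return 0;
    }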
28445 -+/**
28446 -+ * __bfq_lookup_next_entity - return the first eligible entity in @st.
28447 -+ * @st: the service tree.
28448 -+ *
28449 -+ * Update the virtual time in @st and return the first eligible entity
28450 -+ * it contains.
28451 -+ */
28452 -+static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
28453 -+ bool force)
28454 -+{
28455 -+ struct bfq_entity *entity, *new_next_in_service = NULL;
28456 -+
28457 -+ if (RB_EMPTY_ROOT(&st->active))
28458 -+ return NULL;
28459 -+
28460 -+ bfq_update_vtime(st);
28461 -+ entity = bfq_first_active_entity(st);
28462 -+ BUG_ON(bfq_gt(entity->start, st->vtime));
28463 -+
28464 -+ /*
28465 -+ * If the chosen entity does not match the sched_data's
28466 -+ * next_in_service and we are forcibly serving the IDLE priority
28467 -+ * class tree, bubble up the budget update.
28468 -+ */
28469 -+ if (unlikely(force && entity != entity->sched_data->next_in_service)) {
28470 -+ new_next_in_service = entity;
28471 -+ for_each_entity(new_next_in_service)
28472 -+ bfq_update_budget(new_next_in_service);
28473 -+ }
28474 -+
28475 -+ return entity;
28476 -+}
28477 -+
28478 -+/**
28479 -+ * bfq_lookup_next_entity - return the first eligible entity in @sd.
28480 -+ * @sd: the sched_data.
28481 -+ * @extract: if true the returned entity will be also extracted from @sd.
28482 -+ *
28483 -+ * NOTE: since we cache the next_in_service entity at each level of the
28484 -+ * hierarchy, the complexity of the lookup can be decreased with
28485 -+ * absolutely no effort just returning the cached next_in_service value;
28486 -+ * we prefer to do full lookups to test the consistency of the data
28487 -+ * structures.
28488 -+ */
28489 -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
28490 -+ int extract,
28491 -+ struct bfq_data *bfqd)
28492 -+{
28493 -+ struct bfq_service_tree *st = sd->service_tree;
28494 -+ struct bfq_entity *entity;
28495 -+ int i = 0;
28496 -+
28497 -+ BUG_ON(sd->in_service_entity != NULL);
28498 -+
28499 -+ if (bfqd != NULL &&
28500 -+ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
28501 -+ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
28502 -+ true);
28503 -+ if (entity != NULL) {
28504 -+ i = BFQ_IOPRIO_CLASSES - 1;
28505 -+ bfqd->bfq_class_idle_last_service = jiffies;
28506 -+ sd->next_in_service = entity;
28507 -+ }
28508 -+ }
28509 -+ for (; i < BFQ_IOPRIO_CLASSES; i++) {
28510 -+ entity = __bfq_lookup_next_entity(st + i, false);
28511 -+ if (entity != NULL) {
28512 -+ if (extract) {
28513 -+ bfq_check_next_in_service(sd, entity);
28514 -+ bfq_active_extract(st + i, entity);
28515 -+ sd->in_service_entity = entity;
28516 -+ sd->next_in_service = NULL;
28517 -+ }
28518 -+ break;
28519 -+ }
28520 -+ }
28521 -+
28522 -+ return entity;
28523 -+}
28524 -+
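The scan above encodes the class policy in a few lines: service trees are
tried in strict priority order (RT, BE, IDLE), except that the IDLE class
is forced to the front once it has waited longer than BFQ_CL_IDLE_TIMEOUT.
A condensed, self-contained model of that policy (class count, timeout
value and names are illustrative):

    /* Model of the class scan in bfq_lookup_next_entity(): strict priority
     * with a periodic forced pick of the lowest class to avoid starvation. */
    #include <stdio.h>

    #define NCLASSES     3   /* RT, BE, IDLE; highest priority first */
    #define IDLE_TIMEOUT 5   /* "jiffies" between forced IDLE service */

    static int queued[NCLASSES];

    static int pick_class(unsigned long now, unsigned long *idle_last)
    {
        int i;

        if (now - *idle_last > IDLE_TIMEOUT && queued[NCLASSES - 1]) {
            *idle_last = now;
            return NCLASSES - 1;   /* forced IDLE-class service */
        }
        for (i = 0; i < NCLASSES; i++)
            if (queued[i])
                return i;          /* first backlogged class wins */
        return -1;
    }

    int main(void)
    {
        unsigned long now, idle_last = 0;

        queued[0] = queued[2] = 1;  /* RT and IDLE both backlogged */
        for (now = 1; now <= 12; now++)
            printf("t=%2lu -> class %d\n", now,
                   pick_class(now, &idle_last));
        return 0;
    }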
28525 -+/*
28526 -+ * Get next queue for service.
28527 -+ */
28528 -+static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
28529 -+{
28530 -+ struct bfq_entity *entity = NULL;
28531 -+ struct bfq_sched_data *sd;
28532 -+ struct bfq_queue *bfqq;
28533 -+
28534 -+ BUG_ON(bfqd->in_service_queue != NULL);
28535 -+
28536 -+ if (bfqd->busy_queues == 0)
28537 -+ return NULL;
28538 -+
28539 -+ sd = &bfqd->root_group->sched_data;
28540 -+ for (; sd != NULL; sd = entity->my_sched_data) {
28541 -+ entity = bfq_lookup_next_entity(sd, 1, bfqd);
28542 -+ BUG_ON(entity == NULL);
28543 -+ entity->service = 0;
28544 -+ }
28545 -+
28546 -+ bfqq = bfq_entity_to_bfqq(entity);
28547 -+ BUG_ON(bfqq == NULL);
28548 -+
28549 -+ return bfqq;
28550 -+}
28551 -+
28552 -+/*
28553 -+ * Forced extraction of the given queue.
28554 -+ */
28555 -+static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
28556 -+ struct bfq_queue *bfqq)
28557 -+{
28558 -+ struct bfq_entity *entity;
28559 -+ struct bfq_sched_data *sd;
28560 -+
28561 -+ BUG_ON(bfqd->in_service_queue != NULL);
28562 -+
28563 -+ entity = &bfqq->entity;
28564 -+ /*
28565 -+ * Bubble up extraction/update from the leaf to the root.
28566 -+ */
28567 -+ for_each_entity(entity) {
28568 -+ sd = entity->sched_data;
28569 -+ bfq_update_budget(entity);
28570 -+ bfq_update_vtime(bfq_entity_service_tree(entity));
28571 -+ bfq_active_extract(bfq_entity_service_tree(entity), entity);
28572 -+ sd->in_service_entity = entity;
28573 -+ sd->next_in_service = NULL;
28574 -+ entity->service = 0;
28575 -+ }
28576 -+
28577 -+ return;
28578 -+}
28579 -+
28580 -+static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
28581 -+{
28582 -+ if (bfqd->in_service_bic != NULL) {
28583 -+ put_io_context(bfqd->in_service_bic->icq.ioc);
28584 -+ bfqd->in_service_bic = NULL;
28585 -+ }
28586 -+
28587 -+ bfqd->in_service_queue = NULL;
28588 -+ del_timer(&bfqd->idle_slice_timer);
28589 -+}
28590 -+
28591 -+static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
28592 -+ int requeue)
28593 -+{
28594 -+ struct bfq_entity *entity = &bfqq->entity;
28595 -+
28596 -+ if (bfqq == bfqd->in_service_queue)
28597 -+ __bfq_bfqd_reset_in_service(bfqd);
28598 -+
28599 -+ bfq_deactivate_entity(entity, requeue);
28600 -+}
28601 -+
28602 -+static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
28603 -+{
28604 -+ struct bfq_entity *entity = &bfqq->entity;
28605 -+
28606 -+ bfq_activate_entity(entity);
28607 -+}
28608 -+
28609 -+/*
28610 -+ * Called when the bfqq no longer has requests pending, remove it from
28611 -+ * the service tree.
28612 -+ */
28613 -+static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
28614 -+ int requeue)
28615 -+{
28616 -+ BUG_ON(!bfq_bfqq_busy(bfqq));
28617 -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
28618 -+
28619 -+ bfq_log_bfqq(bfqd, bfqq, "del from busy");
28620 -+
28621 -+ bfq_clear_bfqq_busy(bfqq);
28622 -+
28623 -+ BUG_ON(bfqd->busy_queues == 0);
28624 -+ bfqd->busy_queues--;
28625 -+
28626 -+ if (!bfqq->dispatched) {
28627 -+ bfq_weights_tree_remove(bfqd, &bfqq->entity,
28628 -+ &bfqd->queue_weights_tree);
28629 -+ if (!blk_queue_nonrot(bfqd->queue)) {
28630 -+ BUG_ON(!bfqd->busy_in_flight_queues);
28631 -+ bfqd->busy_in_flight_queues--;
28632 -+ if (bfq_bfqq_constantly_seeky(bfqq)) {
28633 -+ BUG_ON(!bfqd->
28634 -+ const_seeky_busy_in_flight_queues);
28635 -+ bfqd->const_seeky_busy_in_flight_queues--;
28636 -+ }
28637 -+ }
28638 -+ }
28639 -+ if (bfqq->wr_coeff > 1)
28640 -+ bfqd->wr_busy_queues--;
28641 -+
28642 -+ bfq_deactivate_bfqq(bfqd, bfqq, requeue);
28643 -+}
28644 -+
28645 -+/*
28646 -+ * Called when an inactive queue receives a new request.
28647 -+ */
28648 -+static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
28649 -+{
28650 -+ BUG_ON(bfq_bfqq_busy(bfqq));
28651 -+ BUG_ON(bfqq == bfqd->in_service_queue);
28652 -+
28653 -+ bfq_log_bfqq(bfqd, bfqq, "add to busy");
28654 -+
28655 -+ bfq_activate_bfqq(bfqd, bfqq);
28656 -+
28657 -+ bfq_mark_bfqq_busy(bfqq);
28658 -+ bfqd->busy_queues++;
28659 -+
28660 -+ if (!bfqq->dispatched) {
28661 -+ if (bfqq->wr_coeff == 1)
28662 -+ bfq_weights_tree_add(bfqd, &bfqq->entity,
28663 -+ &bfqd->queue_weights_tree);
28664 -+ if (!blk_queue_nonrot(bfqd->queue)) {
28665 -+ bfqd->busy_in_flight_queues++;
28666 -+ if (bfq_bfqq_constantly_seeky(bfqq))
28667 -+ bfqd->const_seeky_busy_in_flight_queues++;
28668 -+ }
28669 -+ }
28670 -+ if (bfqq->wr_coeff > 1)
28671 -+ bfqd->wr_busy_queues++;
28672 -+}
28673 -diff --git a/block/bfq.h b/block/bfq.h
28674 -new file mode 100644
28675 -index 0000000..518f2ac
28676 ---- /dev/null
28677 -+++ b/block/bfq.h
28678 -@@ -0,0 +1,775 @@
28679 -+/*
28680 -+ * BFQ-v7r7 for 4.0.0: data structures and common functions prototypes.
28681 -+ *
28682 -+ * Based on ideas and code from CFQ:
28683 -+ * Copyright (C) 2003 Jens Axboe <axboe@××××××.dk>
28684 -+ *
28685 -+ * Copyright (C) 2008 Fabio Checconi <fabio@×××××××××××××.it>
28686 -+ * Paolo Valente <paolo.valente@×××××××.it>
28687 -+ *
28688 -+ * Copyright (C) 2010 Paolo Valente <paolo.valente@×××××××.it>
28689 -+ */
28690 -+
28691 -+#ifndef _BFQ_H
28692 -+#define _BFQ_H
28693 -+
28694 -+#include <linux/blktrace_api.h>
28695 -+#include <linux/hrtimer.h>
28696 -+#include <linux/ioprio.h>
28697 -+#include <linux/rbtree.h>
28698 -+
28699 -+#define BFQ_IOPRIO_CLASSES 3
28700 -+#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
28701 -+
28702 -+#define BFQ_MIN_WEIGHT 1
28703 -+#define BFQ_MAX_WEIGHT 1000
28704 -+
28705 -+#define BFQ_DEFAULT_QUEUE_IOPRIO 4
28706 -+
28707 -+#define BFQ_DEFAULT_GRP_WEIGHT 10
28708 -+#define BFQ_DEFAULT_GRP_IOPRIO 0
28709 -+#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
28710 -+
28711 -+struct bfq_entity;
28712 -+
28713 -+/**
28714 -+ * struct bfq_service_tree - per ioprio_class service tree.
28715 -+ * @active: tree for active entities (i.e., those backlogged).
28716 -+ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
28717 -+ * @first_idle: idle entity with minimum F_i.
28718 -+ * @last_idle: idle entity with maximum F_i.
28719 -+ * @vtime: scheduler virtual time.
28720 -+ * @wsum: scheduler weight sum; active and idle entities contribute to it.
28721 -+ *
28722 -+ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
28723 -+ * ioprio_class has its own independent scheduler, and so its own
28724 -+ * bfq_service_tree. All the fields are protected by the queue lock
28725 -+ * of the containing bfqd.
28726 -+ */
28727 -+struct bfq_service_tree {
28728 -+ struct rb_root active;
28729 -+ struct rb_root idle;
28730 -+
28731 -+ struct bfq_entity *first_idle;
28732 -+ struct bfq_entity *last_idle;
28733 -+
28734 -+ u64 vtime;
28735 -+ unsigned long wsum;
28736 -+};
28737 -+
28738 -+/**
28739 -+ * struct bfq_sched_data - multi-class scheduler.
28740 -+ * @in_service_entity: entity in service.
28741 -+ * @next_in_service: head-of-the-line entity in the scheduler.
28742 -+ * @service_tree: array of service trees, one per ioprio_class.
28743 -+ *
28744 -+ * bfq_sched_data is the basic scheduler queue. It supports three
28745 -+ * ioprio_classes, and can be used either as a toplevel queue or as
28746 -+ * an intermediate queue on a hierarchical setup.
28747 -+ * @next_in_service points to the active entity of the sched_data
28748 -+ * service trees that will be scheduled next.
28749 -+ *
28750 -+ * The supported ioprio_classes are the same as in CFQ, in descending
28751 -+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
28752 -+ * Requests from higher priority queues are served before all the
28753 -+ * requests from lower priority queues; within the same class,
28754 -+ * queues are served according to B-WF2Q+.
28755 -+ * All the fields are protected by the queue lock of the containing bfqd.
28756 -+ */
28757 -+struct bfq_sched_data {
28758 -+ struct bfq_entity *in_service_entity;
28759 -+ struct bfq_entity *next_in_service;
28760 -+ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
28761 -+};
28762 -+
28763 -+/**
28764 -+ * struct bfq_weight_counter - counter of the number of all active entities
28765 -+ * with a given weight.
28766 -+ * @weight: weight of the entities that this counter refers to.
28767 -+ * @num_active: number of active entities with this weight.
28768 -+ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
28769 -+ * and @group_weights_tree).
28770 -+ */
28771 -+struct bfq_weight_counter {
28772 -+ short int weight;
28773 -+ unsigned int num_active;
28774 -+ struct rb_node weights_node;
28775 -+};
28776 -+
28777 -+/**
28778 -+ * struct bfq_entity - schedulable entity.
28779 -+ * @rb_node: service_tree member.
28780 -+ * @weight_counter: pointer to the weight counter associated with this entity.
28781 -+ * @on_st: flag, true if the entity is on a tree (either the active or
28782 -+ * the idle one of its service_tree).
28783 -+ * @finish: B-WF2Q+ finish timestamp (aka F_i).
28784 -+ * @start: B-WF2Q+ start timestamp (aka S_i).
28785 -+ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
28786 -+ * @min_start: minimum start time of the (active) subtree rooted at
28787 -+ * this entity; used for O(log N) lookups into active trees.
28788 -+ * @service: service received during the last round of service.
28789 -+ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
28790 -+ * @weight: weight of the queue
28791 -+ * @parent: parent entity, for hierarchical scheduling.
28792 -+ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
28793 -+ * associated scheduler queue, %NULL on leaf nodes.
28794 -+ * @sched_data: the scheduler queue this entity belongs to.
28795 -+ * @ioprio: the ioprio in use.
28796 -+ * @new_weight: when a weight change is requested, the new weight value.
28797 -+ * @orig_weight: original weight, used to implement weight boosting
28798 -+ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
28799 -+ * @ioprio_class: the ioprio_class in use.
28800 -+ * @new_ioprio_class: when an ioprio_class change is requested, the new
28801 -+ * ioprio_class value.
28802 -+ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
28803 -+ * ioprio_class change.
28804 -+ *
28805 -+ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
28806 -+ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
28807 -+ * entity belongs to the sched_data of the parent group in the cgroup
28808 -+ * hierarchy. Non-leaf entities have also their own sched_data, stored
28809 -+ * in @my_sched_data.
28810 -+ *
28811 -+ * Each entity stores independently its priority values; this would
28812 -+ * allow different weights on different devices, but this
28813 -+ * functionality is not exported to userspace by now. Priorities and
28814 -+ * weights are updated lazily, first storing the new values into the
28815 -+ * new_* fields, then setting the @ioprio_changed flag. As soon as
28816 -+ * there is a transition in the entity state that allows the priority
28817 -+ * update to take place the effective and the requested priority
28818 -+ * values are synchronized.
28819 -+ *
28820 -+ * Unless cgroups are used, the weight value is calculated from the
28821 -+ * ioprio to export the same interface as CFQ. When dealing with
28822 -+ * ``well-behaved'' queues (i.e., queues that do not spend too much
28823 -+ * time to consume their budget and have true sequential behavior, and
28824 -+ * when there are no external factors breaking anticipation) the
28825 -+ * relative weights at each level of the cgroups hierarchy should be
28826 -+ * guaranteed. All the fields are protected by the queue lock of the
28827 -+ * containing bfqd.
28828 -+ */
28829 -+struct bfq_entity {
28830 -+ struct rb_node rb_node;
28831 -+ struct bfq_weight_counter *weight_counter;
28832 -+
28833 -+ int on_st;
28834 -+
28835 -+ u64 finish;
28836 -+ u64 start;
28837 -+
28838 -+ struct rb_root *tree;
28839 -+
28840 -+ u64 min_start;
28841 -+
28842 -+ unsigned long service, budget;
28843 -+ unsigned short weight, new_weight;
28844 -+ unsigned short orig_weight;
28845 -+
28846 -+ struct bfq_entity *parent;
28847 -+
28848 -+ struct bfq_sched_data *my_sched_data;
28849 -+ struct bfq_sched_data *sched_data;
28850 -+
28851 -+ unsigned short ioprio, new_ioprio;
28852 -+ unsigned short ioprio_class, new_ioprio_class;
28853 -+
28854 -+ int ioprio_changed;
28855 -+};
28856 -+
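The formula quoted in the field list above, F_i = S_i + budget/weight, is
what makes the weights effective: at equal start times a larger weight
yields an earlier finish timestamp, so the entity is picked sooner and,
over time, proportionally more often. A minimal numeric illustration:

    /* Numeric illustration of F_i = S_i + budget / weight (plain
     * integers; the kernel uses fixed-point arithmetic instead). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long start = 1000, budget = 8192;
        unsigned long weights[] = { 100, 200, 400 };
        size_t i;

        for (i = 0; i < sizeof(weights) / sizeof(weights[0]); i++)
            printf("weight %3lu -> finish %llu\n", weights[i],
                   start + budget / weights[i]);
        return 0;
    }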
28857 -+struct bfq_group;
28858 -+
28859 -+/**
28860 -+ * struct bfq_queue - leaf schedulable entity.
28861 -+ * @ref: reference counter.
28862 -+ * @bfqd: parent bfq_data.
28863 -+ * @new_bfqq: shared bfq_queue if queue is cooperating with
28864 -+ * one or more other queues.
28865 -+ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
28866 -+ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
28867 -+ * @sort_list: sorted list of pending requests.
28868 -+ * @next_rq: if fifo isn't expired, next request to serve.
28869 -+ * @queued: nr of requests queued in @sort_list.
28870 -+ * @allocated: currently allocated requests.
28871 -+ * @meta_pending: pending metadata requests.
28872 -+ * @fifo: fifo list of requests in sort_list.
28873 -+ * @entity: entity representing this queue in the scheduler.
28874 -+ * @max_budget: maximum budget allowed from the feedback mechanism.
28875 -+ * @budget_timeout: budget expiration (in jiffies).
28876 -+ * @dispatched: number of requests on the dispatch list or inside driver.
28877 -+ * @flags: status flags.
28878 -+ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
28879 -+ * @burst_list_node: node for the device's burst list.
28880 -+ * @seek_samples: number of seeks sampled
28881 -+ * @seek_total: sum of the distances of the seeks sampled
28882 -+ * @seek_mean: mean seek distance
28883 -+ * @last_request_pos: position of the last request enqueued
28884 -+ * @requests_within_timer: number of consecutive pairs of request completion
28885 -+ * and arrival, such that the queue becomes idle
28886 -+ * after the completion, but the next request arrives
28887 -+ * within an idle time slice; used only if the queue's
28888 -+ * IO_bound flag has been cleared.
28889 -+ * @pid: pid of the process owning the queue, used for logging purposes.
28890 -+ * @last_wr_start_finish: start time of the current weight-raising period if
28891 -+ * the @bfq-queue is being weight-raised, otherwise
28892 -+ * finish time of the last weight-raising period
28893 -+ * @wr_cur_max_time: current max raising time for this queue
28894 -+ * @soft_rt_next_start: minimum time instant such that, only if a new
28895 -+ * request is enqueued after this time instant in an
28896 -+ * idle @bfq_queue with no outstanding requests, then
28897 -+ * the task associated with the queue is deemed as
28898 -+ * soft real-time (see the comments to the function
28899 -+ * bfq_bfqq_softrt_next_start()).
28900 -+ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
28901 -+ * idle to backlogged
28902 -+ * @service_from_backlogged: cumulative service received from the @bfq_queue
28903 -+ * since the last transition from idle to
28904 -+ * backlogged
28905 -+ *
28906 -+ * A bfq_queue is a leaf request queue; it can be associated with one or more
28907 -+ * io_contexts, if it is async or shared between cooperating processes. @cgroup
28908 -+ * holds a reference to the cgroup, to be sure that it does not disappear while
28909 -+ * a bfqq still references it (mostly to avoid races between request issuing and
28910 -+ * task migration followed by cgroup destruction).
28911 -+ * All the fields are protected by the queue lock of the containing bfqd.
28912 -+ */
28913 -+struct bfq_queue {
28914 -+ atomic_t ref;
28915 -+ struct bfq_data *bfqd;
28916 -+
28917 -+ /* fields for cooperating queues handling */
28918 -+ struct bfq_queue *new_bfqq;
28919 -+ struct rb_node pos_node;
28920 -+ struct rb_root *pos_root;
28921 -+
28922 -+ struct rb_root sort_list;
28923 -+ struct request *next_rq;
28924 -+ int queued[2];
28925 -+ int allocated[2];
28926 -+ int meta_pending;
28927 -+ struct list_head fifo;
28928 -+
28929 -+ struct bfq_entity entity;
28930 -+
28931 -+ unsigned long max_budget;
28932 -+ unsigned long budget_timeout;
28933 -+
28934 -+ int dispatched;
28935 -+
28936 -+ unsigned int flags;
28937 -+
28938 -+ struct list_head bfqq_list;
28939 -+
28940 -+ struct hlist_node burst_list_node;
28941 -+
28942 -+ unsigned int seek_samples;
28943 -+ u64 seek_total;
28944 -+ sector_t seek_mean;
28945 -+ sector_t last_request_pos;
28946 -+
28947 -+ unsigned int requests_within_timer;
28948 -+
28949 -+ pid_t pid;
28950 -+
28951 -+ /* weight-raising fields */
28952 -+ unsigned long wr_cur_max_time;
28953 -+ unsigned long soft_rt_next_start;
28954 -+ unsigned long last_wr_start_finish;
28955 -+ unsigned int wr_coeff;
28956 -+ unsigned long last_idle_bklogged;
28957 -+ unsigned long service_from_backlogged;
28958 -+};
28959 -+
28960 -+/**
28961 -+ * struct bfq_ttime - per process thinktime stats.
28962 -+ * @ttime_total: total process thinktime
28963 -+ * @ttime_samples: number of thinktime samples
28964 -+ * @ttime_mean: average process thinktime
28965 -+ */
28966 -+struct bfq_ttime {
28967 -+ unsigned long last_end_request;
28968 -+
28969 -+ unsigned long ttime_total;
28970 -+ unsigned long ttime_samples;
28971 -+ unsigned long ttime_mean;
28972 -+};
28973 -+
28974 -+/**
28975 -+ * struct bfq_io_cq - per (request_queue, io_context) structure.
28976 -+ * @icq: associated io_cq structure
28977 -+ * @bfqq: array of two process queues, the sync and the async
28978 -+ * @ttime: associated @bfq_ttime struct
28979 -+ */
28980 -+struct bfq_io_cq {
28981 -+ struct io_cq icq; /* must be the first member */
28982 -+ struct bfq_queue *bfqq[2];
28983 -+ struct bfq_ttime ttime;
28984 -+ int ioprio;
28985 -+};
28986 -+
28987 -+enum bfq_device_speed {
28988 -+ BFQ_BFQD_FAST,
28989 -+ BFQ_BFQD_SLOW,
28990 -+};
28991 -+
28992 -+/**
28993 -+ * struct bfq_data - per device data structure.
28994 -+ * @queue: request queue for the managed device.
28995 -+ * @root_group: root bfq_group for the device.
28996 -+ * @rq_pos_tree: rbtree sorted by next_request position, used when
28997 -+ * determining if two or more queues have interleaving
28998 -+ * requests (see bfq_close_cooperator()).
28999 -+ * @active_numerous_groups: number of bfq_groups containing more than one
29000 -+ * active @bfq_entity.
29001 -+ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
29002 -+ * weight. Used to keep track of whether all @bfq_queues
29003 -+ * have the same weight. The tree contains one counter
29004 -+ * for each distinct weight associated to some active
29005 -+ * and not weight-raised @bfq_queue (see the comments to
29006 -+ * the functions bfq_weights_tree_[add|remove] for
29007 -+ * further details).
29008 -+ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
29009 -+ * by weight. Used to keep track of whether all
29010 -+ * @bfq_groups have the same weight. The tree contains
29011 -+ * one counter for each distinct weight associated to
29012 -+ * some active @bfq_group (see the comments to the
29013 -+ * functions bfq_weights_tree_[add|remove] for further
29014 -+ * details).
29015 -+ * @busy_queues: number of bfq_queues containing requests (including the
29016 -+ * queue in service, even if it is idling).
29017 -+ * @busy_in_flight_queues: number of @bfq_queues containing pending or
29018 -+ * in-flight requests, plus the @bfq_queue in
29019 -+ * service, even if idle but waiting for the
29020 -+ * possible arrival of its next sync request. This
29021 -+ * field is updated only if the device is rotational,
29022 -+ * but used only if the device is also NCQ-capable.
29023 -+ * The reason why the field is updated also for non-
29024 -+ * NCQ-capable rotational devices is related to the
29025 -+ * fact that the value of @hw_tag may be set also
29026 -+ * later than when busy_in_flight_queues may need to
29027 -+ * be incremented for the first time(s). Taking also
29028 -+ * this possibility into account, to avoid unbalanced
29029 -+ * increments/decrements, would imply more overhead
29030 -+ * than just updating busy_in_flight_queues
29031 -+ * regardless of the value of @hw_tag.
29032 -+ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
29033 -+ * (that is, seeky queues that expired
29034 -+ * for budget timeout at least once)
29035 -+ * containing pending or in-flight
29036 -+ * requests, including the in-service
29037 -+ * @bfq_queue if constantly seeky. This
29038 -+ * field is updated only if the device
29039 -+ * is rotational, but used only if the
29040 -+ * device is also NCQ-capable (see the
29041 -+ * comments to @busy_in_flight_queues).
29042 -+ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
29043 -+ * @queued: number of queued requests.
29044 -+ * @rq_in_driver: number of requests dispatched and waiting for completion.
29045 -+ * @sync_flight: number of sync requests in the driver.
29046 -+ * @max_rq_in_driver: max number of reqs in driver in the last
29047 -+ * @hw_tag_samples completed requests.
29048 -+ * @hw_tag_samples: nr of samples used to calculate hw_tag.
29049 -+ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
29050 -+ * @budgets_assigned: number of budgets assigned.
29051 -+ * @idle_slice_timer: timer set when idling for the next sequential request
29052 -+ * from the queue in service.
29053 -+ * @unplug_work: delayed work to restart dispatching on the request queue.
29054 -+ * @in_service_queue: bfq_queue in service.
29055 -+ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
29056 -+ * @last_position: on-disk position of the last served request.
29057 -+ * @last_budget_start: beginning of the last budget.
29058 -+ * @last_idling_start: beginning of the last idle slice.
29059 -+ * @peak_rate: peak transfer rate observed for a budget.
29060 -+ * @peak_rate_samples: number of samples used to calculate @peak_rate.
29061 -+ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
29062 -+ * rescheduling.
29063 -+ * @group_list: list of all the bfq_groups active on the device.
29064 -+ * @active_list: list of all the bfq_queues active on the device.
29065 -+ * @idle_list: list of all the bfq_queues idle on the device.
29066 -+ * @bfq_quantum: max number of requests dispatched per dispatch round.
29067 -+ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
29068 -+ * requests are served in fifo order.
29069 -+ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
29070 -+ * @bfq_back_max: maximum allowed backward seek.
29071 -+ * @bfq_slice_idle: maximum idling time.
29072 -+ * @bfq_user_max_budget: user-configured max budget value
29073 -+ * (0 for auto-tuning).
29074 -+ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
29075 -+ * async queues.
29076 -+ * @bfq_timeout: timeout for bfq_queues to consume their budget; used to
29077 -+ * prevent seeky queues from imposing long latencies on well
29078 -+ * behaved ones (this also implies that seeky queues cannot
29079 -+ * receive guarantees in the service domain; after a timeout
29080 -+ * they are charged for the whole allocated budget, to try
29081 -+ * to preserve a behavior reasonably fair among them, but
29082 -+ * without service-domain guarantees).
29083 -+ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
29084 -+ * no longer granted any weight-raising.
29085 -+ * @bfq_failed_cooperations: number of consecutive failed cooperation
29086 -+ * chances after which weight-raising is restored
29087 -+ * to a queue subject to more than bfq_coop_thresh
29088 -+ * queue merges.
29089 -+ * @bfq_requests_within_timer: number of consecutive requests that must be
29090 -+ * issued within the idle time slice to set
29091 -+ * again idling to a queue which was marked as
29092 -+ * non-I/O-bound (see the definition of the
29093 -+ * IO_bound flag for further details).
29094 -+ * @last_ins_in_burst: last time at which a queue entered the current
29095 -+ * burst of queues being activated shortly after
29096 -+ * each other; for more details about this and the
29097 -+ * following parameters related to a burst of
29098 -+ * activations, see the comments to the function
29099 -+ * @bfq_handle_burst.
29100 -+ * @bfq_burst_interval: reference time interval used to decide whether a
29101 -+ * queue has been activated shortly after
29102 -+ * @last_ins_in_burst.
29103 -+ * @burst_size: number of queues in the current burst of queue activations.
29104 -+ * @bfq_large_burst_thresh: maximum burst size above which the current
29105 -+ * queue-activation burst is deemed as 'large'.
29106 -+ * @large_burst: true if a large queue-activation burst is in progress.
29107 -+ * @burst_list: head of the burst list (as for the above fields, more details
29108 -+ * in the comments to the function bfq_handle_burst).
29109 -+ * @low_latency: if set to true, low-latency heuristics are enabled.
29110 -+ * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
29111 -+ * queue is multiplied.
29112 -+ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies).
29113 -+ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes.
29114 -+ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
29115 -+ * may be reactivated for a queue (in jiffies).
29116 -+ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
29117 -+ * after which weight-raising may be
29118 -+ * reactivated for an already busy queue
29119 -+ * (in jiffies).
29120 -+ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
29121 -+ * sectors per seconds.
29122 -+ * @RT_prod: cached value of the product R*T used for computing the maximum
29123 -+ * duration of the weight raising automatically.
29124 -+ * @device_speed: device-speed class for the low-latency heuristic.
29125 -+ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions.
29126 -+ *
29127 -+ * All the fields are protected by the @queue lock.
29128 -+ */
29129 -+struct bfq_data {
29130 -+ struct request_queue *queue;
29131 -+
29132 -+ struct bfq_group *root_group;
29133 -+ struct rb_root rq_pos_tree;
29134 -+
29135 -+#ifdef CONFIG_CGROUP_BFQIO
29136 -+ int active_numerous_groups;
29137 -+#endif
29138 -+
29139 -+ struct rb_root queue_weights_tree;
29140 -+ struct rb_root group_weights_tree;
29141 -+
29142 -+ int busy_queues;
29143 -+ int busy_in_flight_queues;
29144 -+ int const_seeky_busy_in_flight_queues;
29145 -+ int wr_busy_queues;
29146 -+ int queued;
29147 -+ int rq_in_driver;
29148 -+ int sync_flight;
29149 -+
29150 -+ int max_rq_in_driver;
29151 -+ int hw_tag_samples;
29152 -+ int hw_tag;
29153 -+
29154 -+ int budgets_assigned;
29155 -+
29156 -+ struct timer_list idle_slice_timer;
29157 -+ struct work_struct unplug_work;
29158 -+
29159 -+ struct bfq_queue *in_service_queue;
29160 -+ struct bfq_io_cq *in_service_bic;
29161 -+
29162 -+ sector_t last_position;
29163 -+
29164 -+ ktime_t last_budget_start;
29165 -+ ktime_t last_idling_start;
29166 -+ int peak_rate_samples;
29167 -+ u64 peak_rate;
29168 -+ unsigned long bfq_max_budget;
29169 -+
29170 -+ struct hlist_head group_list;
29171 -+ struct list_head active_list;
29172 -+ struct list_head idle_list;
29173 -+
29174 -+ unsigned int bfq_quantum;
29175 -+ unsigned int bfq_fifo_expire[2];
29176 -+ unsigned int bfq_back_penalty;
29177 -+ unsigned int bfq_back_max;
29178 -+ unsigned int bfq_slice_idle;
29179 -+ u64 bfq_class_idle_last_service;
29180 -+
29181 -+ unsigned int bfq_user_max_budget;
29182 -+ unsigned int bfq_max_budget_async_rq;
29183 -+ unsigned int bfq_timeout[2];
29184 -+
29185 -+ unsigned int bfq_coop_thresh;
29186 -+ unsigned int bfq_failed_cooperations;
29187 -+ unsigned int bfq_requests_within_timer;
29188 -+
29189 -+ unsigned long last_ins_in_burst;
29190 -+ unsigned long bfq_burst_interval;
29191 -+ int burst_size;
29192 -+ unsigned long bfq_large_burst_thresh;
29193 -+ bool large_burst;
29194 -+ struct hlist_head burst_list;
29195 -+
29196 -+ bool low_latency;
29197 -+
29198 -+ /* parameters of the low_latency heuristics */
29199 -+ unsigned int bfq_wr_coeff;
29200 -+ unsigned int bfq_wr_max_time;
29201 -+ unsigned int bfq_wr_rt_max_time;
29202 -+ unsigned int bfq_wr_min_idle_time;
29203 -+ unsigned long bfq_wr_min_inter_arr_async;
29204 -+ unsigned int bfq_wr_max_softrt_rate;
29205 -+ u64 RT_prod;
29206 -+ enum bfq_device_speed device_speed;
29207 -+
29208 -+ struct bfq_queue oom_bfqq;
29209 -+};
29210 -+
29211 -+enum bfqq_state_flags {
29212 -+ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is in service */
29213 -+ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
29214 -+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
29215 -+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
29216 -+ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
29217 -+ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
29218 -+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
29219 -+ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
29220 -+ BFQ_BFQQ_FLAG_IO_bound, /*
29221 -+ * bfqq has timed-out at least once
29222 -+ * having consumed at most 2/10 of
29223 -+ * its budget
29224 -+ */
29225 -+ BFQ_BFQQ_FLAG_in_large_burst, /*
29226 -+ * bfqq activated in a large burst,
29227 -+ * see comments to bfq_handle_burst.
29228 -+ */
29229 -+ BFQ_BFQQ_FLAG_constantly_seeky, /*
29230 -+ * bfqq has proved to be slow and
29231 -+ * seeky until budget timeout
29232 -+ */
29233 -+ BFQ_BFQQ_FLAG_softrt_update, /*
29234 -+ * may need softrt-next-start
29235 -+ * update
29236 -+ */
29237 -+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
29238 -+ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
29239 -+};
29240 -+
29241 -+#define BFQ_BFQQ_FNS(name) \
29242 -+static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
29243 -+{ \
29244 -+ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
29245 -+} \
29246 -+static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
29247 -+{ \
29248 -+ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
29249 -+} \
29250 -+static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
29251 -+{ \
29252 -+ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
29253 -+}
29254 -+
29255 -+BFQ_BFQQ_FNS(busy);
29256 -+BFQ_BFQQ_FNS(wait_request);
29257 -+BFQ_BFQQ_FNS(must_alloc);
29258 -+BFQ_BFQQ_FNS(fifo_expire);
29259 -+BFQ_BFQQ_FNS(idle_window);
29260 -+BFQ_BFQQ_FNS(prio_changed);
29261 -+BFQ_BFQQ_FNS(sync);
29262 -+BFQ_BFQQ_FNS(budget_new);
29263 -+BFQ_BFQQ_FNS(IO_bound);
29264 -+BFQ_BFQQ_FNS(in_large_burst);
29265 -+BFQ_BFQQ_FNS(constantly_seeky);
29266 -+BFQ_BFQQ_FNS(coop);
29267 -+BFQ_BFQQ_FNS(split_coop);
29268 -+BFQ_BFQQ_FNS(softrt_update);
29269 -+#undef BFQ_BFQQ_FNS
29270 -+
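For readers unfamiliar with this X-macro idiom: each BFQ_BFQQ_FNS(name)
line above stamps out three one-line accessors over the flags word.
Expanding BFQ_BFQQ_FNS(busy) by hand, for instance, yields exactly:

    static inline void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
    {
        (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_busy);
    }
    static inline void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
    {
        (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_busy);
    }
    static inline int bfq_bfqq_busy(const struct bfq_queue *bfqq)
    {
        return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_busy)) != 0;
    }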
29271 -+/* Logging facilities. */
29272 -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
29273 -+ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
29274 -+
29275 -+#define bfq_log(bfqd, fmt, args...) \
29276 -+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
29277 -+
29278 -+/* Expiration reasons. */
29279 -+enum bfqq_expiration {
29280 -+ BFQ_BFQQ_TOO_IDLE = 0, /*
29281 -+ * queue has been idling for
29282 -+ * too long
29283 -+ */
29284 -+ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
29285 -+ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
29286 -+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
29287 -+};
29288 -+
29289 -+#ifdef CONFIG_CGROUP_BFQIO
29290 -+/**
29291 -+ * struct bfq_group - per (device, cgroup) data structure.
29292 -+ * @entity: schedulable entity to insert into the parent group sched_data.
29293 -+ * @sched_data: own sched_data, to contain child entities (they may be
29294 -+ * both bfq_queues and bfq_groups).
29295 -+ * @group_node: node to be inserted into the bfqio_cgroup->group_data
29296 -+ * list of the containing cgroup's bfqio_cgroup.
29297 -+ * @bfqd_node: node to be inserted into the @bfqd->group_list list
29298 -+ * of the groups active on the same device; used for cleanup.
29299 -+ * @bfqd: the bfq_data for the device this group acts upon.
29300 -+ * @async_bfqq: array of async queues for all the tasks belonging to
29301 -+ * the group, one queue per ioprio value per ioprio_class,
29302 -+ * except for the idle class that has only one queue.
29303 -+ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
29304 -+ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
29305 -+ * to avoid too many special cases during group creation/
29306 -+ * migration.
29307 -+ * @active_entities: number of active entities belonging to the group;
29308 -+ * unused for the root group. Used to know whether there
29309 -+ * are groups with more than one active @bfq_entity
29310 -+ * (see the comments to the function
29311 -+ * bfq_bfqq_must_not_expire()).
29312 -+ *
29313 -+ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
29314 -+ * there is a set of bfq_groups, each one collecting the lower-level
29315 -+ * entities belonging to the group that are acting on the same device.
29316 -+ *
29317 -+ * Locking works as follows:
29318 -+ * o @group_node is protected by the bfqio_cgroup lock, and is accessed
29319 -+ * via RCU from its readers.
29320 -+ * o @bfqd is protected by the queue lock, RCU is used to access it
29321 -+ * from the readers.
29322 -+ * o All the other fields are protected by the @bfqd queue lock.
29323 -+ */
29324 -+struct bfq_group {
29325 -+ struct bfq_entity entity;
29326 -+ struct bfq_sched_data sched_data;
29327 -+
29328 -+ struct hlist_node group_node;
29329 -+ struct hlist_node bfqd_node;
29330 -+
29331 -+ void *bfqd;
29332 -+
29333 -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
29334 -+ struct bfq_queue *async_idle_bfqq;
29335 -+
29336 -+ struct bfq_entity *my_entity;
29337 -+
29338 -+ int active_entities;
29339 -+};
29340 -+
29341 -+/**
29342 -+ * struct bfqio_cgroup - bfq cgroup data structure.
29343 -+ * @css: subsystem state for bfq in the containing cgroup.
29344 -+ * @online: flag marked when the subsystem is inserted.
29345 -+ * @weight: cgroup weight.
29346 -+ * @ioprio: cgroup ioprio.
29347 -+ * @ioprio_class: cgroup ioprio_class.
29348 -+ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
29349 -+ * @group_data: list containing the bfq_group belonging to this cgroup.
29350 -+ *
29351 -+ * @group_data is accessed using RCU, with @lock protecting the updates,
29352 -+ * @ioprio and @ioprio_class are protected by @lock.
29353 -+ */
29354 -+struct bfqio_cgroup {
29355 -+ struct cgroup_subsys_state css;
29356 -+ bool online;
29357 -+
29358 -+ unsigned short weight, ioprio, ioprio_class;
29359 -+
29360 -+ spinlock_t lock;
29361 -+ struct hlist_head group_data;
29362 -+};
29363 -+#else
29364 -+struct bfq_group {
29365 -+ struct bfq_sched_data sched_data;
29366 -+
29367 -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
29368 -+ struct bfq_queue *async_idle_bfqq;
29369 -+};
29370 -+#endif
29371 -+
29372 -+static inline struct bfq_service_tree *
29373 -+bfq_entity_service_tree(struct bfq_entity *entity)
29374 -+{
29375 -+ struct bfq_sched_data *sched_data = entity->sched_data;
29376 -+ unsigned int idx = entity->ioprio_class - 1;
29377 -+
29378 -+ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
29379 -+ BUG_ON(sched_data == NULL);
29380 -+
29381 -+ return sched_data->service_tree + idx;
29382 -+}
29383 -+
29384 -+static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
29385 -+ bool is_sync)
29386 -+{
29387 -+ return bic->bfqq[is_sync];
29388 -+}
29389 -+
29390 -+static inline void bic_set_bfqq(struct bfq_io_cq *bic,
29391 -+ struct bfq_queue *bfqq, bool is_sync)
29392 -+{
29393 -+ bic->bfqq[is_sync] = bfqq;
29394 -+}
29395 -+
29396 -+static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
29397 -+{
29398 -+ return bic->icq.q->elevator->elevator_data;
29399 -+}
29400 -+
29401 -+/**
29402 -+ * bfq_get_bfqd_locked - get a lock to a bfqd using a RCU protected pointer.
29403 -+ * @ptr: a pointer to a bfqd.
29404 -+ * @flags: storage for the flags to be saved.
29405 -+ *
29406 -+ * This function allows bfqg->bfqd to be protected by the
29407 -+ * queue lock of the bfqd it references; the pointer is dereferenced
29408 -+ * under RCU, so the storage for bfqd is assured to be safe as long
29409 -+ * as the RCU read side critical section does not end. After the
29410 -+ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
29411 -+ * sure that no other writer accessed it. If we raced with a writer,
29412 -+ * the function returns NULL, with the queue unlocked, otherwise it
29413 -+ * returns the dereferenced pointer, with the queue locked.
29414 -+ */
29415 -+static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
29416 -+ unsigned long *flags)
29417 -+{
29418 -+ struct bfq_data *bfqd;
29419 -+
29420 -+ rcu_read_lock();
29421 -+ bfqd = rcu_dereference(*(struct bfq_data **)ptr);
29422 -+
29423 -+ if (bfqd != NULL) {
29424 -+ spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
29425 -+ if (*ptr == bfqd)
29426 -+ goto out;
29427 -+ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
29428 -+ }
29429 -+
29430 -+ bfqd = NULL;
29431 -+out:
29432 -+ rcu_read_unlock();
29433 -+ return bfqd;
29434 -+}
29435 -+
29436 -+static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
29437 -+ unsigned long *flags)
29438 -+{
29439 -+ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
29440 -+}
29441 -+
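A typical caller pairs the two helpers above as follows; the function
below is a hypothetical usage sketch in kernel style, not code from the
patch:

    /* Hypothetical caller: dereference the RCU-protected bfqg->bfqd and
     * run with the queue lock held only if the pointer was still valid. */
    static void example_touch_bfqd(struct bfq_group *bfqg)
    {
        unsigned long flags;
        struct bfq_data *bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);

        if (bfqd == NULL)
            return;   /* raced with a writer; queue lock not held */

        /* ... work on bfqd under bfqd->queue->queue_lock ... */

        bfq_put_bfqd_unlock(bfqd, &flags);
    }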
29442 -+static void bfq_changed_ioprio(struct bfq_io_cq *bic);
29443 -+static void bfq_put_queue(struct bfq_queue *bfqq);
29444 -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
29445 -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
29446 -+ struct bfq_group *bfqg, int is_sync,
29447 -+ struct bfq_io_cq *bic, gfp_t gfp_mask);
29448 -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
29449 -+ struct bfq_group *bfqg);
29450 -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
29451 -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
29452 -+
29453 -+#endif /* _BFQ_H */
29454 ---
29455 -2.1.0
29456 -
29457
29458 diff --git a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch
29459 deleted file mode 100644
29460 index 53267cd..0000000
29461 --- a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-4.0.0.patch
29462 +++ /dev/null
29463 @@ -1,1222 +0,0 @@
29464 -From d49cf2e7913ec1c4b86a9de657140d9ec5fa8c19 Mon Sep 17 00:00:00 2001
29465 -From: Mauro Andreolini <mauro.andreolini@×××××××.it>
29466 -Date: Thu, 18 Dec 2014 21:32:08 +0100
29467 -Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r7 for
29468 - 4.0.0
29469 -
29470 -A set of processes may happen to perform interleaved reads, i.e., requests
29471 -whose union would give rise to a sequential read pattern. There are two
29472 -typical cases: in the first case, processes read fixed-size chunks of
29473 -data at a fixed distance from each other, while in the second case processes
29474 -may read variable-size chunks at variable distances. The latter case occurs
29475 -for example with QEMU, which splits the I/O generated by the guest into
29476 -multiple chunks, and lets these chunks be served by a pool of cooperating
29477 -processes, iteratively assigning the next chunk of I/O to the first
29478 -available process. CFQ uses actual queue merging for the first type of
29479 -processes, whereas it uses preemption to get a sequential read pattern out
29480 -of the read requests performed by the second type of processes. In the end
29481 -it uses two different mechanisms to achieve the same goal: boosting the
29482 -throughput with interleaved I/O.
29483 -
29484 -This patch introduces Early Queue Merge (EQM), a unified mechanism to get a
29485 -sequential read pattern with both types of processes. The main idea is
29486 -checking newly arrived requests against the next request of the active queue
29487 -both in case of actual request insert and in case of request merge. By doing
29488 -so, both the types of processes can be handled by just merging their queues.
29489 -EQM is then simpler and more compact than the pair of mechanisms used in
29490 -CFQ.
29491 -
29492 -Finally, EQM also preserves the typical low-latency properties of BFQ, by
29493 -properly restoring the weight-raising state of a queue when it gets back to
29494 -a non-merged state.
29495 -
29496 -Signed-off-by: Mauro Andreolini <mauro.andreolini@×××××××.it>
29497 -Signed-off-by: Arianna Avanzini <avanzini.arianna@×××××.com>
29498 -Signed-off-by: Paolo Valente <paolo.valente@×××××××.it>
29499 ----
29500 - block/bfq-iosched.c | 751 +++++++++++++++++++++++++++++++++++++---------------
29501 - block/bfq-sched.c | 28 --
29502 - block/bfq.h | 54 +++-
29503 - 3 files changed, 581 insertions(+), 252 deletions(-)
29504 -
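As a rough illustration of the early-merge test described in the commit
message above, the core idea can be reduced to the following standalone C
sketch. All names, types, and the seek threshold here are illustrative
stand-ins; the actual patch operates on struct bfq_queue and uses
BFQQ_SEEK_THR:

    #include <stdbool.h>

    typedef unsigned long long sector_t;

    struct example_queue {
        sector_t next_pos;              /* position of the queue's next request */
        struct example_queue *merged_into;
    };

    #define SEEK_THRESHOLD 8ULL         /* assumed "close enough" distance */

    static sector_t distance(sector_t a, sector_t b)
    {
        return a > b ? a - b : b - a;
    }

    /* Merge the arriving request's queue into the active one when the new
     * request lands close to what the active queue would serve next. */
    static bool early_merge(struct example_queue *active,
                            struct example_queue *arriving,
                            sector_t new_req_pos)
    {
        if (active == arriving)
            return false;
        if (distance(new_req_pos, active->next_pos) > SEEK_THRESHOLD)
            return false;
        arriving->merged_into = active; /* redirect future requests */
        return true;
    }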
29505 -diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
29506 -index 97ee934..328f33c 100644
29507 ---- a/block/bfq-iosched.c
29508 -+++ b/block/bfq-iosched.c
29509 -@@ -571,6 +571,57 @@ static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
29510 - return dur;
29511 - }
29512 -
29513 -+static inline unsigned
29514 -+bfq_bfqq_cooperations(struct bfq_queue *bfqq)
29515 -+{
29516 -+ return bfqq->bic ? bfqq->bic->cooperations : 0;
29517 -+}
29518 -+
29519 -+static inline void
29520 -+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
29521 -+{
29522 -+ if (bic->saved_idle_window)
29523 -+ bfq_mark_bfqq_idle_window(bfqq);
29524 -+ else
29525 -+ bfq_clear_bfqq_idle_window(bfqq);
29526 -+ if (bic->saved_IO_bound)
29527 -+ bfq_mark_bfqq_IO_bound(bfqq);
29528 -+ else
29529 -+ bfq_clear_bfqq_IO_bound(bfqq);
29530 -+ /* Assuming that the flag in_large_burst is already correctly set */
29531 -+ if (bic->wr_time_left && bfqq->bfqd->low_latency &&
29532 -+ !bfq_bfqq_in_large_burst(bfqq) &&
29533 -+ bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
29534 -+ /*
29535 -+ * Start a weight raising period with the duration given by
29536 -+ * the raising_time_left snapshot.
29537 -+ */
29538 -+ if (bfq_bfqq_busy(bfqq))
29539 -+ bfqq->bfqd->wr_busy_queues++;
29540 -+ bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
29541 -+ bfqq->wr_cur_max_time = bic->wr_time_left;
29542 -+ bfqq->last_wr_start_finish = jiffies;
29543 -+ bfqq->entity.ioprio_changed = 1;
29544 -+ }
29545 -+ /*
29546 -+ * Clear wr_time_left to prevent bfq_bfqq_save_state() from
29547 -+ * getting confused about the queue's need of a weight-raising
29548 -+ * period.
29549 -+ */
29550 -+ bic->wr_time_left = 0;
29551 -+}
29552 -+
29553 -+/* Must be called with the queue_lock held. */
29554 -+static int bfqq_process_refs(struct bfq_queue *bfqq)
29555 -+{
29556 -+ int process_refs, io_refs;
29557 -+
29558 -+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
29559 -+ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
29560 -+ BUG_ON(process_refs < 0);
29561 -+ return process_refs;
29562 -+}
29563 -+
29564 - /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
29565 - static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
29566 - struct bfq_queue *bfqq)
29567 -@@ -815,7 +866,7 @@ static void bfq_add_request(struct request *rq)
29568 - bfq_rq_pos_tree_add(bfqd, bfqq);
29569 -
29570 - if (!bfq_bfqq_busy(bfqq)) {
29571 -- bool soft_rt,
29572 -+ bool soft_rt, coop_or_in_burst,
29573 - idle_for_long_time = time_is_before_jiffies(
29574 - bfqq->budget_timeout +
29575 - bfqd->bfq_wr_min_idle_time);
29576 -@@ -839,11 +890,12 @@ static void bfq_add_request(struct request *rq)
29577 - bfqd->last_ins_in_burst = jiffies;
29578 - }
29579 -
29580 -+ coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) ||
29581 -+ bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh;
29582 - soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
29583 -- !bfq_bfqq_in_large_burst(bfqq) &&
29584 -+ !coop_or_in_burst &&
29585 - time_is_before_jiffies(bfqq->soft_rt_next_start);
29586 -- interactive = !bfq_bfqq_in_large_burst(bfqq) &&
29587 -- idle_for_long_time;
29588 -+ interactive = !coop_or_in_burst && idle_for_long_time;
29589 - entity->budget = max_t(unsigned long, bfqq->max_budget,
29590 - bfq_serv_to_charge(next_rq, bfqq));
29591 -
29592 -@@ -862,11 +914,20 @@ static void bfq_add_request(struct request *rq)
29593 - if (!bfqd->low_latency)
29594 - goto add_bfqq_busy;
29595 -
29596 -+ if (bfq_bfqq_just_split(bfqq))
29597 -+ goto set_ioprio_changed;
29598 -+
29599 - /*
29600 -- * If the queue is not being boosted and has been idle
29601 -- * for enough time, start a weight-raising period
29602 -+ * If the queue:
29603 -+ * - is not being boosted,
29604 -+ * - has been idle for enough time,
29605 -+ * - is not a sync queue or is linked to a bfq_io_cq (it is
29606 -+ * shared "by its nature" or it is not shared and its
29607 -+ * requests have not been redirected to a shared queue)
29608 -+ * start a weight-raising period.
29609 - */
29610 -- if (old_wr_coeff == 1 && (interactive || soft_rt)) {
29611 -+ if (old_wr_coeff == 1 && (interactive || soft_rt) &&
29612 -+ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
29613 - bfqq->wr_coeff = bfqd->bfq_wr_coeff;
29614 - if (interactive)
29615 - bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
29616 -@@ -880,7 +941,7 @@ static void bfq_add_request(struct request *rq)
29617 - } else if (old_wr_coeff > 1) {
29618 - if (interactive)
29619 - bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
29620 -- else if (bfq_bfqq_in_large_burst(bfqq) ||
29621 -+ else if (coop_or_in_burst ||
29622 - (bfqq->wr_cur_max_time ==
29623 - bfqd->bfq_wr_rt_max_time &&
29624 - !soft_rt)) {
29625 -@@ -899,18 +960,18 @@ static void bfq_add_request(struct request *rq)
29626 - /*
29627 - *
29628 - * The remaining weight-raising time is lower
29629 -- * than bfqd->bfq_wr_rt_max_time, which
29630 -- * means that the application is enjoying
29631 -- * weight raising either because deemed soft-
29632 -- * rt in the near past, or because deemed
29633 -- * interactive a long ago. In both cases,
29634 -- * resetting now the current remaining weight-
29635 -- * raising time for the application to the
29636 -- * weight-raising duration for soft rt
29637 -- * applications would not cause any latency
29638 -- * increase for the application (as the new
29639 -- * duration would be higher than the remaining
29640 -- * time).
29641 -+ * than bfqd->bfq_wr_rt_max_time, which means
29642 -+ * that the application is enjoying weight
29643 -+ * raising either because deemed soft-rt in
29644 -+ * the near past, or because deemed interactive
29645 -+ * long ago.
29646 -+ * In both cases, resetting now the current
29647 -+ * remaining weight-raising time for the
29648 -+ * application to the weight-raising duration
29649 -+ * for soft rt applications would not cause any
29650 -+ * latency increase for the application (as the
29651 -+ * new duration would be higher than the
29652 -+ * remaining time).
29653 - *
29654 - * In addition, the application is now meeting
29655 - * the requirements for being deemed soft rt.
29656 -@@ -945,6 +1006,7 @@ static void bfq_add_request(struct request *rq)
29657 - bfqd->bfq_wr_rt_max_time;
29658 - }
29659 - }
29660 -+set_ioprio_changed:
29661 - if (old_wr_coeff != bfqq->wr_coeff)
29662 - entity->ioprio_changed = 1;
29663 - add_bfqq_busy:
29664 -@@ -1156,90 +1218,35 @@ static void bfq_end_wr(struct bfq_data *bfqd)
29665 - spin_unlock_irq(bfqd->queue->queue_lock);
29666 - }
29667 -
29668 --static int bfq_allow_merge(struct request_queue *q, struct request *rq,
29669 -- struct bio *bio)
29670 -+static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
29671 - {
29672 -- struct bfq_data *bfqd = q->elevator->elevator_data;
29673 -- struct bfq_io_cq *bic;
29674 -- struct bfq_queue *bfqq;
29675 --
29676 -- /*
29677 -- * Disallow merge of a sync bio into an async request.
29678 -- */
29679 -- if (bfq_bio_sync(bio) && !rq_is_sync(rq))
29680 -- return 0;
29681 --
29682 -- /*
29683 -- * Lookup the bfqq that this bio will be queued with. Allow
29684 -- * merge only if rq is queued there.
29685 -- * Queue lock is held here.
29686 -- */
29687 -- bic = bfq_bic_lookup(bfqd, current->io_context);
29688 -- if (bic == NULL)
29689 -- return 0;
29690 --
29691 -- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
29692 -- return bfqq == RQ_BFQQ(rq);
29693 --}
29694 --
29695 --static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
29696 -- struct bfq_queue *bfqq)
29697 --{
29698 -- if (bfqq != NULL) {
29699 -- bfq_mark_bfqq_must_alloc(bfqq);
29700 -- bfq_mark_bfqq_budget_new(bfqq);
29701 -- bfq_clear_bfqq_fifo_expire(bfqq);
29702 --
29703 -- bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
29704 --
29705 -- bfq_log_bfqq(bfqd, bfqq,
29706 -- "set_in_service_queue, cur-budget = %lu",
29707 -- bfqq->entity.budget);
29708 -- }
29709 --
29710 -- bfqd->in_service_queue = bfqq;
29711 --}
29712 --
29713 --/*
29714 -- * Get and set a new queue for service.
29715 -- */
29716 --static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
29717 -- struct bfq_queue *bfqq)
29718 --{
29719 -- if (!bfqq)
29720 -- bfqq = bfq_get_next_queue(bfqd);
29721 -+ if (request)
29722 -+ return blk_rq_pos(io_struct);
29723 - else
29724 -- bfq_get_next_queue_forced(bfqd, bfqq);
29725 --
29726 -- __bfq_set_in_service_queue(bfqd, bfqq);
29727 -- return bfqq;
29728 -+ return ((struct bio *)io_struct)->bi_iter.bi_sector;
29729 - }
29730 -
29731 --static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
29732 -- struct request *rq)
29733 -+static inline sector_t bfq_dist_from(sector_t pos1,
29734 -+ sector_t pos2)
29735 - {
29736 -- if (blk_rq_pos(rq) >= bfqd->last_position)
29737 -- return blk_rq_pos(rq) - bfqd->last_position;
29738 -+ if (pos1 >= pos2)
29739 -+ return pos1 - pos2;
29740 - else
29741 -- return bfqd->last_position - blk_rq_pos(rq);
29742 -+ return pos2 - pos1;
29743 - }
29744 -
29745 --/*
29746 -- * Return true if bfqq has no request pending and rq is close enough to
29747 -- * bfqd->last_position, or if rq is closer to bfqd->last_position than
29748 -- * bfqq->next_rq
29749 -- */
29750 --static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
29751 -+static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
29752 -+ sector_t sector)
29753 - {
29754 -- return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
29755 -+ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
29756 -+ BFQQ_SEEK_THR;
29757 - }
29758 -
29759 --static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
29760 -+static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
29761 - {
29762 - struct rb_root *root = &bfqd->rq_pos_tree;
29763 - struct rb_node *parent, *node;
29764 - struct bfq_queue *__bfqq;
29765 -- sector_t sector = bfqd->last_position;
29766 -
29767 - if (RB_EMPTY_ROOT(root))
29768 - return NULL;
29769 -@@ -1258,7 +1265,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
29770 - * next_request position).
29771 - */
29772 - __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
29773 -- if (bfq_rq_close(bfqd, __bfqq->next_rq))
29774 -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
29775 - return __bfqq;
29776 -
29777 - if (blk_rq_pos(__bfqq->next_rq) < sector)
29778 -@@ -1269,7 +1276,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
29779 - return NULL;
29780 -
29781 - __bfqq = rb_entry(node, struct bfq_queue, pos_node);
29782 -- if (bfq_rq_close(bfqd, __bfqq->next_rq))
29783 -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
29784 - return __bfqq;
29785 -
29786 - return NULL;
29787 -@@ -1278,14 +1285,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
29788 - /*
29789 - * bfqd - obvious
29790 - * cur_bfqq - passed in so that we don't decide that the current queue
29791 -- * is closely cooperating with itself.
29792 -- *
29793 -- * We are assuming that cur_bfqq has dispatched at least one request,
29794 -- * and that bfqd->last_position reflects a position on the disk associated
29795 -- * with the I/O issued by cur_bfqq.
29796 -+ * is closely cooperating with itself
29797 -+ * sector - used as a reference point to search for a close queue
29798 - */
29799 - static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
29800 -- struct bfq_queue *cur_bfqq)
29801 -+ struct bfq_queue *cur_bfqq,
29802 -+ sector_t sector)
29803 - {
29804 - struct bfq_queue *bfqq;
29805 -
29806 -@@ -1305,7 +1310,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
29807 - * working closely on the same area of the disk. In that case,
29808 - * we can group them together and don't waste time idling.
29809 - */
29810 -- bfqq = bfqq_close(bfqd);
29811 -+ bfqq = bfqq_close(bfqd, sector);
29812 - if (bfqq == NULL || bfqq == cur_bfqq)
29813 - return NULL;
29814 -
29815 -@@ -1332,6 +1337,315 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
29816 - return bfqq;
29817 - }
29818 -
29819 -+static struct bfq_queue *
29820 -+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
29821 -+{
29822 -+ int process_refs, new_process_refs;
29823 -+ struct bfq_queue *__bfqq;
29824 -+
29825 -+ /*
29826 -+ * If there are no process references on the new_bfqq, then it is
29827 -+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
29828 -+ * may have dropped their last reference (not just their last process
29829 -+ * reference).
29830 -+ */
29831 -+ if (!bfqq_process_refs(new_bfqq))
29832 -+ return NULL;
29833 -+
29834 -+ /* Avoid a circular list and skip interim queue merges. */
29835 -+ while ((__bfqq = new_bfqq->new_bfqq)) {
29836 -+ if (__bfqq == bfqq)
29837 -+ return NULL;
29838 -+ new_bfqq = __bfqq;
29839 -+ }
29840 -+
29841 -+ process_refs = bfqq_process_refs(bfqq);
29842 -+ new_process_refs = bfqq_process_refs(new_bfqq);
29843 -+ /*
29844 -+ * If the process for the bfqq has gone away, there is no
29845 -+ * sense in merging the queues.
29846 -+ */
29847 -+ if (process_refs == 0 || new_process_refs == 0)
29848 -+ return NULL;
29849 -+
29850 -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
29851 -+ new_bfqq->pid);
29852 -+
29853 -+ /*
29854 -+ * Merging is just a redirection: the requests of the process
29855 -+ * owning one of the two queues are redirected to the other queue.
29856 -+ * The latter queue, in its turn, is set as shared if this is the
29857 -+ * first time that the requests of some process are redirected to
29858 -+ * it.
29859 -+ *
29860 -+ * We redirect bfqq to new_bfqq and not the opposite, because we
29861 -+ * are in the context of the process owning bfqq, hence we have
29862 -+ * the io_cq of this process. So we can immediately configure this
29863 -+ * io_cq to redirect the requests of the process to new_bfqq.
29864 -+ *
29865 -+ * NOTE, even if new_bfqq coincides with the in-service queue, the
29866 -+ * io_cq of new_bfqq is not available, because, if the in-service
29867 -+ * queue is shared, bfqd->in_service_bic may not point to the
29868 -+ * io_cq of the in-service queue.
29869 -+ * Redirecting the requests of the process owning bfqq to the
29870 -+ * currently in-service queue is in any case the best option, as
29871 -+ * we feed the in-service queue with new requests close to the
29872 -+ * last request served and, by doing so, hopefully increase the
29873 -+ * throughput.
29874 -+ */
29875 -+ bfqq->new_bfqq = new_bfqq;
29876 -+ atomic_add(process_refs, &new_bfqq->ref);
29877 -+ return new_bfqq;
29878 -+}
29879 -+
29880 -+/*
29881 -+ * Attempt to schedule a merge of bfqq with the currently in-service queue
29882 -+ * or with a close queue among the scheduled queues.
29883 -+ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
29884 -+ * structure otherwise.
29885 -+ *
29886 -+ * The OOM queue is not allowed to participate in cooperation: in fact, since
29887 -+ * the requests temporarily redirected to the OOM queue could be redirected
29888 -+ * again to dedicated queues at any time, the state needed to correctly
29889 -+ * handle merging with the OOM queue would be quite complex and expensive
29890 -+ * to maintain. Besides, in such a critical condition as an out of memory,
29891 -+ * the benefits of queue merging may be little relevant, or even negligible.
29892 -+ */
29893 -+static struct bfq_queue *
29894 -+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
29895 -+ void *io_struct, bool request)
29896 -+{
29897 -+ struct bfq_queue *in_service_bfqq, *new_bfqq;
29898 -+
29899 -+ if (bfqq->new_bfqq)
29900 -+ return bfqq->new_bfqq;
29901 -+
29902 -+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
29903 -+ return NULL;
29904 -+
29905 -+ in_service_bfqq = bfqd->in_service_queue;
29906 -+
29907 -+ if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
29908 -+ !bfqd->in_service_bic ||
29909 -+ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
29910 -+ goto check_scheduled;
29911 -+
29912 -+ if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
29913 -+ goto check_scheduled;
29914 -+
29915 -+ if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
29916 -+ goto check_scheduled;
29917 -+
29918 -+ if (in_service_bfqq->entity.parent != bfqq->entity.parent)
29919 -+ goto check_scheduled;
29920 -+
29921 -+ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
29922 -+ bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
29923 -+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
29924 -+ if (new_bfqq != NULL)
29925 -+ return new_bfqq; /* Merge with in-service queue */
29926 -+ }
29927 -+
29928 -+ /*
29929 -+ * Check whether there is a cooperator among currently scheduled
29930 -+ * queues. The only thing we need is that the bio/request is not
29931 -+ * NULL, as we need it to establish whether a cooperator exists.
29932 -+ */
29933 -+check_scheduled:
29934 -+ new_bfqq = bfq_close_cooperator(bfqd, bfqq,
29935 -+ bfq_io_struct_pos(io_struct, request));
29936 -+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq))
29937 -+ return bfq_setup_merge(bfqq, new_bfqq);
29938 -+
29939 -+ return NULL;
29940 -+}
29941 -+
29942 -+static inline void
29943 -+bfq_bfqq_save_state(struct bfq_queue *bfqq)
29944 -+{
29945 -+ /*
29946 -+ * If bfqq->bic == NULL, the queue is already shared or its requests
29947 -+ * have already been redirected to a shared queue; both idle window
29948 -+ * and weight raising state have already been saved. Do nothing.
29949 -+ */
29950 -+ if (bfqq->bic == NULL)
29951 -+ return;
29952 -+ if (bfqq->bic->wr_time_left)
29953 -+ /*
29954 -+ * This is the queue of a just-started process, and would
29955 -+ * deserve weight raising: we set wr_time_left to the full
29956 -+ * weight-raising duration to trigger weight-raising when
29957 -+ * and if the queue is split and the first request of the
29958 -+ * queue is enqueued.
29959 -+ */
29960 -+ bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
29961 -+ else if (bfqq->wr_coeff > 1) {
29962 -+ unsigned long wr_duration =
29963 -+ jiffies - bfqq->last_wr_start_finish;
29964 -+ /*
29965 -+ * It may happen that a queue's weight raising period lasts
29966 -+ * longer than its wr_cur_max_time, as weight raising is
29967 -+ * handled only when a request is enqueued or dispatched (it
29968 -+ * does not use any timer). If the weight raising period is
29969 -+ * about to end, don't save it.
29970 -+ */
29971 -+ if (bfqq->wr_cur_max_time <= wr_duration)
29972 -+ bfqq->bic->wr_time_left = 0;
29973 -+ else
29974 -+ bfqq->bic->wr_time_left =
29975 -+ bfqq->wr_cur_max_time - wr_duration;
29976 -+ /*
29977 -+ * The bfq_queue is becoming shared or the requests of the
29978 -+ * process owning the queue are being redirected to a shared
29979 -+ * queue. Stop the weight raising period of the queue, as in
29980 -+ * both cases it should not be owned by an interactive or
29981 -+ * soft real-time application.
29982 -+ */
29983 -+ bfq_bfqq_end_wr(bfqq);
29984 -+ } else
29985 -+ bfqq->bic->wr_time_left = 0;
29986 -+ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
29987 -+ bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
29988 -+ bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
29989 -+ bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
29990 -+ bfqq->bic->cooperations++;
29991 -+ bfqq->bic->failed_cooperations = 0;
29992 -+}
29993 -+
29994 -+static inline void
29995 -+bfq_get_bic_reference(struct bfq_queue *bfqq)
29996 -+{
29997 -+ /*
29998 -+ * If bfqq->bic has a non-NULL value, the bic to which it belongs
29999 -+ * is about to begin using a shared bfq_queue.
30000 -+ */
30001 -+ if (bfqq->bic)
30002 -+ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
30003 -+}
30004 -+
30005 -+static void
30006 -+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
30007 -+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
30008 -+{
30009 -+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
30010 -+ (long unsigned)new_bfqq->pid);
30011 -+ /* Save weight raising and idle window of the merged queues */
30012 -+ bfq_bfqq_save_state(bfqq);
30013 -+ bfq_bfqq_save_state(new_bfqq);
30014 -+ if (bfq_bfqq_IO_bound(bfqq))
30015 -+ bfq_mark_bfqq_IO_bound(new_bfqq);
30016 -+ bfq_clear_bfqq_IO_bound(bfqq);
30017 -+ /*
30018 -+ * Grab a reference to the bic, to prevent it from being destroyed
30019 -+ * before being possibly touched by a bfq_split_bfqq().
30020 -+ */
30021 -+ bfq_get_bic_reference(bfqq);
30022 -+ bfq_get_bic_reference(new_bfqq);
30023 -+ /*
30024 -+ * Merge queues (that is, let bic redirect its requests to new_bfqq)
30025 -+ */
30026 -+ bic_set_bfqq(bic, new_bfqq, 1);
30027 -+ bfq_mark_bfqq_coop(new_bfqq);
30028 -+ /*
30029 -+ * new_bfqq now belongs to at least two bics (it is a shared queue):
30030 -+ * set new_bfqq->bic to NULL. bfqq either:
30031 -+ * - does not belong to any bic any more, and hence bfqq->bic must
30032 -+ * be set to NULL, or
30033 -+ * - is a queue whose owning bics have already been redirected to a
30034 -+ * different queue, hence the queue is destined to not belong to
30035 -+ * any bic soon and bfqq->bic is already NULL (therefore the next
30036 -+ * assignment causes no harm).
30037 -+ */
30038 -+ new_bfqq->bic = NULL;
30039 -+ bfqq->bic = NULL;
30040 -+ bfq_put_queue(bfqq);
30041 -+}
30042 -+
30043 -+static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
30044 -+{
30045 -+ struct bfq_io_cq *bic = bfqq->bic;
30046 -+ struct bfq_data *bfqd = bfqq->bfqd;
30047 -+
30048 -+ if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
30049 -+ bic->failed_cooperations++;
30050 -+ if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
30051 -+ bic->cooperations = 0;
30052 -+ }
30053 -+}
30054 -+
30055 -+static int bfq_allow_merge(struct request_queue *q, struct request *rq,
30056 -+ struct bio *bio)
30057 -+{
30058 -+ struct bfq_data *bfqd = q->elevator->elevator_data;
30059 -+ struct bfq_io_cq *bic;
30060 -+ struct bfq_queue *bfqq, *new_bfqq;
30061 -+
30062 -+ /*
30063 -+ * Disallow merge of a sync bio into an async request.
30064 -+ */
30065 -+ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
30066 -+ return 0;
30067 -+
30068 -+ /*
30069 -+ * Lookup the bfqq that this bio will be queued with. Allow
30070 -+ * merge only if rq is queued there.
30071 -+ * Queue lock is held here.
30072 -+ */
30073 -+ bic = bfq_bic_lookup(bfqd, current->io_context);
30074 -+ if (bic == NULL)
30075 -+ return 0;
30076 -+
30077 -+ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
30078 -+ /*
30079 -+ * We take advantage of this function to perform an early merge
30080 -+ * of the queues of possible cooperating processes.
30081 -+ */
30082 -+ if (bfqq != NULL) {
30083 -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
30084 -+ if (new_bfqq != NULL) {
30085 -+ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
30086 -+ /*
30087 -+ * If we get here, the bio will be queued in the
30088 -+ * shared queue, i.e., new_bfqq, so use new_bfqq
30089 -+ * to decide whether bio and rq can be merged.
30090 -+ */
30091 -+ bfqq = new_bfqq;
30092 -+ } else
30093 -+ bfq_bfqq_increase_failed_cooperations(bfqq);
30094 -+ }
30095 -+
30096 -+ return bfqq == RQ_BFQQ(rq);
30097 -+}
30098 -+
30099 -+static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
30100 -+ struct bfq_queue *bfqq)
30101 -+{
30102 -+ if (bfqq != NULL) {
30103 -+ bfq_mark_bfqq_must_alloc(bfqq);
30104 -+ bfq_mark_bfqq_budget_new(bfqq);
30105 -+ bfq_clear_bfqq_fifo_expire(bfqq);
30106 -+
30107 -+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
30108 -+
30109 -+ bfq_log_bfqq(bfqd, bfqq,
30110 -+ "set_in_service_queue, cur-budget = %lu",
30111 -+ bfqq->entity.budget);
30112 -+ }
30113 -+
30114 -+ bfqd->in_service_queue = bfqq;
30115 -+}
30116 -+
30117 -+/*
30118 -+ * Get and set a new queue for service.
30119 -+ */
30120 -+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
30121 -+{
30122 -+ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
30123 -+
30124 -+ __bfq_set_in_service_queue(bfqd, bfqq);
30125 -+ return bfqq;
30126 -+}
30127 -+
30128 - /*
30129 - * If enough samples have been computed, return the current max budget
30130 - * stored in bfqd, which is dynamically updated according to the
30131 -@@ -1475,61 +1789,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
30132 - return rq;
30133 - }
30134 -
30135 --/* Must be called with the queue_lock held. */
30136 --static int bfqq_process_refs(struct bfq_queue *bfqq)
30137 --{
30138 -- int process_refs, io_refs;
30139 --
30140 -- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
30141 -- process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
30142 -- BUG_ON(process_refs < 0);
30143 -- return process_refs;
30144 --}
30145 --
30146 --static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
30147 --{
30148 -- int process_refs, new_process_refs;
30149 -- struct bfq_queue *__bfqq;
30150 --
30151 -- /*
30152 -- * If there are no process references on the new_bfqq, then it is
30153 -- * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
30154 -- * may have dropped their last reference (not just their last process
30155 -- * reference).
30156 -- */
30157 -- if (!bfqq_process_refs(new_bfqq))
30158 -- return;
30159 --
30160 -- /* Avoid a circular list and skip interim queue merges. */
30161 -- while ((__bfqq = new_bfqq->new_bfqq)) {
30162 -- if (__bfqq == bfqq)
30163 -- return;
30164 -- new_bfqq = __bfqq;
30165 -- }
30166 --
30167 -- process_refs = bfqq_process_refs(bfqq);
30168 -- new_process_refs = bfqq_process_refs(new_bfqq);
30169 -- /*
30170 -- * If the process for the bfqq has gone away, there is no
30171 -- * sense in merging the queues.
30172 -- */
30173 -- if (process_refs == 0 || new_process_refs == 0)
30174 -- return;
30175 --
30176 -- /*
30177 -- * Merge in the direction of the lesser amount of work.
30178 -- */
30179 -- if (new_process_refs >= process_refs) {
30180 -- bfqq->new_bfqq = new_bfqq;
30181 -- atomic_add(process_refs, &new_bfqq->ref);
30182 -- } else {
30183 -- new_bfqq->new_bfqq = bfqq;
30184 -- atomic_add(new_process_refs, &bfqq->ref);
30185 -- }
30186 -- bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
30187 -- new_bfqq->pid);
30188 --}
30189 --
30190 - static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
30191 - {
30192 - struct bfq_entity *entity = &bfqq->entity;
30193 -@@ -2263,7 +2522,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
30194 - */
30195 - static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
30196 - {
30197 -- struct bfq_queue *bfqq, *new_bfqq = NULL;
30198 -+ struct bfq_queue *bfqq;
30199 - struct request *next_rq;
30200 - enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
30201 -
30202 -@@ -2273,17 +2532,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
30203 -
30204 - bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
30205 -
30206 -- /*
30207 -- * If another queue has a request waiting within our mean seek
30208 -- * distance, let it run. The expire code will check for close
30209 -- * cooperators and put the close queue at the front of the
30210 -- * service tree. If possible, merge the expiring queue with the
30211 -- * new bfqq.
30212 -- */
30213 -- new_bfqq = bfq_close_cooperator(bfqd, bfqq);
30214 -- if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
30215 -- bfq_setup_merge(bfqq, new_bfqq);
30216 --
30217 - if (bfq_may_expire_for_budg_timeout(bfqq) &&
30218 - !timer_pending(&bfqd->idle_slice_timer) &&
30219 - !bfq_bfqq_must_idle(bfqq))
30220 -@@ -2322,10 +2570,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
30221 - bfq_clear_bfqq_wait_request(bfqq);
30222 - del_timer(&bfqd->idle_slice_timer);
30223 - }
30224 -- if (new_bfqq == NULL)
30225 -- goto keep_queue;
30226 -- else
30227 -- goto expire;
30228 -+ goto keep_queue;
30229 - }
30230 - }
30231 -
30232 -@@ -2334,40 +2579,30 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
30233 - * in flight (possibly waiting for a completion) or is idling for a
30234 - * new request, then keep it.
30235 - */
30236 -- if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
30237 -- (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
30238 -+ if (timer_pending(&bfqd->idle_slice_timer) ||
30239 -+ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
30240 - bfqq = NULL;
30241 - goto keep_queue;
30242 -- } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
30243 -- /*
30244 -- * Expiring the queue because there is a close cooperator,
30245 -- * cancel timer.
30246 -- */
30247 -- bfq_clear_bfqq_wait_request(bfqq);
30248 -- del_timer(&bfqd->idle_slice_timer);
30249 - }
30250 -
30251 - reason = BFQ_BFQQ_NO_MORE_REQUESTS;
30252 - expire:
30253 - bfq_bfqq_expire(bfqd, bfqq, 0, reason);
30254 - new_queue:
30255 -- bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
30256 -+ bfqq = bfq_set_in_service_queue(bfqd);
30257 - bfq_log(bfqd, "select_queue: new queue %d returned",
30258 - bfqq != NULL ? bfqq->pid : 0);
30259 - keep_queue:
30260 - return bfqq;
30261 - }
30262 -
30263 --static void bfq_update_wr_data(struct bfq_data *bfqd,
30264 -- struct bfq_queue *bfqq)
30265 -+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
30266 - {
30267 -- if (bfqq->wr_coeff > 1) { /* queue is being boosted */
30268 -- struct bfq_entity *entity = &bfqq->entity;
30269 --
30270 -+ struct bfq_entity *entity = &bfqq->entity;
30271 -+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
30272 - bfq_log_bfqq(bfqd, bfqq,
30273 - "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
30274 -- jiffies_to_msecs(jiffies -
30275 -- bfqq->last_wr_start_finish),
30276 -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
30277 - jiffies_to_msecs(bfqq->wr_cur_max_time),
30278 - bfqq->wr_coeff,
30279 - bfqq->entity.weight, bfqq->entity.orig_weight);
30280 -@@ -2376,12 +2611,16 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
30281 - entity->orig_weight * bfqq->wr_coeff);
30282 - if (entity->ioprio_changed)
30283 - bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
30284 -+
30285 - /*
30286 - * If the queue was activated in a burst, or
30287 - * too much time has elapsed from the beginning
30288 -- * of this weight-raising, then end weight raising.
30289 -+ * of this weight-raising period, or the queue has
30290 -+ * exceeded the acceptable number of cooperations,
30291 -+ * then end weight raising.
30292 - */
30293 - if (bfq_bfqq_in_large_burst(bfqq) ||
30294 -+ bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
30295 - time_is_before_jiffies(bfqq->last_wr_start_finish +
30296 - bfqq->wr_cur_max_time)) {
30297 - bfqq->last_wr_start_finish = jiffies;
30298 -@@ -2390,11 +2629,13 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
30299 - bfqq->last_wr_start_finish,
30300 - jiffies_to_msecs(bfqq->wr_cur_max_time));
30301 - bfq_bfqq_end_wr(bfqq);
30302 -- __bfq_entity_update_weight_prio(
30303 -- bfq_entity_service_tree(entity),
30304 -- entity);
30305 - }
30306 - }
30307 -+ /* Update weight both if it must be raised and if it must be lowered */
30308 -+ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
30309 -+ __bfq_entity_update_weight_prio(
30310 -+ bfq_entity_service_tree(entity),
30311 -+ entity);
30312 - }
30313 -
30314 - /*
30315 -@@ -2642,6 +2883,25 @@ static inline void bfq_init_icq(struct io_cq *icq)
30316 - struct bfq_io_cq *bic = icq_to_bic(icq);
30317 -
30318 - bic->ttime.last_end_request = jiffies;
30319 -+ /*
30320 -+ * A newly created bic indicates that the process has just
30321 -+ * started doing I/O, and is probably mapping into memory its
30322 -+ * executable and libraries: it definitely needs weight raising.
30323 -+ * There is however the possibility that the process performs,
30324 -+ * for a while, I/O close to some other process. EQM intercepts
30325 -+ * this behavior and may merge the queue corresponding to the
30326 -+ * process with some other queue, BEFORE the weight of the queue
30327 -+ * is raised. Merged queues are not weight-raised (they are assumed
30328 -+ * to belong to processes that benefit only from high throughput).
30329 -+ * If the merge is basically the consequence of an accident, then
30330 -+ * the queue will be split soon and will get back its old weight.
30331 -+ * It is then important to write down somewhere that this queue
30332 -+ * does need weight raising, even if it did not make it to get its
30333 -+ * weight raised before being merged. For this purpose, we overload
30334 -+ * the field wr_time_left and assign 1 to it, to mark the queue
30335 -+ * as needing weight raising.
30336 -+ */
30337 -+ bic->wr_time_left = 1;
30338 - }
30339 -
30340 - static void bfq_exit_icq(struct io_cq *icq)
30341 -@@ -2655,6 +2915,13 @@ static void bfq_exit_icq(struct io_cq *icq)
30342 - }
30343 -
30344 - if (bic->bfqq[BLK_RW_SYNC]) {
30345 -+ /*
30346 -+ * If the bic is using a shared queue, put the reference
30347 -+ * taken on the io_context when the bic started using a
30348 -+ * shared bfq_queue.
30349 -+ */
30350 -+ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
30351 -+ put_io_context(icq->ioc);
30352 - bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
30353 - bic->bfqq[BLK_RW_SYNC] = NULL;
30354 - }
30355 -@@ -2950,6 +3217,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
30356 - if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
30357 - return;
30358 -
30359 -+ /* Idle window just restored, statistics are meaningless. */
30360 -+ if (bfq_bfqq_just_split(bfqq))
30361 -+ return;
30362 -+
30363 - enable_idle = bfq_bfqq_idle_window(bfqq);
30364 -
30365 - if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
30366 -@@ -2997,6 +3268,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
30367 - if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
30368 - !BFQQ_SEEKY(bfqq))
30369 - bfq_update_idle_window(bfqd, bfqq, bic);
30370 -+ bfq_clear_bfqq_just_split(bfqq);
30371 -
30372 - bfq_log_bfqq(bfqd, bfqq,
30373 - "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
30374 -@@ -3057,13 +3329,49 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
30375 - static void bfq_insert_request(struct request_queue *q, struct request *rq)
30376 - {
30377 - struct bfq_data *bfqd = q->elevator->elevator_data;
30378 -- struct bfq_queue *bfqq = RQ_BFQQ(rq);
30379 -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
30380 -
30381 - assert_spin_locked(bfqd->queue->queue_lock);
30382 -+
30383 -+ /*
30384 -+ * An unplug may trigger a requeue of a request from the device
30385 -+ * driver: make sure we are in process context while trying to
30386 -+ * merge two bfq_queues.
30387 -+ */
30388 -+ if (!in_interrupt()) {
30389 -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
30390 -+ if (new_bfqq != NULL) {
30391 -+ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
30392 -+ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
30393 -+ /*
30394 -+ * Release the request's reference to the old bfqq
30395 -+ * and make sure one is taken to the shared queue.
30396 -+ */
30397 -+ new_bfqq->allocated[rq_data_dir(rq)]++;
30398 -+ bfqq->allocated[rq_data_dir(rq)]--;
30399 -+ atomic_inc(&new_bfqq->ref);
30400 -+ bfq_put_queue(bfqq);
30401 -+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
30402 -+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
30403 -+ bfqq, new_bfqq);
30404 -+ rq->elv.priv[1] = new_bfqq;
30405 -+ bfqq = new_bfqq;
30406 -+ } else
30407 -+ bfq_bfqq_increase_failed_cooperations(bfqq);
30408 -+ }
30409 -+
30410 - bfq_init_prio_data(bfqq, RQ_BIC(rq));
30411 -
30412 - bfq_add_request(rq);
30413 -
30414 -+ /*
30415 -+ * Here a newly-created bfq_queue has already started a weight-raising
30416 -+ * period: clear wr_time_left to prevent bfq_bfqq_save_state()
30417 -+ * from assigning it a full weight-raising period. See the detailed
30418 -+ * comments about this field in bfq_init_icq().
30419 -+ */
30420 -+ if (bfqq->bic != NULL)
30421 -+ bfqq->bic->wr_time_left = 0;
30422 - rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
30423 - list_add_tail(&rq->queuelist, &bfqq->fifo);
30424 -
30425 -@@ -3228,18 +3536,6 @@ static void bfq_put_request(struct request *rq)
30426 - }
30427 - }
30428 -
30429 --static struct bfq_queue *
30430 --bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
30431 -- struct bfq_queue *bfqq)
30432 --{
30433 -- bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
30434 -- (long unsigned)bfqq->new_bfqq->pid);
30435 -- bic_set_bfqq(bic, bfqq->new_bfqq, 1);
30436 -- bfq_mark_bfqq_coop(bfqq->new_bfqq);
30437 -- bfq_put_queue(bfqq);
30438 -- return bic_to_bfqq(bic, 1);
30439 --}
30440 --
30441 - /*
30442 - * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
30443 - * was the last process referring to said bfqq.
30444 -@@ -3248,6 +3544,9 @@ static struct bfq_queue *
30445 - bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
30446 - {
30447 - bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
30448 -+
30449 -+ put_io_context(bic->icq.ioc);
30450 -+
30451 - if (bfqq_process_refs(bfqq) == 1) {
30452 - bfqq->pid = current->pid;
30453 - bfq_clear_bfqq_coop(bfqq);
30454 -@@ -3276,6 +3575,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
30455 - struct bfq_queue *bfqq;
30456 - struct bfq_group *bfqg;
30457 - unsigned long flags;
30458 -+ bool split = false;
30459 -
30460 - might_sleep_if(gfp_mask & __GFP_WAIT);
30461 -
30462 -@@ -3293,25 +3593,26 @@ new_queue:
30463 - if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
30464 - bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
30465 - bic_set_bfqq(bic, bfqq, is_sync);
30466 -+ if (split && is_sync) {
30467 -+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
30468 -+ bic->saved_in_large_burst)
30469 -+ bfq_mark_bfqq_in_large_burst(bfqq);
30470 -+ else {
30471 -+ bfq_clear_bfqq_in_large_burst(bfqq);
30472 -+ if (bic->was_in_burst_list)
30473 -+ hlist_add_head(&bfqq->burst_list_node,
30474 -+ &bfqd->burst_list);
30475 -+ }
30476 -+ }
30477 - } else {
30478 -- /*
30479 -- * If the queue was seeky for too long, break it apart.
30480 -- */
30481 -+ /* If the queue was seeky for too long, break it apart. */
30482 - if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
30483 - bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
30484 - bfqq = bfq_split_bfqq(bic, bfqq);
30485 -+ split = true;
30486 - if (!bfqq)
30487 - goto new_queue;
30488 - }
30489 --
30490 -- /*
30491 -- * Check to see if this queue is scheduled to merge with
30492 -- * another closely cooperating queue. The merging of queues
30493 -- * happens here as it must be done in process context.
30494 -- * The reference on new_bfqq was taken in merge_bfqqs.
30495 -- */
30496 -- if (bfqq->new_bfqq != NULL)
30497 -- bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
30498 - }
30499 -
30500 - bfqq->allocated[rw]++;
30501 -@@ -3322,6 +3623,26 @@ new_queue:
30502 - rq->elv.priv[0] = bic;
30503 - rq->elv.priv[1] = bfqq;
30504 -
30505 -+ /*
30506 -+ * If a bfq_queue has only one process reference, it is owned
30507 -+ * by only one bfq_io_cq: we can set the bic field of the
30508 -+ * bfq_queue to the address of that structure. Also, if the
30509 -+ * queue has just been split, mark a flag so that the
30510 -+ * information is available to the other scheduler hooks.
30511 -+ */
30512 -+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
30513 -+ bfqq->bic = bic;
30514 -+ if (split) {
30515 -+ bfq_mark_bfqq_just_split(bfqq);
30516 -+ /*
30517 -+ * If the queue has just been split from a shared
30518 -+ * queue, restore the idle window and the possible
30519 -+ * weight raising period.
30520 -+ */
30521 -+ bfq_bfqq_resume_state(bfqq, bic);
30522 -+ }
30523 -+ }
30524 -+
30525 - spin_unlock_irqrestore(q->queue_lock, flags);
30526 -
30527 - return 0;
30528 -diff --git a/block/bfq-sched.c b/block/bfq-sched.c
30529 -index 2931563..6764a7e 100644
30530 ---- a/block/bfq-sched.c
30531 -+++ b/block/bfq-sched.c
30532 -@@ -1091,34 +1091,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
30533 - return bfqq;
30534 - }
30535 -
30536 --/*
30537 -- * Forced extraction of the given queue.
30538 -- */
30539 --static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
30540 -- struct bfq_queue *bfqq)
30541 --{
30542 -- struct bfq_entity *entity;
30543 -- struct bfq_sched_data *sd;
30544 --
30545 -- BUG_ON(bfqd->in_service_queue != NULL);
30546 --
30547 -- entity = &bfqq->entity;
30548 -- /*
30549 -- * Bubble up extraction/update from the leaf to the root.
30550 -- */
30551 -- for_each_entity(entity) {
30552 -- sd = entity->sched_data;
30553 -- bfq_update_budget(entity);
30554 -- bfq_update_vtime(bfq_entity_service_tree(entity));
30555 -- bfq_active_extract(bfq_entity_service_tree(entity), entity);
30556 -- sd->in_service_entity = entity;
30557 -- sd->next_in_service = NULL;
30558 -- entity->service = 0;
30559 -- }
30560 --
30561 -- return;
30562 --}
30563 --
30564 - static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
30565 - {
30566 - if (bfqd->in_service_bic != NULL) {
30567 -diff --git a/block/bfq.h b/block/bfq.h
30568 -index 518f2ac..4f519ea 100644
30569 ---- a/block/bfq.h
30570 -+++ b/block/bfq.h
30571 -@@ -218,18 +218,21 @@ struct bfq_group;
30572 - * idle @bfq_queue with no outstanding requests, then
30573 - * the task associated with the queue it is deemed as
30574 - * soft real-time (see the comments to the function
30575 -- * bfq_bfqq_softrt_next_start()).
30576 -+ * bfq_bfqq_softrt_next_start())
30577 - * @last_idle_bklogged: time of the last transition of the @bfq_queue from
30578 - * idle to backlogged
30579 - * @service_from_backlogged: cumulative service received from the @bfq_queue
30580 - * since the last transition from idle to
30581 - * backlogged
30582 -+ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
30583 -+ * queue is shared
30584 - *
30585 -- * A bfq_queue is a leaf request queue; it can be associated with an io_context
30586 -- * or more, if it is async or shared between cooperating processes. @cgroup
30587 -- * holds a reference to the cgroup, to be sure that it does not disappear while
30588 -- * a bfqq still references it (mostly to avoid races between request issuing and
30589 -- * task migration followed by cgroup destruction).
30590 -+ * A bfq_queue is a leaf request queue; it can be associated with an
30591 -+ * io_context or more, if it is async or shared between cooperating
30592 -+ * processes. @cgroup holds a reference to the cgroup, to be sure that it
30593 -+ * does not disappear while a bfqq still references it (mostly to avoid
30594 -+ * races between request issuing and task migration followed by cgroup
30595 -+ * destruction).
30596 - * All the fields are protected by the queue lock of the containing bfqd.
30597 - */
30598 - struct bfq_queue {
30599 -@@ -269,6 +272,7 @@ struct bfq_queue {
30600 - unsigned int requests_within_timer;
30601 -
30602 - pid_t pid;
30603 -+ struct bfq_io_cq *bic;
30604 -
30605 - /* weight-raising fields */
30606 - unsigned long wr_cur_max_time;
30607 -@@ -298,12 +302,42 @@ struct bfq_ttime {
30608 - * @icq: associated io_cq structure
30609 - * @bfqq: array of two process queues, the sync and the async
30610 - * @ttime: associated @bfq_ttime struct
30611 -+ * @wr_time_left: snapshot of the time left before weight raising ends
30612 -+ * for the sync queue associated to this process; this
30613 -+ * snapshot is taken to remember this value while the weight
30614 -+ * raising is suspended because the queue is merged with a
30615 -+ * shared queue, and is used to set @wr_cur_max_time
30616 -+ * when the queue is split from the shared queue and its
30617 -+ * weight is raised again
30618 -+ * @saved_idle_window: same purpose as the previous field for the idle
30619 -+ * window
30620 -+ * @saved_IO_bound: same purpose as the previous two fields for the I/O
30621 -+ * bound classification of a queue
30622 -+ * @saved_in_large_burst: same purpose as the previous fields for the
30623 -+ * value of the field keeping the queue's belonging
30624 -+ * to a large burst
30625 -+ * @was_in_burst_list: true if the queue belonged to a burst list
30626 -+ * before its merge with another cooperating queue
30627 -+ * @cooperations: counter of consecutive successful queue merges undergone
30628 -+ * by any of the process' @bfq_queues
30629 -+ * @failed_cooperations: counter of consecutive failed queue merges of any
30630 -+ * of the process' @bfq_queues
30631 - */
30632 - struct bfq_io_cq {
30633 - struct io_cq icq; /* must be the first member */
30634 - struct bfq_queue *bfqq[2];
30635 - struct bfq_ttime ttime;
30636 - int ioprio;
30637 -+
30638 -+ unsigned int wr_time_left;
30639 -+ bool saved_idle_window;
30640 -+ bool saved_IO_bound;
30641 -+
30642 -+ bool saved_in_large_burst;
30643 -+ bool was_in_burst_list;
30644 -+
30645 -+ unsigned int cooperations;
30646 -+ unsigned int failed_cooperations;
30647 - };
30648 -
30649 - enum bfq_device_speed {
30650 -@@ -539,7 +573,7 @@ enum bfqq_state_flags {
30651 - BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
30652 - BFQ_BFQQ_FLAG_sync, /* synchronous queue */
30653 - BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
30654 -- BFQ_BFQQ_FLAG_IO_bound, /*
30655 -+ BFQ_BFQQ_FLAG_IO_bound, /*
30656 - * bfqq has timed-out at least once
30657 - * having consumed at most 2/10 of
30658 - * its budget
30659 -@@ -552,12 +586,13 @@ enum bfqq_state_flags {
30660 - * bfqq has proved to be slow and
30661 - * seeky until budget timeout
30662 - */
30663 -- BFQ_BFQQ_FLAG_softrt_update, /*
30664 -+ BFQ_BFQQ_FLAG_softrt_update, /*
30665 - * may need softrt-next-start
30666 - * update
30667 - */
30668 - BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
30669 -- BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be splitted */
30670 -+ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
30671 -+ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */
30672 - };
30673 -
30674 - #define BFQ_BFQQ_FNS(name) \
30675 -@@ -587,6 +622,7 @@ BFQ_BFQQ_FNS(in_large_burst);
30676 - BFQ_BFQQ_FNS(constantly_seeky);
30677 - BFQ_BFQQ_FNS(coop);
30678 - BFQ_BFQQ_FNS(split_coop);
30679 -+BFQ_BFQQ_FNS(just_split);
30680 - BFQ_BFQQ_FNS(softrt_update);
30681 - #undef BFQ_BFQQ_FNS
30682 -
30683 ---
30684 -2.1.0
30685 -
30686
30687 diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
30688 deleted file mode 100644
30689 index c4efd06..0000000
30690 --- a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
30691 +++ /dev/null
30692 @@ -1,402 +0,0 @@
30693 -WARNING - this version of the patch works with version 4.9+ of gcc and with
30694 -kernel version 3.15.x+ and should NOT be applied when compiling on older
30695 -versions, due to flag name changes in the 4.9 release of gcc.
30696 -Use the older version of this patch hosted on the same github for older
30697 -versions of gcc. For example:
30698 -
30699 -corei7 --> nehalem
30700 -corei7-avx --> sandybridge
30701 -core-avx-i --> ivybridge
30702 -core-avx2 --> haswell
30703 -
30704 -For more, see: https://gcc.gnu.org/gcc-4.9/changes.html
30705 -
30706 -It also changes 'atom' to 'bonnell' in accordance with the gcc v4.9 changes.
30707 -Note that upstream is using the deprecated 'march=atom' flags when I believe it
30708 -should use the newer 'march=bonnell' flag for atom processors.
30709 -
30710 -I have made that change to this patch set as well. See the following kernel
30711 -bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
30712 -
30713 -This patch will expand the number of microarchitectures to include newer
30714 -processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
30715 -14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
30716 -Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
30717 -i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
30718 -Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), Intel 5th
30719 -Gen Core i3/i5/i7 (Broadwell), and the low power Silvermont series of Atom
30720 -processors (Silvermont). It also offers the compiler the 'native' flag.
30721 -
30722 -Small but real speed increases are measurable using a make-based benchmark comparing
30723 -a generic kernel to one built with one of the respective microarchs.
30724 -
30725 -See the following experimental evidence supporting this statement:
30726 -https://github.com/graysky2/kernel_gcc_patch
30727 -
30728 -REQUIREMENTS
30729 -linux version >=3.15
30730 -gcc version >=4.9
30731 -
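To make the mechanism concrete: the patch extends the #elif chain in
arch/x86/include/asm/module.h that selects MODULE_PROC_FAMILY (part of the
module vermagic on x86) from the chosen Kconfig symbol. Below is a reduced,
self-contained mock-up of that dispatch; the hard-coded CONFIG_MNEHALEM
define merely stands in for CONFIG_MNEHALEM=y in a real .config:

    #include <stdio.h>

    #define CONFIG_MNEHALEM 1           /* stand-in for a .config selection */

    #if defined(CONFIG_MNATIVE)
    #define MODULE_PROC_FAMILY "NATIVE "
    #elif defined(CONFIG_MNEHALEM)
    #define MODULE_PROC_FAMILY "NEHALEM "
    #elif defined(CONFIG_MBROADWELL)
    #define MODULE_PROC_FAMILY "BROADWELL "
    #else
    #define MODULE_PROC_FAMILY "GENERIC "
    #endif

    int main(void)
    {
        /* Print which family the preprocessor chain resolved to. */
        printf("MODULE_PROC_FAMILY = %s\n", MODULE_PROC_FAMILY);
        return 0;
    }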
30732 ---- a/arch/x86/include/asm/module.h 2014-06-16 16:44:27.000000000 -0400
30733 -+++ b/arch/x86/include/asm/module.h 2015-03-07 03:27:32.556672424 -0500
30734 -@@ -15,6 +15,22 @@
30735 - #define MODULE_PROC_FAMILY "586MMX "
30736 - #elif defined CONFIG_MCORE2
30737 - #define MODULE_PROC_FAMILY "CORE2 "
30738 -+#elif defined CONFIG_MNATIVE
30739 -+#define MODULE_PROC_FAMILY "NATIVE "
30740 -+#elif defined CONFIG_MNEHALEM
30741 -+#define MODULE_PROC_FAMILY "NEHALEM "
30742 -+#elif defined CONFIG_MWESTMERE
30743 -+#define MODULE_PROC_FAMILY "WESTMERE "
30744 -+#elif defined CONFIG_MSILVERMONT
30745 -+#define MODULE_PROC_FAMILY "SILVERMONT "
30746 -+#elif defined CONFIG_MSANDYBRIDGE
30747 -+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
30748 -+#elif defined CONFIG_MIVYBRIDGE
30749 -+#define MODULE_PROC_FAMILY "IVYBRIDGE "
30750 -+#elif defined CONFIG_MHASWELL
30751 -+#define MODULE_PROC_FAMILY "HASWELL "
30752 -+#elif defined CONFIG_MBROADWELL
30753 -+#define MODULE_PROC_FAMILY "BROADWELL "
30754 - #elif defined CONFIG_MATOM
30755 - #define MODULE_PROC_FAMILY "ATOM "
30756 - #elif defined CONFIG_M686
30757 -@@ -33,6 +49,20 @@
30758 - #define MODULE_PROC_FAMILY "K7 "
30759 - #elif defined CONFIG_MK8
30760 - #define MODULE_PROC_FAMILY "K8 "
30761 -+#elif defined CONFIG_MK8SSE3
30762 -+#define MODULE_PROC_FAMILY "K8SSE3 "
30763 -+#elif defined CONFIG_MK10
30764 -+#define MODULE_PROC_FAMILY "K10 "
30765 -+#elif defined CONFIG_MBARCELONA
30766 -+#define MODULE_PROC_FAMILY "BARCELONA "
30767 -+#elif defined CONFIG_MBOBCAT
30768 -+#define MODULE_PROC_FAMILY "BOBCAT "
30769 -+#elif defined CONFIG_MBULLDOZER
30770 -+#define MODULE_PROC_FAMILY "BULLDOZER "
30771 -+#elif defined CONFIG_MPILEDRIVER
30772 -+#define MODULE_PROC_FAMILY "PILEDRIVER "
30773 -+#elif defined CONFIG_MJAGUAR
30774 -+#define MODULE_PROC_FAMILY "JAGUAR "
30775 - #elif defined CONFIG_MELAN
30776 - #define MODULE_PROC_FAMILY "ELAN "
30777 - #elif defined CONFIG_MCRUSOE
30778 ---- a/arch/x86/Kconfig.cpu 2014-06-16 16:44:27.000000000 -0400
30779 -+++ b/arch/x86/Kconfig.cpu 2015-03-07 03:32:14.337713226 -0500
30780 -@@ -137,9 +137,8 @@ config MPENTIUM4
30781 - -Paxville
30782 - -Dempsey
30783 -
30784 --
30785 - config MK6
30786 -- bool "K6/K6-II/K6-III"
30787 -+ bool "AMD K6/K6-II/K6-III"
30788 - depends on X86_32
30789 - ---help---
30790 - Select this for an AMD K6-family processor. Enables use of
30791 -@@ -147,7 +146,7 @@ config MK6
30792 - flags to GCC.
30793 -
30794 - config MK7
30795 -- bool "Athlon/Duron/K7"
30796 -+ bool "AMD Athlon/Duron/K7"
30797 - depends on X86_32
30798 - ---help---
30799 - Select this for an AMD Athlon K7-family processor. Enables use of
30800 -@@ -155,12 +154,62 @@ config MK7
30801 - flags to GCC.
30802 -
30803 - config MK8
30804 -- bool "Opteron/Athlon64/Hammer/K8"
30805 -+ bool "AMD Opteron/Athlon64/Hammer/K8"
30806 - ---help---
30807 - Select this for an AMD Opteron or Athlon64 Hammer-family processor.
30808 - Enables use of some extended instructions, and passes appropriate
30809 - optimization flags to GCC.
30810 -
30811 -+config MK8SSE3
30812 -+ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
30813 -+ ---help---
30814 -+ Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
30815 -+ Enables use of some extended instructions, and passes appropriate
30816 -+ optimization flags to GCC.
30817 -+
30818 -+config MK10
30819 -+ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
30820 -+ ---help---
30821 -+ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
30822 -+ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
30823 -+ Enables use of some extended instructions, and passes appropriate
30824 -+ optimization flags to GCC.
30825 -+
30826 -+config MBARCELONA
30827 -+ bool "AMD Barcelona"
30828 -+ ---help---
30829 -+ Select this for AMD Barcelona and newer processors.
30830 -+
30831 -+ Enables -march=barcelona
30832 -+
30833 -+config MBOBCAT
30834 -+ bool "AMD Bobcat"
30835 -+ ---help---
30836 -+ Select this for AMD Bobcat processors.
30837 -+
30838 -+ Enables -march=btver1
30839 -+
30840 -+config MBULLDOZER
30841 -+ bool "AMD Bulldozer"
30842 -+ ---help---
30843 -+ Select this for AMD Bulldozer processors.
30844 -+
30845 -+ Enables -march=bdver1
30846 -+
30847 -+config MPILEDRIVER
30848 -+ bool "AMD Piledriver"
30849 -+ ---help---
30850 -+ Select this for AMD Piledriver processors.
30851 -+
30852 -+ Enables -march=bdver2
30853 -+
30854 -+config MJAGUAR
30855 -+ bool "AMD Jaguar"
30856 -+ ---help---
30857 -+ Select this for AMD Jaguar processors.
30858 -+
30859 -+ Enables -march=btver2
30860 -+
30861 - config MCRUSOE
30862 - bool "Crusoe"
30863 - depends on X86_32
30864 -@@ -251,8 +300,17 @@ config MPSC
30865 - using the cpu family field
30866 - in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
30867 -
30868 -+config MATOM
30869 -+ bool "Intel Atom"
30870 -+ ---help---
30871 -+
30872 -+ Select this for the Intel Atom platform. Intel Atom CPUs have an
30873 -+ in-order pipelining architecture and thus can benefit from
30874 -+ accordingly optimized code. Use a recent GCC with specific Atom
30875 -+ support in order to fully benefit from selecting this option.
30876 -+
30877 - config MCORE2
30878 -- bool "Core 2/newer Xeon"
30879 -+ bool "Intel Core 2"
30880 - ---help---
30881 -
30882 - Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
30883 -@@ -260,14 +318,63 @@ config MCORE2
30884 - family in /proc/cpuinfo. Newer ones have 6 and older ones 15
30885 - (not a typo)
30886 -
30887 --config MATOM
30888 -- bool "Intel Atom"
30889 -+ Enables -march=core2
30890 -+
30891 -+config MNEHALEM
30892 -+ bool "Intel Nehalem"
30893 - ---help---
30894 -
30895 -- Select this for the Intel Atom platform. Intel Atom CPUs have an
30896 -- in-order pipelining architecture and thus can benefit from
30897 -- accordingly optimized code. Use a recent GCC with specific Atom
30898 -- support in order to fully benefit from selecting this option.
30899 -+ Select this for 1st Gen Core processors in the Nehalem family.
30900 -+
30901 -+ Enables -march=nehalem
30902 -+
30903 -+config MWESTMERE
30904 -+ bool "Intel Westmere"
30905 -+ ---help---
30906 -+
30907 -+	  Select this for the Intel Westmere (formerly Nehalem-C) family.
30908 -+
30909 -+ Enables -march=westmere
30910 -+
30911 -+config MSILVERMONT
30912 -+ bool "Intel Silvermont"
30913 -+ ---help---
30914 -+
30915 -+ Select this for the Intel Silvermont platform.
30916 -+
30917 -+ Enables -march=silvermont
30918 -+
30919 -+config MSANDYBRIDGE
30920 -+ bool "Intel Sandy Bridge"
30921 -+ ---help---
30922 -+
30923 -+ Select this for 2nd Gen Core processors in the Sandy Bridge family.
30924 -+
30925 -+ Enables -march=sandybridge
30926 -+
30927 -+config MIVYBRIDGE
30928 -+ bool "Intel Ivy Bridge"
30929 -+ ---help---
30930 -+
30931 -+ Select this for 3rd Gen Core processors in the Ivy Bridge family.
30932 -+
30933 -+ Enables -march=ivybridge
30934 -+
30935 -+config MHASWELL
30936 -+ bool "Intel Haswell"
30937 -+ ---help---
30938 -+
30939 -+ Select this for 4th Gen Core processors in the Haswell family.
30940 -+
30941 -+ Enables -march=haswell
30942 -+
30943 -+config MBROADWELL
30944 -+ bool "Intel Broadwell"
30945 -+ ---help---
30946 -+
30947 -+ Select this for 5th Gen Core processors in the Broadwell family.
30948 -+
30949 -+ Enables -march=broadwell
30950 -
30951 - config GENERIC_CPU
30952 - bool "Generic-x86-64"
30953 -@@ -276,6 +383,19 @@ config GENERIC_CPU
30954 - Generic x86-64 CPU.
30955 - Run equally well on all x86-64 CPUs.
30956 -
30957 -+config MNATIVE
30958 -+ bool "Native optimizations autodetected by GCC"
30959 -+ ---help---
30960 -+
30961 -+ GCC 4.2 and above support -march=native, which automatically detects
30962 -+ the optimum settings to use based on your processor. -march=native
30963 -+	  also detects and applies additional CPU-specific settings beyond
30964 -+	  -march (e.g. -msse4). Unless you have a specific reason not to
30965 -+ (e.g. distcc cross-compiling), you should probably be using
30966 -+ -march=native rather than anything listed below.
30967 -+
30968 -+ Enables -march=native
30969 -+
30970 - endchoice
30971 -
30972 - config X86_GENERIC
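Since MNATIVE delegates flag selection to the compiler, the exact settings it picks are host-dependent; GCC can report what -march=native resolves to. A quick way to inspect it (illustrative output for a hypothetical Haswell build host):

    $ gcc -march=native -Q --help=target | grep -E 'march=|mtune='
      -march=                             haswell
      -mtune=                             haswell

This is also why the help text warns about distcc: remote compile nodes with different CPUs would each expand -march=native differently.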
30973 -@@ -300,7 +420,7 @@ config X86_INTERNODE_CACHE_SHIFT
30974 - config X86_L1_CACHE_SHIFT
30975 - int
30976 - default "7" if MPENTIUM4 || MPSC
30977 -- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
30985 30978 -+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
30979 - default "4" if MELAN || M486 || MGEODEGX1
30980 - default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
30981 -
30982 -@@ -331,11 +451,11 @@ config X86_ALIGNMENT_16
30983 -
30984 - config X86_INTEL_USERCOPY
30985 - def_bool y
30986 -- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
30987 -+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
30988 -
30989 - config X86_USE_PPRO_CHECKSUM
30990 - def_bool y
30991 -- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
30992 -+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
30993 -
30994 - config X86_USE_3DNOW
30995 - def_bool y
30996 -@@ -359,17 +479,17 @@ config X86_P6_NOP
30997 -
30998 - config X86_TSC
30999 - def_bool y
31000 -- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
31001 -+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
31002 -
31003 - config X86_CMPXCHG64
31004 - def_bool y
31005 -- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
31006 -+ depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
31007 -
31008 - # this should be set for all -march=.. options where the compiler
31009 - # generates cmov.
31010 - config X86_CMOV
31011 - def_bool y
31012 -- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
31013 -+ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
31014 -
31015 - config X86_MINIMUM_CPU_FAMILY
31016 - int
31017 ---- a/arch/x86/Makefile 2014-06-16 16:44:27.000000000 -0400
31018 -+++ b/arch/x86/Makefile 2015-03-07 03:33:27.650843211 -0500
31019 -@@ -92,13 +92,35 @@ else
31020 - KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
31021 -
31022 - # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
31023 -+ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
31024 - cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
31025 -+ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
31026 -+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
31027 -+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
31028 -+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
31029 -+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
31030 -+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
31031 -+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
31032 - cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
31033 -
31034 - cflags-$(CONFIG_MCORE2) += \
31035 -- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
31036 -- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
31037 -- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
31038 -+ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
31039 -+ cflags-$(CONFIG_MNEHALEM) += \
31040 -+ $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
31041 -+ cflags-$(CONFIG_MWESTMERE) += \
31042 -+ $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
31043 -+ cflags-$(CONFIG_MSILVERMONT) += \
31044 -+ $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
31045 -+ cflags-$(CONFIG_MSANDYBRIDGE) += \
31046 -+ $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
31047 -+ cflags-$(CONFIG_MIVYBRIDGE) += \
31048 -+ $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
31049 -+ cflags-$(CONFIG_MHASWELL) += \
31050 -+ $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
31051 -+ cflags-$(CONFIG_MBROADWELL) += \
31052 -+ $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
31053 -+ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
31054 -+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
31055 - cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
31056 - KBUILD_CFLAGS += $(cflags-y)
31057 -
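Every entry above wraps its flag in $(call cc-option,<flag>,<fallback>), so a toolchain that predates a given -march name silently degrades to the fallback (an -mtune of the same core, or -mtune=generic) instead of breaking the build; note also that the Atom entry now asks for -march=bonnell, the name newer GCC releases use for the Atom in-order core. A minimal standalone sketch of the probe-and-fall-back pattern (hypothetical Makefile; the kernel's real helper lives in scripts/Kbuild.include):

    CC ?= gcc

    # Emit $(1) if $(CC) accepts it, otherwise emit $(2).
    cc-option = $(shell if $(CC) $(1) -c -x c /dev/null -o /dev/null \
                  >/dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)

    cflags-y := $(call cc-option,-march=broadwell,-mtune=generic)
    $(info cflags-y = $(cflags-y))
    all: ;

Running make against this prints either -march=broadwell or -mtune=generic depending on the compiler, mirroring how KBUILD_CFLAGS is assembled here.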
31058 ---- a/arch/x86/Makefile_32.cpu 2014-06-16 16:44:27.000000000 -0400
31059 -+++ b/arch/x86/Makefile_32.cpu 2015-03-07 03:34:15.203586024 -0500
31060 -@@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6) += -march=k6
31061 - # Please note that patches that add -march=athlon-xp and friends are pointless.
31062 - # They make zero difference whatsoever to performance at this time.
31063 - cflags-$(CONFIG_MK7) += -march=athlon
31064 -+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
31065 - cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
31066 -+cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon)
31067 -+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
31068 -+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
31069 -+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
31070 -+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
31071 -+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
31072 -+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
31073 - cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
31074 - cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
31075 - cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
31076 -@@ -32,8 +40,15 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
31077 - cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
31078 - cflags-$(CONFIG_MVIAC7) += -march=i686
31079 - cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
31080 --cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
31081 -- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
31082 -+cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
31083 -+cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere)
31084 -+cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
31085 -+cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
31086 -+cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
31087 -+cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell)
31088 -+cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell)
31089 -+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
31090 -+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
31091 -
31092 - # AMD Elan support
31093 - cflags-$(CONFIG_MELAN) += -march=i486
31094 -
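The $(call tune,...) helper used in these 32-bit entries is Makefile_32.cpu's own wrapper; for mtune-capable compilers it expands to roughly $(call cc-option,-mtune=$(1),$(2)), so each new option keeps the safe -march=i686 baseline and only adds scheduling hints for the newer core when the compiler understands them. A small runnable sketch of how the two helpers compose (same hypothetical cc-option as above, with one config symbol hard-wired on for demonstration):

    CC ?= gcc
    cc-option = $(shell if $(CC) $(1) -c -x c /dev/null -o /dev/null \
                  >/dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)

    # Mirrors Makefile_32.cpu: probe -mtune=<cpu>, fall back to $(2) if unsupported.
    tune = $(call cc-option,-mtune=$(1),$(2))

    # CONFIG_MNEHALEM stands in for the Kconfig-generated setting.
    CONFIG_MNEHALEM := y
    cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
    $(info cflags-y = $(cflags-y))
    all: ;

With a nehalem-aware compiler this yields "-march=i686 -mtune=nehalem"; an older one simply drops the tune flag, since $(call tune,nehalem) passes no fallback.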