Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 29 Dec 2021 13:11:22 +0000 (UTC)
Message-Id: 1640783458.dc6725ef86a129d4df053dc29b953e560f5db220.mpagano@gentoo
1 commit: dc6725ef86a129d4df053dc29b953e560f5db220
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Dec 29 13:10:58 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Dec 29 13:10:58 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc6725ef
7
8 Linux patch 4.19.223
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1222_linux-4.19.223.patch | 1396 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1400 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 7dda2480..ed044b8e 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -927,6 +927,10 @@ Patch: 1221_linux-4.19.222.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.222
23
24 +Patch: 1222_linux-4.19.223.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.223
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1222_linux-4.19.223.patch b/1222_linux-4.19.223.patch
33 new file mode 100644
34 index 00000000..f6334a18
35 --- /dev/null
36 +++ b/1222_linux-4.19.223.patch
37 @@ -0,0 +1,1396 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index f179e20eb8a0b..607db9519cfbd 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -2019,8 +2019,12 @@
43 + Default is 1 (enabled)
44 +
45 + kvm-intel.emulate_invalid_guest_state=
46 +- [KVM,Intel] Enable emulation of invalid guest states
47 +- Default is 0 (disabled)
48 ++ [KVM,Intel] Disable emulation of invalid guest state.
49 ++ Ignored if kvm-intel.enable_unrestricted_guest=1, as
50 ++ guest state is never invalid for unrestricted guests.
51 ++ This param doesn't apply to nested guests (L2), as KVM
52 ++ never emulates invalid L2 guest state.
53 ++ Default is 1 (enabled)
54 +
55 + kvm-intel.flexpriority=
56 + [KVM,Intel] Disable FlexPriority feature (TPR shadow).
57 +diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
58 +index d3e5dd26db12d..4035a495c0606 100644
59 +--- a/Documentation/networking/bonding.txt
60 ++++ b/Documentation/networking/bonding.txt
61 +@@ -191,11 +191,12 @@ ad_actor_sys_prio
62 + ad_actor_system
63 +
64 + In an AD system, this specifies the mac-address for the actor in
65 +- protocol packet exchanges (LACPDUs). The value cannot be NULL or
66 +- multicast. It is preferred to have the local-admin bit set for this
67 +- mac but driver does not enforce it. If the value is not given then
68 +- system defaults to using the masters' mac address as actors' system
69 +- address.
70 ++ protocol packet exchanges (LACPDUs). The value cannot be a multicast
71 ++ address. If the all-zeroes MAC is specified, bonding will internally
72 ++ use the MAC of the bond itself. It is preferred to have the
73 ++ local-admin bit set for this mac but driver does not enforce it. If
74 ++ the value is not given then system defaults to using the masters'
75 ++ mac address as actors' system address.
76 +
77 + This parameter has effect only in 802.3ad mode and is available through
78 + SysFs interface.
79 +diff --git a/Makefile b/Makefile
80 +index aa6cdaebe18b2..6637882cb5e54 100644
81 +--- a/Makefile
82 ++++ b/Makefile
83 +@@ -1,7 +1,7 @@
84 + # SPDX-License-Identifier: GPL-2.0
85 + VERSION = 4
86 + PATCHLEVEL = 19
87 +-SUBLEVEL = 222
88 ++SUBLEVEL = 223
89 + EXTRAVERSION =
90 + NAME = "People's Front"
91 +
92 +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
93 +index 89e551eebff1e..cde22c04ad2b8 100644
94 +--- a/arch/arm/kernel/entry-armv.S
95 ++++ b/arch/arm/kernel/entry-armv.S
96 +@@ -620,11 +620,9 @@ call_fpe:
97 + tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
98 + reteq lr
99 + and r8, r0, #0x00000f00 @ mask out CP number
100 +- THUMB( lsr r8, r8, #8 )
101 + mov r7, #1
102 +- add r6, r10, #TI_USED_CP
103 +- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
104 +- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
105 ++ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
106 ++ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
107 + #ifdef CONFIG_IWMMXT
108 + @ Test if we need to give access to iWMMXt coprocessors
109 + ldr r5, [r10, #TI_FLAGS]
110 +@@ -633,7 +631,7 @@ call_fpe:
111 + bcs iwmmxt_task_enable
112 + #endif
113 + ARM( add pc, pc, r8, lsr #6 )
114 +- THUMB( lsl r8, r8, #2 )
115 ++ THUMB( lsr r8, r8, #6 )
116 + THUMB( add pc, r8 )
117 + nop
118 +
119 +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
120 +index 1238de25a9691..9b1789504f7a0 100644
121 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
122 ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
123 +@@ -72,7 +72,7 @@
124 + pinctrl-0 = <&emac_rgmii_pins>;
125 + phy-supply = <&reg_gmac_3v3>;
126 + phy-handle = <&ext_rgmii_phy>;
127 +- phy-mode = "rgmii";
128 ++ phy-mode = "rgmii-id";
129 + status = "okay";
130 + };
131 +
132 +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
133 +index 61a647a55c695..1ae007ec65c51 100644
134 +--- a/arch/parisc/kernel/syscall.S
135 ++++ b/arch/parisc/kernel/syscall.S
136 +@@ -478,7 +478,7 @@ lws_start:
137 + extrd,u %r1,PSW_W_BIT,1,%r1
138 + /* sp must be aligned on 4, so deposit the W bit setting into
139 + * the bottom of sp temporarily */
140 +- or,ev %r1,%r30,%r30
141 ++ or,od %r1,%r30,%r30
142 +
143 + /* Clip LWS number to a 32-bit value for 32-bit processes */
144 + depdi 0, 31, 32, %r20
145 +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
146 +index 2a9c12ffb5cbc..7de459cf36b54 100644
147 +--- a/arch/x86/include/asm/pgtable.h
148 ++++ b/arch/x86/include/asm/pgtable.h
149 +@@ -1356,8 +1356,8 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
150 + #endif
151 + #endif
152 +
153 +-#define PKRU_AD_BIT 0x1
154 +-#define PKRU_WD_BIT 0x2
155 ++#define PKRU_AD_BIT 0x1u
156 ++#define PKRU_WD_BIT 0x2u
157 + #define PKRU_BITS_PER_PKEY 2
158 +
159 + static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
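
The 0x1 -> 0x1u change above matters because __pkru_allows_read()/__pkru_allows_write() shift these masks by pkey * PKRU_BITS_PER_PKEY; for pkey 15, PKRU_WD_BIT << 30 lands on bit 31, which overflows a signed int (undefined behavior in C). A minimal userspace sketch, not part of the patch, with pkru_allows_write() as a hypothetical stand-in for the kernel helper:

/* Illustrative only: shows why the PKRU masks must be unsigned --
 * shifting a signed 0x2 left by 30 sets bit 31 and overflows 'int'. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PKRU_AD_BIT 0x1u          /* access-disable, as in the patched header */
#define PKRU_WD_BIT 0x2u          /* write-disable */
#define PKRU_BITS_PER_PKEY 2

static bool pkru_allows_write(uint32_t pkru, uint16_t pkey)
{
        int shift = pkey * PKRU_BITS_PER_PKEY;

        /* With unsigned masks this is well defined even for pkey 15,
         * where PKRU_WD_BIT << 30 lands on bit 31. */
        return !(pkru & ((PKRU_AD_BIT | PKRU_WD_BIT) << shift));
}

int main(void)
{
        uint32_t pkru = PKRU_WD_BIT << (15 * PKRU_BITS_PER_PKEY);

        assert(!pkru_allows_write(pkru, 15));
        assert(pkru_allows_write(pkru, 0));
        return 0;
}
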
160 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
161 +index b2bad345c523f..c2529dfda3e53 100644
162 +--- a/block/bfq-iosched.c
163 ++++ b/block/bfq-iosched.c
164 +@@ -625,12 +625,13 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
165 + }
166 +
167 + /*
168 +- * Tell whether there are active queues or groups with differentiated weights.
169 ++ * Tell whether there are active queues with different weights or
170 ++ * active groups.
171 + */
172 +-static bool bfq_differentiated_weights(struct bfq_data *bfqd)
173 ++static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
174 + {
175 + /*
176 +- * For weights to differ, at least one of the trees must contain
177 ++ * For queue weights to differ, queue_weights_tree must contain
178 + * at least two nodes.
179 + */
180 + return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
181 +@@ -638,9 +639,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
182 + bfqd->queue_weights_tree.rb_node->rb_right)
183 + #ifdef CONFIG_BFQ_GROUP_IOSCHED
184 + ) ||
185 +- (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
186 +- (bfqd->group_weights_tree.rb_node->rb_left ||
187 +- bfqd->group_weights_tree.rb_node->rb_right)
188 ++ (bfqd->num_groups_with_pending_reqs > 0
189 + #endif
190 + );
191 + }
192 +@@ -658,26 +657,25 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
193 + * 3) all active groups at the same level in the groups tree have the same
194 + * number of children.
195 + *
196 +- * Unfortunately, keeping the necessary state for evaluating exactly the
197 +- * above symmetry conditions would be quite complex and time-consuming.
198 +- * Therefore this function evaluates, instead, the following stronger
199 +- * sub-conditions, for which it is much easier to maintain the needed
200 +- * state:
201 ++ * Unfortunately, keeping the necessary state for evaluating exactly
202 ++ * the last two symmetry sub-conditions above would be quite complex
203 ++ * and time consuming. Therefore this function evaluates, instead,
204 ++ * only the following stronger two sub-conditions, for which it is
205 ++ * much easier to maintain the needed state:
206 + * 1) all active queues have the same weight,
207 +- * 2) all active groups have the same weight,
208 +- * 3) all active groups have at most one active child each.
209 +- * In particular, the last two conditions are always true if hierarchical
210 +- * support and the cgroups interface are not enabled, thus no state needs
211 +- * to be maintained in this case.
212 ++ * 2) there are no active groups.
213 ++ * In particular, the last condition is always true if hierarchical
214 ++ * support or the cgroups interface are not enabled, thus no state
215 ++ * needs to be maintained in this case.
216 + */
217 + static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
218 + {
219 +- return !bfq_differentiated_weights(bfqd);
220 ++ return !bfq_varied_queue_weights_or_active_groups(bfqd);
221 + }
222 +
223 + /*
224 + * If the weight-counter tree passed as input contains no counter for
225 +- * the weight of the input entity, then add that counter; otherwise just
226 ++ * the weight of the input queue, then add that counter; otherwise just
227 + * increment the existing counter.
228 + *
229 + * Note that weight-counter trees contain few nodes in mostly symmetric
230 +@@ -688,25 +686,25 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
231 + * In most scenarios, the rate at which nodes are created/destroyed
232 + * should be low too.
233 + */
234 +-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
235 ++void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
236 + struct rb_root *root)
237 + {
238 ++ struct bfq_entity *entity = &bfqq->entity;
239 + struct rb_node **new = &(root->rb_node), *parent = NULL;
240 +
241 + /*
242 +- * Do not insert if the entity is already associated with a
243 ++ * Do not insert if the queue is already associated with a
244 + * counter, which happens if:
245 +- * 1) the entity is associated with a queue,
246 +- * 2) a request arrival has caused the queue to become both
247 ++ * 1) a request arrival has caused the queue to become both
248 + * non-weight-raised, and hence change its weight, and
249 + * backlogged; in this respect, each of the two events
250 + * causes an invocation of this function,
251 +- * 3) this is the invocation of this function caused by the
252 ++ * 2) this is the invocation of this function caused by the
253 + * second event. This second invocation is actually useless,
254 + * and we handle this fact by exiting immediately. More
255 + * efficient or clearer solutions might possibly be adopted.
256 + */
257 +- if (entity->weight_counter)
258 ++ if (bfqq->weight_counter)
259 + return;
260 +
261 + while (*new) {
262 +@@ -716,7 +714,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
263 + parent = *new;
264 +
265 + if (entity->weight == __counter->weight) {
266 +- entity->weight_counter = __counter;
267 ++ bfqq->weight_counter = __counter;
268 + goto inc_counter;
269 + }
270 + if (entity->weight < __counter->weight)
271 +@@ -725,68 +723,68 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
272 + new = &((*new)->rb_right);
273 + }
274 +
275 +- entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
276 +- GFP_ATOMIC);
277 ++ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
278 ++ GFP_ATOMIC);
279 +
280 + /*
281 + * In the unlucky event of an allocation failure, we just
282 +- * exit. This will cause the weight of entity to not be
283 +- * considered in bfq_differentiated_weights, which, in its
284 +- * turn, causes the scenario to be deemed wrongly symmetric in
285 +- * case entity's weight would have been the only weight making
286 +- * the scenario asymmetric. On the bright side, no unbalance
287 +- * will however occur when entity becomes inactive again (the
288 +- * invocation of this function is triggered by an activation
289 +- * of entity). In fact, bfq_weights_tree_remove does nothing
290 +- * if !entity->weight_counter.
291 ++ * exit. This will cause the weight of queue to not be
292 ++ * considered in bfq_varied_queue_weights_or_active_groups,
293 ++ * which, in its turn, causes the scenario to be deemed
294 ++ * wrongly symmetric in case bfqq's weight would have been
295 ++ * the only weight making the scenario asymmetric. On the
296 ++ * bright side, no unbalance will however occur when bfqq
297 ++ * becomes inactive again (the invocation of this function
298 ++ * is triggered by an activation of queue). In fact,
299 ++ * bfq_weights_tree_remove does nothing if
300 ++ * !bfqq->weight_counter.
301 + */
302 +- if (unlikely(!entity->weight_counter))
303 ++ if (unlikely(!bfqq->weight_counter))
304 + return;
305 +
306 +- entity->weight_counter->weight = entity->weight;
307 +- rb_link_node(&entity->weight_counter->weights_node, parent, new);
308 +- rb_insert_color(&entity->weight_counter->weights_node, root);
309 ++ bfqq->weight_counter->weight = entity->weight;
310 ++ rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
311 ++ rb_insert_color(&bfqq->weight_counter->weights_node, root);
312 +
313 + inc_counter:
314 +- entity->weight_counter->num_active++;
315 ++ bfqq->weight_counter->num_active++;
316 ++ bfqq->ref++;
317 + }
318 +
319 + /*
320 +- * Decrement the weight counter associated with the entity, and, if the
321 ++ * Decrement the weight counter associated with the queue, and, if the
322 + * counter reaches 0, remove the counter from the tree.
323 + * See the comments to the function bfq_weights_tree_add() for considerations
324 + * about overhead.
325 + */
326 + void __bfq_weights_tree_remove(struct bfq_data *bfqd,
327 +- struct bfq_entity *entity,
328 ++ struct bfq_queue *bfqq,
329 + struct rb_root *root)
330 + {
331 +- if (!entity->weight_counter)
332 ++ if (!bfqq->weight_counter)
333 + return;
334 +
335 +- entity->weight_counter->num_active--;
336 +- if (entity->weight_counter->num_active > 0)
337 ++ bfqq->weight_counter->num_active--;
338 ++ if (bfqq->weight_counter->num_active > 0)
339 + goto reset_entity_pointer;
340 +
341 +- rb_erase(&entity->weight_counter->weights_node, root);
342 +- kfree(entity->weight_counter);
343 ++ rb_erase(&bfqq->weight_counter->weights_node, root);
344 ++ kfree(bfqq->weight_counter);
345 +
346 + reset_entity_pointer:
347 +- entity->weight_counter = NULL;
348 ++ bfqq->weight_counter = NULL;
349 ++ bfq_put_queue(bfqq);
350 + }
351 +
352 + /*
353 +- * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
354 +- * parent entities.
355 ++ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
356 ++ * of active groups for each queue's inactive parent entity.
357 + */
358 + void bfq_weights_tree_remove(struct bfq_data *bfqd,
359 + struct bfq_queue *bfqq)
360 + {
361 + struct bfq_entity *entity = bfqq->entity.parent;
362 +
363 +- __bfq_weights_tree_remove(bfqd, &bfqq->entity,
364 +- &bfqd->queue_weights_tree);
365 +-
366 + for_each_entity(entity) {
367 + struct bfq_sched_data *sd = entity->my_sched_data;
368 +
369 +@@ -798,18 +796,37 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
370 + * next_in_service for details on why
371 + * in_service_entity must be checked too).
372 + *
373 +- * As a consequence, the weight of entity is
374 +- * not to be removed. In addition, if entity
375 +- * is active, then its parent entities are
376 +- * active as well, and thus their weights are
377 +- * not to be removed either. In the end, this
378 +- * loop must stop here.
379 ++ * As a consequence, its parent entities are
380 ++ * active as well, and thus this loop must
381 ++ * stop here.
382 + */
383 + break;
384 + }
385 +- __bfq_weights_tree_remove(bfqd, entity,
386 +- &bfqd->group_weights_tree);
387 ++
388 ++ /*
389 ++ * The decrement of num_groups_with_pending_reqs is
390 ++ * not performed immediately upon the deactivation of
391 ++ * entity, but it is delayed to when it also happens
392 ++ * that the first leaf descendant bfqq of entity gets
393 ++ * all its pending requests completed. The following
394 ++ * instructions perform this delayed decrement, if
395 ++ * needed. See the comments on
396 ++ * num_groups_with_pending_reqs for details.
397 ++ */
398 ++ if (entity->in_groups_with_pending_reqs) {
399 ++ entity->in_groups_with_pending_reqs = false;
400 ++ bfqd->num_groups_with_pending_reqs--;
401 ++ }
402 + }
403 ++
404 ++ /*
405 ++ * Next function is invoked last, because it causes bfqq to be
406 ++ * freed if the following holds: bfqq is not in service and
407 ++ * has no dispatched request. DO NOT use bfqq after the next
408 ++ * function invocation.
409 ++ */
410 ++ __bfq_weights_tree_remove(bfqd, bfqq,
411 ++ &bfqd->queue_weights_tree);
412 + }
413 +
414 + /*
415 +@@ -1003,7 +1020,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
416 +
417 + static int bfqq_process_refs(struct bfq_queue *bfqq)
418 + {
419 +- return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
420 ++ return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
421 ++ (bfqq->weight_counter != NULL);
422 + }
423 +
424 + /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
425 +@@ -2798,7 +2816,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
426 + bfq_remove_request(q, rq);
427 + }
428 +
429 +-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
430 ++static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
431 + {
432 + /*
433 + * If this bfqq is shared between multiple processes, check
434 +@@ -2831,9 +2849,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
435 + /*
436 + * All in-service entities must have been properly deactivated
437 + * or requeued before executing the next function, which
438 +- * resets all in-service entites as no more in service.
439 ++ * resets all in-service entities as no more in service. This
440 ++ * may cause bfqq to be freed. If this happens, the next
441 ++ * function returns true.
442 + */
443 +- __bfq_bfqd_reset_in_service(bfqd);
444 ++ return __bfq_bfqd_reset_in_service(bfqd);
445 + }
446 +
447 + /**
448 +@@ -3238,7 +3258,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
449 + bool slow;
450 + unsigned long delta = 0;
451 + struct bfq_entity *entity = &bfqq->entity;
452 +- int ref;
453 +
454 + /*
455 + * Check whether the process is slow (see bfq_bfqq_is_slow).
456 +@@ -3307,10 +3326,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
457 + * reason.
458 + */
459 + __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
460 +- ref = bfqq->ref;
461 +- __bfq_bfqq_expire(bfqd, bfqq);
462 +-
463 +- if (ref == 1) /* bfqq is gone, no more actions on it */
464 ++ if (__bfq_bfqq_expire(bfqd, bfqq))
465 ++ /* bfqq is gone, no more actions on it */
466 + return;
467 +
468 + bfqq->injected_service = 0;
469 +@@ -3521,9 +3538,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
470 + * symmetric scenario where:
471 + * (i) each of these processes must get the same throughput as
472 + * the others;
473 +- * (ii) all these processes have the same I/O pattern
474 +- (either sequential or random).
475 +- * In fact, in such a scenario, the drive will tend to treat
476 ++ * (ii) the I/O of each process has the same properties, in
477 ++ * terms of locality (sequential or random), direction
478 ++ * (reads or writes), request sizes, greediness
479 ++ * (from I/O-bound to sporadic), and so on.
480 ++ * In fact, in such a scenario, the drive tends to treat
481 + * the requests of each of these processes in about the same
482 + * way as the requests of the others, and thus to provide
483 + * each of these processes with about the same throughput
484 +@@ -3532,18 +3551,67 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
485 + * certainly needed to guarantee that bfqq receives its
486 + * assigned fraction of the device throughput (see [1] for
487 + * details).
488 ++ * The problem is that idling may significantly reduce
489 ++ * throughput with certain combinations of types of I/O and
490 ++ * devices. An important example is sync random I/O, on flash
491 ++ * storage with command queueing. So, unless bfqq falls in the
492 ++ * above cases where idling also boosts throughput, it would
493 ++ * be important to check conditions (i) and (ii) accurately,
494 ++ * so as to avoid idling when not strictly needed for service
495 ++ * guarantees.
496 ++ *
497 ++ * Unfortunately, it is extremely difficult to thoroughly
498 ++ * check condition (ii). And, in case there are active groups,
499 ++ * it becomes very difficult to check condition (i) too. In
500 ++ * fact, if there are active groups, then, for condition (i)
501 ++ * to become false, it is enough that an active group contains
502 ++ * more active processes or sub-groups than some other active
503 ++ * group. More precisely, for condition (i) to hold because of
504 ++ * such a group, it is not even necessary that the group is
505 ++ * (still) active: it is sufficient that, even if the group
506 ++ * has become inactive, some of its descendant processes still
507 ++ * have some request already dispatched but still waiting for
508 ++ * completion. In fact, requests have still to be guaranteed
509 ++ * their share of the throughput even after being
510 ++ * dispatched. In this respect, it is easy to show that, if a
511 ++ * group frequently becomes inactive while still having
512 ++ * in-flight requests, and if, when this happens, the group is
513 ++ * not considered in the calculation of whether the scenario
514 ++ * is asymmetric, then the group may fail to be guaranteed its
515 ++ * fair share of the throughput (basically because idling may
516 ++ * not be performed for the descendant processes of the group,
517 ++ * but it had to be). We address this issue with the
518 ++ * following bi-modal behavior, implemented in the function
519 ++ * bfq_symmetric_scenario().
520 ++ *
521 ++ * If there are groups with requests waiting for completion
522 ++ * (as commented above, some of these groups may even be
523 ++ * already inactive), then the scenario is tagged as
524 ++ * asymmetric, conservatively, without checking any of the
525 ++ * conditions (i) and (ii). So the device is idled for bfqq.
526 ++ * This behavior matches also the fact that groups are created
527 ++ * exactly if controlling I/O is a primary concern (to
528 ++ * preserve bandwidth and latency guarantees).
529 ++ *
530 ++ * On the opposite end, if there are no groups with requests
531 ++ * waiting for completion, then only condition (i) is actually
532 ++ * controlled, i.e., provided that condition (i) holds, idling
533 ++ * is not performed, regardless of whether condition (ii)
534 ++ * holds. In other words, only if condition (i) does not hold,
535 ++ * then idling is allowed, and the device tends to be
536 ++ * prevented from queueing many requests, possibly of several
537 ++ * processes. Since there are no groups with requests waiting
538 ++ * for completion, then, to control condition (i) it is enough
539 ++ * to check just whether all the queues with requests waiting
540 ++ * for completion also have the same weight.
541 + *
542 +- * We address this issue by controlling, actually, only the
543 +- * symmetry sub-condition (i), i.e., provided that
544 +- * sub-condition (i) holds, idling is not performed,
545 +- * regardless of whether sub-condition (ii) holds. In other
546 +- * words, only if sub-condition (i) holds, then idling is
547 +- * allowed, and the device tends to be prevented from queueing
548 +- * many requests, possibly of several processes. The reason
549 +- * for not controlling also sub-condition (ii) is that we
550 +- * exploit preemption to preserve guarantees in case of
551 +- * symmetric scenarios, even if (ii) does not hold, as
552 +- * explained in the next two paragraphs.
553 ++ * Not checking condition (ii) evidently exposes bfqq to the
554 ++ * risk of getting less throughput than its fair share.
555 ++ * However, for queues with the same weight, a further
556 ++ * mechanism, preemption, mitigates or even eliminates this
557 ++ * problem. And it does so without consequences on overall
558 ++ * throughput. This mechanism and its benefits are explained
559 ++ * in the next three paragraphs.
560 + *
561 + * Even if a queue, say Q, is expired when it remains idle, Q
562 + * can still preempt the new in-service queue if the next
563 +@@ -3557,11 +3625,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
564 + * idling allows the internal queues of the device to contain
565 + * many requests, and thus to reorder requests, we can rather
566 + * safely assume that the internal scheduler still preserves a
567 +- * minimum of mid-term fairness. The motivation for using
568 +- * preemption instead of idling is that, by not idling,
569 +- * service guarantees are preserved without minimally
570 +- * sacrificing throughput. In other words, both a high
571 +- * throughput and its desired distribution are obtained.
572 ++ * minimum of mid-term fairness.
573 + *
574 + * More precisely, this preemption-based, idleless approach
575 + * provides fairness in terms of IOPS, and not sectors per
576 +@@ -3580,27 +3644,28 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
577 + * 1024/8 times as high as the service received by the other
578 + * queue.
579 + *
580 +- * On the other hand, device idling is performed, and thus
581 +- * pure sector-domain guarantees are provided, for the
582 +- * following queues, which are likely to need stronger
583 +- * throughput guarantees: weight-raised queues, and queues
584 +- * with a higher weight than other queues. When such queues
585 +- * are active, sub-condition (i) is false, which triggers
586 +- * device idling.
587 ++ * The motivation for using preemption instead of idling (for
588 ++ * queues with the same weight) is that, by not idling,
589 ++ * service guarantees are preserved (completely or at least in
590 ++ * part) without minimally sacrificing throughput. And, if
591 ++ * there is no active group, then the primary expectation for
592 ++ * this device is probably a high throughput.
593 + *
594 +- * According to the above considerations, the next variable is
595 +- * true (only) if sub-condition (i) holds. To compute the
596 +- * value of this variable, we not only use the return value of
597 +- * the function bfq_symmetric_scenario(), but also check
598 +- * whether bfqq is being weight-raised, because
599 +- * bfq_symmetric_scenario() does not take into account also
600 +- * weight-raised queues (see comments on
601 +- * bfq_weights_tree_add()). In particular, if bfqq is being
602 +- * weight-raised, it is important to idle only if there are
603 +- * other, non-weight-raised queues that may steal throughput
604 +- * to bfqq. Actually, we should be even more precise, and
605 +- * differentiate between interactive weight raising and
606 +- * soft real-time weight raising.
607 ++ * We are now left only with explaining the additional
608 ++ * compound condition that is checked below for deciding
609 ++ * whether the scenario is asymmetric. To explain this
610 ++ * compound condition, we need to add that the function
611 ++ * bfq_symmetric_scenario checks the weights of only
612 ++ * non-weight-raised queues, for efficiency reasons (see
613 ++ * comments on bfq_weights_tree_add()). Then the fact that
614 ++ * bfqq is weight-raised is checked explicitly here. More
615 ++ * precisely, the compound condition below takes into account
616 ++ * also the fact that, even if bfqq is being weight-raised,
617 ++ * the scenario is still symmetric if all queues with requests
618 ++ * waiting for completion happen to be
619 ++ * weight-raised. Actually, we should be even more precise
620 ++ * here, and differentiate between interactive weight raising
621 ++ * and soft real-time weight raising.
622 + *
623 + * As a side note, it is worth considering that the above
624 + * device-idling countermeasures may however fail in the
625 +@@ -5422,7 +5487,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
626 + bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
627 +
628 + bfqd->queue_weights_tree = RB_ROOT;
629 +- bfqd->group_weights_tree = RB_ROOT;
630 ++ bfqd->num_groups_with_pending_reqs = 0;
631 +
632 + INIT_LIST_HEAD(&bfqd->active_list);
633 + INIT_LIST_HEAD(&bfqd->idle_list);
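
A recurring theme in the bfq-iosched.c hunks above is that the weights tree now owns a reference to the queue it counts: bfq_weights_tree_add() does bfqq->ref++ and __bfq_weights_tree_remove() ends with bfq_put_queue(), so the counter can never point at a freed queue. A minimal userspace sketch of that ownership rule, with hypothetical names and a plain malloc/free refcount:

/* Illustrative only: membership in a container holds its own
 * reference, so the container never observes a freed object. */
#include <stdio.h>
#include <stdlib.h>

struct queue {
        int ref;
};

static struct queue *queue_get(struct queue *q)
{
        q->ref++;
        return q;
}

static void queue_put(struct queue *q)
{
        if (--q->ref == 0) {
                printf("queue freed\n");
                free(q);
        }
}

/* Hypothetical stand-ins for bfq_weights_tree_add()/_remove():
 * the tree's pointer is accounted for by one extra reference. */
static struct queue *tree_slot;

static void tree_add(struct queue *q)
{
        tree_slot = queue_get(q);
}

static void tree_remove(void)
{
        struct queue *q = tree_slot;

        tree_slot = NULL;
        queue_put(q);
}

int main(void)
{
        struct queue *q = calloc(1, sizeof(*q));

        q->ref = 1;          /* creator's reference */
        tree_add(q);         /* ref == 2: the tree owns one */
        queue_put(q);        /* creator drops: object survives for the tree */
        tree_remove();       /* last reference dropped here -> freed */
        return 0;
}

This is also why bfqq_process_refs() above now subtracts (bfqq->weight_counter != NULL): that reference belongs to the tree, not to a process.
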
634 +diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
635 +index a41e9884f2dd2..ca98c98a8179b 100644
636 +--- a/block/bfq-iosched.h
637 ++++ b/block/bfq-iosched.h
638 +@@ -108,15 +108,14 @@ struct bfq_sched_data {
639 + };
640 +
641 + /**
642 +- * struct bfq_weight_counter - counter of the number of all active entities
643 ++ * struct bfq_weight_counter - counter of the number of all active queues
644 + * with a given weight.
645 + */
646 + struct bfq_weight_counter {
647 +- unsigned int weight; /* weight of the entities this counter refers to */
648 +- unsigned int num_active; /* nr of active entities with this weight */
649 ++ unsigned int weight; /* weight of the queues this counter refers to */
650 ++ unsigned int num_active; /* nr of active queues with this weight */
651 + /*
652 +- * Weights tree member (see bfq_data's @queue_weights_tree and
653 +- * @group_weights_tree)
654 ++ * Weights tree member (see bfq_data's @queue_weights_tree)
655 + */
656 + struct rb_node weights_node;
657 + };
658 +@@ -151,8 +150,6 @@ struct bfq_weight_counter {
659 + struct bfq_entity {
660 + /* service_tree member */
661 + struct rb_node rb_node;
662 +- /* pointer to the weight counter associated with this entity */
663 +- struct bfq_weight_counter *weight_counter;
664 +
665 + /*
666 + * Flag, true if the entity is on a tree (either the active or
667 +@@ -199,6 +196,9 @@ struct bfq_entity {
668 +
669 + /* flag, set to request a weight, ioprio or ioprio_class change */
670 + int prio_changed;
671 ++
672 ++ /* flag, set if the entity is counted in groups_with_pending_reqs */
673 ++ bool in_groups_with_pending_reqs;
674 + };
675 +
676 + struct bfq_group;
677 +@@ -266,6 +266,9 @@ struct bfq_queue {
678 + /* entity representing this queue in the scheduler */
679 + struct bfq_entity entity;
680 +
681 ++ /* pointer to the weight counter associated with this entity */
682 ++ struct bfq_weight_counter *weight_counter;
683 ++
684 + /* maximum budget allowed from the feedback mechanism */
685 + int max_budget;
686 + /* budget expiration (in jiffies) */
687 +@@ -448,15 +451,54 @@ struct bfq_data {
688 + * bfq_weights_tree_[add|remove] for further details).
689 + */
690 + struct rb_root queue_weights_tree;
691 ++
692 + /*
693 +- * rbtree of non-queue @bfq_entity weight counters, sorted by
694 +- * weight. Used to keep track of whether all @bfq_groups have
695 +- * the same weight. The tree contains one counter for each
696 +- * distinct weight associated to some active @bfq_group (see
697 +- * the comments to the functions bfq_weights_tree_[add|remove]
698 +- * for further details).
699 ++ * Number of groups with at least one descendant process that
700 ++ * has at least one request waiting for completion. Note that
701 ++ * this accounts for also requests already dispatched, but not
702 ++ * yet completed. Therefore this number of groups may differ
703 ++ * (be larger) than the number of active groups, as a group is
704 ++ * considered active only if its corresponding entity has
705 ++ * descendant queues with at least one request queued. This
706 ++ * number is used to decide whether a scenario is symmetric.
707 ++ * For a detailed explanation see comments on the computation
708 ++ * of the variable asymmetric_scenario in the function
709 ++ * bfq_better_to_idle().
710 ++ *
711 ++ * However, it is hard to compute this number exactly, for
712 ++ * groups with multiple descendant processes. Consider a group
713 ++ * that is inactive, i.e., that has no descendant process with
714 ++ * pending I/O inside BFQ queues. Then suppose that
715 ++ * num_groups_with_pending_reqs is still accounting for this
716 ++ * group, because the group has descendant processes with some
717 ++ * I/O request still in flight. num_groups_with_pending_reqs
718 ++ * should be decremented when the in-flight request of the
719 ++ * last descendant process is finally completed (assuming that
720 ++ * nothing else has changed for the group in the meantime, in
721 ++ * terms of composition of the group and active/inactive state of child
722 ++ * groups and processes). To accomplish this, an additional
723 ++ * pending-request counter must be added to entities, and must
724 ++ * be updated correctly. To avoid this additional field and operations,
725 ++ * we resort to the following tradeoff between simplicity and
726 ++ * accuracy: for an inactive group that is still counted in
727 ++ * num_groups_with_pending_reqs, we decrement
728 ++ * num_groups_with_pending_reqs when the first descendant
729 ++ * process of the group remains with no request waiting for
730 ++ * completion.
731 ++ *
732 ++ * Even this simpler decrement strategy requires a little
733 ++ * carefulness: to avoid multiple decrements, we flag a group,
734 ++ * more precisely an entity representing a group, as still
735 ++ * counted in num_groups_with_pending_reqs when it becomes
736 ++ * inactive. Then, when the first descendant queue of the
737 ++ * entity remains with no request waiting for completion,
738 ++ * num_groups_with_pending_reqs is decremented, and this flag
739 ++ * is reset. After this flag is reset for the entity,
740 ++ * num_groups_with_pending_reqs won't be decremented any
741 ++ * longer in case a new descendant queue of the entity remains
742 ++ * with no request waiting for completion.
743 + */
744 +- struct rb_root group_weights_tree;
745 ++ unsigned int num_groups_with_pending_reqs;
746 +
747 + /*
748 + * Number of bfq_queues containing requests (including the
749 +@@ -854,10 +896,10 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
750 + void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
751 + struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
752 + void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
753 +-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
754 ++void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
755 + struct rb_root *root);
756 + void __bfq_weights_tree_remove(struct bfq_data *bfqd,
757 +- struct bfq_entity *entity,
758 ++ struct bfq_queue *bfqq,
759 + struct rb_root *root);
760 + void bfq_weights_tree_remove(struct bfq_data *bfqd,
761 + struct bfq_queue *bfqq);
762 +@@ -951,7 +993,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
763 + bool ins_into_idle_tree);
764 + bool next_queue_may_preempt(struct bfq_data *bfqd);
765 + struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
766 +-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
767 ++bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
768 + void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
769 + bool ins_into_idle_tree, bool expiration);
770 + void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
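
The long comment above describes a deliberate trade-off: instead of a per-entity pending-request counter, each group entity carries one in_groups_with_pending_reqs flag, so num_groups_with_pending_reqs is incremented once on first activation and decremented at most once when the first descendant queue drains. A toy model of that idempotent bookkeeping, with hypothetical function names:

/* Illustrative only: a boolean latch guarantees a shared counter is
 * bumped once per membership episode, never twice. */
#include <assert.h>
#include <stdbool.h>

static unsigned int num_groups_with_pending_reqs;

struct group_entity {
        bool in_groups_with_pending_reqs;
};

static void group_activated(struct group_entity *e)
{
        if (!e->in_groups_with_pending_reqs) {   /* latch open? */
                e->in_groups_with_pending_reqs = true;
                num_groups_with_pending_reqs++;
        }
}

static void first_descendant_drained(struct group_entity *e)
{
        if (e->in_groups_with_pending_reqs) {    /* decrement once only */
                e->in_groups_with_pending_reqs = false;
                num_groups_with_pending_reqs--;
        }
}

int main(void)
{
        struct group_entity g = { false };

        group_activated(&g);
        group_activated(&g);                /* no double count */
        assert(num_groups_with_pending_reqs == 1);
        first_descendant_drained(&g);
        first_descendant_drained(&g);       /* no double decrement */
        assert(num_groups_with_pending_reqs == 0);
        return 0;
}
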
771 +diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
772 +index ff7c2d470bb82..11ff5ceae02b4 100644
773 +--- a/block/bfq-wf2q.c
774 ++++ b/block/bfq-wf2q.c
775 +@@ -788,25 +788,23 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
776 + new_weight = entity->orig_weight *
777 + (bfqq ? bfqq->wr_coeff : 1);
778 + /*
779 +- * If the weight of the entity changes, remove the entity
780 +- * from its old weight counter (if there is a counter
781 +- * associated with the entity), and add it to the counter
782 +- * associated with its new weight.
783 ++ * If the weight of the entity changes, and the entity is a
784 ++ * queue, remove the entity from its old weight counter (if
785 ++ * there is a counter associated with the entity).
786 + */
787 +- if (prev_weight != new_weight) {
788 +- root = bfqq ? &bfqd->queue_weights_tree :
789 +- &bfqd->group_weights_tree;
790 +- __bfq_weights_tree_remove(bfqd, entity, root);
791 ++ if (prev_weight != new_weight && bfqq) {
792 ++ root = &bfqd->queue_weights_tree;
793 ++ __bfq_weights_tree_remove(bfqd, bfqq, root);
794 + }
795 + entity->weight = new_weight;
796 + /*
797 +- * Add the entity to its weights tree only if it is
798 +- * not associated with a weight-raised queue.
799 ++ * Add the entity, if it is not a weight-raised queue,
800 ++ * to the counter associated with its new weight.
801 + */
802 +- if (prev_weight != new_weight &&
803 +- (bfqq ? bfqq->wr_coeff == 1 : 1))
804 ++ if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
805 + /* If we get here, root has been initialized. */
806 +- bfq_weights_tree_add(bfqd, entity, root);
807 ++ bfq_weights_tree_add(bfqd, bfqq, root);
808 ++ }
809 +
810 + new_st->wsum += entity->weight;
811 +
812 +@@ -1012,9 +1010,12 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
813 + if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
814 + struct bfq_group *bfqg =
815 + container_of(entity, struct bfq_group, entity);
816 ++ struct bfq_data *bfqd = bfqg->bfqd;
817 +
818 +- bfq_weights_tree_add(bfqg->bfqd, entity,
819 +- &bfqd->group_weights_tree);
820 ++ if (!entity->in_groups_with_pending_reqs) {
821 ++ entity->in_groups_with_pending_reqs = true;
822 ++ bfqd->num_groups_with_pending_reqs++;
823 ++ }
824 + }
825 + #endif
826 +
827 +@@ -1599,7 +1600,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
828 + return bfqq;
829 + }
830 +
831 +-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
832 ++/* returns true if the in-service queue gets freed */
833 ++bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
834 + {
835 + struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
836 + struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
837 +@@ -1623,8 +1625,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
838 + * service tree either, then release the service reference to
839 + * the queue it represents (taken with bfq_get_entity).
840 + */
841 +- if (!in_serv_entity->on_st)
842 ++ if (!in_serv_entity->on_st) {
843 ++ /*
844 ++ * If no process is referencing in_serv_bfqq any
845 ++ * longer, then the service reference may be the only
846 ++ * reference to the queue. If this is the case, then
847 ++ * bfqq gets freed here.
848 ++ */
849 ++ int ref = in_serv_bfqq->ref;
850 + bfq_put_queue(in_serv_bfqq);
851 ++ if (ref == 1)
852 ++ return true;
853 ++ }
854 ++
855 ++ return false;
856 + }
857 +
858 + void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
859 +@@ -1667,15 +1681,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
860 +
861 + bfqd->busy_queues--;
862 +
863 +- if (!bfqq->dispatched)
864 +- bfq_weights_tree_remove(bfqd, bfqq);
865 +-
866 + if (bfqq->wr_coeff > 1)
867 + bfqd->wr_busy_queues--;
868 +
869 + bfqg_stats_update_dequeue(bfqq_group(bfqq));
870 +
871 + bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
872 ++
873 ++ if (!bfqq->dispatched)
874 ++ bfq_weights_tree_remove(bfqd, bfqq);
875 + }
876 +
877 + /*
878 +@@ -1692,7 +1706,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
879 +
880 + if (!bfqq->dispatched)
881 + if (bfqq->wr_coeff == 1)
882 +- bfq_weights_tree_add(bfqd, &bfqq->entity,
883 ++ bfq_weights_tree_add(bfqd, bfqq,
884 + &bfqd->queue_weights_tree);
885 +
886 + if (bfqq->wr_coeff > 1)
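
In the bfq-wf2q.c hunk above, __bfq_bfqd_reset_in_service() snapshots ref before the final bfq_put_queue() and reports ref == 1, so callers such as bfq_bfqq_expire() know the queue was freed and must not be touched again. A hedged userspace sketch of the same pattern:

/* Illustrative only: report whether a put released the last
 * reference, so the caller knows the pointer is now dangling. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
        int ref;
};

static void queue_put(struct queue *q)
{
        if (--q->ref == 0)
                free(q);
}

/* Mirrors the __bfq_bfqd_reset_in_service() shape: read the refcount
 * *before* the put; if it was 1, the object is gone afterwards. */
static bool reset_in_service(struct queue *q)
{
        int ref = q->ref;

        queue_put(q);
        return ref == 1;     /* true -> do not use q any more */
}

int main(void)
{
        struct queue *q = malloc(sizeof(*q));

        q->ref = 1;
        if (reset_in_service(q))
                printf("queue freed, no more actions on it\n");
        return 0;
}
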
887 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
888 +index 48929df7673b1..4cf3ef4ddec35 100644
889 +--- a/drivers/char/ipmi/ipmi_msghandler.c
890 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
891 +@@ -2863,7 +2863,7 @@ cleanup_bmc_device(struct kref *ref)
892 + * with removing the device attributes while reading a device
893 + * attribute.
894 + */
895 +- schedule_work(&bmc->remove_work);
896 ++ queue_work(remove_work_wq, &bmc->remove_work);
897 + }
898 +
899 + /*
900 +@@ -5085,22 +5085,27 @@ static int ipmi_init_msghandler(void)
901 + if (initialized)
902 + goto out;
903 +
904 +- init_srcu_struct(&ipmi_interfaces_srcu);
905 +-
906 +- timer_setup(&ipmi_timer, ipmi_timeout, 0);
907 +- mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
908 +-
909 +- atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
910 ++ rv = init_srcu_struct(&ipmi_interfaces_srcu);
911 ++ if (rv)
912 ++ goto out;
913 +
914 + remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
915 + if (!remove_work_wq) {
916 + pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
917 + rv = -ENOMEM;
918 +- goto out;
919 ++ goto out_wq;
920 + }
921 +
922 ++ timer_setup(&ipmi_timer, ipmi_timeout, 0);
923 ++ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
924 ++
925 ++ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
926 ++
927 + initialized = true;
928 +
929 ++out_wq:
930 ++ if (rv)
931 ++ cleanup_srcu_struct(&ipmi_interfaces_srcu);
932 + out:
933 + mutex_unlock(&ipmi_interfaces_mutex);
934 + return rv;
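
The reordered ipmi_init_msghandler() above follows the usual kernel pattern: initialize facilities in dependency order, and on failure unwind only what was already set up (cleanup_srcu_struct() if the workqueue cannot be created; the timer and panic notifier are armed last, once nothing can fail). A generic sketch of that goto-unwind shape; init_a/init_b are hypothetical stand-ins, not the driver's functions:

/* Illustrative only: set up A then B; if B fails, tear down A. */
#include <stdio.h>

static int init_a(void)    { return 0; }             /* pretend success */
static void cleanup_a(void) { puts("cleanup a"); }
static int init_b(void)    { return -12; }           /* pretend -ENOMEM */

static int init_msghandler(void)
{
        int rv;

        rv = init_a();
        if (rv)
                goto out;

        rv = init_b();
        if (rv)
                goto out_a;          /* unwind in reverse order */

        /* Only now arm side effects that cannot fail. */
        return 0;

out_a:
        cleanup_a();
out:
        return rv;
}

int main(void)
{
        printf("rv = %d\n", init_msghandler());
        return 0;
}
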
935 +diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
936 +index 27c08ddab0e1a..96db7e96fcea9 100644
937 +--- a/drivers/hid/hid-holtek-mouse.c
938 ++++ b/drivers/hid/hid-holtek-mouse.c
939 +@@ -68,8 +68,23 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
940 + static int holtek_mouse_probe(struct hid_device *hdev,
941 + const struct hid_device_id *id)
942 + {
943 ++ int ret;
944 ++
945 + if (!hid_is_usb(hdev))
946 + return -EINVAL;
947 ++
948 ++ ret = hid_parse(hdev);
949 ++ if (ret) {
950 ++ hid_err(hdev, "hid parse failed: %d\n", ret);
951 ++ return ret;
952 ++ }
953 ++
954 ++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
955 ++ if (ret) {
956 ++ hid_err(hdev, "hw start failed: %d\n", ret);
957 ++ return ret;
958 ++ }
959 ++
960 + return 0;
961 + }
962 +
963 +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
964 +index c187e557678ef..30a7f7fde6511 100644
965 +--- a/drivers/hwmon/lm90.c
966 ++++ b/drivers/hwmon/lm90.c
967 +@@ -197,6 +197,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
968 + #define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
969 + #define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
970 + #define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
971 ++#define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */
972 +
973 + #define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
974 + #define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
975 +@@ -786,7 +787,7 @@ static int lm90_update_device(struct device *dev)
976 + val = lm90_read_reg(client, LM90_REG_R_STATUS);
977 + if (val < 0)
978 + return val;
979 +- data->alarms = val; /* lower 8 bit of alarms */
980 ++ data->alarms = val & ~LM90_STATUS_BUSY;
981 +
982 + if (data->kind == max6696) {
983 + val = lm90_select_remote_channel(client, data, 1);
984 +@@ -1439,12 +1440,11 @@ static int lm90_detect(struct i2c_client *client,
985 + if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
986 + return -ENODEV;
987 +
988 +- if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
989 ++ if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
990 + config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
991 + if (config2 < 0)
992 + return -ENODEV;
993 +- } else
994 +- config2 = 0; /* Make compiler happy */
995 ++ }
996 +
997 + if ((address == 0x4C || address == 0x4D)
998 + && man_id == 0x01) { /* National Semiconductor */
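
The first lm90 hunk stops exporting the chip's BUSY flag (bit 7 of the status register) as a bogus alarm: the status byte mixes a transient "conversion in progress" bit with genuine limit alarms, so the driver now masks it out before publishing. In miniature, as an editor's sketch rather than driver code:

/* Illustrative only: strip a non-alarm status bit before exposing
 * the byte as "alarms", as the lm90 hunk above does. */
#include <assert.h>
#include <stdint.h>

#define LM90_STATUS_BUSY (1 << 7)   /* conversion is ongoing */
#define LM90_STATUS_LLOW (1 << 5)   /* local low temp limit tripped */

int main(void)
{
        uint8_t status = LM90_STATUS_BUSY | LM90_STATUS_LLOW;
        uint8_t alarms = status & (uint8_t)~LM90_STATUS_BUSY;

        assert(alarms == LM90_STATUS_LLOW);  /* BUSY never surfaces */
        return 0;
}
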
999 +diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
1000 +index 47ed3ab25dc95..6e6730f036b03 100644
1001 +--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
1002 ++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
1003 +@@ -945,7 +945,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
1004 + &addrlimit) ||
1005 + addrlimit > type_max(typeof(pkt->addrlimit))) {
1006 + ret = -EINVAL;
1007 +- goto free_pbc;
1008 ++ goto free_pkt;
1009 + }
1010 + pkt->addrlimit = addrlimit;
1011 +
1012 +diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1013 +index e8f98de60df3a..a2e10cae654f0 100644
1014 +--- a/drivers/input/touchscreen/atmel_mxt_ts.c
1015 ++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1016 +@@ -1809,7 +1809,7 @@ static int mxt_read_info_block(struct mxt_data *data)
1017 + if (error) {
1018 + dev_err(&client->dev, "Error %d parsing object table\n", error);
1019 + mxt_free_object_table(data);
1020 +- goto err_free_mem;
1021 ++ return error;
1022 + }
1023 +
1024 + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
1025 +diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
1026 +index 80867bd8f44c3..c9aa28eee191d 100644
1027 +--- a/drivers/net/bonding/bond_options.c
1028 ++++ b/drivers/net/bonding/bond_options.c
1029 +@@ -1439,7 +1439,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
1030 + mac = (u8 *)&newval->value;
1031 + }
1032 +
1033 +- if (!is_valid_ether_addr(mac))
1034 ++ if (is_multicast_ether_addr(mac))
1035 + goto err;
1036 +
1037 + netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
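
The bond_options.c change relaxes validation from is_valid_ether_addr(), which rejects the all-zeroes address, to rejecting only multicast addresses, matching the bonding.txt update earlier in this patch: all-zeroes now means "fall back to the bond's own MAC". The multicast test is just the I/G bit, bit 0 of the first octet. A self-contained sketch mirroring that predicate:

/* Illustrative only: mirror is_multicast_ether_addr() -- an Ethernet
 * address is multicast iff bit 0 of its first octet is set. The
 * all-zeroes address passes, as the patch intends. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_multicast_ether_addr(const uint8_t *addr)
{
        return addr[0] & 0x01;
}

int main(void)
{
        const uint8_t zeros[6] = { 0 };
        const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const uint8_t local[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        assert(!is_multicast_ether_addr(zeros)); /* now accepted */
        assert(is_multicast_ether_addr(bcast));  /* still rejected */
        assert(!is_multicast_ether_addr(local)); /* locally administered */
        return 0;
}
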
1038 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
1039 +index 5f327659efa7a..85b688f60b876 100644
1040 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
1041 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
1042 +@@ -202,7 +202,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
1043 + struct qlcnic_info *, u16);
1044 + int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
1045 + void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
1046 +-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
1047 ++int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
1048 + bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
1049 + void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
1050 + struct qlcnic_vf_info *, u16);
1051 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1052 +index 77e386ebff09c..98275f18a87b0 100644
1053 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1054 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1055 +@@ -433,7 +433,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
1056 + struct qlcnic_cmd_args *cmd)
1057 + {
1058 + struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1059 +- int i, num_vlans;
1060 ++ int i, num_vlans, ret;
1061 + u16 *vlans;
1062 +
1063 + if (sriov->allowed_vlans)
1064 +@@ -444,7 +444,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
1065 + dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
1066 + sriov->num_allowed_vlans);
1067 +
1068 +- qlcnic_sriov_alloc_vlans(adapter);
1069 ++ ret = qlcnic_sriov_alloc_vlans(adapter);
1070 ++ if (ret)
1071 ++ return ret;
1072 +
1073 + if (!sriov->any_vlan)
1074 + return 0;
1075 +@@ -2164,7 +2166,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
1076 + return err;
1077 + }
1078 +
1079 +-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
1080 ++int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
1081 + {
1082 + struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1083 + struct qlcnic_vf_info *vf;
1084 +@@ -2174,7 +2176,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
1085 + vf = &sriov->vf_info[i];
1086 + vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
1087 + sizeof(*vf->sriov_vlans), GFP_KERNEL);
1088 ++ if (!vf->sriov_vlans)
1089 ++ return -ENOMEM;
1090 + }
1091 ++
1092 ++ return 0;
1093 + }
1094 +
1095 + void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
1096 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
1097 +index 50eaafa3eaba3..c9f2cd2462230 100644
1098 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
1099 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
1100 +@@ -598,7 +598,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
1101 + if (err)
1102 + goto del_flr_queue;
1103 +
1104 +- qlcnic_sriov_alloc_vlans(adapter);
1105 ++ err = qlcnic_sriov_alloc_vlans(adapter);
1106 ++ if (err)
1107 ++ goto del_flr_queue;
1108 +
1109 + return err;
1110 +
1111 +diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
1112 +index 02456ed13a7d4..5b93a3af4575d 100644
1113 +--- a/drivers/net/ethernet/sfc/falcon/rx.c
1114 ++++ b/drivers/net/ethernet/sfc/falcon/rx.c
1115 +@@ -732,7 +732,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
1116 + efx->rx_bufs_per_page);
1117 + rx_queue->page_ring = kcalloc(page_ring_size,
1118 + sizeof(*rx_queue->page_ring), GFP_KERNEL);
1119 +- rx_queue->page_ptr_mask = page_ring_size - 1;
1120 ++ if (!rx_queue->page_ring)
1121 ++ rx_queue->page_ptr_mask = 0;
1122 ++ else
1123 ++ rx_queue->page_ptr_mask = page_ring_size - 1;
1124 + }
1125 +
1126 + void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
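
The falcon fix above guards the recycle ring's index mask: page_ptr_mask is only meaningful if the kcalloc() succeeded, so on allocation failure it is forced to 0 rather than describing slots that do not exist. A minimal sketch of power-of-two ring indexing under that rule, with hypothetical names:

/* Illustrative only: a power-of-two ring indexed by 'ptr & mask'.
 * If the ring allocation fails, the mask must not claim slots that
 * do not exist -- the hunk above resets it to 0 in that case. */
#include <assert.h>
#include <stdlib.h>

struct ring {
        void **slots;
        unsigned int mask;   /* size - 1, valid only if slots != NULL */
};

static void ring_init(struct ring *r, unsigned int size_pow2)
{
        r->slots = calloc(size_pow2, sizeof(*r->slots));
        r->mask = r->slots ? size_pow2 - 1 : 0;
}

int main(void)
{
        struct ring r;

        ring_init(&r, 8);
        if (r.slots) {
                /* wrap-around indexing: 9 & 7 == 1 */
                assert((9u & r.mask) == 1);
        }
        free(r.slots);
        return 0;
}
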
1127 +diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
1128 +index f97b35430c840..ac1ad00e2fc55 100644
1129 +--- a/drivers/net/ethernet/smsc/smc911x.c
1130 ++++ b/drivers/net/ethernet/smsc/smc911x.c
1131 +@@ -2080,6 +2080,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
1132 +
1133 + ndev->dma = (unsigned char)-1;
1134 + ndev->irq = platform_get_irq(pdev, 0);
1135 ++ if (ndev->irq < 0) {
1136 ++ ret = ndev->irq;
1137 ++ goto release_both;
1138 ++ }
1139 ++
1140 + lp = netdev_priv(ndev);
1141 + lp->netdev = ndev;
1142 + #ifdef SMC_DYNAMIC_BUS_CONFIG
1143 +diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
1144 +index 778d3729f460a..89b3bc389f469 100644
1145 +--- a/drivers/net/fjes/fjes_main.c
1146 ++++ b/drivers/net/fjes/fjes_main.c
1147 +@@ -1284,6 +1284,11 @@ static int fjes_probe(struct platform_device *plat_dev)
1148 + hw->hw_res.start = res->start;
1149 + hw->hw_res.size = resource_size(res);
1150 + hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1151 ++ if (hw->hw_res.irq < 0) {
1152 ++ err = hw->hw_res.irq;
1153 ++ goto err_free_control_wq;
1154 ++ }
1155 ++
1156 + err = fjes_hw_init(&adapter->hw);
1157 + if (err)
1158 + goto err_free_control_wq;
1159 +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
1160 +index 940aa7a19f50b..ba0ca85e3d766 100644
1161 +--- a/drivers/net/hamradio/mkiss.c
1162 ++++ b/drivers/net/hamradio/mkiss.c
1163 +@@ -803,13 +803,14 @@ static void mkiss_close(struct tty_struct *tty)
1164 + */
1165 + netif_stop_queue(ax->dev);
1166 +
1167 +- /* Free all AX25 frame buffers. */
1168 ++ unregister_netdev(ax->dev);
1169 ++
1170 ++ /* Free all AX25 frame buffers after unreg. */
1171 + kfree(ax->rbuff);
1172 + kfree(ax->xbuff);
1173 +
1174 + ax->tty = NULL;
1175 +
1176 +- unregister_netdev(ax->dev);
1177 + free_netdev(ax->dev);
1178 + }
1179 +
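
The mkiss fix is purely about ordering: the netdev must be unregistered, so no transmit path can still run, before the rx/tx buffers it uses are freed, closing a use-after-free window. The same reverse-of-setup rule in outline, with hypothetical stand-ins for unregister_netdev() and the kfree() calls:

/* Illustrative only: tear down in the reverse of setup order --
 * stop the consumer first, then free what it was using. */
#include <stdio.h>

static void unregister(void)   { puts("netdev gone: no tx/rx possible"); }
static void free_buffers(void) { puts("now safe to free rbuff/xbuff"); }

int main(void)
{
        /* Wrong order -- free_buffers(); unregister(); -- would let a
         * late transmit dereference the freed buffers. */
        unregister();
        free_buffers();
        return 0;
}
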
1180 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1181 +index f438be83d2594..a44968d5cac48 100644
1182 +--- a/drivers/net/usb/lan78xx.c
1183 ++++ b/drivers/net/usb/lan78xx.c
1184 +@@ -75,6 +75,8 @@
1185 + #define LAN7801_USB_PRODUCT_ID (0x7801)
1186 + #define LAN78XX_EEPROM_MAGIC (0x78A5)
1187 + #define LAN78XX_OTP_MAGIC (0x78F3)
1188 ++#define AT29M2AF_USB_VENDOR_ID (0x07C9)
1189 ++#define AT29M2AF_USB_PRODUCT_ID (0x0012)
1190 +
1191 + #define MII_READ 1
1192 + #define MII_WRITE 0
1193 +@@ -4170,6 +4172,10 @@ static const struct usb_device_id products[] = {
1194 + /* LAN7801 USB Gigabit Ethernet Device */
1195 + USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
1196 + },
1197 ++ {
1198 ++ /* ATM2-AF USB Gigabit Ethernet Device */
1199 ++ USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
1200 ++ },
1201 + {},
1202 + };
1203 + MODULE_DEVICE_TABLE(usb, products);
1204 +diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
1205 +index 4e17728f29375..08f1688dfeb28 100644
1206 +--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
1207 ++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
1208 +@@ -1011,10 +1011,10 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
1209 + bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
1210 + bank->gpio_chip.base = args.args[1];
1211 +
1212 +- npins = args.args[2];
1213 +- while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
1214 +- ++i, &args))
1215 +- npins += args.args[2];
1216 ++ /* get the last defined gpio line (offset + nb of pins) */
1217 ++ npins = args.args[0] + args.args[2];
1218 ++ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args))
1219 ++ npins = max(npins, (int)(args.args[0] + args.args[2]));
1220 + } else {
1221 + bank_nr = pctl->nbanks;
1222 + bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
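
The stm32 fix changes how many pins a bank advertises when its gpio-ranges are sparse: the old code summed the counts (args.args[2]) of all ranges, which truncates the bank if there are holes; the new code takes the highest (offset + count) across ranges. A small sketch of that computation, with a hypothetical range struct mirroring args.args[0]/args.args[2]:

/* Illustrative only: with sparse gpio-ranges, the number of lines a
 * bank must expose is the highest (offset + count), not the sum of
 * the counts. */
#include <assert.h>

struct gpio_range {
        int offset, npins;
};

static int bank_npins(const struct gpio_range *r, int n)
{
        int i, npins = 0;

        for (i = 0; i < n; i++) {
                int last = r[i].offset + r[i].npins;

                if (last > npins)
                        npins = last;
        }
        return npins;
}

int main(void)
{
        /* lines 0-3 and 8-15 are routed; 4-7 are not */
        const struct gpio_range ranges[] = { { 0, 4 }, { 8, 8 } };

        /* summing the counts gives 12 and truncates the bank;
         * max(offset + count) correctly gives 16 */
        assert(bank_npins(ranges, 2) == 16);
        return 0;
}
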
1223 +diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
1224 +index 7dcb14d303eb4..d8715954f4e08 100644
1225 +--- a/drivers/spi/spi-armada-3700.c
1226 ++++ b/drivers/spi/spi-armada-3700.c
1227 +@@ -912,7 +912,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
1228 + return 0;
1229 +
1230 + error_clk:
1231 +- clk_disable_unprepare(spi->clk);
1232 ++ clk_unprepare(spi->clk);
1233 + error:
1234 + spi_master_put(master);
1235 + out:
1236 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
1237 +index d7a12161e5531..1b3e674e6330d 100644
1238 +--- a/drivers/usb/gadget/function/u_ether.c
1239 ++++ b/drivers/usb/gadget/function/u_ether.c
1240 +@@ -860,19 +860,23 @@ int gether_register_netdev(struct net_device *net)
1241 + {
1242 + struct eth_dev *dev;
1243 + struct usb_gadget *g;
1244 +- struct sockaddr sa;
1245 + int status;
1246 +
1247 + if (!net->dev.parent)
1248 + return -EINVAL;
1249 + dev = netdev_priv(net);
1250 + g = dev->gadget;
1251 ++
1252 ++ memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
1253 ++ net->addr_assign_type = NET_ADDR_RANDOM;
1254 ++
1255 + status = register_netdev(net);
1256 + if (status < 0) {
1257 + dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1258 + return status;
1259 + } else {
1260 + INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1261 ++ INFO(dev, "MAC %pM\n", dev->dev_mac);
1262 +
1263 + /* two kinds of host-initiated state changes:
1264 + * - iff DATA transfer is active, carrier is "on"
1265 +@@ -880,15 +884,6 @@ int gether_register_netdev(struct net_device *net)
1266 + */
1267 + netif_carrier_off(net);
1268 + }
1269 +- sa.sa_family = net->type;
1270 +- memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
1271 +- rtnl_lock();
1272 +- status = dev_set_mac_address(net, &sa);
1273 +- rtnl_unlock();
1274 +- if (status)
1275 +- pr_warn("cannot set self ethernet address: %d\n", status);
1276 +- else
1277 +- INFO(dev, "MAC %pM\n", dev->dev_mac);
1278 +
1279 + return status;
1280 + }
1281 +diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
1282 +index 201e9da1692a4..64352d2833e2e 100644
1283 +--- a/fs/f2fs/xattr.c
1284 ++++ b/fs/f2fs/xattr.c
1285 +@@ -658,8 +658,15 @@ static int __f2fs_setxattr(struct inode *inode, int index,
1286 + }
1287 +
1288 + last = here;
1289 +- while (!IS_XATTR_LAST_ENTRY(last))
1290 ++ while (!IS_XATTR_LAST_ENTRY(last)) {
1291 ++ if ((void *)(last) + sizeof(__u32) > last_base_addr ||
1292 ++ (void *)XATTR_NEXT_ENTRY(last) > last_base_addr) {
1293 ++ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1294 ++ error = -EFSCORRUPTED;
1295 ++ goto exit;
1296 ++ }
1297 + last = XATTR_NEXT_ENTRY(last);
1298 ++ }
1299 +
1300 + newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);
1301 +
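
The f2fs fix hardens a classic pattern: walking variable-length on-disk records by following each entry's size field. Before reading an entry's header, and before stepping to the next entry, the walker must check that both stay inside the buffer, or a corrupted size field walks off the end. A generic userspace sketch with a hypothetical record layout (u32 length then payload; length 0 terminates):

/* Illustrative only: bounds-checked walk over variable-length
 * records, in the spirit of the __f2fs_setxattr() hunk above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int walk(const uint8_t *buf, size_t size)
{
        const uint8_t *p = buf, *end = buf + size;

        for (;;) {
                uint32_t len;

                /* the header itself must fit before we read it */
                if ((size_t)(end - p) < sizeof(len))
                        return -1;           /* corrupted: -EFSCORRUPTED */
                memcpy(&len, p, sizeof(len));
                if (len == 0)
                        return 0;            /* clean terminator */
                /* the step to the next entry must stay in bounds */
                if ((size_t)(end - p) - sizeof(len) < len)
                        return -1;
                p += sizeof(len) + len;
        }
}

int main(void)
{
        uint8_t good[12] = { 0 }, bad[8] = { 0 };
        uint32_t four = 4, huge = 255;

        memcpy(good, &four, 4);              /* len 4 + "abcd" + len 0 */
        memcpy(good + 4, "abcd", 4);
        memcpy(bad, &huge, 4);               /* len runs off the end */

        printf("good: %d, bad: %d\n",
               walk(good, sizeof(good)), walk(bad, sizeof(bad)));
        return 0;
}
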
1302 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
1303 +index e7330a9a7d7dc..faee73c084d49 100644
1304 +--- a/include/linux/virtio_net.h
1305 ++++ b/include/linux/virtio_net.h
1306 +@@ -7,9 +7,27 @@
1307 + #include <uapi/linux/udp.h>
1308 + #include <uapi/linux/virtio_net.h>
1309 +
1310 ++static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
1311 ++{
1312 ++ switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1313 ++ case VIRTIO_NET_HDR_GSO_TCPV4:
1314 ++ return protocol == cpu_to_be16(ETH_P_IP);
1315 ++ case VIRTIO_NET_HDR_GSO_TCPV6:
1316 ++ return protocol == cpu_to_be16(ETH_P_IPV6);
1317 ++ case VIRTIO_NET_HDR_GSO_UDP:
1318 ++ return protocol == cpu_to_be16(ETH_P_IP) ||
1319 ++ protocol == cpu_to_be16(ETH_P_IPV6);
1320 ++ default:
1321 ++ return false;
1322 ++ }
1323 ++}
1324 ++
1325 + static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
1326 + const struct virtio_net_hdr *hdr)
1327 + {
1328 ++ if (skb->protocol)
1329 ++ return 0;
1330 ++
1331 + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1332 + case VIRTIO_NET_HDR_GSO_TCPV4:
1333 + case VIRTIO_NET_HDR_GSO_UDP:
1334 +@@ -88,9 +106,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1335 + if (!skb->protocol) {
1336 + __be16 protocol = dev_parse_header_protocol(skb);
1337 +
1338 +- virtio_net_hdr_set_proto(skb, hdr);
1339 +- if (protocol && protocol != skb->protocol)
1340 ++ if (!protocol)
1341 ++ virtio_net_hdr_set_proto(skb, hdr);
1342 ++ else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
1343 + return -EINVAL;
1344 ++ else
1345 ++ skb->protocol = protocol;
1346 + }
1347 + retry:
1348 + if (!skb_flow_dissect_flow_keys_basic(skb, &keys,
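
The virtio_net change tightens the contract between the header's gso_type and the link-layer protocol parsed from the frame: if the device parsed a protocol, it must be one the GSO type can legally produce, otherwise the packet is rejected. A userspace model of the added cross-check; the constants mirror the UAPI values, and host byte order is used instead of the kernel's __be16 for self-containment:

/* Illustrative only: model the protocol/gso_type cross-check added
 * to virtio_net_hdr_to_skb(). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

#define VIRTIO_NET_HDR_GSO_TCPV4 1
#define VIRTIO_NET_HDR_GSO_UDP   3
#define VIRTIO_NET_HDR_GSO_TCPV6 4
#define VIRTIO_NET_HDR_GSO_ECN   0x80

static bool match_proto(uint16_t protocol, uint8_t gso_type)
{
        switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
                return protocol == ETH_P_IP;
        case VIRTIO_NET_HDR_GSO_TCPV6:
                return protocol == ETH_P_IPV6;
        case VIRTIO_NET_HDR_GSO_UDP:
                return protocol == ETH_P_IP || protocol == ETH_P_IPV6;
        default:
                return false;
        }
}

int main(void)
{
        /* TCPv4 segmentation over an IPv6 frame must be rejected */
        assert(!match_proto(ETH_P_IPV6, VIRTIO_NET_HDR_GSO_TCPV4));
        /* the ECN bit does not change the verdict */
        assert(match_proto(ETH_P_IP,
                           VIRTIO_NET_HDR_GSO_TCPV4 | VIRTIO_NET_HDR_GSO_ECN));
        return 0;
}
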
1349 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
1350 +index a45db78eaf00a..567fdfd9678d5 100644
1351 +--- a/net/ax25/af_ax25.c
1352 ++++ b/net/ax25/af_ax25.c
1353 +@@ -88,8 +88,10 @@ static void ax25_kill_by_device(struct net_device *dev)
1354 + again:
1355 + ax25_for_each(s, &ax25_list) {
1356 + if (s->ax25_dev == ax25_dev) {
1357 +- s->ax25_dev = NULL;
1358 + spin_unlock_bh(&ax25_list_lock);
1359 ++ lock_sock(s->sk);
1360 ++ s->ax25_dev = NULL;
1361 ++ release_sock(s->sk);
1362 + ax25_disconnect(s, ENETUNREACH);
1363 + spin_lock_bh(&ax25_list_lock);
1364 +
1365 +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
1366 +index 25298b3eb8546..17ca9a681d47b 100644
1367 +--- a/net/netfilter/nfnetlink_log.c
1368 ++++ b/net/netfilter/nfnetlink_log.c
1369 +@@ -509,7 +509,8 @@ __build_packet_message(struct nfnl_log_net *log,
1370 + goto nla_put_failure;
1371 +
1372 + if (indev && skb->dev &&
1373 +- skb->mac_header != skb->network_header) {
1374 ++ skb_mac_header_was_set(skb) &&
1375 ++ skb_mac_header_len(skb) != 0) {
1376 + struct nfulnl_msg_packet_hw phw;
1377 + int len;
1378 +
1379 +diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
1380 +index eb5a052d3b252..8955431f2ab26 100644
1381 +--- a/net/netfilter/nfnetlink_queue.c
1382 ++++ b/net/netfilter/nfnetlink_queue.c
1383 +@@ -566,7 +566,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
1384 + goto nla_put_failure;
1385 +
1386 + if (indev && entskb->dev &&
1387 +- skb_mac_header_was_set(entskb)) {
1388 ++ skb_mac_header_was_set(entskb) &&
1389 ++ skb_mac_header_len(entskb) != 0) {
1390 + struct nfqnl_msg_packet_hw phw;
1391 + int len;
1392 +
1393 +diff --git a/net/phonet/pep.c b/net/phonet/pep.c
1394 +index db34735403035..c0b4cc1e108b3 100644
1395 +--- a/net/phonet/pep.c
1396 ++++ b/net/phonet/pep.c
1397 +@@ -959,6 +959,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
1398 + ret = -EBUSY;
1399 + else if (sk->sk_state == TCP_ESTABLISHED)
1400 + ret = -EISCONN;
1401 ++ else if (!pn->pn_sk.sobject)
1402 ++ ret = -EADDRNOTAVAIL;
1403 + else
1404 + ret = pep_sock_enable(sk, NULL, 0);
1405 + release_sock(sk);
1406 +diff --git a/sound/core/jack.c b/sound/core/jack.c
1407 +index 84c2a17c56ee3..847a8f3fd06ea 100644
1408 +--- a/sound/core/jack.c
1409 ++++ b/sound/core/jack.c
1410 +@@ -234,6 +234,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
1411 + return -ENOMEM;
1412 +
1413 + jack->id = kstrdup(id, GFP_KERNEL);
1414 ++ if (jack->id == NULL) {
1415 ++ kfree(jack);
1416 ++ return -ENOMEM;
1417 ++ }
1418 +
1419 + /* don't creat input device for phantom jack */
1420 + if (!phantom_jack) {
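
The snd_jack_new() hunk adds the missing error path for kstrdup(): when an object is built in steps, each later failure must free what earlier steps allocated, or the half-built object leaks. A compact sketch of that constructor rollback, with strdup/-12 standing in for kstrdup/-ENOMEM:

/* Illustrative only: roll back the partially built object when a
 * nested allocation fails, as the hunk above does for jack->id. */
#include <stdlib.h>
#include <string.h>

struct jack {
        char *id;
};

static int jack_new(const char *id, struct jack **out)
{
        struct jack *jack = calloc(1, sizeof(*jack));

        if (!jack)
                return -12;              /* -ENOMEM */
        jack->id = strdup(id);
        if (!jack->id) {
                free(jack);              /* roll back step one */
                return -12;
        }
        *out = jack;
        return 0;
}

int main(void)
{
        struct jack *j;

        if (jack_new("Headphone", &j) == 0) {
                free(j->id);
                free(j);
        }
        return 0;
}
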
1421 +diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
1422 +index a33cb744e96c8..4e77b1dcacc8b 100644
1423 +--- a/sound/drivers/opl3/opl3_midi.c
1424 ++++ b/sound/drivers/opl3/opl3_midi.c
1425 +@@ -412,7 +412,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
1426 + }
1427 + if (instr_4op) {
1428 + vp2 = &opl3->voices[voice + 3];
1429 +- if (vp->state > 0) {
1430 ++ if (vp2->state > 0) {
1431 + opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
1432 + voice_offset + 3);
1433 + reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;