1 |
Author: tomwij |
2 |
Date: 2013-07-01 11:36:08 +0000 (Mon, 01 Jul 2013) |
3 |
New Revision: 2425 |
4 |
|
5 |
Added: |
6 |
genpatches-2.6/trunk/3.10/1804_block-bfq-remove-cgroups.patch |
7 |
Modified: |
8 |
genpatches-2.6/trunk/3.10/0000_README |
9 |
Log: |
10 |
Remove BFQ cgroups support, whose compilation broke with 3.10; we need to wait for its author to fix it. |
11 |
|
12 |
Modified: genpatches-2.6/trunk/3.10/0000_README |
13 |
=================================================================== |
14 |
--- genpatches-2.6/trunk/3.10/0000_README 2013-07-01 07:12:17 UTC (rev 2424) |
15 |
+++ genpatches-2.6/trunk/3.10/0000_README 2013-07-01 11:36:08 UTC (rev 2425) |
16 |
@@ -63,6 +63,10 @@ |
17 |
From: http://algo.ing.unimo.it/people/paolo/disk_sched/ |
18 |
Desc: BFQ v6r2 patch 3 for 3.9: Early Queue Merge (EQM) |
19 |
|
20 |
+Patch: 1804_block-bfq-remove-cgroups.patch |
21 |
+From: Tom Wijsman <TomWij@g.o>, discovered by Justin Lecher <jlec@g.o>. |
22 |
+Desc: Remove BFQ cgroups support, whose compilation broke with 3.10; we need to wait for its author to fix it. |
23 |
+ |
24 |
Patch: 2400_kcopy-patch-for-infiniband-driver.patch |
25 |
From: Alexey Shvetsov <alexxy@g.o> |
26 |
Desc: Zero copy for infiniband psm userspace driver |
27 |
|
28 |
Added: genpatches-2.6/trunk/3.10/1804_block-bfq-remove-cgroups.patch |
29 |
=================================================================== |
30 |
--- genpatches-2.6/trunk/3.10/1804_block-bfq-remove-cgroups.patch (rev 0) |
31 |
+++ genpatches-2.6/trunk/3.10/1804_block-bfq-remove-cgroups.patch 2013-07-01 11:36:08 UTC (rev 2425) |
32 |
@@ -0,0 +1,1005 @@ |
33 |
+ |
34 |
+--- a/block/Kconfig.iosched |
35 |
++++ b/block/Kconfig.iosched |
36 |
+@@ -51,15 +51,6 @@ |
37 |
+ applications. If compiled built-in (saying Y here), BFQ can |
38 |
+ be configured to support hierarchical scheduling. |
39 |
+ |
40 |
+-config CGROUP_BFQIO |
41 |
+- bool "BFQ hierarchical scheduling support" |
42 |
+- depends on CGROUPS && IOSCHED_BFQ=y |
43 |
+- default n |
44 |
+- ---help--- |
45 |
+- Enable hierarchical scheduling in BFQ, using the cgroups |
46 |
+- filesystem interface. The name of the subsystem will be |
47 |
+- bfqio. |
48 |
+- |
49 |
+ choice |
50 |
+ prompt "Default I/O scheduler" |
51 |
+ default DEFAULT_CFQ |
52 |
+--- a/block/bfq-cgroup.c |
53 |
++++ b/block/bfq-cgroup.c |
54 |
+@@ -12,806 +12,6 @@ |
55 |
+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file. |
56 |
+ */ |
57 |
+ |
58 |
+-#ifdef CONFIG_CGROUP_BFQIO |
59 |
+-static struct bfqio_cgroup bfqio_root_cgroup = { |
60 |
+- .weight = BFQ_DEFAULT_GRP_WEIGHT, |
61 |
+- .ioprio = BFQ_DEFAULT_GRP_IOPRIO, |
62 |
+- .ioprio_class = BFQ_DEFAULT_GRP_CLASS, |
63 |
+-}; |
64 |
+- |
65 |
+-static inline void bfq_init_entity(struct bfq_entity *entity, |
66 |
+- struct bfq_group *bfqg) |
67 |
+-{ |
68 |
+- entity->weight = entity->new_weight; |
69 |
+- entity->orig_weight = entity->new_weight; |
70 |
+- entity->ioprio = entity->new_ioprio; |
71 |
+- entity->ioprio_class = entity->new_ioprio_class; |
72 |
+- entity->parent = bfqg->my_entity; |
73 |
+- entity->sched_data = &bfqg->sched_data; |
74 |
+-} |
75 |
+- |
76 |
+-static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup) |
77 |
+-{ |
78 |
+- return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id), |
79 |
+- struct bfqio_cgroup, css); |
80 |
+-} |
81 |
+- |
82 |
+-/* |
83 |
+- * Search the bfq_group for bfqd into the hash table (by now only a list) |
84 |
+- * of bgrp. Must be called under rcu_read_lock(). |
85 |
+- */ |
86 |
+-static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp, |
87 |
+- struct bfq_data *bfqd) |
88 |
+-{ |
89 |
+- struct bfq_group *bfqg; |
90 |
+- void *key; |
91 |
+- |
92 |
+- hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) { |
93 |
+- key = rcu_dereference(bfqg->bfqd); |
94 |
+- if (key == bfqd) |
95 |
+- return bfqg; |
96 |
+- } |
97 |
+- |
98 |
+- return NULL; |
99 |
+-} |
100 |
+- |
101 |
+-static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp, |
102 |
+- struct bfq_group *bfqg) |
103 |
+-{ |
104 |
+- struct bfq_entity *entity = &bfqg->entity; |
105 |
+- |
106 |
+- /* |
107 |
+- * If the weight of the entity has never been set via the sysfs |
108 |
+- * interface, then bgrp->weight == 0. In this case we initialize |
109 |
+- * the weight from the current ioprio value. Otherwise, the group |
110 |
+- * weight, if set, has priority over the ioprio value. |
111 |
+- */ |
112 |
+- if (bgrp->weight == 0) { |
113 |
+- entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio); |
114 |
+- entity->new_ioprio = bgrp->ioprio; |
115 |
+- } else { |
116 |
+- entity->new_weight = bgrp->weight; |
117 |
+- entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight); |
118 |
+- } |
119 |
+- entity->orig_weight = entity->weight = entity->new_weight; |
120 |
+- entity->ioprio = entity->new_ioprio; |
121 |
+- entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class; |
122 |
+- entity->my_sched_data = &bfqg->sched_data; |
123 |
+-} |
124 |
+- |
125 |
+-static inline void bfq_group_set_parent(struct bfq_group *bfqg, |
126 |
+- struct bfq_group *parent) |
127 |
+-{ |
128 |
+- struct bfq_entity *entity; |
129 |
+- |
130 |
+- BUG_ON(parent == NULL); |
131 |
+- BUG_ON(bfqg == NULL); |
132 |
+- |
133 |
+- entity = &bfqg->entity; |
134 |
+- entity->parent = parent->my_entity; |
135 |
+- entity->sched_data = &parent->sched_data; |
136 |
+-} |
137 |
+- |
138 |
+-/** |
139 |
+- * bfq_group_chain_alloc - allocate a chain of groups. |
140 |
+- * @bfqd: queue descriptor. |
141 |
+- * @cgroup: the leaf cgroup this chain starts from. |
142 |
+- * |
143 |
+- * Allocate a chain of groups starting from the one belonging to |
144 |
+- * @cgroup up to the root cgroup. Stop if a cgroup on the chain |
145 |
+- * to the root has already an allocated group on @bfqd. |
146 |
+- */ |
147 |
+-static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd, |
148 |
+- struct cgroup *cgroup) |
149 |
+-{ |
150 |
+- struct bfqio_cgroup *bgrp; |
151 |
+- struct bfq_group *bfqg, *prev = NULL, *leaf = NULL; |
152 |
+- |
153 |
+- for (; cgroup != NULL; cgroup = cgroup->parent) { |
154 |
+- bgrp = cgroup_to_bfqio(cgroup); |
155 |
+- |
156 |
+- bfqg = bfqio_lookup_group(bgrp, bfqd); |
157 |
+- if (bfqg != NULL) { |
158 |
+- /* |
159 |
+- * All the cgroups in the path from there to the |
160 |
+- * root must have a bfq_group for bfqd, so we don't |
161 |
+- * need any more allocations. |
162 |
+- */ |
163 |
+- break; |
164 |
+- } |
165 |
+- |
166 |
+- bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC); |
167 |
+- if (bfqg == NULL) |
168 |
+- goto cleanup; |
169 |
+- |
170 |
+- bfq_group_init_entity(bgrp, bfqg); |
171 |
+- bfqg->my_entity = &bfqg->entity; |
172 |
+- |
173 |
+- if (leaf == NULL) { |
174 |
+- leaf = bfqg; |
175 |
+- prev = leaf; |
176 |
+- } else { |
177 |
+- bfq_group_set_parent(prev, bfqg); |
178 |
+- /* |
179 |
+- * Build a list of allocated nodes using the bfqd |
180 |
+- * filed, that is still unused and will be initialized |
181 |
+- * only after the node will be connected. |
182 |
+- */ |
183 |
+- prev->bfqd = bfqg; |
184 |
+- prev = bfqg; |
185 |
+- } |
186 |
+- } |
187 |
+- |
188 |
+- return leaf; |
189 |
+- |
190 |
+-cleanup: |
191 |
+- while (leaf != NULL) { |
192 |
+- prev = leaf; |
193 |
+- leaf = leaf->bfqd; |
194 |
+- kfree(prev); |
195 |
+- } |
196 |
+- |
197 |
+- return NULL; |
198 |
+-} |
199 |
+- |
200 |
+-/** |
201 |
+- * bfq_group_chain_link - link an allocatd group chain to a cgroup hierarchy. |
202 |
+- * @bfqd: the queue descriptor. |
203 |
+- * @cgroup: the leaf cgroup to start from. |
204 |
+- * @leaf: the leaf group (to be associated to @cgroup). |
205 |
+- * |
206 |
+- * Try to link a chain of groups to a cgroup hierarchy, connecting the |
207 |
+- * nodes bottom-up, so we can be sure that when we find a cgroup in the |
208 |
+- * hierarchy that already as a group associated to @bfqd all the nodes |
209 |
+- * in the path to the root cgroup have one too. |
210 |
+- * |
211 |
+- * On locking: the queue lock protects the hierarchy (there is a hierarchy |
212 |
+- * per device) while the bfqio_cgroup lock protects the list of groups |
213 |
+- * belonging to the same cgroup. |
214 |
+- */ |
215 |
+-static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup, |
216 |
+- struct bfq_group *leaf) |
217 |
+-{ |
218 |
+- struct bfqio_cgroup *bgrp; |
219 |
+- struct bfq_group *bfqg, *next, *prev = NULL; |
220 |
+- unsigned long flags; |
221 |
+- |
222 |
+- assert_spin_locked(bfqd->queue->queue_lock); |
223 |
+- |
224 |
+- for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) { |
225 |
+- bgrp = cgroup_to_bfqio(cgroup); |
226 |
+- next = leaf->bfqd; |
227 |
+- |
228 |
+- bfqg = bfqio_lookup_group(bgrp, bfqd); |
229 |
+- BUG_ON(bfqg != NULL); |
230 |
+- |
231 |
+- spin_lock_irqsave(&bgrp->lock, flags); |
232 |
+- |
233 |
+- rcu_assign_pointer(leaf->bfqd, bfqd); |
234 |
+- hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data); |
235 |
+- hlist_add_head(&leaf->bfqd_node, &bfqd->group_list); |
236 |
+- |
237 |
+- spin_unlock_irqrestore(&bgrp->lock, flags); |
238 |
+- |
239 |
+- prev = leaf; |
240 |
+- leaf = next; |
241 |
+- } |
242 |
+- |
243 |
+- BUG_ON(cgroup == NULL && leaf != NULL); |
244 |
+- if (cgroup != NULL && prev != NULL) { |
245 |
+- bgrp = cgroup_to_bfqio(cgroup); |
246 |
+- bfqg = bfqio_lookup_group(bgrp, bfqd); |
247 |
+- bfq_group_set_parent(prev, bfqg); |
248 |
+- } |
249 |
+-} |
250 |
+- |
251 |
+-/** |
252 |
+- * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup. |
253 |
+- * @bfqd: queue descriptor. |
254 |
+- * @cgroup: cgroup being searched for. |
255 |
+- * |
256 |
+- * Return a group associated to @bfqd in @cgroup, allocating one if |
257 |
+- * necessary. When a group is returned all the cgroups in the path |
258 |
+- * to the root have a group associated to @bfqd. |
259 |
+- * |
260 |
+- * If the allocation fails, return the root group: this breaks guarantees |
261 |
+- * but is a safe fallbak. If this loss becames a problem it can be |
262 |
+- * mitigated using the equivalent weight (given by the product of the |
263 |
+- * weights of the groups in the path from @group to the root) in the |
264 |
+- * root scheduler. |
265 |
+- * |
266 |
+- * We allocate all the missing nodes in the path from the leaf cgroup |
267 |
+- * to the root and we connect the nodes only after all the allocations |
268 |
+- * have been successful. |
269 |
+- */ |
270 |
+-static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, |
271 |
+- struct cgroup *cgroup) |
272 |
+-{ |
273 |
+- struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); |
274 |
+- struct bfq_group *bfqg; |
275 |
+- |
276 |
+- bfqg = bfqio_lookup_group(bgrp, bfqd); |
277 |
+- if (bfqg != NULL) |
278 |
+- return bfqg; |
279 |
+- |
280 |
+- bfqg = bfq_group_chain_alloc(bfqd, cgroup); |
281 |
+- if (bfqg != NULL) |
282 |
+- bfq_group_chain_link(bfqd, cgroup, bfqg); |
283 |
+- else |
284 |
+- bfqg = bfqd->root_group; |
285 |
+- |
286 |
+- return bfqg; |
287 |
+-} |
288 |
+- |
289 |
+-/** |
290 |
+- * bfq_bfqq_move - migrate @bfqq to @bfqg. |
291 |
+- * @bfqd: queue descriptor. |
292 |
+- * @bfqq: the queue to move. |
293 |
+- * @entity: @bfqq's entity. |
294 |
+- * @bfqg: the group to move to. |
295 |
+- * |
296 |
+- * Move @bfqq to @bfqg, deactivating it from its old group and reactivating |
297 |
+- * it on the new one. Avoid putting the entity on the old group idle tree. |
298 |
+- * |
299 |
+- * Must be called under the queue lock; the cgroup owning @bfqg must |
300 |
+- * not disappear (by now this just means that we are called under |
301 |
+- * rcu_read_lock()). |
302 |
+- */ |
303 |
+-static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
304 |
+- struct bfq_entity *entity, struct bfq_group *bfqg) |
305 |
+-{ |
306 |
+- int busy, resume; |
307 |
+- |
308 |
+- busy = bfq_bfqq_busy(bfqq); |
309 |
+- resume = !RB_EMPTY_ROOT(&bfqq->sort_list); |
310 |
+- |
311 |
+- BUG_ON(resume && !entity->on_st); |
312 |
+- BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue); |
313 |
+- |
314 |
+- if (busy) { |
315 |
+- BUG_ON(atomic_read(&bfqq->ref) < 2); |
316 |
+- |
317 |
+- if (!resume) |
318 |
+- bfq_del_bfqq_busy(bfqd, bfqq, 0); |
319 |
+- else |
320 |
+- bfq_deactivate_bfqq(bfqd, bfqq, 0); |
321 |
+- } else if (entity->on_st) |
322 |
+- bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); |
323 |
+- |
324 |
+- /* |
325 |
+- * Here we use a reference to bfqg. We don't need a refcounter |
326 |
+- * as the cgroup reference will not be dropped, so that its |
327 |
+- * destroy() callback will not be invoked. |
328 |
+- */ |
329 |
+- entity->parent = bfqg->my_entity; |
330 |
+- entity->sched_data = &bfqg->sched_data; |
331 |
+- |
332 |
+- if (busy && resume) |
333 |
+- bfq_activate_bfqq(bfqd, bfqq); |
334 |
+- |
335 |
+- if (bfqd->active_queue == NULL && !bfqd->rq_in_driver) |
336 |
+- bfq_schedule_dispatch(bfqd); |
337 |
+-} |
338 |
+- |
339 |
+-/** |
340 |
+- * __bfq_bic_change_cgroup - move @bic to @cgroup. |
341 |
+- * @bfqd: the queue descriptor. |
342 |
+- * @bic: the bic to move. |
343 |
+- * @cgroup: the cgroup to move to. |
344 |
+- * |
345 |
+- * Move bic to cgroup, assuming that bfqd->queue is locked; the caller |
346 |
+- * has to make sure that the reference to cgroup is valid across the call. |
347 |
+- * |
348 |
+- * NOTE: an alternative approach might have been to store the current |
349 |
+- * cgroup in bfqq and getting a reference to it, reducing the lookup |
350 |
+- * time here, at the price of slightly more complex code. |
351 |
+- */ |
352 |
+-static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, |
353 |
+- struct bfq_io_cq *bic, |
354 |
+- struct cgroup *cgroup) |
355 |
+-{ |
356 |
+- struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); |
357 |
+- struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); |
358 |
+- struct bfq_entity *entity; |
359 |
+- struct bfq_group *bfqg; |
360 |
+- struct bfqio_cgroup *bgrp; |
361 |
+- |
362 |
+- bgrp = cgroup_to_bfqio(cgroup); |
363 |
+- |
364 |
+- bfqg = bfq_find_alloc_group(bfqd, cgroup); |
365 |
+- if (async_bfqq != NULL) { |
366 |
+- entity = &async_bfqq->entity; |
367 |
+- |
368 |
+- if (entity->sched_data != &bfqg->sched_data) { |
369 |
+- bic_set_bfqq(bic, NULL, 0); |
370 |
+- bfq_log_bfqq(bfqd, async_bfqq, |
371 |
+- "bic_change_group: %p %d", |
372 |
+- async_bfqq, atomic_read(&async_bfqq->ref)); |
373 |
+- bfq_put_queue(async_bfqq); |
374 |
+- } |
375 |
+- } |
376 |
+- |
377 |
+- if (sync_bfqq != NULL) { |
378 |
+- entity = &sync_bfqq->entity; |
379 |
+- if (entity->sched_data != &bfqg->sched_data) |
380 |
+- bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg); |
381 |
+- } |
382 |
+- |
383 |
+- return bfqg; |
384 |
+-} |
385 |
+- |
386 |
+-/** |
387 |
+- * bfq_bic_change_cgroup - move @bic to @cgroup. |
388 |
+- * @bic: the bic being migrated. |
389 |
+- * @cgroup: the destination cgroup. |
390 |
+- * |
391 |
+- * When the task owning @bic is moved to @cgroup, @bic is immediately |
392 |
+- * moved into its new parent group. |
393 |
+- */ |
394 |
+-static void bfq_bic_change_cgroup(struct bfq_io_cq *bic, |
395 |
+- struct cgroup *cgroup) |
396 |
+-{ |
397 |
+- struct bfq_data *bfqd; |
398 |
+- unsigned long uninitialized_var(flags); |
399 |
+- |
400 |
+- bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), &flags); |
401 |
+- if (bfqd != NULL) { |
402 |
+- __bfq_bic_change_cgroup(bfqd, bic, cgroup); |
403 |
+- bfq_put_bfqd_unlock(bfqd, &flags); |
404 |
+- } |
405 |
+-} |
406 |
+- |
407 |
+-/** |
408 |
+- * bfq_bic_update_cgroup - update the cgroup of @bic. |
409 |
+- * @bic: the @bic to update. |
410 |
+- * |
411 |
+- * Make sure that @bic is enqueued in the cgroup of the current task. |
412 |
+- * We need this in addition to moving bics during the cgroup attach |
413 |
+- * phase because the task owning @bic could be at its first disk |
414 |
+- * access or we may end up in the root cgroup as the result of a |
415 |
+- * memory allocation failure and here we try to move to the right |
416 |
+- * group. |
417 |
+- * |
418 |
+- * Must be called under the queue lock. It is safe to use the returned |
419 |
+- * value even after the rcu_read_unlock() as the migration/destruction |
420 |
+- * paths act under the queue lock too. IOW it is impossible to race with |
421 |
+- * group migration/destruction and end up with an invalid group as: |
422 |
+- * a) here cgroup has not yet been destroyed, nor its destroy callback |
423 |
+- * has started execution, as current holds a reference to it, |
424 |
+- * b) if it is destroyed after rcu_read_unlock() [after current is |
425 |
+- * migrated to a different cgroup] its attach() callback will have |
426 |
+- * taken care of remove all the references to the old cgroup data. |
427 |
+- */ |
428 |
+-static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic) |
429 |
+-{ |
430 |
+- struct bfq_data *bfqd = bic_to_bfqd(bic); |
431 |
+- struct bfq_group *bfqg; |
432 |
+- struct cgroup *cgroup; |
433 |
+- |
434 |
+- BUG_ON(bfqd == NULL); |
435 |
+- |
436 |
+- rcu_read_lock(); |
437 |
+- cgroup = task_cgroup(current, bfqio_subsys_id); |
438 |
+- bfqg = __bfq_bic_change_cgroup(bfqd, bic, cgroup); |
439 |
+- rcu_read_unlock(); |
440 |
+- |
441 |
+- return bfqg; |
442 |
+-} |
443 |
+- |
444 |
+-/** |
445 |
+- * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. |
446 |
+- * @st: the service tree being flushed. |
447 |
+- */ |
448 |
+-static inline void bfq_flush_idle_tree(struct bfq_service_tree *st) |
449 |
+-{ |
450 |
+- struct bfq_entity *entity = st->first_idle; |
451 |
+- |
452 |
+- for (; entity != NULL; entity = st->first_idle) |
453 |
+- __bfq_deactivate_entity(entity, 0); |
454 |
+-} |
455 |
+- |
456 |
+-/** |
457 |
+- * bfq_reparent_leaf_entity - move leaf entity to the root_group. |
458 |
+- * @bfqd: the device data structure with the root group. |
459 |
+- * @entity: the entity to move. |
460 |
+- */ |
461 |
+-static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd, |
462 |
+- struct bfq_entity *entity) |
463 |
+-{ |
464 |
+- struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
465 |
+- |
466 |
+- BUG_ON(bfqq == NULL); |
467 |
+- bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group); |
468 |
+- return; |
469 |
+-} |
470 |
+- |
471 |
+-/** |
472 |
+- * bfq_reparent_active_entities - move to the root group all active entities. |
473 |
+- * @bfqd: the device data structure with the root group. |
474 |
+- * @bfqg: the group to move from. |
475 |
+- * @st: the service tree with the entities. |
476 |
+- * |
477 |
+- * Needs queue_lock to be taken and reference to be valid over the call. |
478 |
+- */ |
479 |
+-static inline void bfq_reparent_active_entities(struct bfq_data *bfqd, |
480 |
+- struct bfq_group *bfqg, |
481 |
+- struct bfq_service_tree *st) |
482 |
+-{ |
483 |
+- struct rb_root *active = &st->active; |
484 |
+- struct bfq_entity *entity = NULL; |
485 |
+- |
486 |
+- if (!RB_EMPTY_ROOT(&st->active)) |
487 |
+- entity = bfq_entity_of(rb_first(active)); |
488 |
+- |
489 |
+- for (; entity != NULL ; entity = bfq_entity_of(rb_first(active))) |
490 |
+- bfq_reparent_leaf_entity(bfqd, entity); |
491 |
+- |
492 |
+- if (bfqg->sched_data.active_entity != NULL) |
493 |
+- bfq_reparent_leaf_entity(bfqd, bfqg->sched_data.active_entity); |
494 |
+- |
495 |
+- return; |
496 |
+-} |
497 |
+- |
498 |
+-/** |
499 |
+- * bfq_destroy_group - destroy @bfqg. |
500 |
+- * @bgrp: the bfqio_cgroup containing @bfqg. |
501 |
+- * @bfqg: the group being destroyed. |
502 |
+- * |
503 |
+- * Destroy @bfqg, making sure that it is not referenced from its parent. |
504 |
+- */ |
505 |
+-static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg) |
506 |
+-{ |
507 |
+- struct bfq_data *bfqd; |
508 |
+- struct bfq_service_tree *st; |
509 |
+- struct bfq_entity *entity = bfqg->my_entity; |
510 |
+- unsigned long uninitialized_var(flags); |
511 |
+- int i; |
512 |
+- |
513 |
+- hlist_del(&bfqg->group_node); |
514 |
+- |
515 |
+- /* |
516 |
+- * Empty all service_trees belonging to this group before deactivating |
517 |
+- * the group itself. |
518 |
+- */ |
519 |
+- for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { |
520 |
+- st = bfqg->sched_data.service_tree + i; |
521 |
+- |
522 |
+- /* |
523 |
+- * The idle tree may still contain bfq_queues belonging |
524 |
+- * to exited task because they never migrated to a different |
525 |
+- * cgroup from the one being destroyed now. Noone else |
526 |
+- * can access them so it's safe to act without any lock. |
527 |
+- */ |
528 |
+- bfq_flush_idle_tree(st); |
529 |
+- |
530 |
+- /* |
531 |
+- * It may happen that some queues are still active |
532 |
+- * (busy) upon group destruction (if the corresponding |
533 |
+- * processes have been forced to terminate). We move |
534 |
+- * all the leaf entities corresponding to these queues |
535 |
+- * to the root_group. |
536 |
+- * Also, it may happen that the group has an entity |
537 |
+- * under service, which is disconnected from the active |
538 |
+- * tree: it must be moved, too. |
539 |
+- * There is no need to put the sync queues, as the |
540 |
+- * scheduler has taken no reference. |
541 |
+- */ |
542 |
+- bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags); |
543 |
+- if (bfqd != NULL) { |
544 |
+- bfq_reparent_active_entities(bfqd, bfqg, st); |
545 |
+- bfq_put_bfqd_unlock(bfqd, &flags); |
546 |
+- } |
547 |
+- BUG_ON(!RB_EMPTY_ROOT(&st->active)); |
548 |
+- BUG_ON(!RB_EMPTY_ROOT(&st->idle)); |
549 |
+- } |
550 |
+- BUG_ON(bfqg->sched_data.next_active != NULL); |
551 |
+- BUG_ON(bfqg->sched_data.active_entity != NULL); |
552 |
+- |
553 |
+- /* |
554 |
+- * We may race with device destruction, take extra care when |
555 |
+- * dereferencing bfqg->bfqd. |
556 |
+- */ |
557 |
+- bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags); |
558 |
+- if (bfqd != NULL) { |
559 |
+- hlist_del(&bfqg->bfqd_node); |
560 |
+- __bfq_deactivate_entity(entity, 0); |
561 |
+- bfq_put_async_queues(bfqd, bfqg); |
562 |
+- bfq_put_bfqd_unlock(bfqd, &flags); |
563 |
+- } |
564 |
+- BUG_ON(entity->tree != NULL); |
565 |
+- |
566 |
+- /* |
567 |
+- * No need to defer the kfree() to the end of the RCU grace |
568 |
+- * period: we are called from the destroy() callback of our |
569 |
+- * cgroup, so we can be sure that noone is a) still using |
570 |
+- * this cgroup or b) doing lookups in it. |
571 |
+- */ |
572 |
+- kfree(bfqg); |
573 |
+-} |
574 |
+- |
575 |
+-static void bfq_end_raising_async(struct bfq_data *bfqd) |
576 |
+-{ |
577 |
+- struct hlist_node *tmp; |
578 |
+- struct bfq_group *bfqg; |
579 |
+- |
580 |
+- hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) |
581 |
+- bfq_end_raising_async_queues(bfqd, bfqg); |
582 |
+-} |
583 |
+- |
584 |
+-/** |
585 |
+- * bfq_disconnect_groups - diconnect @bfqd from all its groups. |
586 |
+- * @bfqd: the device descriptor being exited. |
587 |
+- * |
588 |
+- * When the device exits we just make sure that no lookup can return |
589 |
+- * the now unused group structures. They will be deallocated on cgroup |
590 |
+- * destruction. |
591 |
+- */ |
592 |
+-static void bfq_disconnect_groups(struct bfq_data *bfqd) |
593 |
+-{ |
594 |
+- struct hlist_node *tmp; |
595 |
+- struct bfq_group *bfqg; |
596 |
+- |
597 |
+- bfq_log(bfqd, "disconnect_groups beginning") ; |
598 |
+- hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) { |
599 |
+- hlist_del(&bfqg->bfqd_node); |
600 |
+- |
601 |
+- __bfq_deactivate_entity(bfqg->my_entity, 0); |
602 |
+- |
603 |
+- /* |
604 |
+- * Don't remove from the group hash, just set an |
605 |
+- * invalid key. No lookups can race with the |
606 |
+- * assignment as bfqd is being destroyed; this |
607 |
+- * implies also that new elements cannot be added |
608 |
+- * to the list. |
609 |
+- */ |
610 |
+- rcu_assign_pointer(bfqg->bfqd, NULL); |
611 |
+- |
612 |
+- bfq_log(bfqd, "disconnect_groups: put async for group %p", |
613 |
+- bfqg) ; |
614 |
+- bfq_put_async_queues(bfqd, bfqg); |
615 |
+- } |
616 |
+-} |
617 |
+- |
618 |
+-static inline void bfq_free_root_group(struct bfq_data *bfqd) |
619 |
+-{ |
620 |
+- struct bfqio_cgroup *bgrp = &bfqio_root_cgroup; |
621 |
+- struct bfq_group *bfqg = bfqd->root_group; |
622 |
+- |
623 |
+- bfq_put_async_queues(bfqd, bfqg); |
624 |
+- |
625 |
+- spin_lock_irq(&bgrp->lock); |
626 |
+- hlist_del_rcu(&bfqg->group_node); |
627 |
+- spin_unlock_irq(&bgrp->lock); |
628 |
+- |
629 |
+- /* |
630 |
+- * No need to synchronize_rcu() here: since the device is gone |
631 |
+- * there cannot be any read-side access to its root_group. |
632 |
+- */ |
633 |
+- kfree(bfqg); |
634 |
+-} |
635 |
+- |
636 |
+-static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) |
637 |
+-{ |
638 |
+- struct bfq_group *bfqg; |
639 |
+- struct bfqio_cgroup *bgrp; |
640 |
+- int i; |
641 |
+- |
642 |
+- bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); |
643 |
+- if (bfqg == NULL) |
644 |
+- return NULL; |
645 |
+- |
646 |
+- bfqg->entity.parent = NULL; |
647 |
+- for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) |
648 |
+- bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; |
649 |
+- |
650 |
+- bgrp = &bfqio_root_cgroup; |
651 |
+- spin_lock_irq(&bgrp->lock); |
652 |
+- rcu_assign_pointer(bfqg->bfqd, bfqd); |
653 |
+- hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data); |
654 |
+- spin_unlock_irq(&bgrp->lock); |
655 |
+- |
656 |
+- return bfqg; |
657 |
+-} |
658 |
+- |
659 |
+-#define SHOW_FUNCTION(__VAR) \ |
660 |
+-static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup, \ |
661 |
+- struct cftype *cftype) \ |
662 |
+-{ \ |
663 |
+- struct bfqio_cgroup *bgrp; \ |
664 |
+- u64 ret; \ |
665 |
+- \ |
666 |
+- if (!cgroup_lock_live_group(cgroup)) \ |
667 |
+- return -ENODEV; \ |
668 |
+- \ |
669 |
+- bgrp = cgroup_to_bfqio(cgroup); \ |
670 |
+- spin_lock_irq(&bgrp->lock); \ |
671 |
+- ret = bgrp->__VAR; \ |
672 |
+- spin_unlock_irq(&bgrp->lock); \ |
673 |
+- \ |
674 |
+- cgroup_unlock(); \ |
675 |
+- \ |
676 |
+- return ret; \ |
677 |
+-} |
678 |
+- |
679 |
+-SHOW_FUNCTION(weight); |
680 |
+-SHOW_FUNCTION(ioprio); |
681 |
+-SHOW_FUNCTION(ioprio_class); |
682 |
+-#undef SHOW_FUNCTION |
683 |
+- |
684 |
+-#define STORE_FUNCTION(__VAR, __MIN, __MAX) \ |
685 |
+-static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \ |
686 |
+- struct cftype *cftype, \ |
687 |
+- u64 val) \ |
688 |
+-{ \ |
689 |
+- struct bfqio_cgroup *bgrp; \ |
690 |
+- struct bfq_group *bfqg; \ |
691 |
+- \ |
692 |
+- if (val < (__MIN) || val > (__MAX)) \ |
693 |
+- return -EINVAL; \ |
694 |
+- \ |
695 |
+- if (!cgroup_lock_live_group(cgroup)) \ |
696 |
+- return -ENODEV; \ |
697 |
+- \ |
698 |
+- bgrp = cgroup_to_bfqio(cgroup); \ |
699 |
+- \ |
700 |
+- spin_lock_irq(&bgrp->lock); \ |
701 |
+- bgrp->__VAR = (unsigned short)val; \ |
702 |
+- hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \ |
703 |
+- /* \ |
704 |
+- * Setting the ioprio_changed flag of the entity \ |
705 |
+- * to 1 with new_##__VAR == ##__VAR would re-set \ |
706 |
+- * the value of the weight to its ioprio mapping. \ |
707 |
+- * Set the flag only if necessary. \ |
708 |
+- */ \ |
709 |
+- if ((unsigned short)val != bfqg->entity.new_##__VAR) { \ |
710 |
+- bfqg->entity.new_##__VAR = (unsigned short)val; \ |
711 |
+- smp_wmb(); \ |
712 |
+- bfqg->entity.ioprio_changed = 1; \ |
713 |
+- } \ |
714 |
+- } \ |
715 |
+- spin_unlock_irq(&bgrp->lock); \ |
716 |
+- \ |
717 |
+- cgroup_unlock(); \ |
718 |
+- \ |
719 |
+- return 0; \ |
720 |
+-} |
721 |
+- |
722 |
+-STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT); |
723 |
+-STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1); |
724 |
+-STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE); |
725 |
+-#undef STORE_FUNCTION |
726 |
+- |
727 |
+-static struct cftype bfqio_files[] = { |
728 |
+- { |
729 |
+- .name = "weight", |
730 |
+- .read_u64 = bfqio_cgroup_weight_read, |
731 |
+- .write_u64 = bfqio_cgroup_weight_write, |
732 |
+- }, |
733 |
+- { |
734 |
+- .name = "ioprio", |
735 |
+- .read_u64 = bfqio_cgroup_ioprio_read, |
736 |
+- .write_u64 = bfqio_cgroup_ioprio_write, |
737 |
+- }, |
738 |
+- { |
739 |
+- .name = "ioprio_class", |
740 |
+- .read_u64 = bfqio_cgroup_ioprio_class_read, |
741 |
+- .write_u64 = bfqio_cgroup_ioprio_class_write, |
742 |
+- }, |
743 |
+- { }, /* terminate */ |
744 |
+-}; |
745 |
+- |
746 |
+-static struct cgroup_subsys_state *bfqio_create(struct cgroup *cgroup) |
747 |
+-{ |
748 |
+- struct bfqio_cgroup *bgrp; |
749 |
+- |
750 |
+- if (cgroup->parent != NULL) { |
751 |
+- bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL); |
752 |
+- if (bgrp == NULL) |
753 |
+- return ERR_PTR(-ENOMEM); |
754 |
+- } else |
755 |
+- bgrp = &bfqio_root_cgroup; |
756 |
+- |
757 |
+- spin_lock_init(&bgrp->lock); |
758 |
+- INIT_HLIST_HEAD(&bgrp->group_data); |
759 |
+- bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO; |
760 |
+- bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS; |
761 |
+- |
762 |
+- return &bgrp->css; |
763 |
+-} |
764 |
+- |
765 |
+-/* |
766 |
+- * We cannot support shared io contexts, as we have no means to support |
767 |
+- * two tasks with the same ioc in two different groups without major rework |
768 |
+- * of the main bic/bfqq data structures. By now we allow a task to change |
769 |
+- * its cgroup only if it's the only owner of its ioc; the drawback of this |
770 |
+- * behavior is that a group containing a task that forked using CLONE_IO |
771 |
+- * will not be destroyed until the tasks sharing the ioc die. |
772 |
+- */ |
773 |
+-static int bfqio_can_attach(struct cgroup *cgroup, struct cgroup_taskset *tset) |
774 |
+-{ |
775 |
+- struct task_struct *task; |
776 |
+- struct io_context *ioc; |
777 |
+- int ret = 0; |
778 |
+- |
779 |
+- cgroup_taskset_for_each(task, cgroup, tset) { |
780 |
+- /* task_lock() is needed to avoid races with exit_io_context() */ |
781 |
+- task_lock(task); |
782 |
+- ioc = task->io_context; |
783 |
+- if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1) |
784 |
+- /* |
785 |
+- * ioc == NULL means that the task is either too young or |
786 |
+- * exiting: if it has still no ioc the ioc can't be shared, |
787 |
+- * if the task is exiting the attach will fail anyway, no |
788 |
+- * matter what we return here. |
789 |
+- */ |
790 |
+- ret = -EINVAL; |
791 |
+- task_unlock(task); |
792 |
+- if (ret) |
793 |
+- break; |
794 |
+- } |
795 |
+- |
796 |
+- return ret; |
797 |
+-} |
798 |
+- |
799 |
+-static void bfqio_attach(struct cgroup *cgroup, struct cgroup_taskset *tset) |
800 |
+-{ |
801 |
+- struct task_struct *task; |
802 |
+- struct io_context *ioc; |
803 |
+- struct io_cq *icq; |
804 |
+- |
805 |
+- /* |
806 |
+- * IMPORTANT NOTE: The move of more than one process at a time to a |
807 |
+- * new group has not yet been tested. |
808 |
+- */ |
809 |
+- cgroup_taskset_for_each(task, cgroup, tset) { |
810 |
+- ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); |
811 |
+- if (ioc) { |
812 |
+- /* |
813 |
+- * Handle cgroup change here. |
814 |
+- */ |
815 |
+- rcu_read_lock(); |
816 |
+- hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node) |
817 |
+- if (!strncmp(icq->q->elevator->type->elevator_name, |
818 |
+- "bfq", ELV_NAME_MAX)) |
819 |
+- bfq_bic_change_cgroup(icq_to_bic(icq), |
820 |
+- cgroup); |
821 |
+- rcu_read_unlock(); |
822 |
+- put_io_context(ioc); |
823 |
+- } |
824 |
+- } |
825 |
+-} |
826 |
+- |
827 |
+-static void bfqio_destroy(struct cgroup *cgroup) |
828 |
+-{ |
829 |
+- struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); |
830 |
+- struct hlist_node *tmp; |
831 |
+- struct bfq_group *bfqg; |
832 |
+- |
833 |
+- /* |
834 |
+- * Since we are destroying the cgroup, there are no more tasks |
835 |
+- * referencing it, and all the RCU grace periods that may have |
836 |
+- * referenced it are ended (as the destruction of the parent |
837 |
+- * cgroup is RCU-safe); bgrp->group_data will not be accessed by |
838 |
+- * anything else and we don't need any synchronization. |
839 |
+- */ |
840 |
+- hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node) |
841 |
+- bfq_destroy_group(bgrp, bfqg); |
842 |
+- |
843 |
+- BUG_ON(!hlist_empty(&bgrp->group_data)); |
844 |
+- |
845 |
+- kfree(bgrp); |
846 |
+-} |
847 |
+- |
848 |
+-struct cgroup_subsys bfqio_subsys = { |
849 |
+- .name = "bfqio", |
850 |
+- .css_alloc = bfqio_create, |
851 |
+- .can_attach = bfqio_can_attach, |
852 |
+- .attach = bfqio_attach, |
853 |
+- .css_free = bfqio_destroy, |
854 |
+- .subsys_id = bfqio_subsys_id, |
855 |
+- .base_cftypes = bfqio_files, |
856 |
+-}; |
857 |
+-#else |
858 |
+ static inline void bfq_init_entity(struct bfq_entity *entity, |
859 |
+ struct bfq_group *bfqg) |
860 |
+ { |
861 |
+@@ -865,4 +65,3 @@ |
862 |
+ |
863 |
+ return bfqg; |
864 |
+ } |
865 |
+-#endif |
866 |
+--- a/block/bfq-sched.c |
867 |
++++ b/block/bfq-sched.c |
868 |
+@@ -10,68 +10,6 @@ |
869 |
+ * Copyright (C) 2010 Paolo Valente <paolo.valente@×××××××.it> |
870 |
+ */ |
871 |
+ |
872 |
+-#ifdef CONFIG_CGROUP_BFQIO |
873 |
+-#define for_each_entity(entity) \ |
874 |
+- for (; entity != NULL; entity = entity->parent) |
875 |
+- |
876 |
+-#define for_each_entity_safe(entity, parent) \ |
877 |
+- for (; entity && ({ parent = entity->parent; 1; }); entity = parent) |
878 |
+- |
879 |
+-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, |
880 |
+- int extract, |
881 |
+- struct bfq_data *bfqd); |
882 |
+- |
883 |
+-static inline void bfq_update_budget(struct bfq_entity *next_active) |
884 |
+-{ |
885 |
+- struct bfq_entity *bfqg_entity; |
886 |
+- struct bfq_group *bfqg; |
887 |
+- struct bfq_sched_data *group_sd; |
888 |
+- |
889 |
+- BUG_ON(next_active == NULL); |
890 |
+- |
891 |
+- group_sd = next_active->sched_data; |
892 |
+- |
893 |
+- bfqg = container_of(group_sd, struct bfq_group, sched_data); |
894 |
+- /* |
895 |
+- * bfq_group's my_entity field is not NULL only if the group |
896 |
+- * is not the root group. We must not touch the root entity |
897 |
+- * as it must never become an active entity. |
898 |
+- */ |
899 |
+- bfqg_entity = bfqg->my_entity; |
900 |
+- if (bfqg_entity != NULL) |
901 |
+- bfqg_entity->budget = next_active->budget; |
902 |
+-} |
903 |
+- |
904 |
+-static int bfq_update_next_active(struct bfq_sched_data *sd) |
905 |
+-{ |
906 |
+- struct bfq_entity *next_active; |
907 |
+- |
908 |
+- if (sd->active_entity != NULL) |
909 |
+- /* will update/requeue at the end of service */ |
910 |
+- return 0; |
911 |
+- |
912 |
+- /* |
913 |
+- * NOTE: this can be improved in many ways, such as returning |
914 |
+- * 1 (and thus propagating upwards the update) only when the |
915 |
+- * budget changes, or caching the bfqq that will be scheduled |
916 |
+- * next from this subtree. By now we worry more about |
917 |
+- * correctness than about performance... |
918 |
+- */ |
919 |
+- next_active = bfq_lookup_next_entity(sd, 0, NULL); |
920 |
+- sd->next_active = next_active; |
921 |
+- |
922 |
+- if (next_active != NULL) |
923 |
+- bfq_update_budget(next_active); |
924 |
+- |
925 |
+- return 1; |
926 |
+-} |
927 |
+- |
928 |
+-static inline void bfq_check_next_active(struct bfq_sched_data *sd, |
929 |
+- struct bfq_entity *entity) |
930 |
+-{ |
931 |
+- BUG_ON(sd->next_active != entity); |
932 |
+-} |
933 |
+-#else |
934 |
+ #define for_each_entity(entity) \ |
935 |
+ for (; entity != NULL; entity = NULL) |
936 |
+ |
937 |
+@@ -91,7 +29,6 @@ |
938 |
+ static inline void bfq_update_budget(struct bfq_entity *next_active) |
939 |
+ { |
940 |
+ } |
941 |
+-#endif |
942 |
+ |
943 |
+ /* |
944 |
+ * Shift for timestamp calculations. This actually limits the maximum |
945 |
+--- a/block/bfq.h |
946 |
++++ b/block/bfq.h |
947 |
+@@ -463,78 +463,12 @@ |
948 |
+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ |
949 |
+ }; |
950 |
+ |
951 |
+-#ifdef CONFIG_CGROUP_BFQIO |
952 |
+-/** |
953 |
+- * struct bfq_group - per (device, cgroup) data structure. |
954 |
+- * @entity: schedulable entity to insert into the parent group sched_data. |
955 |
+- * @sched_data: own sched_data, to contain child entities (they may be |
956 |
+- * both bfq_queues and bfq_groups). |
957 |
+- * @group_node: node to be inserted into the bfqio_cgroup->group_data |
958 |
+- * list of the containing cgroup's bfqio_cgroup. |
959 |
+- * @bfqd_node: node to be inserted into the @bfqd->group_list list |
960 |
+- * of the groups active on the same device; used for cleanup. |
961 |
+- * @bfqd: the bfq_data for the device this group acts upon. |
962 |
+- * @async_bfqq: array of async queues for all the tasks belonging to |
963 |
+- * the group, one queue per ioprio value per ioprio_class, |
964 |
+- * except for the idle class that has only one queue. |
965 |
+- * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). |
966 |
+- * @my_entity: pointer to @entity, %NULL for the toplevel group; used |
967 |
+- * to avoid too many special cases during group creation/migration. |
968 |
+- * |
969 |
+- * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup |
970 |
+- * there is a set of bfq_groups, each one collecting the lower-level |
971 |
+- * entities belonging to the group that are acting on the same device. |
972 |
+- * |
973 |
+- * Locking works as follows: |
974 |
+- * o @group_node is protected by the bfqio_cgroup lock, and is accessed |
975 |
+- * via RCU from its readers. |
976 |
+- * o @bfqd is protected by the queue lock, RCU is used to access it |
977 |
+- * from the readers. |
978 |
+- * o All the other fields are protected by the @bfqd queue lock. |
979 |
+- */ |
980 |
+-struct bfq_group { |
981 |
+- struct bfq_entity entity; |
982 |
+- struct bfq_sched_data sched_data; |
983 |
+- |
984 |
+- struct hlist_node group_node; |
985 |
+- struct hlist_node bfqd_node; |
986 |
+- |
987 |
+- void *bfqd; |
988 |
+- |
989 |
+- struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; |
990 |
+- struct bfq_queue *async_idle_bfqq; |
991 |
+- |
992 |
+- struct bfq_entity *my_entity; |
993 |
+-}; |
994 |
+- |
995 |
+-/** |
996 |
+- * struct bfqio_cgroup - bfq cgroup data structure. |
997 |
+- * @css: subsystem state for bfq in the containing cgroup. |
998 |
+- * @weight: cgroup weight. |
999 |
+- * @ioprio: cgroup ioprio. |
1000 |
+- * @ioprio_class: cgroup ioprio_class. |
1001 |
+- * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data. |
1002 |
+- * @group_data: list containing the bfq_group belonging to this cgroup. |
1003 |
+- * |
1004 |
+- * @group_data is accessed using RCU, with @lock protecting the updates, |
1005 |
+- * @ioprio and @ioprio_class are protected by @lock. |
1006 |
+- */ |
1007 |
+-struct bfqio_cgroup { |
1008 |
+- struct cgroup_subsys_state css; |
1009 |
+- |
1010 |
+- unsigned short weight, ioprio, ioprio_class; |
1011 |
+- |
1012 |
+- spinlock_t lock; |
1013 |
+- struct hlist_head group_data; |
1014 |
+-}; |
1015 |
+-#else |
1016 |
+ struct bfq_group { |
1017 |
+ struct bfq_sched_data sched_data; |
1018 |
+ |
1019 |
+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; |
1020 |
+ struct bfq_queue *async_idle_bfqq; |
1021 |
+ }; |
1022 |
+-#endif |
1023 |
+ |
1024 |
+ static inline struct bfq_service_tree * |
1025 |
+ bfq_entity_service_tree(struct bfq_entity *entity) |
1026 |
+--- a/include/linux/cgroup_subsys.h |
1027 |
++++ b/include/linux/cgroup_subsys.h |
1028 |
+@@ -84,9 +84,3 @@ |
1029 |
+ #endif |
1030 |
+ |
1031 |
+ /* */ |
1032 |
+- |
1033 |
+-#ifdef CONFIG_CGROUP_BFQIO |
1034 |
+-SUBSYS(bfqio) |
1035 |
+-#endif |
1036 |
+- |
1037 |
+-/* */ |