Gentoo Archives: gentoo-commits

From: "Gordon Malm (gengor)" <gengor@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1414 - hardened/2.6/trunk/2.6.25
Date: Wed, 03 Dec 2008 00:32:05
Message-Id: E1L7fem-0008TI-It@stork.gentoo.org
1 Author: gengor
2 Date: 2008-12-03 00:31:56 +0000 (Wed, 03 Dec 2008)
3 New Revision: 1414
4
5 Added:
6 hardened/2.6/trunk/2.6.25/1401_cgroups-fix-invalid-cgrp-dentry-before-cgroup-has-been-completely-removed.patch
7 hardened/2.6/trunk/2.6.25/1402_cpqarry-fix-return-value-of-cpqarray_init.patch
8 hardened/2.6/trunk/2.6.25/1403_ext3-wait-on-all-pending-commits-in-ext3_sync_fs.patch
9 hardened/2.6/trunk/2.6.25/1404_hid-fix-incorrent-length-condition-in-hidraw_write.patch
10 hardened/2.6/trunk/2.6.25/1405_i-oat-fix-async_tx.callback-checking.patch
11 hardened/2.6/trunk/2.6.25/1406_i-oat-fix-channel-resources-free-for-not-allocated-channels.patch
12 hardened/2.6/trunk/2.6.25/1407_i-oat-fix-dma_pin_iovec_pages-error-handling.patch
13 hardened/2.6/trunk/2.6.25/1408_jffs2-fix-lack-of-locking-in-thread_should_wake.patch
14 hardened/2.6/trunk/2.6.25/1409_jffs2-fix-race-condition-in-jffs2_lzo_compress.patch
15 hardened/2.6/trunk/2.6.25/1410_md-linear-fix-a-division-by-zero-bug-for-very-small-arrays.patch
16 hardened/2.6/trunk/2.6.25/1411_mmc-increase-sd-write-timeout-for-crappy-cards.patch
17 hardened/2.6/trunk/2.6.25/1412_net-unix-fix-inflight-counting-bug-in-garbage-collector.patch
18 hardened/2.6/trunk/2.6.25/1413_block-fix-nr_phys_segments-miscalculation-bug.patch
19 hardened/2.6/trunk/2.6.25/1414_dm-raid1-flush-workqueue-before-destruction.patch
20 hardened/2.6/trunk/2.6.25/1415_net-fix-proc-net-snmp-as-memory-corruptor.patch
21 hardened/2.6/trunk/2.6.25/1416_touch_mnt_namespace-when-the-mount-flags-change.patch
22 hardened/2.6/trunk/2.6.25/1417_usb-ehci-remove-obsolete-workaround-for-bogus-IRQs.patch
23 hardened/2.6/trunk/2.6.25/1418_usb-ehci-fix-handling-of-dead-controllers.patch
24 hardened/2.6/trunk/2.6.25/1419_usb-don-t-register-endpoints-for-interfaces-that-are-going-away.patch
25 hardened/2.6/trunk/2.6.25/1420_usb-ehci-fix-divide-by-zero-bug.patch
26 hardened/2.6/trunk/2.6.25/1421_usb-fix-ps3-usb-shutdown-problems.patch
27 hardened/2.6/trunk/2.6.25/1422_v4l-dvb-cve-2008-5033-fix-oops-on-tvaudio-when-controlling-bass-treble.patch
28 hardened/2.6/trunk/2.6.25/1505_hfs-fix-namelength-memory-corruption.patch
29 hardened/2.6/trunk/2.6.25/1506_inotify-fix-watch-removal-or-umount-races.patch
30 hardened/2.6/trunk/2.6.25/1800_sched-disable-hrtick.patch
31 hardened/2.6/trunk/2.6.25/4460_pax-fix-mmap-BUG_ON-task-size-check.patch
32 hardened/2.6/trunk/2.6.25/4465_pax-fix-false-RLIMIT_STACK-warnings.patch
33 Modified:
34 hardened/2.6/trunk/2.6.25/0000_README
35 Log:
36 Update 2.6.25 trunk/
37
38 Modified: hardened/2.6/trunk/2.6.25/0000_README
39 ===================================================================
40 --- hardened/2.6/trunk/2.6.25/0000_README 2008-12-03 00:31:33 UTC (rev 1413)
41 +++ hardened/2.6/trunk/2.6.25/0000_README 2008-12-03 00:31:56 UTC (rev 1414)
42 @@ -15,6 +15,16 @@
43 From: http://www.kernel.org
44 Desc: Linux 2.6.25.20
45
46 +Patch: 1401* -> 1412*
47 +From: http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git;
48 + a=commit;h=ff413e9814b3914ddf3d4634e9a6cc1c6b21e787
49 +Desc: Backported subset of 2.6.27.6 -stable release patches
50 +
51 +Patch: 1413* -> 1422*
52 +From: http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git;
53 + a=commit;h=526550b7f86d9d395ee1b27ed804e6ffc3feb17c
54 +Desc: Backported subset of 2.6.27.7 -stable release patches
55 +
56 Patch: 1503_hfsplus-check-read_mapping_page-return-value.patch
57 From: Eric Sesterhenn <snakebyte@×××.de>
58 Desc: hfsplus: check return value of read_mapping_page()
59 @@ -23,6 +33,18 @@
60 From: Eric Sesterhenn <snakebyte@×××.de>
61 Desc: hfsplus: fix buffer overflow when mounting corrupted image
62
63 +Patch: 1505_hfs-fix-namelength-memory-corruption.patch
64 +From: Eric Sesterhenn <snakebyte@×××.de>
65 +Desc: hfs: Fix stack corruption due to corrupted hfs filesystem
66 +
67 +Patch: 1506_inotify-fix-watch-removal-or-umount-races.patch
68 +From: Al Viro <viro@×××××××××××××××.uk>
69 +Desc: Fix inotify watch removal/umount races (bug #248754)
70 +
71 +Patch: 1800_sched-disable-hrtick.patch
72 +From: Kerin Millar <kerframil@×××××.com>
73 +Desc: Disable high-resolution scheduler ticks (bug #247453)
74 +
75 Patch: 4420_grsec-2.1.12-2.6.25.16-200808201644.patch
76 From: http://www.grsecurity.net
77 Desc: hardened-sources base patch from upstream grsecurity
78 @@ -70,3 +92,11 @@
79 Patch: 4455_pax-fix-uvesafb-compile-and-misc.patch
80 From: Gordon Malm <gengor@g.o>
81 Desc: Fixes compilation and miscellaneous other problems in uvesafb
82 +
83 +Patch: 4460_pax-fix-mmap-BUG_ON-task-size-check.patch
84 +From: Gordon Malm <gengor@g.o>
85 +Desc: Fix incorrect vma task size check under SEGMEXEC
86 +
87 +Patch: 4465_pax-fix-false-RLIMIT_STACK-warnings.patch
88 +From: Gordon Malm <gengor@g.o>
89 +Desc: Fix false-positive RLIMIT_STACK warnings
90
91 Added: hardened/2.6/trunk/2.6.25/1401_cgroups-fix-invalid-cgrp-dentry-before-cgroup-has-been-completely-removed.patch
92 ===================================================================
93 --- hardened/2.6/trunk/2.6.25/1401_cgroups-fix-invalid-cgrp-dentry-before-cgroup-has-been-completely-removed.patch (rev 0)
94 +++ hardened/2.6/trunk/2.6.25/1401_cgroups-fix-invalid-cgrp-dentry-before-cgroup-has-been-completely-removed.patch 2008-12-03 00:31:56 UTC (rev 1414)
95 @@ -0,0 +1,65 @@
96 +Added-By: Gordon Malm <gengor@g.o>
97 +
98 +---
99 +
100 +From jejb@××××××.org Mon Nov 10 15:14:35 2008
101 +From: Li Zefan <lizf@××××××××××.com>
102 +Date: Fri, 7 Nov 2008 00:05:48 GMT
103 +Subject: cgroups: fix invalid cgrp->dentry before cgroup has been completely removed
104 +To: stable@××××××.org
105 +Message-ID: <200811070005.mA705mbU003066@×××××××××××.org>
106 +
107 +From: Li Zefan <lizf@××××××××××.com>
108 +
109 +commit 24eb089950ce44603b30a3145a2c8520e2b55bb1 upstream
110 +
111 +This fixes an oops when reading /proc/sched_debug.
112 +
113 +A cgroup won't be removed completely until finishing cgroup_diput(), so we
114 +shouldn't invalidate cgrp->dentry in cgroup_rmdir(). Otherwise, when a
115 +group is being removed while cgroup_path() gets called, we may trigger
116 +NULL dereference BUG.
117 +
118 +The bug can be reproduced:
119 +
120 + # cat test.sh
121 + #!/bin/sh
122 + mount -t cgroup -o cpu xxx /mnt
123 + for (( ; ; ))
124 + {
125 + mkdir /mnt/sub
126 + rmdir /mnt/sub
127 + }
128 + # ./test.sh &
129 + # cat /proc/sched_debug
130 +
131 +BUG: unable to handle kernel NULL pointer dereference at 00000038
132 +IP: [<c045a47f>] cgroup_path+0x39/0x90
133 +..
134 +Call Trace:
135 + [<c0420344>] ? print_cfs_rq+0x6e/0x75d
136 + [<c0421160>] ? sched_debug_show+0x72d/0xc1e
137 +..
138 +
139 +Signed-off-by: Li Zefan <lizf@××××××××××.com>
140 +Acked-by: Paul Menage <menage@××××××.com>
141 +Cc: Peter Zijlstra <a.p.zijlstra@××××××.nl>
142 +Cc: Ingo Molnar <mingo@××××.hu>
143 +Signed-off-by: Andrew Morton <akpm@××××××××××××××××.org>
144 +Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
145 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
146 +
147 +---
148 + kernel/cgroup.c | 1 -
149 + 1 file changed, 1 deletion(-)
150 +
151 +--- a/kernel/cgroup.c
152 ++++ b/kernel/cgroup.c
153 +@@ -2443,7 +2443,6 @@ static int cgroup_rmdir(struct inode *un
154 + list_del(&cgrp->sibling);
155 + spin_lock(&cgrp->dentry->d_lock);
156 + d = dget(cgrp->dentry);
157 +- cgrp->dentry = NULL;
158 + spin_unlock(&d->d_lock);
159 +
160 + cgroup_d_remove_dir(d);
161
162 Added: hardened/2.6/trunk/2.6.25/1402_cpqarry-fix-return-value-of-cpqarray_init.patch
163 ===================================================================
164 --- hardened/2.6/trunk/2.6.25/1402_cpqarry-fix-return-value-of-cpqarray_init.patch (rev 0)
165 +++ hardened/2.6/trunk/2.6.25/1402_cpqarry-fix-return-value-of-cpqarray_init.patch 2008-12-03 00:31:56 UTC (rev 1414)
166 @@ -0,0 +1,55 @@
167 +Added-By: Gordon Malm <gengor@g.o>
168 +
169 +---
170 +
171 +From 2197d18ded232ef6eef63cce57b6b21eddf1b7b6 Mon Sep 17 00:00:00 2001
172 +From: Andrey Borzenkov <arvidjaar@××××.ru>
173 +Date: Thu, 6 Nov 2008 12:53:15 -0800
174 +Subject: cpqarry: fix return value of cpqarray_init()
175 +
176 +From: Andrey Borzenkov <arvidjaar@××××.ru>
177 +
178 +commit 2197d18ded232ef6eef63cce57b6b21eddf1b7b6 upstream.
179 +
180 +As reported by Dick Gevers on Compaq ProLiant:
181 +
182 +Oct 13 18:06:51 dvgcpl kernel: Compaq SMART2 Driver (v 2.6.0)
183 +Oct 13 18:06:51 dvgcpl kernel: sys_init_module: 'cpqarray'->init
184 +suspiciously returned 1, it should follow 0/-E convention
185 +Oct 13 18:06:51 dvgcpl kernel: sys_init_module: loading module anyway...
186 +Oct 13 18:06:51 dvgcpl kernel: Pid: 315, comm: modprobe Not tainted
187 +2.6.27-desktop-0.rc8.2mnb #1
188 +Oct 13 18:06:51 dvgcpl kernel: [<c0380612>] ? printk+0x18/0x1e
189 +Oct 13 18:06:51 dvgcpl kernel: [<c0158f85>] sys_init_module+0x155/0x1c0
190 +Oct 13 18:06:51 dvgcpl kernel: [<c0103f06>] syscall_call+0x7/0xb
191 +Oct 13 18:06:51 dvgcpl kernel: =======================
192 +
193 +Make it return 0 on success and -ENODEV if no array was found.
194 +
195 +Reported-by: Dick Gevers <dvgevers@××××××.nl>
196 +Signed-off-by: Andrey Borzenkov <arvidjaar@××××.ru>
197 +Cc: Jens Axboe <jens.axboe@××××××.com>
198 +Signed-off-by: Andrew Morton <akpm@××××××××××××××××.org>
199 +Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
200 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
201 +
202 +---
203 + drivers/block/cpqarray.c | 7 ++++++-
204 + 1 file changed, 6 insertions(+), 1 deletion(-)
205 +
206 +--- a/drivers/block/cpqarray.c
207 ++++ b/drivers/block/cpqarray.c
208 +@@ -567,7 +567,12 @@ static int __init cpqarray_init(void)
209 + num_cntlrs_reg++;
210 + }
211 +
212 +- return(num_cntlrs_reg);
213 ++ if (num_cntlrs_reg)
214 ++ return 0;
215 ++ else {
216 ++ pci_unregister_driver(&cpqarray_pci_driver);
217 ++ return -ENODEV;
218 ++ }
219 + }
220 +
221 + /* Function to find the first free pointer into our hba[] array */
222
223 Added: hardened/2.6/trunk/2.6.25/1403_ext3-wait-on-all-pending-commits-in-ext3_sync_fs.patch
224 ===================================================================
225 --- hardened/2.6/trunk/2.6.25/1403_ext3-wait-on-all-pending-commits-in-ext3_sync_fs.patch (rev 0)
226 +++ hardened/2.6/trunk/2.6.25/1403_ext3-wait-on-all-pending-commits-in-ext3_sync_fs.patch 2008-12-03 00:31:56 UTC (rev 1414)
227 @@ -0,0 +1,82 @@
228 +Added-By: Gordon Malm <gengor@g.o>
229 +
230 +---
231 +
232 +From jejb@××××××.org Mon Nov 10 15:08:55 2008
233 +From: Arthur Jones <ajones@××××××××.com>
234 +Date: Fri, 7 Nov 2008 00:05:17 GMT
235 +Subject: ext3: wait on all pending commits in ext3_sync_fs
236 +To: stable@××××××.org
237 +Message-ID: <200811070005.mA705Htq002320@×××××××××××.org>
238 +
239 +From: Arthur Jones <ajones@××××××××.com>
240 +
241 +commit c87591b719737b4e91eb1a9fa8fd55a4ff1886d6 upstream
242 +
243 +In ext3_sync_fs, we only wait for a commit to finish if we started it, but
244 +there may be one already in progress which will not be synced.
245 +
246 +In the case of a data=ordered umount with pending long symlinks which are
247 +delayed due to a long list of other I/O on the backing block device, this
248 +causes the buffer associated with the long symlinks to not be moved to the
249 +inode dirty list in the second phase of fsync_super. Then, before they
250 +can be dirtied again, kjournald exits, seeing the UMOUNT flag and the
251 +dirty pages are never written to the backing block device, causing long
252 +symlink corruption and exposing new or previously freed block data to
253 +userspace.
254 +
255 +This can be reproduced with a script created
256 +by Eric Sandeen <sandeen@××××××.com>:
257 +
258 + #!/bin/bash
259 +
260 + umount /mnt/test2
261 + mount /dev/sdb4 /mnt/test2
262 + rm -f /mnt/test2/*
263 + dd if=/dev/zero of=/mnt/test2/bigfile bs=1M count=512
264 + touch
265 + /mnt/test2/thisisveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylongfilename
266 + ln -s
267 + /mnt/test2/thisisveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylongfilename
268 + /mnt/test2/link
269 + umount /mnt/test2
270 + mount /dev/sdb4 /mnt/test2
271 + ls /mnt/test2/
272 + umount /mnt/test2
273 +
274 +To ensure all commits are synced, we flush all journal commits now when
275 +sync_fs'ing ext3.
276 +
277 +Signed-off-by: Arthur Jones <ajones@××××××××.com>
278 +Cc: Eric Sandeen <sandeen@××××××.com>
279 +Cc: Theodore Ts'o <tytso@×××.edu>
280 +Cc: <linux-ext4@×××××××××××.org>
281 +Signed-off-by: Andrew Morton <akpm@××××××××××××××××.org>
282 +Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
283 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
284 +
285 +---
286 + fs/ext3/super.c | 11 +++++------
287 + 1 file changed, 5 insertions(+), 6 deletions(-)
288 +
289 +--- a/fs/ext3/super.c
290 ++++ b/fs/ext3/super.c
291 +@@ -2365,13 +2365,12 @@ static void ext3_write_super (struct sup
292 +
293 + static int ext3_sync_fs(struct super_block *sb, int wait)
294 + {
295 +- tid_t target;
296 +-
297 + sb->s_dirt = 0;
298 +- if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
299 +- if (wait)
300 +- log_wait_commit(EXT3_SB(sb)->s_journal, target);
301 +- }
302 ++ if (wait)
303 ++ ext3_force_commit(sb);
304 ++ else
305 ++ journal_start_commit(EXT3_SB(sb)->s_journal, NULL);
306 ++
307 + return 0;
308 + }
309 +
310
311 Added: hardened/2.6/trunk/2.6.25/1404_hid-fix-incorrent-length-condition-in-hidraw_write.patch
312 ===================================================================
313 --- hardened/2.6/trunk/2.6.25/1404_hid-fix-incorrent-length-condition-in-hidraw_write.patch (rev 0)
314 +++ hardened/2.6/trunk/2.6.25/1404_hid-fix-incorrent-length-condition-in-hidraw_write.patch 2008-12-03 00:31:56 UTC (rev 1414)
315 @@ -0,0 +1,48 @@
316 +Added-By: Gordon Malm <gengor@g.o>
317 +
318 +---
319 +
320 +From jkosina@××××.cz Tue Nov 11 15:52:41 2008
321 +From: Jiri Kosina <jkosina@××××.cz>
322 +Date: Tue, 11 Nov 2008 23:45:38 +0100 (CET)
323 +Subject: HID: fix incorrent length condition in hidraw_write()
324 +To: stable@××××××.org
325 +Cc: Paul Stoffregen <paul@××××.com>
326 +Message-ID: <alpine.LNX.1.10.0811112344180.24889@××××××××××.cz>
327 +
328 +From: Jiri Kosina <jkosina@××××.cz>
329 +
330 +upstream commit 2b107d629dc0c35de606bb7b010b829cd247a93a
331 +
332 +From: Jiri Kosina <jkosina@××××.cz>
333 +
334 +The bound check on the buffer length
335 +
336 + if (count > HID_MIN_BUFFER_SIZE)
337 +
338 +is of course incorrent, the proper check is
339 +
340 + if (count > HID_MAX_BUFFER_SIZE)
341 +
342 +Fix it.
343 +
344 +Reported-by: Jerry Ryle <jerry@×××××××××.com>
345 +Signed-off-by: Jiri Kosina <jkosina@××××.cz>
346 +Cc: Paul Stoffregen <paul@××××.com>
347 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
348 +
349 +---
350 + drivers/hid/hidraw.c | 2 +-
351 + 1 file changed, 1 insertion(+), 1 deletion(-)
352 +
353 +--- a/drivers/hid/hidraw.c
354 ++++ b/drivers/hid/hidraw.c
355 +@@ -113,7 +113,7 @@ static ssize_t hidraw_write(struct file
356 + if (!dev->hid_output_raw_report)
357 + return -ENODEV;
358 +
359 +- if (count > HID_MIN_BUFFER_SIZE) {
360 ++ if (count > HID_MAX_BUFFER_SIZE) {
361 + printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
362 + task_pid_nr(current));
363 + return -EINVAL;
364
365 Added: hardened/2.6/trunk/2.6.25/1405_i-oat-fix-async_tx.callback-checking.patch
366 ===================================================================
367 --- hardened/2.6/trunk/2.6.25/1405_i-oat-fix-async_tx.callback-checking.patch (rev 0)
368 +++ hardened/2.6/trunk/2.6.25/1405_i-oat-fix-async_tx.callback-checking.patch 2008-12-03 00:31:56 UTC (rev 1414)
369 @@ -0,0 +1,48 @@
370 +Added-By: Gordon Malm <gengor@g.o>
371 +
372 +Note: Backported to earlier kernels. Original message included below.
373 +
374 +---
375 +
376 +From jejb@××××××.org Tue Nov 11 10:17:05 2008
377 +From: Maciej Sosnowski <maciej.sosnowski@×××××.com>
378 +Date: Tue, 11 Nov 2008 17:50:05 GMT
379 +Subject: I/OAT: fix async_tx.callback checking
380 +To: jejb@××××××.org, stable@××××××.org
381 +Message-ID: <200811111750.mABHo5Ai025612@×××××××××××.org>
382 +
383 +From: Maciej Sosnowski <maciej.sosnowski@×××××.com>
384 +
385 +commit 12ccea24e309d815d058cdc6ee8bf2c4b85f0c5f upstream
386 +
387 +async_tx.callback should be checked for the first
388 +not the last descriptor in the chain.
389 +
390 +Signed-off-by: Maciej Sosnowski <maciej.sosnowski@×××××.com>
391 +Signed-off-by: David S. Miller <davem@×××××××××.net>
392 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
393 +
394 +---
395 + drivers/dma/ioat_dma.c | 4 ++--
396 + 1 file changed, 2 insertions(+), 2 deletions(-)
397 +
398 +--- a/drivers/dma/ioat_dma.c
399 ++++ b/drivers/dma/ioat_dma.c
400 +@@ -251,7 +251,7 @@ static dma_cookie_t ioat1_tx_submit(stru
401 + } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
402 +
403 + hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
404 +- if (new->async_tx.callback) {
405 ++ if (first->async_tx.callback) {
406 + hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
407 + if (first != new) {
408 + /* move callback into to last desc */
409 +@@ -336,7 +336,7 @@ static dma_cookie_t ioat2_tx_submit(stru
410 + } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
411 +
412 + hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
413 +- if (new->async_tx.callback) {
414 ++ if (first->async_tx.callback) {
415 + hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
416 + if (first != new) {
417 + /* move callback into to last desc */
418
419 Added: hardened/2.6/trunk/2.6.25/1406_i-oat-fix-channel-resources-free-for-not-allocated-channels.patch
420 ===================================================================
421 --- hardened/2.6/trunk/2.6.25/1406_i-oat-fix-channel-resources-free-for-not-allocated-channels.patch (rev 0)
422 +++ hardened/2.6/trunk/2.6.25/1406_i-oat-fix-channel-resources-free-for-not-allocated-channels.patch 2008-12-03 00:31:56 UTC (rev 1414)
423 @@ -0,0 +1,54 @@
424 +Added-By: Gordon Malm <gengor@g.o>
425 +
426 +Note: Backported to earlier kernels. Original message below.
427 +
428 +---
429 +
430 +From jejb@××××××.org Tue Nov 11 10:15:37 2008
431 +From: Maciej Sosnowski <maciej.sosnowski@×××××.com>
432 +Date: Tue, 11 Nov 2008 17:50:09 GMT
433 +Subject: I/OAT: fix channel resources free for not allocated channels
434 +To: stable@××××××.org
435 +Message-ID: <200811111750.mABHo9IU025655@×××××××××××.org>
436 +
437 +From: Maciej Sosnowski <maciej.sosnowski@×××××.com>
438 +
439 +commit c3d4f44f50b65b0b0290e357f8739cfb3f4bcaca upstream
440 +
441 +If the ioatdma driver is loaded but not used it does not allocate descriptors.
442 +Before it frees channel resources it should first be sure
443 +that they have been previously allocated.
444 +
445 +Signed-off-by: Maciej Sosnowski <maciej.sosnowski@×××××.com>
446 +Tested-by: Tom Picard <tom.s.picard@×××××.com>
447 +Signed-off-by: Dan Williams <dan.j.williams@×××××.com>
448 +Signed-off-by: David S. Miller <davem@×××××××××.net>
449 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
450 +
451 +---
452 + drivers/dma/ioat_dma.c | 7 +++++++
453 + 1 file changed, 7 insertions(+)
454 +
455 +--- a/drivers/dma/ioat_dma.c
456 ++++ b/drivers/dma/ioat_dma.c
457 +@@ -524,6 +524,12 @@ static void ioat_dma_free_chan_resources
458 + struct ioat_desc_sw *desc, *_desc;
459 + int in_use_descs = 0;
460 +
461 ++ /* Before freeing channel resources first check
462 ++ * if they have been previously allocated for this channel.
463 ++ */
464 ++ if (ioat_chan->desccount == 0)
465 ++ return;
466 ++
467 + tasklet_disable(&ioat_chan->cleanup_task);
468 + ioat_dma_memcpy_cleanup(ioat_chan);
469 +
470 +@@ -585,6 +591,7 @@ static void ioat_dma_free_chan_resources
471 + ioat_chan->last_completion = ioat_chan->completion_addr = 0;
472 + ioat_chan->pending = 0;
473 + ioat_chan->dmacount = 0;
474 ++ ioat_chan->desccount = 0;
475 + }
476 +
477 + /**
478
479 Added: hardened/2.6/trunk/2.6.25/1407_i-oat-fix-dma_pin_iovec_pages-error-handling.patch
480 ===================================================================
481 --- hardened/2.6/trunk/2.6.25/1407_i-oat-fix-dma_pin_iovec_pages-error-handling.patch (rev 0)
482 +++ hardened/2.6/trunk/2.6.25/1407_i-oat-fix-dma_pin_iovec_pages-error-handling.patch 2008-12-03 00:31:56 UTC (rev 1414)
483 @@ -0,0 +1,89 @@
484 +Added-By: Gordon Malm <gengor@g.o>
485 +
486 +---
487 +
488 +From jejb@××××××.org Tue Nov 11 10:16:31 2008
489 +From: Maciej Sosnowski <maciej.sosnowski@×××××.com>
490 +Date: Tue, 11 Nov 2008 17:50:07 GMT
491 +Subject: I/OAT: fix dma_pin_iovec_pages() error handling
492 +To: stable@××××××.org
493 +Message-ID: <200811111750.mABHo7v5025633@×××××××××××.org>
494 +
495 +From: Maciej Sosnowski <maciej.sosnowski@×××××.com>
496 +
497 +commit c2c0b4c5434c0a25f7f7796b29155d53805909f5 upstream
498 +
499 +Error handling needs to be modified in dma_pin_iovec_pages().
500 +It should return NULL instead of ERR_PTR
501 +(pinned_list is checked for NULL in tcp_recvmsg() to determine
502 +if iovec pages have been successfully pinned down).
503 +In case of error for the first iovec,
504 +local_list->nr_iovecs needs to be initialized.
505 +
506 +Signed-off-by: Maciej Sosnowski <maciej.sosnowski@×××××.com>
507 +Signed-off-by: David S. Miller <davem@×××××××××.net>
508 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
509 +
510 +---
511 + drivers/dma/iovlock.c | 17 ++++++-----------
512 + 1 file changed, 6 insertions(+), 11 deletions(-)
513 +
514 +--- a/drivers/dma/iovlock.c
515 ++++ b/drivers/dma/iovlock.c
516 +@@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pa
517 + int nr_iovecs = 0;
518 + int iovec_len_used = 0;
519 + int iovec_pages_used = 0;
520 +- long err;
521 +
522 + /* don't pin down non-user-based iovecs */
523 + if (segment_eq(get_fs(), KERNEL_DS))
524 +@@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pa
525 + local_list = kmalloc(sizeof(*local_list)
526 + + (nr_iovecs * sizeof (struct dma_page_list))
527 + + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
528 +- if (!local_list) {
529 +- err = -ENOMEM;
530 ++ if (!local_list)
531 + goto out;
532 +- }
533 +
534 + /* list of pages starts right after the page list array */
535 + pages = (struct page **) &local_list->page_list[nr_iovecs];
536 +
537 ++ local_list->nr_iovecs = 0;
538 ++
539 + for (i = 0; i < nr_iovecs; i++) {
540 + struct dma_page_list *page_list = &local_list->page_list[i];
541 +
542 + len -= iov[i].iov_len;
543 +
544 +- if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
545 +- err = -EFAULT;
546 ++ if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
547 + goto unpin;
548 +- }
549 +
550 + page_list->nr_pages = num_pages_spanned(&iov[i]);
551 + page_list->base_address = iov[i].iov_base;
552 +@@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pa
553 + NULL);
554 + up_read(&current->mm->mmap_sem);
555 +
556 +- if (ret != page_list->nr_pages) {
557 +- err = -ENOMEM;
558 ++ if (ret != page_list->nr_pages)
559 + goto unpin;
560 +- }
561 +
562 + local_list->nr_iovecs = i + 1;
563 + }
564 +@@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pa
565 + unpin:
566 + dma_unpin_iovec_pages(local_list);
567 + out:
568 +- return ERR_PTR(err);
569 ++ return NULL;
570 + }
571 +
572 + void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
573
574 Added: hardened/2.6/trunk/2.6.25/1408_jffs2-fix-lack-of-locking-in-thread_should_wake.patch
575 ===================================================================
576 --- hardened/2.6/trunk/2.6.25/1408_jffs2-fix-lack-of-locking-in-thread_should_wake.patch (rev 0)
577 +++ hardened/2.6/trunk/2.6.25/1408_jffs2-fix-lack-of-locking-in-thread_should_wake.patch 2008-12-03 00:31:56 UTC (rev 1414)
578 @@ -0,0 +1,51 @@
579 +Added-By: Gordon Malm <gengor@g.o>
580 +
581 +---
582 +
583 +From jejb@××××××.org Tue Nov 11 09:53:44 2008
584 +From: David Woodhouse <David.Woodhouse@×××××.com>
585 +Date: Fri, 7 Nov 2008 00:08:59 GMT
586 +Subject: JFFS2: Fix lack of locking in thread_should_wake()
587 +To: stable@××××××.org
588 +Message-ID: <200811070008.mA708xQE008191@×××××××××××.org>
589 +
590 +From: David Woodhouse <David.Woodhouse@×××××.com>
591 +
592 +commit b27cf88e9592953ae292d05324887f2f44979433 upstream
593 +
594 +The thread_should_wake() function trawls through the list of 'very
595 +dirty' eraseblocks, determining whether the background GC thread should
596 +wake. Doing this without holding the appropriate locks is a bad idea.
597 +
598 +OLPC Trac #8615
599 +
600 +Signed-off-by: David Woodhouse <David.Woodhouse@×××××.com>
601 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
602 +
603 +---
604 + fs/jffs2/background.c | 10 +++++-----
605 + 1 file changed, 5 insertions(+), 5 deletions(-)
606 +
607 +--- a/fs/jffs2/background.c
608 ++++ b/fs/jffs2/background.c
609 +@@ -85,15 +85,15 @@ static int jffs2_garbage_collect_thread(
610 + for (;;) {
611 + allow_signal(SIGHUP);
612 + again:
613 ++ spin_lock(&c->erase_completion_lock);
614 + if (!jffs2_thread_should_wake(c)) {
615 + set_current_state (TASK_INTERRUPTIBLE);
616 ++ spin_unlock(&c->erase_completion_lock);
617 + D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
618 +- /* Yes, there's a race here; we checked jffs2_thread_should_wake()
619 +- before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
620 +- matter - We don't care if we miss a wakeup, because the GC thread
621 +- is only an optimisation anyway. */
622 + schedule();
623 +- }
624 ++ } else
625 ++ spin_unlock(&c->erase_completion_lock);
626 ++
627 +
628 + /* This thread is purely an optimisation. But if it runs when
629 + other things could be running, it actually makes things a
630
631 Added: hardened/2.6/trunk/2.6.25/1409_jffs2-fix-race-condition-in-jffs2_lzo_compress.patch
632 ===================================================================
633 --- hardened/2.6/trunk/2.6.25/1409_jffs2-fix-race-condition-in-jffs2_lzo_compress.patch (rev 0)
634 +++ hardened/2.6/trunk/2.6.25/1409_jffs2-fix-race-condition-in-jffs2_lzo_compress.patch 2008-12-03 00:31:56 UTC (rev 1414)
635 @@ -0,0 +1,70 @@
636 +Added-By: Gordon Malm <gengor@g.o>
637 +
638 +---
639 +
640 +From jejb@××××××.org Tue Nov 11 09:53:08 2008
641 +From: Geert Uytterhoeven <Geert.Uytterhoeven@×××××××.com>
642 +Date: Fri, 7 Nov 2008 00:08:19 GMT
643 +Subject: JFFS2: fix race condition in jffs2_lzo_compress()
644 +To: stable@××××××.org
645 +Message-ID: <200811070008.mA708Jdo007031@×××××××××××.org>
646 +
647 +From: Geert Uytterhoeven <Geert.Uytterhoeven@×××××××.com>
648 +
649 +commit dc8a0843a435b2c0891e7eaea64faaf1ebec9b11 upstream
650 +
651 +deflate_mutex protects the globals lzo_mem and lzo_compress_buf. However,
652 +jffs2_lzo_compress() unlocks deflate_mutex _before_ it has copied out the
653 +compressed data from lzo_compress_buf. Correct this by moving the mutex
654 +unlock after the copy.
655 +
656 +In addition, document what deflate_mutex actually protects.
657 +
658 +Signed-off-by: Geert Uytterhoeven <Geert.Uytterhoeven@×××××××.com>
659 +Acked-by: Richard Purdie <rpurdie@××××××××××.com>
660 +Signed-off-by: Andrew Morton <akpm@××××××××××××××××.org>
661 +Signed-off-by: David Woodhouse <David.Woodhouse@×××××.com>
662 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
663 +
664 +---
665 + fs/jffs2/compr_lzo.c | 15 +++++++++------
666 + 1 file changed, 9 insertions(+), 6 deletions(-)
667 +
668 +--- a/fs/jffs2/compr_lzo.c
669 ++++ b/fs/jffs2/compr_lzo.c
670 +@@ -19,7 +19,7 @@
671 +
672 + static void *lzo_mem;
673 + static void *lzo_compress_buf;
674 +-static DEFINE_MUTEX(deflate_mutex);
675 ++static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
676 +
677 + static void free_workspace(void)
678 + {
679 +@@ -49,18 +49,21 @@ static int jffs2_lzo_compress(unsigned c
680 +
681 + mutex_lock(&deflate_mutex);
682 + ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
683 +- mutex_unlock(&deflate_mutex);
684 +-
685 + if (ret != LZO_E_OK)
686 +- return -1;
687 ++ goto fail;
688 +
689 + if (compress_size > *dstlen)
690 +- return -1;
691 ++ goto fail;
692 +
693 + memcpy(cpage_out, lzo_compress_buf, compress_size);
694 +- *dstlen = compress_size;
695 ++ mutex_unlock(&deflate_mutex);
696 +
697 ++ *dstlen = compress_size;
698 + return 0;
699 ++
700 ++ fail:
701 ++ mutex_unlock(&deflate_mutex);
702 ++ return -1;
703 + }
704 +
705 + static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
706
707 Added: hardened/2.6/trunk/2.6.25/1410_md-linear-fix-a-division-by-zero-bug-for-very-small-arrays.patch
708 ===================================================================
709 --- hardened/2.6/trunk/2.6.25/1410_md-linear-fix-a-division-by-zero-bug-for-very-small-arrays.patch (rev 0)
710 +++ hardened/2.6/trunk/2.6.25/1410_md-linear-fix-a-division-by-zero-bug-for-very-small-arrays.patch 2008-12-03 00:31:56 UTC (rev 1414)
711 @@ -0,0 +1,51 @@
712 +Added-By: Gordon Malm <gengor@g.o>
713 +
714 +Note: Changed patch slightly to eliminate fuzz.
715 +
716 +---
717 +
718 +From jejb@××××××.org Tue Nov 11 09:47:32 2008
719 +From: Andre Noll <maan@×××××××××××.org>
720 +Date: Fri, 7 Nov 2008 00:07:46 GMT
721 +Subject: md: linear: Fix a division by zero bug for very small arrays.
722 +To: stable@××××××.org
723 +Message-ID: <200811070007.mA707k6d006270@×××××××××××.org>
724 +
725 +From: Andre Noll <maan@×××××××××××.org>
726 +
727 +commit f1cd14ae52985634d0389e934eba25b5ecf24565 upstream
728 +
729 +Date: Thu, 6 Nov 2008 19:41:24 +1100
730 +Subject: md: linear: Fix a division by zero bug for very small arrays.
731 +
732 +We currently oops with a divide error on starting a linear software
733 +raid array consisting of at least two very small (< 500K) devices.
734 +
735 +The bug is caused by the calculation of the hash table size which
736 +tries to compute sector_div(sz, base) with "base" being zero due to
737 +the small size of the component devices of the array.
738 +
739 +Fix this by requiring the hash spacing to be at least one which
740 +implies that also "base" is non-zero.
741 +
742 +This bug has existed since about 2.6.14.
743 +
744 +Signed-off-by: Andre Noll <maan@×××××××××××.org>
745 +Signed-off-by: NeilBrown <neilb@××××.de>
746 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
747 +
748 +---
749 + drivers/md/linear.c | 2 ++
750 + 1 file changed, 2 insertions(+)
751 +
752 +--- a/drivers/md/linear.c
753 ++++ b/drivers/md/linear.c
754 +@@ -157,6 +157,8 @@ static linear_conf_t *linear_conf(mddev_
755 +
756 + min_spacing = conf->array_size;
757 + sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
758 ++ if (min_spacing == 0)
759 ++ min_spacing = 1;
760 +
761 + /* min_spacing is the minimum spacing that will fit the hash
762 + * table in one PAGE. This may be much smaller than needed.
763
764 Added: hardened/2.6/trunk/2.6.25/1411_mmc-increase-sd-write-timeout-for-crappy-cards.patch
765 ===================================================================
766 --- hardened/2.6/trunk/2.6.25/1411_mmc-increase-sd-write-timeout-for-crappy-cards.patch (rev 0)
767 +++ hardened/2.6/trunk/2.6.25/1411_mmc-increase-sd-write-timeout-for-crappy-cards.patch 2008-12-03 00:31:56 UTC (rev 1414)
768 @@ -0,0 +1,42 @@
769 +Added-By: Gordon Malm <gengor@g.o>
770 +
771 +---
772 +
773 +From 493890e75d98810a3470b4aae23be628ee5e9667 Mon Sep 17 00:00:00 2001
774 +From: Pierre Ossman <drzeus@××××××.cx>
775 +Date: Sun, 26 Oct 2008 12:37:25 +0100
776 +Subject: mmc: increase SD write timeout for crappy cards
777 +
778 +From: Pierre Ossman <drzeus@××××××.cx>
779 +
780 +commit 493890e75d98810a3470b4aae23be628ee5e9667 upstream.
781 +
782 +It seems that some cards are slightly out of spec and occasionally
783 +will not be able to complete a write in the alloted 250 ms [1].
784 +Incease the timeout slightly to allow even these cards to function
785 +properly.
786 +
787 +[1] http://lkml.org/lkml/2008/9/23/390
788 +
789 +Signed-off-by: Pierre Ossman <drzeus@××××××.cx>
790 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
791 +
792 +---
793 + drivers/mmc/core/core.c | 6 +++++-
794 + 1 file changed, 5 insertions(+), 1 deletion(-)
795 +
796 +--- a/drivers/mmc/core/core.c
797 ++++ b/drivers/mmc/core/core.c
798 +@@ -280,7 +280,11 @@ void mmc_set_data_timeout(struct mmc_dat
799 + (card->host->ios.clock / 1000);
800 +
801 + if (data->flags & MMC_DATA_WRITE)
802 +- limit_us = 250000;
803 ++ /*
804 ++ * The limit is really 250 ms, but that is
805 ++ * insufficient for some crappy cards.
806 ++ */
807 ++ limit_us = 300000;
808 + else
809 + limit_us = 100000;
810 +
811
812 Added: hardened/2.6/trunk/2.6.25/1412_net-unix-fix-inflight-counting-bug-in-garbage-collector.patch
813 ===================================================================
814 --- hardened/2.6/trunk/2.6.25/1412_net-unix-fix-inflight-counting-bug-in-garbage-collector.patch (rev 0)
815 +++ hardened/2.6/trunk/2.6.25/1412_net-unix-fix-inflight-counting-bug-in-garbage-collector.patch 2008-12-03 00:31:56 UTC (rev 1414)
816 @@ -0,0 +1,213 @@
817 +Added-By: Gordon Malm <gengor@g.o>
818 +
819 +Note: Backported to earlier kernels. Original message included below.
820 +
821 +---
822 +
823 +From jejb@××××××.org Tue Nov 11 09:59:05 2008
824 +From: Miklos Szeredi <mszeredi@××××.cz>
825 +Date: Sun, 9 Nov 2008 19:50:02 GMT
826 +Subject: net: unix: fix inflight counting bug in garbage collector
827 +To: stable@××××××.org
828 +Message-ID: <200811091950.mA9Jo2iL003804@×××××××××××.org>
829 +
830 +From: Miklos Szeredi <mszeredi@××××.cz>
831 +
832 +commit 6209344f5a3795d34b7f2c0061f49802283b6bdd upstream
833 +
834 +Previously I assumed that the receive queues of candidates don't
835 +change during the GC. This is only half true, nothing can be received
836 +from the queues (see comment in unix_gc()), but buffers could be added
837 +through the other half of the socket pair, which may still have file
838 +descriptors referring to it.
839 +
840 +This can result in inc_inflight_move_tail() erronously increasing the
841 +"inflight" counter for a unix socket for which dec_inflight() wasn't
842 +previously called. This in turn can trigger the "BUG_ON(total_refs <
843 +inflight_refs)" in a later garbage collection run.
844 +
845 +Fix this by only manipulating the "inflight" counter for sockets which
846 +are candidates themselves. Duplicating the file references in
847 +unix_attach_fds() is also needed to prevent a socket becoming a
848 +candidate for GC while the skb that contains it is not yet queued.
849 +
850 +Reported-by: Andrea Bittau <a.bittau@×××××××××.uk>
851 +Signed-off-by: Miklos Szeredi <mszeredi@××××.cz>
852 +Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
853 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
854 +
855 +---
856 + include/net/af_unix.h | 1 +
857 + net/unix/af_unix.c | 31 ++++++++++++++++++++++++-------
858 + net/unix/garbage.c | 49 +++++++++++++++++++++++++++++++++++++------------
859 + 3 files changed, 62 insertions(+), 19 deletions(-)
860 +
861 +--- a/include/net/af_unix.h
862 ++++ b/include/net/af_unix.h
863 +@@ -54,6 +54,7 @@ struct unix_sock {
864 + atomic_t inflight;
865 + spinlock_t lock;
866 + unsigned int gc_candidate : 1;
867 ++ unsigned int gc_maybe_cycle : 1;
868 + wait_queue_head_t peer_wait;
869 + };
870 + #define unix_sk(__sk) ((struct unix_sock *)__sk)
871 +--- a/net/unix/af_unix.c
872 ++++ b/net/unix/af_unix.c
873 +@@ -1302,14 +1302,23 @@ static void unix_destruct_fds(struct sk_
874 + sock_wfree(skb);
875 + }
876 +
877 +-static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
878 ++static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
879 + {
880 + int i;
881 ++
882 ++ /*
883 ++ * Need to duplicate file references for the sake of garbage
884 ++ * collection. Otherwise a socket in the fps might become a
885 ++ * candidate for GC while the skb is not yet queued.
886 ++ */
887 ++ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
888 ++ if (!UNIXCB(skb).fp)
889 ++ return -ENOMEM;
890 ++
891 + for (i=scm->fp->count-1; i>=0; i--)
892 + unix_inflight(scm->fp->fp[i]);
893 +- UNIXCB(skb).fp = scm->fp;
894 + skb->destructor = unix_destruct_fds;
895 +- scm->fp = NULL;
896 ++ return 0;
897 + }
898 +
899 + /*
900 +@@ -1368,8 +1377,11 @@ static int unix_dgram_sendmsg(struct kio
901 + goto out;
902 +
903 + memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
904 +- if (siocb->scm->fp)
905 +- unix_attach_fds(siocb->scm, skb);
906 ++ if (siocb->scm->fp) {
907 ++ err = unix_attach_fds(siocb->scm, skb);
908 ++ if (err)
909 ++ goto out_free;
910 ++ }
911 + unix_get_secdata(siocb->scm, skb);
912 +
913 + skb_reset_transport_header(skb);
914 +@@ -1538,8 +1550,13 @@ static int unix_stream_sendmsg(struct ki
915 + size = min_t(int, size, skb_tailroom(skb));
916 +
917 + memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
918 +- if (siocb->scm->fp)
919 +- unix_attach_fds(siocb->scm, skb);
920 ++ if (siocb->scm->fp) {
921 ++ err = unix_attach_fds(siocb->scm, skb);
922 ++ if (err) {
923 ++ kfree_skb(skb);
924 ++ goto out_err;
925 ++ }
926 ++ }
927 +
928 + if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
929 + kfree_skb(skb);
930 +--- a/net/unix/garbage.c
931 ++++ b/net/unix/garbage.c
932 +@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x
933 + */
934 + struct sock *sk = unix_get_socket(*fp++);
935 + if (sk) {
936 +- hit = true;
937 +- func(unix_sk(sk));
938 ++ struct unix_sock *u = unix_sk(sk);
939 ++
940 ++ /*
941 ++ * Ignore non-candidates, they could
942 ++ * have been added to the queues after
943 ++ * starting the garbage collection
944 ++ */
945 ++ if (u->gc_candidate) {
946 ++ hit = true;
947 ++ func(u);
948 ++ }
949 + }
950 + }
951 + if (hit && hitlist != NULL) {
952 +@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struc
953 + {
954 + atomic_inc(&u->inflight);
955 + /*
956 +- * If this is still a candidate, move it to the end of the
957 +- * list, so that it's checked even if it was already passed
958 +- * over
959 ++ * If this still might be part of a cycle, move it to the end
960 ++ * of the list, so that it's checked even if it was already
961 ++ * passed over
962 + */
963 +- if (u->gc_candidate)
964 ++ if (u->gc_maybe_cycle)
965 + list_move_tail(&u->link, &gc_candidates);
966 + }
967 +
968 +@@ -267,6 +276,7 @@ void unix_gc(void)
969 + struct unix_sock *next;
970 + struct sk_buff_head hitlist;
971 + struct list_head cursor;
972 ++ LIST_HEAD(not_cycle_list);
973 +
974 + spin_lock(&unix_gc_lock);
975 +
976 +@@ -282,10 +292,14 @@ void unix_gc(void)
977 + *
978 + * Holding unix_gc_lock will protect these candidates from
979 + * being detached, and hence from gaining an external
980 +- * reference. This also means, that since there are no
981 +- * possible receivers, the receive queues of these sockets are
982 +- * static during the GC, even though the dequeue is done
983 +- * before the detach without atomicity guarantees.
984 ++ * reference. Since there are no possible receivers, all
985 ++ * buffers currently on the candidates' queues stay there
986 ++ * during the garbage collection.
987 ++ *
988 ++ * We also know that no new candidate can be added onto the
989 ++ * receive queues. Other, non candidate sockets _can_ be
990 ++ * added to queue, so we must make sure only to touch
991 ++ * candidates.
992 + */
993 + list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
994 + int total_refs;
995 +@@ -299,6 +313,7 @@ void unix_gc(void)
996 + if (total_refs == inflight_refs) {
997 + list_move_tail(&u->link, &gc_candidates);
998 + u->gc_candidate = 1;
999 ++ u->gc_maybe_cycle = 1;
1000 + }
1001 + }
1002 +
1003 +@@ -325,14 +340,24 @@ void unix_gc(void)
1004 + list_move(&cursor, &u->link);
1005 +
1006 + if (atomic_read(&u->inflight) > 0) {
1007 +- list_move_tail(&u->link, &gc_inflight_list);
1008 +- u->gc_candidate = 0;
1009 ++ list_move_tail(&u->link, &not_cycle_list);
1010 ++ u->gc_maybe_cycle = 0;
1011 + scan_children(&u->sk, inc_inflight_move_tail, NULL);
1012 + }
1013 + }
1014 + list_del(&cursor);
1015 +
1016 + /*
1017 ++ * not_cycle_list contains those sockets which do not make up a
1018 ++ * cycle. Restore these to the inflight list.
1019 ++ */
1020 ++ while (!list_empty(&not_cycle_list)) {
1021 ++ u = list_entry(not_cycle_list.next, struct unix_sock, link);
1022 ++ u->gc_candidate = 0;
1023 ++ list_move_tail(&u->link, &gc_inflight_list);
1024 ++ }
1025 ++
1026 ++ /*
1027 + * Now gc_candidates contains only garbage. Restore original
1028 + * inflight counters for these as well, and remove the skbuffs
1029 + * which are creating the cycle(s).
1030
1031 Added: hardened/2.6/trunk/2.6.25/1413_block-fix-nr_phys_segments-miscalculation-bug.patch
1032 ===================================================================
1033 --- hardened/2.6/trunk/2.6.25/1413_block-fix-nr_phys_segments-miscalculation-bug.patch (rev 0)
1034 +++ hardened/2.6/trunk/2.6.25/1413_block-fix-nr_phys_segments-miscalculation-bug.patch 2008-12-03 00:31:56 UTC (rev 1414)
1035 @@ -0,0 +1,126 @@
1036 +Added-By: Gordon Malm <gengor@g.o>
1037 +
1038 +---
1039 +
1040 +From knikanth@××××.de Thu Nov 13 14:07:47 2008
1041 +From: FUJITA Tomonori <fujita.tomonori@××××××××××.jp>
1042 +Date: Wed, 12 Nov 2008 11:33:54 +0530
1043 +Subject: block: fix nr_phys_segments miscalculation bug
1044 +To: Greg KH <greg@×××××.com>
1045 +Cc: stable@××××××.org, FUJITA Tomonori <fujita.tomonori@××××××××××.jp>
1046 +Message-ID: <200811121133.55404.knikanth@××××.de>
1047 +Content-Disposition: inline
1048 +
1049 +From: FUJITA Tomonori <fujita.tomonori@××××××××××.jp>
1050 +
1051 +commit 8677142710516d986d932d6f1fba7be8382c1fec upstream
1052 +backported by Nikanth Karthikesan <knikanth@××××.de> to the 2.6.27.y tree.
1053 +
1054 +block: fix nr_phys_segments miscalculation bug
1055 +
1056 +This fixes the bug reported by Nikanth Karthikesan <knikanth@××××.de>:
1057 +
1058 +http://lkml.org/lkml/2008/10/2/203
1059 +
1060 +The root cause of the bug is that blk_phys_contig_segment
1061 +miscalculates q->max_segment_size.
1062 +
1063 +blk_phys_contig_segment checks:
1064 +
1065 +req->biotail->bi_size + next_req->bio->bi_size > q->max_segment_size
1066 +
1067 +But blk_recalc_rq_segments might expect that req->biotail and the
1068 +previous bio in the req are supposed be merged into one
1069 +segment. blk_recalc_rq_segments might also expect that next_req->bio
1070 +and the next bio in the next_req are supposed be merged into one
1071 +segment. In such case, we merge two requests that can't be merged
1072 +here. Later, blk_rq_map_sg gives more segments than it should.
1073 +
1074 +We need to keep track of segment size in blk_recalc_rq_segments and
1075 +use it to see if two requests can be merged. This patch implements it
1076 +in the similar way that we used to do for hw merging (virtual
1077 +merging).
1078 +
1079 +Signed-off-by: FUJITA Tomonori <fujita.tomonori@××××××××××.jp>
1080 +Signed-off-by: Jens Axboe <jens.axboe@××××××.com>
1081 +Cc: Nikanth Karthikesan <knikanth@××××.de>
1082 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1083 +
1084 +---
1085 + block/blk-merge.c | 19 +++++++++++++++++--
1086 + include/linux/bio.h | 7 +++++++
1087 + 2 files changed, 24 insertions(+), 2 deletions(-)
1088 +
1089 +--- a/block/blk-merge.c
1090 ++++ b/block/blk-merge.c
1091 +@@ -95,6 +95,9 @@ new_hw_segment:
1092 + nr_hw_segs++;
1093 + }
1094 +
1095 ++ if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
1096 ++ rq->bio->bi_seg_front_size = seg_size;
1097 ++
1098 + nr_phys_segs++;
1099 + bvprv = bv;
1100 + seg_size = bv->bv_len;
1101 +@@ -106,6 +109,10 @@ new_hw_segment:
1102 + rq->bio->bi_hw_front_size = hw_seg_size;
1103 + if (hw_seg_size > rq->biotail->bi_hw_back_size)
1104 + rq->biotail->bi_hw_back_size = hw_seg_size;
1105 ++ if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
1106 ++ rq->bio->bi_seg_front_size = seg_size;
1107 ++ if (seg_size > rq->biotail->bi_seg_back_size)
1108 ++ rq->biotail->bi_seg_back_size = seg_size;
1109 + rq->nr_phys_segments = nr_phys_segs;
1110 + rq->nr_hw_segments = nr_hw_segs;
1111 + }
1112 +@@ -133,7 +140,8 @@ static int blk_phys_contig_segment(struc
1113 +
1114 + if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
1115 + return 0;
1116 +- if (bio->bi_size + nxt->bi_size > q->max_segment_size)
1117 ++ if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
1118 ++ q->max_segment_size)
1119 + return 0;
1120 +
1121 + /*
1122 +@@ -377,6 +385,8 @@ static int ll_merge_requests_fn(struct r
1123 + {
1124 + int total_phys_segments;
1125 + int total_hw_segments;
1126 ++ unsigned int seg_size =
1127 ++ req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
1128 +
1129 + /*
1130 + * First check if the either of the requests are re-queued
1131 +@@ -392,8 +402,13 @@ static int ll_merge_requests_fn(struct r
1132 + return 0;
1133 +
1134 + total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
1135 +- if (blk_phys_contig_segment(q, req->biotail, next->bio))
1136 ++ if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
1137 ++ if (req->nr_phys_segments == 1)
1138 ++ req->bio->bi_seg_front_size = seg_size;
1139 ++ if (next->nr_phys_segments == 1)
1140 ++ next->biotail->bi_seg_back_size = seg_size;
1141 + total_phys_segments--;
1142 ++ }
1143 +
1144 + if (total_phys_segments > q->max_phys_segments)
1145 + return 0;
1146 +--- a/include/linux/bio.h
1147 ++++ b/include/linux/bio.h
1148 +@@ -98,6 +98,13 @@ struct bio {
1149 + unsigned int bi_size; /* residual I/O count */
1150 +
1151 + /*
1152 ++ * To keep track of the max segment size, we account for the
1153 ++ * sizes of the first and last mergeable segments in this bio.
1154 ++ */
1155 ++ unsigned int bi_seg_front_size;
1156 ++ unsigned int bi_seg_back_size;
1157 ++
1158 ++ /*
1159 + * To keep track of the max hw size, we account for the
1160 + * sizes of the first and last virtually mergeable segments
1161 + * in this bio
1162
1163 Added: hardened/2.6/trunk/2.6.25/1414_dm-raid1-flush-workqueue-before-destruction.patch
1164 ===================================================================
1165 --- hardened/2.6/trunk/2.6.25/1414_dm-raid1-flush-workqueue-before-destruction.patch (rev 0)
1166 +++ hardened/2.6/trunk/2.6.25/1414_dm-raid1-flush-workqueue-before-destruction.patch 2008-12-03 00:31:56 UTC (rev 1414)
1167 @@ -0,0 +1,36 @@
1168 +Added-By: Gordon Malm <gengor@g.o>
1169 +
1170 +Note: Backported to 2.6.25. Original message included below.
1171 +
1172 +---
1173 +
1174 +From 18776c7316545482a02bfaa2629a2aa1afc48357 Mon Sep 17 00:00:00 2001
1175 +From: Mikulas Patocka <mpatocka@××××××.com>
1176 +Date: Thu, 13 Nov 2008 23:38:52 +0000
1177 +Subject: dm raid1: flush workqueue before destruction
1178 +
1179 +From: Mikulas Patocka <mpatocka@××××××.com>
1180 +
1181 +commit 18776c7316545482a02bfaa2629a2aa1afc48357 upstream.
1182 +
1183 +We queue work on keventd queue --- so this queue must be flushed in the
1184 +destructor. Otherwise, keventd could access mirror_set after it was freed.
1185 +
1186 +Signed-off-by: Mikulas Patocka <mpatocka@××××××.com>
1187 +Signed-off-by: Alasdair G Kergon <agk@××××××.com>
1188 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1189 +
1190 +---
1191 + drivers/md/dm-raid1.c | 1 +
1192 + 1 file changed, 1 insertion(+)
1193 +
1194 +--- a/drivers/md/dm-raid1.c
1195 ++++ b/drivers/md/dm-raid1.c
1196 +@@ -1590,6 +1590,7 @@ static void mirror_dtr(struct dm_target
1197 + struct mirror_set *ms = (struct mirror_set *) ti->private;
1198 +
1199 + flush_workqueue(ms->kmirrord_wq);
1200 ++ flush_scheduled_work();
1201 + kcopyd_client_destroy(ms->kcopyd_client);
1202 + destroy_workqueue(ms->kmirrord_wq);
1203 + free_context(ms, ti, ms->nr_mirrors);
1204
1205 Added: hardened/2.6/trunk/2.6.25/1415_net-fix-proc-net-snmp-as-memory-corruptor.patch
1206 ===================================================================
1207 --- hardened/2.6/trunk/2.6.25/1415_net-fix-proc-net-snmp-as-memory-corruptor.patch (rev 0)
1208 +++ hardened/2.6/trunk/2.6.25/1415_net-fix-proc-net-snmp-as-memory-corruptor.patch 2008-12-03 00:31:56 UTC (rev 1414)
1209 @@ -0,0 +1,104 @@
1210 +Added-By: Gordon Malm <gengor@g.o>
1211 +
1212 +Note: Backported to earlier kernels. Original message included below.
1213 +
1214 +---
1215 +
1216 +From b971e7ac834e9f4bda96d5a96ae9abccd01c1dd8 Mon Sep 17 00:00:00 2001
1217 +From: Eric Dumazet <dada1@×××××××××.com>
1218 +Date: Mon, 10 Nov 2008 21:43:08 -0800
1219 +Subject: net: fix /proc/net/snmp as memory corruptor
1220 +
1221 +From: Eric Dumazet <dada1@×××××××××.com>
1222 +
1223 +commit b971e7ac834e9f4bda96d5a96ae9abccd01c1dd8 upstream.
1224 +
1225 +icmpmsg_put() can happily corrupt kernel memory, using a static
1226 +table and forgetting to reset an array index in a loop.
1227 +
1228 +Remove the static array since its not safe without proper locking.
1229 +
1230 +Signed-off-by: Alexey Dobriyan <adobriyan@×××××.com>
1231 +Signed-off-by: Eric Dumazet <dada1@×××××××××.com>
1232 +Signed-off-by: David S. Miller <davem@×××××××××.net>
1233 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1234 +
1235 +---
1236 + net/ipv4/proc.c | 58 ++++++++++++++++++++++++++++----------------------------
1237 + 1 file changed, 30 insertions(+), 28 deletions(-)
1238 +
1239 +--- a/net/ipv4/proc.c
1240 ++++ b/net/ipv4/proc.c
1241 +@@ -262,42 +262,44 @@ static const struct snmp_mib snmp4_net_l
1242 + SNMP_MIB_SENTINEL
1243 + };
1244 +
1245 +-static void icmpmsg_put(struct seq_file *seq)
1246 ++static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
1247 ++ unsigned short *type, int count)
1248 + {
1249 +-#define PERLINE 16
1250 +-
1251 +- int j, i, count;
1252 +- static int out[PERLINE];
1253 +-
1254 +- count = 0;
1255 +- for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
1256 +-
1257 +- if (snmp_fold_field((void **) icmpmsg_statistics, i))
1258 +- out[count++] = i;
1259 +- if (count < PERLINE)
1260 +- continue;
1261 ++ int j;
1262 +
1263 +- seq_printf(seq, "\nIcmpMsg:");
1264 +- for (j = 0; j < PERLINE; ++j)
1265 +- seq_printf(seq, " %sType%u", i & 0x100 ? "Out" : "In",
1266 +- i & 0xff);
1267 +- seq_printf(seq, "\nIcmpMsg: ");
1268 +- for (j = 0; j < PERLINE; ++j)
1269 +- seq_printf(seq, " %lu",
1270 +- snmp_fold_field((void **) icmpmsg_statistics,
1271 +- out[j]));
1272 +- seq_putc(seq, '\n');
1273 +- }
1274 + if (count) {
1275 + seq_printf(seq, "\nIcmpMsg:");
1276 + for (j = 0; j < count; ++j)
1277 +- seq_printf(seq, " %sType%u", out[j] & 0x100 ? "Out" :
1278 +- "In", out[j] & 0xff);
1279 ++ seq_printf(seq, " %sType%u",
1280 ++ type[j] & 0x100 ? "Out" : "In",
1281 ++ type[j] & 0xff);
1282 + seq_printf(seq, "\nIcmpMsg:");
1283 + for (j = 0; j < count; ++j)
1284 +- seq_printf(seq, " %lu", snmp_fold_field((void **)
1285 +- icmpmsg_statistics, out[j]));
1286 ++ seq_printf(seq, " %lu", vals[j]);
1287 ++ }
1288 ++}
1289 ++
1290 ++static void icmpmsg_put(struct seq_file *seq)
1291 ++{
1292 ++#define PERLINE 16
1293 ++
1294 ++ int i, count;
1295 ++ unsigned short type[PERLINE];
1296 ++ unsigned long vals[PERLINE], val;
1297 ++
1298 ++ count = 0;
1299 ++ for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
1300 ++ val = snmp_fold_field((void **) icmpmsg_statistics, i);
1301 ++ if (val) {
1302 ++ type[count] = i;
1303 ++ vals[count++] = val;
1304 ++ }
1305 ++ if (count == PERLINE) {
1306 ++ icmpmsg_put_line(seq, vals, type, count);
1307 ++ count = 0;
1308 ++ }
1309 + }
1310 ++ icmpmsg_put_line(seq, vals, type, count);
1311 +
1312 + #undef PERLINE
1313 + }
1314
1315 Added: hardened/2.6/trunk/2.6.25/1416_touch_mnt_namespace-when-the-mount-flags-change.patch
1316 ===================================================================
1317 --- hardened/2.6/trunk/2.6.25/1416_touch_mnt_namespace-when-the-mount-flags-change.patch (rev 0)
1318 +++ hardened/2.6/trunk/2.6.25/1416_touch_mnt_namespace-when-the-mount-flags-change.patch 2008-12-03 00:31:56 UTC (rev 1414)
1319 @@ -0,0 +1,43 @@
1320 +Added-By: Gordon Malm <gengor@g.o>
1321 +
1322 +---
1323 +
1324 +From 0e55a7cca4b66f625d67b292f80b6a976e77c51b Mon Sep 17 00:00:00 2001
1325 +From: Dan Williams <dan.j.williams@×××××.com>
1326 +Date: Fri, 26 Sep 2008 19:01:20 -0700
1327 +Subject: touch_mnt_namespace when the mount flags change
1328 +
1329 +From: Dan Williams <dan.j.williams@×××××.com>
1330 +
1331 +commit 0e55a7cca4b66f625d67b292f80b6a976e77c51b upstream
1332 +
1333 +Daemons that need to be launched while the rootfs is read-only can now
1334 +poll /proc/mounts to be notified when their O_RDWR requests may no
1335 +longer end in EROFS.
1336 +
1337 +Cc: Kay Sievers <kay.sievers@××××.org>
1338 +Cc: Neil Brown <neilb@××××.de>
1339 +Signed-off-by: Dan Williams <dan.j.williams@×××××.com>
1340 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1341 +
1342 +---
1343 + fs/namespace.c | 7 ++++++-
1344 + 1 file changed, 6 insertions(+), 1 deletion(-)
1345 +
1346 +--- a/fs/namespace.c
1347 ++++ b/fs/namespace.c
1348 +@@ -1553,8 +1553,13 @@ static noinline int do_remount(struct na
1349 + if (!err)
1350 + nd->path.mnt->mnt_flags = mnt_flags;
1351 + up_write(&sb->s_umount);
1352 +- if (!err)
1353 ++ if (!err) {
1354 + security_sb_post_remount(nd->path.mnt, flags, data);
1355 ++
1356 ++ spin_lock(&vfsmount_lock);
1357 ++ touch_mnt_namespace(nd->path.mnt->mnt_ns);
1358 ++ spin_unlock(&vfsmount_lock);
1359 ++ }
1360 + return err;
1361 + }
1362 +
1363
1364 Added: hardened/2.6/trunk/2.6.25/1417_usb-ehci-remove-obsolete-workaround-for-bogus-IRQs.patch
1365 ===================================================================
1366 --- hardened/2.6/trunk/2.6.25/1417_usb-ehci-remove-obsolete-workaround-for-bogus-IRQs.patch (rev 0)
1367 +++ hardened/2.6/trunk/2.6.25/1417_usb-ehci-remove-obsolete-workaround-for-bogus-IRQs.patch 2008-12-03 00:31:56 UTC (rev 1414)
1368 @@ -0,0 +1,54 @@
1369 +Added-By: Gordon Malm <gengor@g.o>
1370 +
1371 +---
1372 +
1373 +From: David Brownell <david-b@×××××××.net>
1374 +Date: Thu, 6 Mar 2008 07:37:52 +0000 (-0800)
1375 +Subject: USB: ehci: remove obsolete workaround for bogus IRQs
1376 +X-Git-Tag: v2.6.26-rc1~1061^2~74
1377 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=d1b1842c393cf322712b669ec887397b89ed2312
1378 +
1379 +USB: ehci: remove obsolete workaround for bogus IRQs
1380 +
1381 +It was pointed out that we found and fixed the cause of the "bogus"
1382 +fatal IRQ reports some time ago ... this patch removes the code
1383 +which was working around that bug ("status" got clobbered), and a
1384 +comment which needlessly confused folk reading this code.
1385 +
1386 +This also includes a minor cleanup to the code which fixed that bug.
1387 +
1388 +Signed-off-by: David Brownell <dbrownell@×××××××××××××××××.net>
1389 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1390 +---
1391 +
1392 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
1393 +index 40f7391..8c3e860 100644
1394 +--- a/drivers/usb/host/ehci-hcd.c
1395 ++++ b/drivers/usb/host/ehci-hcd.c
1396 +@@ -686,6 +686,8 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
1397 + /* remote wakeup [4.3.1] */
1398 + if (status & STS_PCD) {
1399 + unsigned i = HCS_N_PORTS (ehci->hcs_params);
1400 ++
1401 ++ /* kick root hub later */
1402 + pcd_status = status;
1403 +
1404 + /* resume root hub? */
1405 +@@ -714,8 +716,6 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
1406 +
1407 + /* PCI errors [4.15.2.4] */
1408 + if (unlikely ((status & STS_FATAL) != 0)) {
1409 +- /* bogus "fatal" IRQs appear on some chips... why? */
1410 +- status = ehci_readl(ehci, &ehci->regs->status);
1411 + dbg_cmd (ehci, "fatal", ehci_readl(ehci,
1412 + &ehci->regs->command));
1413 + dbg_status (ehci, "fatal", status);
1414 +@@ -734,7 +734,7 @@ dead:
1415 + if (bh)
1416 + ehci_work (ehci);
1417 + spin_unlock (&ehci->lock);
1418 +- if (pcd_status & STS_PCD)
1419 ++ if (pcd_status)
1420 + usb_hcd_poll_rh_status(hcd);
1421 + return IRQ_HANDLED;
1422 + }
1423
1424 Added: hardened/2.6/trunk/2.6.25/1418_usb-ehci-fix-handling-of-dead-controllers.patch
1425 ===================================================================
1426 --- hardened/2.6/trunk/2.6.25/1418_usb-ehci-fix-handling-of-dead-controllers.patch (rev 0)
1427 +++ hardened/2.6/trunk/2.6.25/1418_usb-ehci-fix-handling-of-dead-controllers.patch 2008-12-03 00:31:56 UTC (rev 1414)
1428 @@ -0,0 +1,93 @@
1429 +Added-By: Gordon Malm <gengor@g.o>
1430 +
1431 +---
1432 +
1433 +From 67b2e029743a52670d77864723b4d0d40f7733b5 Mon Sep 17 00:00:00 2001
1434 +From: Alan Stern <stern@×××××××××××××××.edu>
1435 +Date: Wed, 12 Nov 2008 17:04:53 -0500
1436 +Subject: USB: EHCI: fix handling of dead controllers
1437 +
1438 +From: Alan Stern <stern@×××××××××××××××.edu>
1439 +
1440 +commit 67b2e029743a52670d77864723b4d0d40f7733b5 upstream.
1441 +
1442 +This patch (as1165) makes a few small changes in the logic used by
1443 +ehci-hcd when it encounters a controller error:
1444 +
1445 + Instead of printing out the masked status, it prints the
1446 + original status as read directly from the hardware.
1447 +
1448 + It doesn't check for the STS_HALT status bit before taking
1449 + action. The mere fact that the STS_FATAL bit is set means
1450 + that something bad has happened and the controller needs to
1451 + be reset. With the old code this test could never succeed
1452 + because the STS_HALT bit was masked out from the status.
1453 +
1454 +I anticipate that this will prevent the occasional "irq X: nobody cared"
1455 +problem people encounter when their EHCI controllers die.
1456 +
1457 +Signed-off-by: Alan Stern <stern@×××××××××××××××.edu>
1458 +Cc: David Brownell <david-b@×××××××.net>
1459 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1460 +
1461 +---
1462 + drivers/usb/host/ehci-hcd.c | 25 ++++++++++++-------------
1463 + 1 file changed, 12 insertions(+), 13 deletions(-)
1464 +
1465 +--- a/drivers/usb/host/ehci-hcd.c
1466 ++++ b/drivers/usb/host/ehci-hcd.c
1467 +@@ -643,7 +643,7 @@ static int ehci_run (struct usb_hcd *hcd
1468 + static irqreturn_t ehci_irq (struct usb_hcd *hcd)
1469 + {
1470 + struct ehci_hcd *ehci = hcd_to_ehci (hcd);
1471 +- u32 status, pcd_status = 0, cmd;
1472 ++ u32 status, masked_status, pcd_status = 0, cmd;
1473 + int bh;
1474 +
1475 + spin_lock (&ehci->lock);
1476 +@@ -656,14 +656,14 @@ static irqreturn_t ehci_irq (struct usb_
1477 + goto dead;
1478 + }
1479 +
1480 +- status &= INTR_MASK;
1481 +- if (!status) { /* irq sharing? */
1482 ++ masked_status = status & INTR_MASK;
1483 ++ if (!masked_status) { /* irq sharing? */
1484 + spin_unlock(&ehci->lock);
1485 + return IRQ_NONE;
1486 + }
1487 +
1488 + /* clear (just) interrupts */
1489 +- ehci_writel(ehci, status, &ehci->regs->status);
1490 ++ ehci_writel(ehci, masked_status, &ehci->regs->status);
1491 + cmd = ehci_readl(ehci, &ehci->regs->command);
1492 + bh = 0;
1493 +
1494 +@@ -731,19 +731,18 @@ static irqreturn_t ehci_irq (struct usb_
1495 +
1496 + /* PCI errors [4.15.2.4] */
1497 + if (unlikely ((status & STS_FATAL) != 0)) {
1498 ++ ehci_err(ehci, "fatal error\n");
1499 + dbg_cmd (ehci, "fatal", ehci_readl(ehci,
1500 + &ehci->regs->command));
1501 + dbg_status (ehci, "fatal", status);
1502 +- if (status & STS_HALT) {
1503 +- ehci_err (ehci, "fatal error\n");
1504 ++ ehci_halt(ehci);
1505 + dead:
1506 +- ehci_reset (ehci);
1507 +- ehci_writel(ehci, 0, &ehci->regs->configured_flag);
1508 +- /* generic layer kills/unlinks all urbs, then
1509 +- * uses ehci_stop to clean up the rest
1510 +- */
1511 +- bh = 1;
1512 +- }
1513 ++ ehci_reset(ehci);
1514 ++ ehci_writel(ehci, 0, &ehci->regs->configured_flag);
1515 ++ /* generic layer kills/unlinks all urbs, then
1516 ++ * uses ehci_stop to clean up the rest
1517 ++ */
1518 ++ bh = 1;
1519 + }
1520 +
1521 + if (bh)
1522
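For illustration, a small userspace sketch of why the raw status word has to survive the masking step; the bit positions below are made up for the example and do not reflect the real EHCI register layout.

#include <stdio.h>

#define STS_FATAL  (1u << 4)    /* host system error (illustrative value) */
#define STS_HALT   (1u << 12)   /* controller halted; not an interrupt-enable bit */
#define INTR_MASK  0x3fu        /* interrupt-enable bits only (illustrative value) */

int main(void)
{
        unsigned int status = STS_FATAL | STS_HALT;  /* what a dead controller reports */

        /* Old logic: masking first hides STS_HALT, so the reset path was never taken. */
        unsigned int old = status & INTR_MASK;
        printf("old code resets the controller: %s\n",
               (old & STS_FATAL) && (old & STS_HALT) ? "yes" : "no");

        /* New logic: keep the raw status and mask only for the irq-sharing test. */
        unsigned int masked_status = status & INTR_MASK;
        printf("new code claims the interrupt:  %s\n", masked_status ? "yes" : "no");
        printf("new code resets the controller: %s\n",
               (status & STS_FATAL) ? "yes" : "no");
        return 0;
}
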
1523 Added: hardened/2.6/trunk/2.6.25/1419_usb-don-t-register-endpoints-for-interfaces-that-are-going-away.patch
1524 ===================================================================
1525 --- hardened/2.6/trunk/2.6.25/1419_usb-don-t-register-endpoints-for-interfaces-that-are-going-away.patch (rev 0)
1526 +++ hardened/2.6/trunk/2.6.25/1419_usb-don-t-register-endpoints-for-interfaces-that-are-going-away.patch 2008-12-03 00:31:56 UTC (rev 1414)
1527 @@ -0,0 +1,75 @@
1528 +Added-By: Gordon Malm <gengor@g.o>
1529 +
1530 +Note: Backported to kernel 2.6.25. Original message included below.
1531 +
1532 +---
1533 +
1534 +From 352d026338378b1f13f044e33c1047da6e470056 Mon Sep 17 00:00:00 2001
1535 +From: Alan Stern <stern@×××××××××××××××.edu>
1536 +Date: Wed, 29 Oct 2008 15:16:58 -0400
1537 +Subject: USB: don't register endpoints for interfaces that are going away
1538 +
1539 +From: Alan Stern <stern@×××××××××××××××.edu>
1540 +
1541 +commit 352d026338378b1f13f044e33c1047da6e470056 upstream.
1542 +
1543 +This patch (as1155) fixes a bug in usbcore. When interfaces are
1544 +deleted, either because the device was disconnected or because of a
1545 +configuration change, the extra attribute files and child endpoint
1546 +devices may get left behind. This is because the core removes them
1547 +before calling device_del(). But during device_del(), after the
1548 +driver is unbound the core will reinstall altsetting 0 and recreate
1549 +those extra attributes and children.
1550 +
1551 +The patch prevents this by adding a flag to record when the interface
1552 +is in the midst of being unregistered. When the flag is set, the
1553 +attribute files and child devices will not be created.
1554 +
1555 +Signed-off-by: Alan Stern <stern@×××××××××××××××.edu>
1556 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1557 +
1558 +---
1559 + drivers/usb/core/message.c | 1 +
1560 + drivers/usb/core/sysfs.c | 2 +-
1561 + include/linux/usb.h | 2 ++
1562 + 3 files changed, 4 insertions(+), 1 deletion(-)
1563 +
1564 +--- a/drivers/usb/core/message.c
1565 ++++ b/drivers/usb/core/message.c
1566 +@@ -1089,6 +1089,7 @@ void usb_disable_device(struct usb_devic
1567 + continue;
1568 + dev_dbg(&dev->dev, "unregistering interface %s\n",
1569 + interface->dev.bus_id);
1570 ++ interface->unregistering = 1;
1571 + usb_remove_sysfs_intf_files(interface);
1572 + device_del(&interface->dev);
1573 + }
1574 +--- a/drivers/usb/core/sysfs.c
1575 ++++ b/drivers/usb/core/sysfs.c
1576 +@@ -784,7 +784,7 @@ int usb_create_sysfs_intf_files(struct u
1577 + struct usb_host_interface *alt = intf->cur_altsetting;
1578 + int retval;
1579 +
1580 +- if (intf->sysfs_files_created)
1581 ++ if (intf->sysfs_files_created || intf->unregistering)
1582 + return 0;
1583 + retval = sysfs_create_group(&dev->kobj, &intf_attr_grp);
1584 + if (retval)
1585 +--- a/include/linux/usb.h
1586 ++++ b/include/linux/usb.h
1587 +@@ -107,6 +107,7 @@ enum usb_interface_condition {
1588 + * (in probe()), bound to a driver, or unbinding (in disconnect())
1589 + * @is_active: flag set when the interface is bound and not suspended.
1590 + * @sysfs_files_created: sysfs attributes exist
1591 ++ * @unregistering: flag set when the interface is being unregistered
1592 + * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
1593 + * capability during autosuspend.
1594 + * @dev: driver model's view of this device
1595 +@@ -158,6 +159,7 @@ struct usb_interface {
1596 + enum usb_interface_condition condition; /* state of binding */
1597 + unsigned is_active:1; /* the interface is not suspended */
1598 + unsigned sysfs_files_created:1; /* the sysfs attributes exist */
1599 ++ unsigned unregistering:1; /* unregistration is in progress */
1600 + unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
1601 +
1602 + struct device dev; /* interface specific device info */
1603
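A minimal sketch of the guard this flag provides, with the interface reduced to the two bits that matter; the structure and function below are invented stand-ins, only the field names mirror the patch.

#include <stdio.h>

struct intf {
        unsigned sysfs_files_created:1;
        unsigned unregistering:1;
};

static int create_sysfs_files(struct intf *intf)
{
        /* Skip creation when the files already exist and, after the patch, also
         * while the interface is being torn down, so device_del() cannot
         * recreate them behind usbcore's back. */
        if (intf->sysfs_files_created || intf->unregistering)
                return 0;
        intf->sysfs_files_created = 1;
        printf("files created\n");
        return 0;
}

int main(void)
{
        struct intf fresh = { 0 };
        struct intf going_away = { .unregistering = 1 };

        create_sysfs_files(&fresh);        /* prints "files created" */
        create_sysfs_files(&going_away);   /* silently skipped */
        return 0;
}
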
1604 Added: hardened/2.6/trunk/2.6.25/1420_usb-ehci-fix-divide-by-zero-bug.patch
1605 ===================================================================
1606 --- hardened/2.6/trunk/2.6.25/1420_usb-ehci-fix-divide-by-zero-bug.patch (rev 0)
1607 +++ hardened/2.6/trunk/2.6.25/1420_usb-ehci-fix-divide-by-zero-bug.patch 2008-12-03 00:31:56 UTC (rev 1414)
1608 @@ -0,0 +1,46 @@
1609 +Added-By: Gordon Malm <gengor@g.o>
1610 +
1611 +---
1612 +
1613 +From 372dd6e8ed924e876f3beb598721e813ad7fa323 Mon Sep 17 00:00:00 2001
1614 +From: Alan Stern <stern@×××××××××××××××.edu>
1615 +Date: Wed, 12 Nov 2008 17:02:57 -0500
1616 +Subject: USB: EHCI: fix divide-by-zero bug
1617 +
1618 +From: Alan Stern <stern@×××××××××××××××.edu>
1619 +
1620 +commit 372dd6e8ed924e876f3beb598721e813ad7fa323 upstream.
1621 +
1622 +This patch (as1164) fixes a bug in the EHCI scheduler. The interval
1623 +value it uses is already in linear format, not logarithmically coded.
1624 +The existing code can sometimes crash the system by trying to divide
1625 +by zero.
1626 +
1627 +Signed-off-by: Alan Stern <stern@×××××××××××××××.edu>
1628 +Cc: David Brownell <david-b@×××××××.net>
1629 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1630 +
1631 +---
1632 + drivers/usb/host/ehci-sched.c | 4 ++--
1633 + 1 file changed, 2 insertions(+), 2 deletions(-)
1634 +
1635 +--- a/drivers/usb/host/ehci-sched.c
1636 ++++ b/drivers/usb/host/ehci-sched.c
1637 +@@ -918,7 +918,7 @@ iso_stream_init (
1638 + */
1639 + stream->usecs = HS_USECS_ISO (maxp);
1640 + bandwidth = stream->usecs * 8;
1641 +- bandwidth /= 1 << (interval - 1);
1642 ++ bandwidth /= interval;
1643 +
1644 + } else {
1645 + u32 addr;
1646 +@@ -951,7 +951,7 @@ iso_stream_init (
1647 + } else
1648 + stream->raw_mask = smask_out [hs_transfers - 1];
1649 + bandwidth = stream->usecs + stream->c_usecs;
1650 +- bandwidth /= 1 << (interval + 2);
1651 ++ bandwidth /= interval << 3;
1652 +
1653 + /* stream->splits gets created from raw_mask later */
1654 + stream->address = cpu_to_hc32(ehci, addr);
1655
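The arithmetic behind the fix, as a hedged userspace example; the numbers are invented and only show how a linear interval fed to the old log2-style formula goes wrong.

#include <stdio.h>

int main(void)
{
        unsigned int usecs = 125;    /* microseconds per transaction (made up) */
        unsigned int interval = 8;   /* already linear: one transfer every 8 uframes */
        unsigned int bw = usecs * 8;

        /* Old formula assumed a log2-coded interval. */
        printf("old: %u\n", bw / (1u << (interval - 1)));  /* 1000 / 128 = 7, far too low */

        /* New formula divides by the linear value directly. */
        printf("new: %u\n", bw / interval);                /* 1000 / 8 = 125 */

        /* With a large linear interval the old shift overflows the 32-bit divisor,
         * which is how the divide-by-zero the commit message mentions can occur. */
        return 0;
}
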
1656 Added: hardened/2.6/trunk/2.6.25/1421_usb-fix-ps3-usb-shutdown-problems.patch
1657 ===================================================================
1658 --- hardened/2.6/trunk/2.6.25/1421_usb-fix-ps3-usb-shutdown-problems.patch (rev 0)
1659 +++ hardened/2.6/trunk/2.6.25/1421_usb-fix-ps3-usb-shutdown-problems.patch 2008-12-03 00:31:56 UTC (rev 1414)
1660 @@ -0,0 +1,64 @@
1661 +Added-By: Gordon Malm <gengor@g.o>
1662 +
1663 +---
1664 +
1665 +From ddcb01ff9bf49c4dbbb058423559f7bc90b89374 Mon Sep 17 00:00:00 2001
1666 +From: Geoff Levand <geoffrey.levand@×××××××.com>
1667 +Date: Fri, 31 Oct 2008 13:52:54 -0700
1668 +Subject: USB: Fix PS3 USB shutdown problems
1669 +
1670 +From: Geoff Levand <geoffrey.levand@×××××××.com>
1671 +
1672 +commit ddcb01ff9bf49c4dbbb058423559f7bc90b89374 upstream.
1673 +
1674 +Add ehci_shutdown() or ohci_shutdown() calls to the USB
1675 +PS3 bus glue. ehci_shutdown() and ohci_shutdown() do some
1676 +controller specific cleanups not done by usb_remove_hcd().
1677 +
1678 +Fixes errors on shutdown or reboot similar to these:
1679 +
1680 + ps3-ehci-driver sb_07: HC died; cleaning up
1681 + irq 51: nobody cared (try booting with the "irqpoll" option)
1682 +
1683 +Related bugzilla reports:
1684 +
1685 + http://bugzilla.kernel.org/show_bug.cgi?id=11819
1686 + http://bugzilla.terrasoftsolutions.com/show_bug.cgi?id=317
1687 +
1688 +Signed-off-by: Geoff Levand <geoffrey.levand@×××××××.com>
1689 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1690 +
1691 +---
1692 + drivers/usb/host/ehci-ps3.c | 1 +
1693 + drivers/usb/host/ohci-ps3.c | 3 ++-
1694 + 2 files changed, 3 insertions(+), 1 deletion(-)
1695 +
1696 +--- a/drivers/usb/host/ehci-ps3.c
1697 ++++ b/drivers/usb/host/ehci-ps3.c
1698 +@@ -205,6 +205,7 @@ static int ps3_ehci_remove(struct ps3_sy
1699 +
1700 + tmp = hcd->irq;
1701 +
1702 ++ ehci_shutdown(hcd);
1703 + usb_remove_hcd(hcd);
1704 +
1705 + ps3_system_bus_set_driver_data(dev, NULL);
1706 +--- a/drivers/usb/host/ohci-ps3.c
1707 ++++ b/drivers/usb/host/ohci-ps3.c
1708 +@@ -192,7 +192,7 @@ fail_start:
1709 + return result;
1710 + }
1711 +
1712 +-static int ps3_ohci_remove (struct ps3_system_bus_device *dev)
1713 ++static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
1714 + {
1715 + unsigned int tmp;
1716 + struct usb_hcd *hcd =
1717 +@@ -205,6 +205,7 @@ static int ps3_ohci_remove (struct ps3_s
1718 +
1719 + tmp = hcd->irq;
1720 +
1721 ++ ohci_shutdown(hcd);
1722 + usb_remove_hcd(hcd);
1723 +
1724 + ps3_system_bus_set_driver_data(dev, NULL);
1725
1726 Added: hardened/2.6/trunk/2.6.25/1422_v4l-dvb-cve-2008-5033-fix-oops-on-tvaudio-when-controlling-bass-treble.patch
1727 ===================================================================
1728 --- hardened/2.6/trunk/2.6.25/1422_v4l-dvb-cve-2008-5033-fix-oops-on-tvaudio-when-controlling-bass-treble.patch (rev 0)
1729 +++ hardened/2.6/trunk/2.6.25/1422_v4l-dvb-cve-2008-5033-fix-oops-on-tvaudio-when-controlling-bass-treble.patch 2008-12-03 00:31:56 UTC (rev 1414)
1730 @@ -0,0 +1,135 @@
1731 +Added-By: Gordon Malm <gengor@g.o>
1732 +
1733 +---
1734 +
1735 +From 01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9 Mon Sep 17 00:00:00 2001
1736 +From: Mauro Carvalho Chehab <mchehab@××××××.com>
1737 +Date: Fri, 14 Nov 2008 10:46:59 -0300
1738 +Subject: V4L/DVB (9624): CVE-2008-5033: fix OOPS on tvaudio when controlling bass/treble
1739 +
1740 +From: Mauro Carvalho Chehab <mchehab@××××××.com>
1741 +
1742 +commit 01a1a3cc1e3fbe718bd06a2a5d4d1a2d0fb4d7d9 upstream.
1743 +
1744 +This bug was supposed to be fixed by 5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1,
1745 +where a call through a NULL pointer happens.
1746 +
1747 +Not all tvaudio chips allow controlling bass/treble. So, the driver
1748 +has a table with a flag to indicate if the chip does support it.
1749 +
1750 +Unfortunately, the handling of this logic has been broken for a very long
1751 +time (probably since the first module version). Due to that, an OOPS
1752 +was generated for devices that don't support bass/treble.
1753 +
1754 +This was the resulting OOPS message before the patch, with debug messages
1755 +enabled:
1756 +
1757 +tvaudio' 1-005b: VIDIOC_S_CTRL
1758 +BUG: unable to handle kernel NULL pointer dereference at 00000000
1759 +IP: [<00000000>]
1760 +*pde = 22fda067 *pte = 00000000
1761 +Oops: 0000 [#1] SMP
1762 +Modules linked in: snd_hda_intel snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device
1763 +snd_pcm_oss snd_mixer_oss snd_pcm snd_timer snd_hwdep snd soundcore tuner_simple tuner_types tea5767 tuner
1764 +tvaudio bttv bridge bnep rfcomm l2cap bluetooth it87 hwmon_vid hwmon fuse sunrpc ipt_REJECT
1765 +nf_conntrack_ipv4 iptable_filter ip_tables ip6t_REJECT xt_tcpudp nf_conntrack_ipv6 xt_state nf_conntrack
1766 +ip6table_filter ip6_tables x_tables ipv6 dm_mirror dm_multipath dm_mod configfs videodev v4l1_compat
1767 +ir_common 8139cp compat_ioctl32 v4l2_common 8139too videobuf_dma_sg videobuf_core mii btcx_risc tveeprom
1768 +i915 button snd_page_alloc serio_raw drm pcspkr i2c_algo_bit i2c_i801 i2c_core iTCO_wdt
1769 +iTCO_vendor_support sr_mod cdrom sg ata_generic pata_acpi ata_piix libata sd_mod scsi_mod ext3 jbd mbcache
1770 +uhci_hcd ohci_hcd ehci_hcd [last unloaded: soundcore]
1771 +
1772 +Pid: 15413, comm: qv4l2 Not tainted (2.6.25.14-108.fc9.i686 #1)
1773 +EIP: 0060:[<00000000>] EFLAGS: 00210246 CPU: 0
1774 +EIP is at 0x0
1775 +EAX: 00008000 EBX: ebd21600 ECX: e2fd9ec4 EDX: 00200046
1776 +ESI: f8c0f0c4 EDI: f8c0f0c4 EBP: e2fd9d50 ESP: e2fd9d2c
1777 + DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
1778 +Process qv4l2 (pid: 15413, ti=e2fd9000 task=ebe44000 task.ti=e2fd9000)
1779 +Stack: f8c0c6ae e2ff2a00 00000d00 e2fd9ec4 ebc4e000 e2fd9d5c f8c0c448 00000000
1780 + f899c12a e2fd9d5c f899c154 e2fd9d68 e2fd9d80 c0560185 e2fd9d88 f8f3e1d8
1781 + f8f3e1dc ebc4e034 f8f3e18c e2fd9ec4 00000000 e2fd9d90 f899c286 c008561c
1782 +Call Trace:
1783 + [<f8c0c6ae>] ? chip_command+0x266/0x4b6 [tvaudio]
1784 + [<f8c0c448>] ? chip_command+0x0/0x4b6 [tvaudio]
1785 + [<f899c12a>] ? i2c_cmd+0x0/0x2f [i2c_core]
1786 + [<f899c154>] ? i2c_cmd+0x2a/0x2f [i2c_core]
1787 + [<c0560185>] ? device_for_each_child+0x21/0x49
1788 + [<f899c286>] ? i2c_clients_command+0x1c/0x1e [i2c_core]
1789 + [<f8f283d8>] ? bttv_call_i2c_clients+0x14/0x16 [bttv]
1790 + [<f8f23601>] ? bttv_s_ctrl+0x1bc/0x313 [bttv]
1791 + [<f8f23445>] ? bttv_s_ctrl+0x0/0x313 [bttv]
1792 + [<f8b6096d>] ? __video_do_ioctl+0x1f84/0x3726 [videodev]
1793 + [<c05abb4e>] ? sock_aio_write+0x100/0x10d
1794 + [<c041b23e>] ? kmap_atomic_prot+0x1dd/0x1df
1795 + [<c043a0c9>] ? enqueue_hrtimer+0xc2/0xcd
1796 + [<c04f4fa4>] ? copy_from_user+0x39/0x121
1797 + [<f8b622b9>] ? __video_ioctl2+0x1aa/0x24a [videodev]
1798 + [<c04054fd>] ? do_notify_resume+0x768/0x795
1799 + [<c043c0f7>] ? getnstimeofday+0x34/0xd1
1800 + [<c0437b77>] ? autoremove_wake_function+0x0/0x33
1801 + [<f8b62368>] ? video_ioctl2+0xf/0x13 [videodev]
1802 + [<c048c6f0>] ? vfs_ioctl+0x50/0x69
1803 + [<c048c942>] ? do_vfs_ioctl+0x239/0x24c
1804 + [<c048c995>] ? sys_ioctl+0x40/0x5b
1805 + [<c0405bf2>] ? syscall_call+0x7/0xb
1806 + [<c0620000>] ? cpuid4_cache_sysfs_exit+0x3d/0x69
1807 + =======================
1808 +Code: Bad EIP value.
1809 +EIP: [<00000000>] 0x0 SS:ESP 0068:e2fd9d2c
1810 +
1811 +Signed-off-by: Mauro Carvalho Chehab <mchehab@××××××.com>
1812 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1813 +
1814 +---
1815 + drivers/media/video/tvaudio.c | 15 +++++++--------
1816 + 1 file changed, 7 insertions(+), 8 deletions(-)
1817 +
1818 +--- a/drivers/media/video/tvaudio.c
1819 ++++ b/drivers/media/video/tvaudio.c
1820 +@@ -1576,13 +1576,13 @@ static int tvaudio_get_ctrl(struct CHIPS
1821 + return 0;
1822 + }
1823 + case V4L2_CID_AUDIO_BASS:
1824 +- if (desc->flags & CHIP_HAS_BASSTREBLE)
1825 ++ if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1826 + break;
1827 + ctrl->value = chip->bass;
1828 + return 0;
1829 + case V4L2_CID_AUDIO_TREBLE:
1830 +- if (desc->flags & CHIP_HAS_BASSTREBLE)
1831 +- return -EINVAL;
1832 ++ if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1833 ++ break;
1834 + ctrl->value = chip->treble;
1835 + return 0;
1836 + }
1837 +@@ -1642,16 +1642,15 @@ static int tvaudio_set_ctrl(struct CHIPS
1838 + return 0;
1839 + }
1840 + case V4L2_CID_AUDIO_BASS:
1841 +- if (desc->flags & CHIP_HAS_BASSTREBLE)
1842 ++ if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1843 + break;
1844 + chip->bass = ctrl->value;
1845 + chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass));
1846 +
1847 + return 0;
1848 + case V4L2_CID_AUDIO_TREBLE:
1849 +- if (desc->flags & CHIP_HAS_BASSTREBLE)
1850 +- return -EINVAL;
1851 +-
1852 ++ if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1853 ++ break;
1854 + chip->treble = ctrl->value;
1855 + chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble));
1856 +
1857 +@@ -1695,7 +1694,7 @@ static int chip_command(struct i2c_clien
1858 + break;
1859 + case V4L2_CID_AUDIO_BASS:
1860 + case V4L2_CID_AUDIO_TREBLE:
1861 +- if (desc->flags & CHIP_HAS_BASSTREBLE)
1862 ++ if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1863 + return -EINVAL;
1864 + break;
1865 + default:
1866
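A short sketch of the corrected capability test; CHIP_HAS_BASSTREBLE is the flag name from the patch, but the function and flag value here are invented for the example.

#include <stdio.h>

#define CHIP_HAS_BASSTREBLE (1 << 0)   /* illustrative value */

static int set_bass(unsigned int flags, int value)
{
        /* Only chips that advertise the capability reach the register write;
         * the inverted test meant incapable chips ended up calling a NULL
         * bassfunc(), which is the OOPS shown above. */
        if (!(flags & CHIP_HAS_BASSTREBLE))
                return -1;   /* stands in for break / -EINVAL in the driver */
        printf("writing bass register: %d\n", value);
        return 0;
}

int main(void)
{
        set_bass(0, 5);                    /* chip without bass/treble: skipped */
        set_bass(CHIP_HAS_BASSTREBLE, 5);  /* capable chip: register written */
        return 0;
}
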
1867 Added: hardened/2.6/trunk/2.6.25/1505_hfs-fix-namelength-memory-corruption.patch
1868 ===================================================================
1869 --- hardened/2.6/trunk/2.6.25/1505_hfs-fix-namelength-memory-corruption.patch (rev 0)
1870 +++ hardened/2.6/trunk/2.6.25/1505_hfs-fix-namelength-memory-corruption.patch 2008-12-03 00:31:56 UTC (rev 1414)
1871 @@ -0,0 +1,41 @@
1872 +Added-By: Gordon Malm <gengor@g.o>
1873 +
1874 +---
1875 +
1876 +From d38b7aa7fc3371b52d036748028db50b585ade2e Mon Sep 17 00:00:00 2001
1877 +From: Eric Sesterhenn <snakebyte@×××.de>
1878 +Date: Wed, 15 Oct 2008 22:04:11 -0700
1879 +Subject: hfs: fix namelength memory corruption (CVE-2008-5025)
1880 +
1881 +From: Eric Sesterhenn <snakebyte@×××.de>
1882 +
1883 +commit d38b7aa7fc3371b52d036748028db50b585ade2e upstream
1884 +
1885 +Fix a stack corruption caused by a corrupted hfs filesystem. If the
1886 +catalog name length is corrupted the memcpy overwrites the catalog btree
1887 +structure. Since the field is limited to HFS_NAMELEN bytes in the
1888 +structure and the file format, we throw an error if it is too long.
1889 +
1890 +Cc: Roman Zippel <zippel@××××××××××.org>
1891 +Signed-off-by: Eric Sesterhenn <snakebyte@×××.de>
1892 +Signed-off-by: Andrew Morton <akpm@××××××××××××××××.org>
1893 +Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
1894 +Signed-off-by: Greg Kroah-Hartman <gregkh@××××.de>
1895 +
1896 +---
1897 + fs/hfs/catalog.c | 4 ++++
1898 + 1 file changed, 4 insertions(+)
1899 +
1900 +--- a/fs/hfs/catalog.c
1901 ++++ b/fs/hfs/catalog.c
1902 +@@ -190,6 +190,10 @@ int hfs_cat_find_brec(struct super_block
1903 +
1904 + fd->search_key->cat.ParID = rec.thread.ParID;
1905 + len = fd->search_key->cat.CName.len = rec.thread.CName.len;
1906 ++ if (len > HFS_NAMELEN) {
1907 ++ printk(KERN_ERR "hfs: bad catalog namelength\n");
1908 ++ return -EIO;
1909 ++ }
1910 + memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
1911 + return hfs_brec_find(fd);
1912 + }
1913
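The pattern the fix applies is a plain bounds check on an untrusted on-disk length before the memcpy; below is a self-contained sketch with the catalog structures reduced to stand-ins (only HFS_NAMELEN and the -EIO return mirror the patch).

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define HFS_NAMELEN 31

struct cat_key {
        unsigned char len;
        unsigned char name[HFS_NAMELEN];
};

static int copy_name(struct cat_key *key, const unsigned char *src, unsigned int len)
{
        /* len comes from the on-disk record, so a crafted filesystem controls it;
         * reject it before it can overrun key->name. */
        if (len > HFS_NAMELEN)
                return -EIO;
        key->len = len;
        memcpy(key->name, src, len);
        return 0;
}

int main(void)
{
        struct cat_key key;
        unsigned char bogus[200] = "corrupted-record";

        printf("oversized: %d\n", copy_name(&key, bogus, 200));  /* rejected, -EIO */
        printf("sane:      %d\n", copy_name(&key, bogus, 16));   /* copied, 0 */
        return 0;
}
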
1914 Added: hardened/2.6/trunk/2.6.25/1506_inotify-fix-watch-removal-or-umount-races.patch
1915 ===================================================================
1916 --- hardened/2.6/trunk/2.6.25/1506_inotify-fix-watch-removal-or-umount-races.patch (rev 0)
1917 +++ hardened/2.6/trunk/2.6.25/1506_inotify-fix-watch-removal-or-umount-races.patch 2008-12-03 00:31:56 UTC (rev 1414)
1918 @@ -0,0 +1,565 @@
1919 +Added-By: Gordon Malm <gengor@g.o>
1920 +
1921 +Note: Modified slightly to eliminate patch fuzz.
1922 +
1923 +---
1924 +
1925 +From: Al Viro <viro@×××××××××××××××.uk>
1926 +Date: Sat, 15 Nov 2008 01:15:43 +0000 (+0000)
1927 +Subject: Fix inotify watch removal/umount races
1928 +X-Git-Tag: v2.6.28-rc5~1
1929 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=8f7b0ba1c853919b85b54774775f567f30006107
1930 +
1931 +Fix inotify watch removal/umount races
1932 +
1933 +Inotify watch removals suck violently.
1934 +
1935 +To kick the watch out we need (in this order) inode->inotify_mutex and
1936 +ih->mutex. That's fine if we have a hold on inode; however, for all
1937 +other cases we need to make damn sure we don't race with umount. We can
1938 +*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
1939 +happily sail past it and we'll end with reference to inode potentially
1940 +outliving its superblock.
1941 +
1942 +Ideally we just want to grab an active reference to superblock if we
1943 +can; that will make sure we won't go into inotify_umount_inodes() until
1944 +we are done. Cleanup is just deactivate_super().
1945 +
1946 +However, that leaves a messy case - what if we *are* racing with
1947 +umount() and active references to superblock can't be acquired anymore?
1948 +We can bump ->s_count, grab ->s_umount, which will almost certainly wait
1949 +until the superblock is shut down and the watch in question is pining
1950 +for fjords. That's fine, but there is a problem - we might have hit the
1951 +window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
1952 +the moment when superblock is past the point of no return and is heading
1953 +for shutdown) and the moment when deactivate_super() acquires
1954 +->s_umount.
1955 +
1956 +We could just do drop_super() yield() and retry, but that's rather
1957 +antisocial and this stuff is luser-triggerable. OTOH, having grabbed
1958 +->s_umount and having found that we'd got there first (i.e. that
1959 +->s_root is non-NULL) we know that we won't race with
1960 +inotify_umount_inodes().
1961 +
1962 +So we could grab a reference to watch and do the rest as above, just
1963 +with drop_super() instead of deactivate_super(), right? Wrong. We had
1964 +to drop ih->mutex before we could grab ->s_umount. So the watch
1965 +could've been gone already.
1966 +
1967 +That still can be dealt with - we need to save watch->wd, do idr_find()
1968 +and compare its result with our pointer. If they match, we either have
1969 +the damn thing still alive or we'd lost not one but two races at once,
1970 +the watch had been killed and a new one got created with the same ->wd
1971 +at the same address. That couldn't have happened in inotify_destroy(),
1972 +but inotify_rm_wd() could run into that. Still, "new one got created"
1973 +is not a problem - we have every right to kill it or leave it alone,
1974 +whatever's more convenient.
1975 +
1976 +So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
1977 +"grab it and kill it" check. If it's been our original watch, we are
1978 +fine, if it's a newcomer - nevermind, just pretend that we'd won the
1979 +race and kill the fscker anyway; we are safe since we know that its
1980 +superblock won't be going away.
1981 +
1982 +And yes, this is far beyond mere "not very pretty"; so's the entire
1983 +concept of inotify to start with.
1984 +
1985 +Signed-off-by: Al Viro <viro@×××××××××××××××.uk>
1986 +Acked-by: Greg KH <greg@×××××.com>
1987 +Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
1988 +---
1989 +
1990 +--- a/fs/inotify.c
1991 ++++ b/fs/inotify.c
1992 +@@ -106,6 +106,20 @@ void get_inotify_watch(struct inotify_wa
1993 + }
1994 + EXPORT_SYMBOL_GPL(get_inotify_watch);
1995 +
1996 ++int pin_inotify_watch(struct inotify_watch *watch)
1997 ++{
1998 ++ struct super_block *sb = watch->inode->i_sb;
1999 ++ spin_lock(&sb_lock);
2000 ++ if (sb->s_count >= S_BIAS) {
2001 ++ atomic_inc(&sb->s_active);
2002 ++ spin_unlock(&sb_lock);
2003 ++ atomic_inc(&watch->count);
2004 ++ return 1;
2005 ++ }
2006 ++ spin_unlock(&sb_lock);
2007 ++ return 0;
2008 ++}
2009 ++
2010 + /**
2011 + * put_inotify_watch - decrements the ref count on a given watch. cleans up
2012 + * watch references if the count reaches zero. inotify_watch is freed by
2013 +@@ -124,6 +138,13 @@ void put_inotify_watch(struct inotify_wa
2014 + }
2015 + EXPORT_SYMBOL_GPL(put_inotify_watch);
2016 +
2017 ++void unpin_inotify_watch(struct inotify_watch *watch)
2018 ++{
2019 ++ struct super_block *sb = watch->inode->i_sb;
2020 ++ put_inotify_watch(watch);
2021 ++ deactivate_super(sb);
2022 ++}
2023 ++
2024 + /*
2025 + * inotify_handle_get_wd - returns the next WD for use by the given handle
2026 + *
2027 +@@ -479,6 +500,112 @@ void inotify_init_watch(struct inotify_w
2028 + }
2029 + EXPORT_SYMBOL_GPL(inotify_init_watch);
2030 +
2031 ++/*
2032 ++ * Watch removals suck violently. To kick the watch out we need (in this
2033 ++ * order) inode->inotify_mutex and ih->mutex. That's fine if we have
2034 ++ * a hold on inode; however, for all other cases we need to make damn sure
2035 ++ * we don't race with umount. We can *NOT* just grab a reference to a
2036 ++ * watch - inotify_unmount_inodes() will happily sail past it and we'll end
2037 ++ * with reference to inode potentially outliving its superblock. Ideally
2038 ++ * we just want to grab an active reference to superblock if we can; that
2039 ++ * will make sure we won't go into inotify_umount_inodes() until we are
2040 ++ * done. Cleanup is just deactivate_super(). However, that leaves a messy
2041 ++ * case - what if we *are* racing with umount() and active references to
2042 ++ * superblock can't be acquired anymore? We can bump ->s_count, grab
2043 ++ * ->s_umount, which will almost certainly wait until the superblock is shut
2044 ++ * down and the watch in question is pining for fjords. That's fine, but
2045 ++ * there is a problem - we might have hit the window between ->s_active
2046 ++ * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
2047 ++ * is past the point of no return and is heading for shutdown) and the
2048 ++ * moment when deactivate_super() acquires ->s_umount. We could just do
2049 ++ * drop_super() yield() and retry, but that's rather antisocial and this
2050 ++ * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
2051 ++ * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
2052 ++ * that we won't race with inotify_umount_inodes(). So we could grab a
2053 ++ * reference to watch and do the rest as above, just with drop_super() instead
2054 ++ * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
2055 ++ * could grab ->s_umount. So the watch could've been gone already.
2056 ++ *
2057 ++ * That still can be dealt with - we need to save watch->wd, do idr_find()
2058 ++ * and compare its result with our pointer. If they match, we either have
2059 ++ * the damn thing still alive or we'd lost not one but two races at once,
2060 ++ * the watch had been killed and a new one got created with the same ->wd
2061 ++ * at the same address. That couldn't have happened in inotify_destroy(),
2062 ++ * but inotify_rm_wd() could run into that. Still, "new one got created"
2063 ++ * is not a problem - we have every right to kill it or leave it alone,
2064 ++ * whatever's more convenient.
2065 ++ *
2066 ++ * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
2067 ++ * "grab it and kill it" check. If it's been our original watch, we are
2068 ++ * fine, if it's a newcomer - nevermind, just pretend that we'd won the
2069 ++ * race and kill the fscker anyway; we are safe since we know that its
2070 ++ * superblock won't be going away.
2071 ++ *
2072 ++ * And yes, this is far beyond mere "not very pretty"; so's the entire
2073 ++ * concept of inotify to start with.
2074 ++ */
2075 ++
2076 ++/**
2077 ++ * pin_to_kill - pin the watch down for removal
2078 ++ * @ih: inotify handle
2079 ++ * @watch: watch to kill
2080 ++ *
2081 ++ * Called with ih->mutex held, drops it. Possible return values:
2082 ++ * 0 - nothing to do, it has died
2083 ++ * 1 - remove it, drop the reference and deactivate_super()
2084 ++ * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
2085 ++ * that variant, since it involved a lot of PITA, but that's the best that
2086 ++ * could've been done.
2087 ++ */
2088 ++static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
2089 ++{
2090 ++ struct super_block *sb = watch->inode->i_sb;
2091 ++ s32 wd = watch->wd;
2092 ++
2093 ++ spin_lock(&sb_lock);
2094 ++ if (sb->s_count >= S_BIAS) {
2095 ++ atomic_inc(&sb->s_active);
2096 ++ spin_unlock(&sb_lock);
2097 ++ get_inotify_watch(watch);
2098 ++ mutex_unlock(&ih->mutex);
2099 ++ return 1; /* the best outcome */
2100 ++ }
2101 ++ sb->s_count++;
2102 ++ spin_unlock(&sb_lock);
2103 ++ mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
2104 ++ down_read(&sb->s_umount);
2105 ++ if (likely(!sb->s_root)) {
2106 ++ /* fs is already shut down; the watch is dead */
2107 ++ drop_super(sb);
2108 ++ return 0;
2109 ++ }
2110 ++ /* raced with the final deactivate_super() */
2111 ++ mutex_lock(&ih->mutex);
2112 ++ if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
2113 ++ /* the watch is dead */
2114 ++ mutex_unlock(&ih->mutex);
2115 ++ drop_super(sb);
2116 ++ return 0;
2117 ++ }
2118 ++ /* still alive or freed and reused with the same sb and wd; kill */
2119 ++ get_inotify_watch(watch);
2120 ++ mutex_unlock(&ih->mutex);
2121 ++ return 2;
2122 ++}
2123 ++
2124 ++static void unpin_and_kill(struct inotify_watch *watch, int how)
2125 ++{
2126 ++ struct super_block *sb = watch->inode->i_sb;
2127 ++ put_inotify_watch(watch);
2128 ++ switch (how) {
2129 ++ case 1:
2130 ++ deactivate_super(sb);
2131 ++ break;
2132 ++ case 2:
2133 ++ drop_super(sb);
2134 ++ }
2135 ++}
2136 ++
2137 + /**
2138 + * inotify_destroy - clean up and destroy an inotify instance
2139 + * @ih: inotify handle
2140 +@@ -490,11 +617,15 @@ void inotify_destroy(struct inotify_hand
2141 + * pretty. We cannot do a simple iteration over the list, because we
2142 + * do not know the inode until we iterate to the watch. But we need to
2143 + * hold inode->inotify_mutex before ih->mutex. The following works.
2144 ++ *
2145 ++ * AV: it had to become even uglier to start working ;-/
2146 + */
2147 + while (1) {
2148 + struct inotify_watch *watch;
2149 + struct list_head *watches;
2150 ++ struct super_block *sb;
2151 + struct inode *inode;
2152 ++ int how;
2153 +
2154 + mutex_lock(&ih->mutex);
2155 + watches = &ih->watches;
2156 +@@ -503,8 +634,10 @@ void inotify_destroy(struct inotify_hand
2157 + break;
2158 + }
2159 + watch = list_first_entry(watches, struct inotify_watch, h_list);
2160 +- get_inotify_watch(watch);
2161 +- mutex_unlock(&ih->mutex);
2162 ++ sb = watch->inode->i_sb;
2163 ++ how = pin_to_kill(ih, watch);
2164 ++ if (!how)
2165 ++ continue;
2166 +
2167 + inode = watch->inode;
2168 + mutex_lock(&inode->inotify_mutex);
2169 +@@ -518,7 +651,7 @@ void inotify_destroy(struct inotify_hand
2170 +
2171 + mutex_unlock(&ih->mutex);
2172 + mutex_unlock(&inode->inotify_mutex);
2173 +- put_inotify_watch(watch);
2174 ++ unpin_and_kill(watch, how);
2175 + }
2176 +
2177 + /* free this handle: the put matching the get in inotify_init() */
2178 +@@ -719,7 +852,9 @@ void inotify_evict_watch(struct inotify_
2179 + int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
2180 + {
2181 + struct inotify_watch *watch;
2182 ++ struct super_block *sb;
2183 + struct inode *inode;
2184 ++ int how;
2185 +
2186 + mutex_lock(&ih->mutex);
2187 + watch = idr_find(&ih->idr, wd);
2188 +@@ -727,9 +862,12 @@ int inotify_rm_wd(struct inotify_handle
2189 + mutex_unlock(&ih->mutex);
2190 + return -EINVAL;
2191 + }
2192 +- get_inotify_watch(watch);
2193 ++ sb = watch->inode->i_sb;
2194 ++ how = pin_to_kill(ih, watch);
2195 ++ if (!how)
2196 ++ return 0;
2197 ++
2198 + inode = watch->inode;
2199 +- mutex_unlock(&ih->mutex);
2200 +
2201 + mutex_lock(&inode->inotify_mutex);
2202 + mutex_lock(&ih->mutex);
2203 +@@ -740,7 +878,7 @@ int inotify_rm_wd(struct inotify_handle
2204 +
2205 + mutex_unlock(&ih->mutex);
2206 + mutex_unlock(&inode->inotify_mutex);
2207 +- put_inotify_watch(watch);
2208 ++ unpin_and_kill(watch, how);
2209 +
2210 + return 0;
2211 + }
2212 +--- a/include/linux/inotify.h
2213 ++++ b/include/linux/inotify.h
2214 +@@ -128,6 +128,8 @@ extern void inotify_remove_watch_locked(
2215 + struct inotify_watch *);
2216 + extern void get_inotify_watch(struct inotify_watch *);
2217 + extern void put_inotify_watch(struct inotify_watch *);
2218 ++extern int pin_inotify_watch(struct inotify_watch *);
2219 ++extern void unpin_inotify_watch(struct inotify_watch *);
2220 +
2221 + #else
2222 +
2223 +@@ -222,6 +224,15 @@ static inline void put_inotify_watch(str
2224 + {
2225 + }
2226 +
2227 ++extern inline int pin_inotify_watch(struct inotify_watch *watch)
2228 ++{
2229 ++ return 0;
2230 ++}
2231 ++
2232 ++extern inline void unpin_inotify_watch(struct inotify_watch *watch)
2233 ++{
2234 ++}
2235 ++
2236 + #endif /* CONFIG_INOTIFY */
2237 +
2238 + #endif /* __KERNEL __ */
2239 +--- a/kernel/audit_tree.c
2240 ++++ b/kernel/audit_tree.c
2241 +@@ -24,6 +24,7 @@ struct audit_chunk {
2242 + struct list_head trees; /* with root here */
2243 + int dead;
2244 + int count;
2245 ++ atomic_long_t refs;
2246 + struct rcu_head head;
2247 + struct node {
2248 + struct list_head list;
2249 +@@ -56,7 +57,8 @@ static LIST_HEAD(prune_list);
2250 + * tree is refcounted; one reference for "some rules on rules_list refer to
2251 + * it", one for each chunk with pointer to it.
2252 + *
2253 +- * chunk is refcounted by embedded inotify_watch.
2254 ++ * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
2255 ++ * of watch contributes 1 to .refs).
2256 + *
2257 + * node.index allows to get from node.list to containing chunk.
2258 + * MSB of that sucker is stolen to mark taggings that we might have to
2259 +@@ -121,6 +123,7 @@ static struct audit_chunk *alloc_chunk(i
2260 + INIT_LIST_HEAD(&chunk->hash);
2261 + INIT_LIST_HEAD(&chunk->trees);
2262 + chunk->count = count;
2263 ++ atomic_long_set(&chunk->refs, 1);
2264 + for (i = 0; i < count; i++) {
2265 + INIT_LIST_HEAD(&chunk->owners[i].list);
2266 + chunk->owners[i].index = i;
2267 +@@ -129,9 +132,8 @@ static struct audit_chunk *alloc_chunk(i
2268 + return chunk;
2269 + }
2270 +
2271 +-static void __free_chunk(struct rcu_head *rcu)
2272 ++static void free_chunk(struct audit_chunk *chunk)
2273 + {
2274 +- struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
2275 + int i;
2276 +
2277 + for (i = 0; i < chunk->count; i++) {
2278 +@@ -141,14 +143,16 @@ static void __free_chunk(struct rcu_head
2279 + kfree(chunk);
2280 + }
2281 +
2282 +-static inline void free_chunk(struct audit_chunk *chunk)
2283 ++void audit_put_chunk(struct audit_chunk *chunk)
2284 + {
2285 +- call_rcu(&chunk->head, __free_chunk);
2286 ++ if (atomic_long_dec_and_test(&chunk->refs))
2287 ++ free_chunk(chunk);
2288 + }
2289 +
2290 +-void audit_put_chunk(struct audit_chunk *chunk)
2291 ++static void __put_chunk(struct rcu_head *rcu)
2292 + {
2293 +- put_inotify_watch(&chunk->watch);
2294 ++ struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
2295 ++ audit_put_chunk(chunk);
2296 + }
2297 +
2298 + enum {HASH_SIZE = 128};
2299 +@@ -177,7 +181,7 @@ struct audit_chunk *audit_tree_lookup(co
2300 + list_for_each_rcu(pos, list) {
2301 + struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
2302 + if (p->watch.inode == inode) {
2303 +- get_inotify_watch(&p->watch);
2304 ++ atomic_long_inc(&p->refs);
2305 + return p;
2306 + }
2307 + }
2308 +@@ -195,17 +199,49 @@ int audit_tree_match(struct audit_chunk
2309 +
2310 + /* tagging and untagging inodes with trees */
2311 +
2312 +-static void untag_chunk(struct audit_chunk *chunk, struct node *p)
2313 ++static struct audit_chunk *find_chunk(struct node *p)
2314 ++{
2315 ++ int index = p->index & ~(1U<<31);
2316 ++ p -= index;
2317 ++ return container_of(p, struct audit_chunk, owners[0]);
2318 ++}
2319 ++
2320 ++static void untag_chunk(struct node *p)
2321 + {
2322 ++ struct audit_chunk *chunk = find_chunk(p);
2323 + struct audit_chunk *new;
2324 + struct audit_tree *owner;
2325 + int size = chunk->count - 1;
2326 + int i, j;
2327 +
2328 ++ if (!pin_inotify_watch(&chunk->watch)) {
2329 ++ /*
2330 ++ * Filesystem is shutting down; all watches are getting
2331 ++ * evicted, just take it off the node list for this
2332 ++ * tree and let the eviction logics take care of the
2333 ++ * rest.
2334 ++ */
2335 ++ owner = p->owner;
2336 ++ if (owner->root == chunk) {
2337 ++ list_del_init(&owner->same_root);
2338 ++ owner->root = NULL;
2339 ++ }
2340 ++ list_del_init(&p->list);
2341 ++ p->owner = NULL;
2342 ++ put_tree(owner);
2343 ++ return;
2344 ++ }
2345 ++
2346 ++ spin_unlock(&hash_lock);
2347 ++
2348 ++ /*
2349 ++ * pin_inotify_watch() succeeded, so the watch won't go away
2350 ++ * from under us.
2351 ++ */
2352 + mutex_lock(&chunk->watch.inode->inotify_mutex);
2353 + if (chunk->dead) {
2354 + mutex_unlock(&chunk->watch.inode->inotify_mutex);
2355 +- return;
2356 ++ goto out;
2357 + }
2358 +
2359 + owner = p->owner;
2360 +@@ -222,7 +258,7 @@ static void untag_chunk(struct audit_chu
2361 + inotify_evict_watch(&chunk->watch);
2362 + mutex_unlock(&chunk->watch.inode->inotify_mutex);
2363 + put_inotify_watch(&chunk->watch);
2364 +- return;
2365 ++ goto out;
2366 + }
2367 +
2368 + new = alloc_chunk(size);
2369 +@@ -264,7 +300,7 @@ static void untag_chunk(struct audit_chu
2370 + inotify_evict_watch(&chunk->watch);
2371 + mutex_unlock(&chunk->watch.inode->inotify_mutex);
2372 + put_inotify_watch(&chunk->watch);
2373 +- return;
2374 ++ goto out;
2375 +
2376 + Fallback:
2377 + // do the best we can
2378 +@@ -278,6 +314,9 @@ Fallback:
2379 + put_tree(owner);
2380 + spin_unlock(&hash_lock);
2381 + mutex_unlock(&chunk->watch.inode->inotify_mutex);
2382 ++out:
2383 ++ unpin_inotify_watch(&chunk->watch);
2384 ++ spin_lock(&hash_lock);
2385 + }
2386 +
2387 + static int create_chunk(struct inode *inode, struct audit_tree *tree)
2388 +@@ -388,13 +427,6 @@ static int tag_chunk(struct inode *inode
2389 + return 0;
2390 + }
2391 +
2392 +-static struct audit_chunk *find_chunk(struct node *p)
2393 +-{
2394 +- int index = p->index & ~(1U<<31);
2395 +- p -= index;
2396 +- return container_of(p, struct audit_chunk, owners[0]);
2397 +-}
2398 +-
2399 + static void kill_rules(struct audit_tree *tree)
2400 + {
2401 + struct audit_krule *rule, *next;
2402 +@@ -432,17 +464,10 @@ static void prune_one(struct audit_tree
2403 + spin_lock(&hash_lock);
2404 + while (!list_empty(&victim->chunks)) {
2405 + struct node *p;
2406 +- struct audit_chunk *chunk;
2407 +
2408 + p = list_entry(victim->chunks.next, struct node, list);
2409 +- chunk = find_chunk(p);
2410 +- get_inotify_watch(&chunk->watch);
2411 +- spin_unlock(&hash_lock);
2412 +-
2413 +- untag_chunk(chunk, p);
2414 +
2415 +- put_inotify_watch(&chunk->watch);
2416 +- spin_lock(&hash_lock);
2417 ++ untag_chunk(p);
2418 + }
2419 + spin_unlock(&hash_lock);
2420 + put_tree(victim);
2421 +@@ -470,7 +495,6 @@ static void trim_marked(struct audit_tre
2422 +
2423 + while (!list_empty(&tree->chunks)) {
2424 + struct node *node;
2425 +- struct audit_chunk *chunk;
2426 +
2427 + node = list_entry(tree->chunks.next, struct node, list);
2428 +
2429 +@@ -478,14 +502,7 @@ static void trim_marked(struct audit_tre
2430 + if (!(node->index & (1U<<31)))
2431 + break;
2432 +
2433 +- chunk = find_chunk(node);
2434 +- get_inotify_watch(&chunk->watch);
2435 +- spin_unlock(&hash_lock);
2436 +-
2437 +- untag_chunk(chunk, node);
2438 +-
2439 +- put_inotify_watch(&chunk->watch);
2440 +- spin_lock(&hash_lock);
2441 ++ untag_chunk(node);
2442 + }
2443 + if (!tree->root && !tree->goner) {
2444 + tree->goner = 1;
2445 +@@ -879,7 +896,7 @@ static void handle_event(struct inotify_
2446 + static void destroy_watch(struct inotify_watch *watch)
2447 + {
2448 + struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
2449 +- free_chunk(chunk);
2450 ++ call_rcu(&chunk->head, __put_chunk);
2451 + }
2452 +
2453 + static const struct inotify_operations rtree_inotify_ops = {
2454 +--- a/kernel/auditfilter.c
2455 ++++ b/kernel/auditfilter.c
2456 +@@ -1085,8 +1085,8 @@ static void audit_inotify_unregister(str
2457 + list_for_each_entry_safe(p, n, in_list, ilist) {
2458 + list_del(&p->ilist);
2459 + inotify_rm_watch(audit_ih, &p->wdata);
2460 +- /* the put matching the get in audit_do_del_rule() */
2461 +- put_inotify_watch(&p->wdata);
2462 ++ /* the unpin matching the pin in audit_do_del_rule() */
2463 ++ unpin_inotify_watch(&p->wdata);
2464 + }
2465 + }
2466 +
2467 +@@ -1380,9 +1380,13 @@ static inline int audit_del_rule(struct
2468 + /* Put parent on the inotify un-registration
2469 + * list. Grab a reference before releasing
2470 + * audit_filter_mutex, to be released in
2471 +- * audit_inotify_unregister(). */
2472 +- list_add(&parent->ilist, &inotify_list);
2473 +- get_inotify_watch(&parent->wdata);
2474 ++ * audit_inotify_unregister().
2475 ++ * If filesystem is going away, just leave
2476 ++ * the sucker alone, eviction will take
2477 ++ * care of it.
2478 ++ */
2479 ++ if (pin_inotify_watch(&parent->wdata))
2480 ++ list_add(&parent->ilist, &inotify_list);
2481 + }
2482 + }
2483 + }
2484
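The heart of the change is the pin_to_kill()/unpin_and_kill() protocol with its three outcomes. The sketch below models only that decision logic in userspace; the structures, the S_BIAS threshold and the identity re-check are simplified stand-ins, and all locking is omitted.

#include <stdio.h>

#define S_BIAS (1 << 30)   /* stand-in threshold */

struct super_block {
        long  s_count;     /* passive references */
        long  s_active;    /* active references */
        void *s_root;      /* NULL once the fs has shut down */
};

/* Return values mirror the patch:
 * 0 - watch already dead, nothing to do
 * 1 - pinned via an active sb reference (deactivate_super() later)
 * 2 - pinned via a passive reference (drop_super() later), watch re-validated */
static int pin_to_kill(struct super_block *sb, int watch_still_in_idr)
{
        if (sb->s_count >= S_BIAS) {   /* umount not yet past the point of no return */
                sb->s_active++;
                return 1;
        }
        sb->s_count++;                 /* passive ref; real code now waits on ->s_umount */
        if (!sb->s_root)               /* fs finished shutting down in the meantime */
                return 0;
        return watch_still_in_idr ? 2 : 0;   /* idr_find()/i_sb re-check stands in here */
}

int main(void)
{
        struct super_block live  = { .s_count = S_BIAS + 1, .s_root = (void *)1 };
        struct super_block dying = { .s_count = 1, .s_root = NULL };

        printf("live sb:  %d\n", pin_to_kill(&live, 1));    /* 1 */
        printf("dying sb: %d\n", pin_to_kill(&dying, 1));   /* 0 */
        return 0;
}
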
2485 Added: hardened/2.6/trunk/2.6.25/1800_sched-disable-hrtick.patch
2486 ===================================================================
2487 --- hardened/2.6/trunk/2.6.25/1800_sched-disable-hrtick.patch (rev 0)
2488 +++ hardened/2.6/trunk/2.6.25/1800_sched-disable-hrtick.patch 2008-12-03 00:31:56 UTC (rev 1414)
2489 @@ -0,0 +1,17 @@
2490 +From: Kerin Millar <kerframil@×××××.com>
2491 +
2492 +This is a backport to 2.6.25 of commit 612f39d5e7baeb0518cfe50d53e37e14c0ca1475
2493 +from Ingo Molnar, which disables hrtick (high-resolution preemption ticks). For
2494 +further information, please refer to Gentoo Bug #247453.
2495 +
2496 +--- a/kernel/sched.c 2008-04-17 03:49:44.000000000 +0100
2497 ++++ b/kernel/sched.c 2008-11-18 20:30:33.000000000 +0000
2498 +@@ -602,7 +602,7 @@
2499 + SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
2500 + SCHED_FEAT_WAKEUP_PREEMPT * 1 |
2501 + SCHED_FEAT_START_DEBIT * 1 |
2502 +- SCHED_FEAT_HRTICK * 1 |
2503 ++ SCHED_FEAT_HRTICK * 0 |
2504 + SCHED_FEAT_DOUBLE_TICK * 0;
2505 +
2506 + #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
2507
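For readers puzzled by the "* 1 | ... * 0" construct being patched: multiplying a feature constant by 0 simply leaves its bit out of the default mask. A tiny sketch, with illustrative enum values rather than the real ones from kernel/sched.c:

#include <stdio.h>

enum {
        SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
        SCHED_FEAT_WAKEUP_PREEMPT    = 2,
        SCHED_FEAT_START_DEBIT       = 4,
        SCHED_FEAT_HRTICK            = 8,
        SCHED_FEAT_DOUBLE_TICK       = 16,
};

int main(void)
{
        const unsigned int sysctl_sched_features =
                SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
                SCHED_FEAT_WAKEUP_PREEMPT    * 1 |
                SCHED_FEAT_START_DEBIT       * 1 |
                SCHED_FEAT_HRTICK            * 0 |   /* the backported change */
                SCHED_FEAT_DOUBLE_TICK       * 0;

        printf("HRTICK enabled: %s\n",
               sysctl_sched_features & SCHED_FEAT_HRTICK ? "yes" : "no");
        return 0;
}
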
2508 Added: hardened/2.6/trunk/2.6.25/4460_pax-fix-mmap-BUG_ON-task-size-check.patch
2509 ===================================================================
2510 --- hardened/2.6/trunk/2.6.25/4460_pax-fix-mmap-BUG_ON-task-size-check.patch (rev 0)
2511 +++ hardened/2.6/trunk/2.6.25/4460_pax-fix-mmap-BUG_ON-task-size-check.patch 2008-12-03 00:31:56 UTC (rev 1414)
2512 @@ -0,0 +1,22 @@
2513 +From: Gordon Malm <gengor@g.o>
2514 +
2515 +Fix incorrect vma task size check under SEGMEXEC.
2516 +
2517 +Fixes bug #246607.
2518 +
2519 +Thanks to Hugo Mildenberger for reporting and PaX Team for the fix.
2520 +
2521 +This patch is present in upstream grsecurity patches as of
2522 +pax-linux-2.6.27.7-test22.patch.
2523 +
2524 +--- a/mm/mmap.c
2525 ++++ b/mm/mmap.c
2526 +@@ -1703,7 +1703,7 @@ struct vm_area_struct *pax_find_mirror_v
2527 + BUG_ON(vma->vm_mirror);
2528 + return NULL;
2529 + }
2530 +- BUG_ON(vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < vma->vm_start - SEGMEXEC_TASK_SIZE - 1);
2531 ++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
2532 + vma_m = vma->vm_mirror;
2533 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
2534 + BUG_ON(vma->vm_file != vma_m->vm_file);
2535
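One way the old unsigned comparison can misfire, shown as a standalone example; SEGMEXEC_TASK_SIZE is given an arbitrary illustrative value here, and this is only a model of the two checks, not the kernel code.

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL   /* illustrative boundary */

static int old_check(unsigned long start, unsigned long end)
{
        return end - SEGMEXEC_TASK_SIZE - 1 < start - SEGMEXEC_TASK_SIZE - 1;
}

static int new_check(unsigned long start, unsigned long end)
{
        return start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < end;
}

int main(void)
{
        /* vma really straddling the boundary: both versions flag it */
        printf("straddling:  old=%d new=%d\n",
               old_check(SEGMEXEC_TASK_SIZE - 0x1000, SEGMEXEC_TASK_SIZE + 0x1000),
               new_check(SEGMEXEC_TASK_SIZE - 0x1000, SEGMEXEC_TASK_SIZE + 0x1000));

        /* vma starting exactly at the boundary: the old subtraction wraps around
         * and triggers the BUG_ON even though the mapping is legal */
        printf("at boundary: old=%d new=%d\n",
               old_check(SEGMEXEC_TASK_SIZE, SEGMEXEC_TASK_SIZE + 0x1000),
               new_check(SEGMEXEC_TASK_SIZE, SEGMEXEC_TASK_SIZE + 0x1000));
        return 0;
}
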
2536 Added: hardened/2.6/trunk/2.6.25/4465_pax-fix-false-RLIMIT_STACK-warnings.patch
2537 ===================================================================
2538 --- hardened/2.6/trunk/2.6.25/4465_pax-fix-false-RLIMIT_STACK-warnings.patch (rev 0)
2539 +++ hardened/2.6/trunk/2.6.25/4465_pax-fix-false-RLIMIT_STACK-warnings.patch 2008-12-03 00:31:56 UTC (rev 1414)
2540 @@ -0,0 +1,88 @@
2541 +From: Gordon Malm <gengor@g.o>
2542 +
2543 +Fix false-positive RLIMIT_STACK warnings.
2544 +
2545 +Thanks to PaX Team for the heads up.
2546 +
2547 +This patch is present in upstream grsecurity patches as of
2548 +pax-linux-2.6.27.7-test22.patch.
2549 +
2550 +--- a/arch/x86/mm/fault.c
2551 ++++ b/arch/x86/mm/fault.c
2552 +@@ -840,16 +840,14 @@ not_pax_fault:
2553 + goto good_area;
2554 + if (!(vma->vm_flags & VM_GROWSDOWN))
2555 + goto bad_area;
2556 +- if (error_code & PF_USER) {
2557 +- /*
2558 +- * Accessing the stack below %sp is always a bug.
2559 +- * The large cushion allows instructions like enter
2560 +- * and pusha to work. ("enter $65535,$31" pushes
2561 +- * 32 pointers and then decrements %sp by 65535.)
2562 +- */
2563 +- if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
2564 +- goto bad_area;
2565 +- }
2566 ++ /*
2567 ++ * Accessing the stack below %sp is always a bug.
2568 ++ * The large cushion allows instructions like enter
2569 ++ * and pusha to work. ("enter $65535,$31" pushes
2570 ++ * 32 pointers and then decrements %sp by 65535.)
2571 ++ */
2572 ++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
2573 ++ goto bad_area;
2574 +
2575 + #ifdef CONFIG_PAX_SEGMEXEC
2576 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)
2577 +--- a/kernel/exit.c
2578 ++++ b/kernel/exit.c
2579 +@@ -38,7 +38,6 @@
2580 + #include <linux/cn_proc.h>
2581 + #include <linux/mutex.h>
2582 + #include <linux/futex.h>
2583 +-#include <linux/compat.h>
2584 + #include <linux/pipe_fs_i.h>
2585 + #include <linux/audit.h> /* for audit_free() */
2586 + #include <linux/resource.h>
2587 +@@ -974,14 +973,6 @@ NORET_TYPE void do_exit(long code)
2588 + exit_itimers(tsk->signal);
2589 + }
2590 + acct_collect(code, group_dead);
2591 +-#ifdef CONFIG_FUTEX
2592 +- if (unlikely(tsk->robust_list))
2593 +- exit_robust_list(tsk);
2594 +-#ifdef CONFIG_COMPAT
2595 +- if (unlikely(tsk->compat_robust_list))
2596 +- compat_exit_robust_list(tsk);
2597 +-#endif
2598 +-#endif
2599 + if (group_dead)
2600 + tty_audit_exit();
2601 + if (unlikely(tsk->audit_context))
2602 +--- a/kernel/fork.c
2603 ++++ b/kernel/fork.c
2604 +@@ -35,6 +35,7 @@
2605 + #include <linux/syscalls.h>
2606 + #include <linux/jiffies.h>
2607 + #include <linux/futex.h>
2608 ++#include <linux/compat.h>
2609 + #include <linux/task_io_accounting_ops.h>
2610 + #include <linux/rcupdate.h>
2611 + #include <linux/ptrace.h>
2612 +@@ -491,6 +492,16 @@ void mm_release(struct task_struct *tsk,
2613 + {
2614 + struct completion *vfork_done = tsk->vfork_done;
2615 +
2616 ++ /* Get rid of any futexes when releasing the mm */
2617 ++#ifdef CONFIG_FUTEX
2618 ++ if (unlikely(tsk->robust_list))
2619 ++ exit_robust_list(tsk);
2620 ++#ifdef CONFIG_COMPAT
2621 ++ if (unlikely(tsk->compat_robust_list))
2622 ++ compat_exit_robust_list(tsk);
2623 ++#endif
2624 ++#endif
2625 ++
2626 + /* Get rid of any cached register state */
2627 + deactivate_mm(tsk, mm);
2628 +
2629 \ No newline at end of file