Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.0.8/
Date: Sun, 30 Oct 2011 15:42:57
Message-Id: c5074ab1cbd019ef06c277062e2b1ed662dd52fd.blueness@gentoo
commit: c5074ab1cbd019ef06c277062e2b1ed662dd52fd
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 30 15:42:41 2011 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sun Oct 30 15:42:41 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=c5074ab1

Add a missing patch to bump to 3.0.8

---
3.0.8/0000_README | 4 +
3.0.8/1007_linux-3.0.8.patch | 1472 ++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1476 insertions(+), 0 deletions(-)

diff --git a/3.0.8/0000_README b/3.0.8/0000_README
index 331ee41..3d0a189 100644
--- a/3.0.8/0000_README
+++ b/3.0.8/0000_README
@@ -3,6 +3,10 @@ README

Individual Patch Descriptions:
-----------------------------------------------------------------------------
+Patch: 1007_linux-3.0.8.patch
+From: http://www.kernel.org
+Desc: Linux 3.0.8
+
Patch: 4420_grsecurity-2.2.2-3.0.8-201110250925.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity

diff --git a/3.0.8/1007_linux-3.0.8.patch b/3.0.8/1007_linux-3.0.8.patch
new file mode 100644
index 0000000..62a4bb6
--- /dev/null
+++ b/3.0.8/1007_linux-3.0.8.patch
@@ -0,0 +1,1472 @@
+diff --git a/Makefile b/Makefile
+index 11c4249..9f6e3cd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+
+diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
+index 4960686..4372763 100644
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -264,8 +264,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] =
+ ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
+- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
++ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index c19571c..4a4eba5 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -473,6 +473,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
+ */
+ bank_start = min(bank_start,
+ ALIGN(prev_bank_end, PAGES_PER_SECTION));
++#else
++ /*
++ * Align down here since the VM subsystem insists that the
++ * memmap entries are valid from the bank start aligned to
++ * MAX_ORDER_NR_PAGES.
++ */
++ bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
+ #endif
+ /*
+ * If we had a previous bank, and there is a space
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 3032644..87488b9 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
+ #ifdef CONFIG_X86_32
+ /* for fixmap */
+ tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+-
+- good_end = max_pfn_mapped << PAGE_SHIFT;
+ #endif
++ good_end = max_pfn_mapped << PAGE_SHIFT;
+
+ base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
+ if (base == MEMBLOCK_ERROR)
+diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
+index be44256..7835b8f 100644
+--- a/crypto/ghash-generic.c
++++ b/crypto/ghash-generic.c
+@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *dst = dctx->buffer;
+
++ if (!ctx->gf128)
++ return -ENOKEY;
++
+ if (dctx->bytes) {
+ int n = min(srclen, dctx->bytes);
+ u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *buf = dctx->buffer;
+
++ if (!ctx->gf128)
++ return -ENOKEY;
++
+ ghash_flush(ctx, dctx);
+ memcpy(dst, buf, GHASH_BLOCK_SIZE);
+
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 41841a3..17cef86 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)
+ {
+ struct fw_unit *unit = fw_unit(dev);
+ struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
++ struct sbp2_logical_unit *lu;
++
++ list_for_each_entry(lu, &tgt->lu_list, link)
++ cancel_delayed_work_sync(&lu->work);
+
+ sbp2_target_put(tgt);
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index ebdb0fd..9a0aee2 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ case ATOM_ARG_FB:
+ idx = U8(*ptr);
+ (*ptr)++;
+- val = gctx->scratch[((gctx->fb_base + idx) / 4)];
++ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
++ DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
++ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
++ val = 0;
++ } else
++ val = gctx->scratch[(gctx->fb_base / 4) + idx];
+ if (print)
+ DEBUG("FB[0x%02X]", idx);
+ break;
+@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ case ATOM_ARG_FB:
+ idx = U8(*ptr);
+ (*ptr)++;
+- gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
++ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
++ DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
++ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
++ } else
++ gctx->scratch[(gctx->fb_base / 4) + idx] = val;
+ DEBUG("FB[0x%02X]", idx);
+ break;
+ case ATOM_ARG_PLL:
+@@ -1367,11 +1376,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
+
+ usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ }
++ ctx->scratch_size_bytes = 0;
+ if (usage_bytes == 0)
+ usage_bytes = 20 * 1024;
+ /* allocate some scratch memory */
+ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+ if (!ctx->scratch)
+ return -ENOMEM;
++ ctx->scratch_size_bytes = usage_bytes;
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
+index a589a55..93cfe20 100644
+--- a/drivers/gpu/drm/radeon/atom.h
++++ b/drivers/gpu/drm/radeon/atom.h
+@@ -137,6 +137,7 @@ struct atom_context {
+ int cs_equal, cs_above;
+ int io_mode;
+ uint32_t *scratch;
++ int scratch_size_bytes;
+ };
+
+ extern int atom_debug;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index b7f0726..e2b2d78 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -392,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ * Create and bind a ttm if required.
+ */
+
+- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
+- ret = ttm_bo_add_ttm(bo, false);
+- if (ret)
+- goto out_err;
++ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
++ if (bo->ttm == NULL) {
++ ret = ttm_bo_add_ttm(bo, false);
++ if (ret)
++ goto out_err;
++ }
+
+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+ if (ret)
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index 77dbf40..ae3c6f5 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ if (ret)
+ return ret;
+
+- ttm_bo_free_old_node(bo);
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ (bo->ttm != NULL)) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
++ ttm_bo_free_old_node(bo);
+ } else {
+ /**
+ * This should help pipeline ordinary buffer moves.
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index a756ee6..c946d90 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -568,6 +568,9 @@
+ #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
+ #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+
++#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
++#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
++
+ #define USB_VENDOR_ID_SKYCABLE 0x1223
+ #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index 0ec91c1..56d0539 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -501,9 +501,17 @@ static int magicmouse_probe(struct hid_device *hdev,
+ }
+ report->size = 6;
+
++ /*
++ * Some devices repond with 'invalid report id' when feature
++ * report switching it into multitouch mode is sent to it.
++ *
++ * This results in -EIO from the _raw low-level transport callback,
++ * but there seems to be no other way of switching the mode.
++ * Thus the super-ugly hacky success check below.
++ */
+ ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
+ HID_FEATURE_REPORT);
+- if (ret != sizeof(feature)) {
++ if (ret != -EIO && ret != sizeof(feature)) {
+ hid_err(hdev, "unable to request touch data (%d)\n", ret);
+ goto err_stop_hw;
+ }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 621959d..4bdb5d4 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -89,6 +89,7 @@ static const struct hid_blacklist {
+
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
++ { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
+ { 0, 0 }
+ };
+
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index f2b377c..36d7f27 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval)
+ {
+ if (is_word_sized(reg))
+ return LM75_TEMP_FROM_REG(regval);
+- return regval * 1000;
++ return ((s8)regval) * 1000;
+ }
+
+ static inline u16
+@@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp)
+ {
+ if (is_word_sized(reg))
+ return LM75_TEMP_TO_REG(temp);
+- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
++ return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000),
++ 1000);
+ }
+
+ /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
+@@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
+ }
+
+ /* Get the monitoring functions started */
+-static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
++static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
++ enum kinds kind)
+ {
+ int i;
+ u8 tmp, diode;
+@@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
+
+ /* Get thermal sensor types */
+- diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
++ switch (kind) {
++ case w83627ehf:
++ diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
++ break;
++ default:
++ diode = 0x70;
++ }
+ for (i = 0; i < 3; i++) {
+ if ((tmp & (0x02 << i)))
+- data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
++ data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
+ else
+ data->temp_type[i] = 4; /* thermistor */
+ }
+@@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
+ }
+
+ /* Initialize the chip */
+- w83627ehf_init_device(data);
++ w83627ehf_init_device(data, sio_data->kind);
+
+ data->vrm = vid_which_vrm();
+ superio_enter(sio_data->sioreg);
+diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
+index 48fea37..29e2399 100644
+--- a/drivers/media/video/uvc/uvc_entity.c
++++ b/drivers/media/video/uvc/uvc_entity.c
+@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
+ if (remote == NULL)
+ return -EINVAL;
+
+- source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
++ source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+ ? (remote->vdev ? &remote->vdev->entity : NULL)
+ : &remote->subdev.entity;
+ if (source == NULL)
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index d347116..1658575 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -601,6 +601,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ .callback = dmi_check_cb,
+ },
+ {
++ .ident = "N150/N210/N220",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR,
++ "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
++ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
++ },
++ .callback = dmi_check_cb,
++ },
++ {
+ .ident = "N150/N210/N220/N230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index fc7e57b..53e7d72 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -566,6 +566,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+ struct inode *dir = dentry->d_inode;
+ struct dentry *child;
+
++ if (!dir) {
++ dput(dentry);
++ dentry = ERR_PTR(-ENOENT);
++ break;
++ }
++
+ /* skip separators */
+ while (*s == sep)
+ s++;
+@@ -581,10 +587,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+ mutex_unlock(&dir->i_mutex);
+ dput(dentry);
+ dentry = child;
+- if (!dentry->d_inode) {
+- dput(dentry);
+- dentry = ERR_PTR(-ENOENT);
+- }
+ } while (!IS_ERR(dentry));
+ _FreeXid(xid);
+ kfree(full_path);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 168a80f..5cb8614 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+ forget->forget_one.nlookup = nlookup;
+
+ spin_lock(&fc->lock);
+- fc->forget_list_tail->next = forget;
+- fc->forget_list_tail = forget;
+- wake_up(&fc->waitq);
+- kill_fasync(&fc->fasync, SIGIO, POLL_IN);
++ if (fc->connected) {
++ fc->forget_list_tail->next = forget;
++ fc->forget_list_tail = forget;
++ wake_up(&fc->waitq);
++ kill_fasync(&fc->fasync, SIGIO, POLL_IN);
++ } else {
++ kfree(forget);
++ }
+ spin_unlock(&fc->lock);
+ }
+
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index d685752..4e7f64b 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -13,6 +13,7 @@
+ #include <linux/fs.h>
+ #include <linux/mutex.h>
+ #include <linux/buffer_head.h>
++#include <linux/blkdev.h>
+ #include "hfsplus_raw.h"
+
+ #define DBG_BNODE_REFS 0x00000001
+@@ -110,7 +111,9 @@ struct hfsplus_vh;
+ struct hfs_btree;
+
+ struct hfsplus_sb_info {
++ void *s_vhdr_buf;
+ struct hfsplus_vh *s_vhdr;
++ void *s_backup_vhdr_buf;
+ struct hfsplus_vh *s_backup_vhdr;
+ struct hfs_btree *ext_tree;
+ struct hfs_btree *cat_tree;
+@@ -258,6 +261,15 @@ struct hfsplus_readdir_data {
+ struct hfsplus_cat_key key;
+ };
+
++/*
++ * Find minimum acceptible I/O size for an hfsplus sb.
++ */
++static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
++{
++ return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
++ HFSPLUS_SECTOR_SIZE);
++}
++
+ #define hfs_btree_open hfsplus_btree_open
+ #define hfs_btree_close hfsplus_btree_close
+ #define hfs_btree_write hfsplus_btree_write
+@@ -436,8 +448,8 @@ int hfsplus_compare_dentry(const struct dentry *parent,
+ /* wrapper.c */
+ int hfsplus_read_wrapper(struct super_block *);
+ int hfs_part_find(struct super_block *, sector_t *, sector_t *);
+-int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+- void *data, int rw);
++int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
++ void *buf, void **data, int rw);
+
+ /* time macros */
+ #define __hfsp_mt2ut(t) (be32_to_cpu(t) - 2082844800U)
+diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
+index 40ad88c..eb355d8 100644
+--- a/fs/hfsplus/part_tbl.c
++++ b/fs/hfsplus/part_tbl.c
+@@ -88,11 +88,12 @@ static int hfs_parse_old_pmap(struct super_block *sb, struct old_pmap *pm,
+ return -ENOENT;
+ }
+
+-static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
+- sector_t *part_start, sector_t *part_size)
++static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
++ struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
+ {
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ int size = be32_to_cpu(pm->pmMapBlkCnt);
++ int buf_size = hfsplus_min_io_size(sb);
+ int res;
+ int i = 0;
+
+@@ -107,11 +108,14 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
+ if (++i >= size)
+ return -ENOENT;
+
+- res = hfsplus_submit_bio(sb->s_bdev,
+- *part_start + HFS_PMAP_BLK + i,
+- pm, READ);
+- if (res)
+- return res;
++ pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
++ if ((u8 *)pm - (u8 *)buf >= buf_size) {
++ res = hfsplus_submit_bio(sb,
++ *part_start + HFS_PMAP_BLK + i,
++ buf, (void **)&pm, READ);
++ if (res)
++ return res;
++ }
+ } while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
+
+ return -ENOENT;
+@@ -124,15 +128,15 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
+ int hfs_part_find(struct super_block *sb,
+ sector_t *part_start, sector_t *part_size)
+ {
+- void *data;
++ void *buf, *data;
+ int res;
+
+- data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+- if (!data)
++ buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
++ if (!buf)
+ return -ENOMEM;
+
+- res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
+- data, READ);
++ res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
++ buf, &data, READ);
+ if (res)
+ goto out;
+
+@@ -141,13 +145,13 @@ int hfs_part_find(struct super_block *sb,
+ res = hfs_parse_old_pmap(sb, data, part_start, part_size);
+ break;
+ case HFS_NEW_PMAP_MAGIC:
+- res = hfs_parse_new_pmap(sb, data, part_start, part_size);
++ res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
+ break;
+ default:
+ res = -ENOENT;
+ break;
+ }
+ out:
+- kfree(data);
++ kfree(buf);
+ return res;
+ }
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index 84a47b7..c3a76fd 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -197,17 +197,17 @@ int hfsplus_sync_fs(struct super_block *sb, int wait)
+ write_backup = 1;
+ }
+
+- error2 = hfsplus_submit_bio(sb->s_bdev,
++ error2 = hfsplus_submit_bio(sb,
+ sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
+- sbi->s_vhdr, WRITE_SYNC);
++ sbi->s_vhdr_buf, NULL, WRITE_SYNC);
+ if (!error)
+ error = error2;
+ if (!write_backup)
+ goto out;
+
+- error2 = hfsplus_submit_bio(sb->s_bdev,
++ error2 = hfsplus_submit_bio(sb,
+ sbi->part_start + sbi->sect_count - 2,
+- sbi->s_backup_vhdr, WRITE_SYNC);
++ sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
+ if (!error)
+ error2 = error;
+ out:
+@@ -251,8 +251,8 @@ static void hfsplus_put_super(struct super_block *sb)
+ hfs_btree_close(sbi->ext_tree);
+ iput(sbi->alloc_file);
+ iput(sbi->hidden_dir);
+- kfree(sbi->s_vhdr);
+- kfree(sbi->s_backup_vhdr);
++ kfree(sbi->s_vhdr_buf);
++ kfree(sbi->s_backup_vhdr_buf);
+ unload_nls(sbi->nls);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+@@ -508,8 +508,8 @@ out_close_cat_tree:
+ out_close_ext_tree:
+ hfs_btree_close(sbi->ext_tree);
+ out_free_vhdr:
+- kfree(sbi->s_vhdr);
+- kfree(sbi->s_backup_vhdr);
++ kfree(sbi->s_vhdr_buf);
++ kfree(sbi->s_backup_vhdr_buf);
+ out_unload_nls:
+ unload_nls(sbi->nls);
+ unload_nls(nls);
+diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
+index 4ac88ff..7b8112d 100644
+--- a/fs/hfsplus/wrapper.c
++++ b/fs/hfsplus/wrapper.c
+@@ -31,25 +31,67 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
+ complete(bio->bi_private);
+ }
+
+-int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+- void *data, int rw)
++/*
++ * hfsplus_submit_bio - Perfrom block I/O
++ * @sb: super block of volume for I/O
++ * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
++ * @buf: buffer for I/O
++ * @data: output pointer for location of requested data
++ * @rw: direction of I/O
++ *
++ * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
++ * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
++ * @data will return a pointer to the start of the requested sector,
++ * which may not be the same location as @buf.
++ *
++ * If @sector is not aligned to the bdev logical block size it will
++ * be rounded down. For writes this means that @buf should contain data
++ * that starts at the rounded-down address. As long as the data was
++ * read using hfsplus_submit_bio() and the same buffer is used things
++ * will work correctly.
++ */
++int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
++ void *buf, void **data, int rw)
+ {
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct bio *bio;
+ int ret = 0;
++ unsigned int io_size;
++ loff_t start;
++ int offset;
++
++ /*
++ * Align sector to hardware sector size and find offset. We
++ * assume that io_size is a power of two, which _should_
++ * be true.
++ */
++ io_size = hfsplus_min_io_size(sb);
++ start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
++ offset = start & (io_size - 1);
++ sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ bio->bi_sector = sector;
+- bio->bi_bdev = bdev;
++ bio->bi_bdev = sb->s_bdev;
+ bio->bi_end_io = hfsplus_end_io_sync;
+ bio->bi_private = &wait;
+
+- /*
+- * We always submit one sector at a time, so bio_add_page must not fail.
+- */
+- if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
+- offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
+- BUG();
++ if (!(rw & WRITE) && data)
++ *data = (u8 *)buf + offset;
++
++ while (io_size > 0) {
++ unsigned int page_offset = offset_in_page(buf);
++ unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
++ io_size);
++
++ ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
++ if (ret != len) {
++ ret = -EIO;
++ goto out;
++ }
++ io_size -= len;
++ buf = (u8 *)buf + len;
++ }
+
+ submit_bio(rw, bio);
+ wait_for_completion(&wait);
+@@ -57,8 +99,9 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+ if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+
++out:
+ bio_put(bio);
+- return ret;
++ return ret < 0 ? ret : 0;
+ }
+
+ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
+@@ -147,17 +190,17 @@ int hfsplus_read_wrapper(struct super_block *sb)
+ }
+
+ error = -ENOMEM;
+- sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+- if (!sbi->s_vhdr)
++ sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
++ if (!sbi->s_vhdr_buf)
+ goto out;
+- sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+- if (!sbi->s_backup_vhdr)
++ sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
++ if (!sbi->s_backup_vhdr_buf)
+ goto out_free_vhdr;
+
+ reread:
+- error = hfsplus_submit_bio(sb->s_bdev,
+- part_start + HFSPLUS_VOLHEAD_SECTOR,
+- sbi->s_vhdr, READ);
++ error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
++ sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
++ READ);
+ if (error)
+ goto out_free_backup_vhdr;
+
+@@ -186,9 +229,9 @@ reread:
+ goto reread;
+ }
+
+- error = hfsplus_submit_bio(sb->s_bdev,
+- part_start + part_size - 2,
+- sbi->s_backup_vhdr, READ);
++ error = hfsplus_submit_bio(sb, part_start + part_size - 2,
++ sbi->s_backup_vhdr_buf,
++ (void **)&sbi->s_backup_vhdr, READ);
+ if (error)
+ goto out_free_backup_vhdr;
+
+@@ -232,9 +275,9 @@ reread:
+ return 0;
+
+ out_free_backup_vhdr:
+- kfree(sbi->s_backup_vhdr);
++ kfree(sbi->s_backup_vhdr_buf);
+ out_free_vhdr:
+- kfree(sbi->s_vhdr);
++ kfree(sbi->s_vhdr_buf);
+ out:
+ return error;
+ }
+diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
+index 8633521..8731516 100644
+--- a/fs/xfs/linux-2.6/xfs_linux.h
++++ b/fs/xfs/linux-2.6/xfs_linux.h
+@@ -70,6 +70,8 @@
+ #include <linux/ctype.h>
+ #include <linux/writeback.h>
+ #include <linux/capability.h>
++#include <linux/kthread.h>
++#include <linux/freezer.h>
+ #include <linux/list_sort.h>
+
+ #include <asm/page.h>
+diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
+index a1a881e..347cae9 100644
+--- a/fs/xfs/linux-2.6/xfs_super.c
++++ b/fs/xfs/linux-2.6/xfs_super.c
+@@ -1412,37 +1412,35 @@ xfs_fs_fill_super(
+ sb->s_time_gran = 1;
+ set_posix_acl_flag(sb);
+
+- error = xfs_syncd_init(mp);
+- if (error)
+- goto out_filestream_unmount;
+-
+ xfs_inode_shrinker_register(mp);
+
+ error = xfs_mountfs(mp);
+ if (error)
+- goto out_syncd_stop;
++ goto out_filestream_unmount;
++
++ error = xfs_syncd_init(mp);
++ if (error)
++ goto out_unmount;
+
+ root = igrab(VFS_I(mp->m_rootip));
+ if (!root) {
+ error = ENOENT;
+- goto fail_unmount;
++ goto out_syncd_stop;
+ }
+ if (is_bad_inode(root)) {
+ error = EINVAL;
+- goto fail_vnrele;
++ goto out_syncd_stop;
+ }
+ sb->s_root = d_alloc_root(root);
+ if (!sb->s_root) {
+ error = ENOMEM;
+- goto fail_vnrele;
++ goto out_iput;
+ }
+
+ return 0;
+
+- out_syncd_stop:
+- xfs_inode_shrinker_unregister(mp);
+- xfs_syncd_stop(mp);
+ out_filestream_unmount:
++ xfs_inode_shrinker_unregister(mp);
+ xfs_filestream_unmount(mp);
+ out_free_sb:
+ xfs_freesb(mp);
+@@ -1456,17 +1454,12 @@ xfs_fs_fill_super(
+ out:
+ return -error;
+
+- fail_vnrele:
+- if (sb->s_root) {
+- dput(sb->s_root);
+- sb->s_root = NULL;
+- } else {
+- iput(root);
+- }
+-
+- fail_unmount:
+- xfs_inode_shrinker_unregister(mp);
++ out_iput:
++ iput(root);
++ out_syncd_stop:
+ xfs_syncd_stop(mp);
++ out_unmount:
++ xfs_inode_shrinker_unregister(mp);
+
+ /*
+ * Blow away any referenced inode in the filestreams cache.
+@@ -1667,24 +1660,13 @@ xfs_init_workqueues(void)
+ */
+ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_syncd_wq)
+- goto out;
+-
+- xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+- if (!xfs_ail_wq)
+- goto out_destroy_syncd;
+-
++ return -ENOMEM;
+ return 0;
+-
+-out_destroy_syncd:
+- destroy_workqueue(xfs_syncd_wq);
+-out:
+- return -ENOMEM;
+ }
+
+ STATIC void
+ xfs_destroy_workqueues(void)
+ {
+- destroy_workqueue(xfs_ail_wq);
+ destroy_workqueue(xfs_syncd_wq);
+ }
+
+diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
+index 9e0e2fa..8126fc2 100644
+--- a/fs/xfs/quota/xfs_dquot_item.c
++++ b/fs/xfs/quota/xfs_dquot_item.c
+@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
+ * search the buffer cache can be a time consuming thing, and AIL lock is a
+ * spinlock.
+ */
+-STATIC void
++STATIC bool
+ xfs_qm_dquot_logitem_pushbuf(
+ struct xfs_log_item *lip)
+ {
+ struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
+ struct xfs_dquot *dqp = qlip->qli_dquot;
+ struct xfs_buf *bp;
++ bool ret = true;
+
+ ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
+ if (completion_done(&dqp->q_flush) ||
+ !(lip->li_flags & XFS_LI_IN_AIL)) {
+ xfs_dqunlock(dqp);
+- return;
++ return true;
+ }
+
+ bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
+ dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
+ xfs_dqunlock(dqp);
+ if (!bp)
+- return;
++ return true;
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ xfs_buf_delwri_promote(bp);
++ if (XFS_BUF_ISPINNED(bp))
++ ret = false;
+ xfs_buf_relse(bp);
++ return ret;
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index 7b7e005..a7342e8 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -632,7 +632,7 @@ xfs_buf_item_push(
+ * the xfsbufd to get this buffer written. We have to unlock the buffer
+ * to allow the xfsbufd to write it, too.
+ */
+-STATIC void
++STATIC bool
+ xfs_buf_item_pushbuf(
+ struct xfs_log_item *lip)
+ {
+@@ -646,6 +646,7 @@
+
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
++ return true;
+ }
+
+ STATIC void
+diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
+index b1e88d5..391044c 100644
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -713,13 +713,14 @@ xfs_inode_item_committed(
+ * marked delayed write. If that's the case, we'll promote it and that will
+ * allow the caller to write the buffer by triggering the xfsbufd to run.
+ */
+-STATIC void
++STATIC bool
+ xfs_inode_item_pushbuf(
+ struct xfs_log_item *lip)
+ {
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
+ struct xfs_buf *bp;
++ bool ret = true;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
+
+@@ -730,7 +731,7 @@ xfs_inode_item_pushbuf(
+ if (completion_done(&ip->i_flush) ||
+ !(lip->li_flags & XFS_LI_IN_AIL)) {
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+- return;
++ return true;
+ }
+
+ bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
+@@ -738,10 +739,13 @@ xfs_inode_item_pushbuf(
+
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ if (!bp)
+- return;
++ return true;
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ xfs_buf_delwri_promote(bp);
++ if (XFS_BUF_ISPINNED(bp))
++ ret = false;
+ xfs_buf_relse(bp);
++ return ret;
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index c83f63b..efc147f 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -1426,6 +1426,7 @@ xfs_trans_committed(
+ static inline void
+ xfs_log_item_batch_insert(
+ struct xfs_ail *ailp,
++ struct xfs_ail_cursor *cur,
+ struct xfs_log_item **log_items,
+ int nr_items,
+ xfs_lsn_t commit_lsn)
+@@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
+
+ spin_lock(&ailp->xa_lock);
+ /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+- xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
++ xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
+
+ for (i = 0; i < nr_items; i++)
+ IOP_UNPIN(log_items[i], 0);
+@@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
+ * as an iclog write error even though we haven't started any IO yet. Hence in
+ * this case all we need to do is IOP_COMMITTED processing, followed by an
+ * IOP_UNPIN(aborted) call.
++ *
++ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
++ * at the end of the AIL, the insert cursor avoids the need to walk
++ * the AIL to find the insertion point on every xfs_log_item_batch_insert()
++ * call. This saves a lot of needless list walking and is a net win, even
++ * though it slightly increases that amount of AIL lock traffic to set it up
++ * and tear it down.
+ */
+ void
+ xfs_trans_committed_bulk(
+@@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
+ #define LOG_ITEM_BATCH_SIZE 32
+ struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
+ struct xfs_log_vec *lv;
++ struct xfs_ail_cursor cur;
+ int i = 0;
+
++ spin_lock(&ailp->xa_lock);
++ xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
++ spin_unlock(&ailp->xa_lock);
++
+ /* unpin all the log items */
+ for (lv = log_vector; lv; lv = lv->lv_next ) {
+ struct xfs_log_item *lip = lv->lv_item;
+@@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
+ /*
+ * Not a bulk update option due to unusual item_lsn.
+ * Push into AIL immediately, rechecking the lsn once
+- * we have the ail lock. Then unpin the item.
++ * we have the ail lock. Then unpin the item. This does
++ * not affect the AIL cursor the bulk insert path is
++ * using.
+ */
+ spin_lock(&ailp->xa_lock);
+ if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+@@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
+ /* Item is a candidate for bulk AIL insert. */
+ log_items[i++] = lv->lv_item;
+ if (i >= LOG_ITEM_BATCH_SIZE) {
+- xfs_log_item_batch_insert(ailp, log_items,
++ xfs_log_item_batch_insert(ailp, &cur, log_items,
+ LOG_ITEM_BATCH_SIZE, commit_lsn);
+ i = 0;
+ }
+@@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
+
+ /* make sure we insert the remainder! */
+ if (i)
+- xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
++ xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
++
++ spin_lock(&ailp->xa_lock);
++ xfs_trans_ail_cursor_done(ailp, &cur);
++ spin_unlock(&ailp->xa_lock);
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
+index 06a9759..53597f4 100644
+--- a/fs/xfs/xfs_trans.h
++++ b/fs/xfs/xfs_trans.h
+@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
+ void (*iop_unlock)(xfs_log_item_t *);
+ xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
+ void (*iop_push)(xfs_log_item_t *);
+- void (*iop_pushbuf)(xfs_log_item_t *);
++ bool (*iop_pushbuf)(xfs_log_item_t *);
+ void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
+ } xfs_item_ops_t;
+
+diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
+index 5fc2380..a4c281b 100644
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -28,8 +28,6 @@
+ #include "xfs_trans_priv.h"
+ #include "xfs_error.h"
+
+-struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
+-
+ #ifdef DEBUG
+ /*
+ * Check that the list is sorted as it should be.
+@@ -272,9 +270,9 @@ xfs_trans_ail_cursor_clear(
+ }
+
+ /*
+- * Return the item in the AIL with the current lsn.
+- * Return the current tree generation number for use
+- * in calls to xfs_trans_next_ail().
++ * Initialise the cursor to the first item in the AIL with the given @lsn.
++ * This searches the list from lowest LSN to highest. Pass a @lsn of zero
++ * to initialise the cursor to the first item in the AIL.
+ */
+ xfs_log_item_t *
+ xfs_trans_ail_cursor_first(
+@@ -300,31 +298,97 @@ out:
+ }
+
+ /*
+- * splice the log item list into the AIL at the given LSN.
++ * Initialise the cursor to the last item in the AIL with the given @lsn.
++ * This searches the list from highest LSN to lowest. If there is no item with
++ * the value of @lsn, then it sets the cursor to the last item with an LSN lower
++ * than @lsn.
++ */
++static struct xfs_log_item *
++__xfs_trans_ail_cursor_last(
++ struct xfs_ail *ailp,
++ xfs_lsn_t lsn)
++{
++ xfs_log_item_t *lip;
++
++ list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
++ if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
++ return lip;
++ }
++ return NULL;
++}
++
++/*
++ * Initialise the cursor to the last item in the AIL with the given @lsn.
++ * This searches the list from highest LSN to lowest.
++ */
++struct xfs_log_item *
++xfs_trans_ail_cursor_last(
++ struct xfs_ail *ailp,
++ struct xfs_ail_cursor *cur,
++ xfs_lsn_t lsn)
++{
++ xfs_trans_ail_cursor_init(ailp, cur);
++ cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
++ return cur->item;
++}
++
++/*
++ * splice the log item list into the AIL at the given LSN. We splice to the
++ * tail of the given LSN to maintain insert order for push traversals. The
++ * cursor is optional, allowing repeated updates to the same LSN to avoid
++ * repeated traversals.
+ */
+ static void
+ xfs_ail_splice(
+- struct xfs_ail *ailp,
+- struct list_head *list,
+- xfs_lsn_t lsn)
++ struct xfs_ail *ailp,
++ struct xfs_ail_cursor *cur,
++ struct list_head *list,
++ xfs_lsn_t lsn)
+ {
+- xfs_log_item_t *next_lip;
++ struct xfs_log_item *lip = cur ? cur->item : NULL;
++ struct xfs_log_item *next_lip;
+
+- /* If the list is empty, just insert the item. */
+- if (list_empty(&ailp->xa_ail)) {
+- list_splice(list, &ailp->xa_ail);
+- return;
++ /*
++ * Get a new cursor if we don't have a placeholder or the existing one
++ * has been invalidated.
++ */
++ if (!lip || (__psint_t)lip & 1) {
++ lip = __xfs_trans_ail_cursor_last(ailp, lsn);
++
++ if (!lip) {
++ /* The list is empty, so just splice and return. */
++ if (cur)
++ cur->item = NULL;
++ list_splice(list, &ailp->xa_ail);
++ return;
++ }
+ }
+
+- list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+- if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+- break;
++ /*
++ * Our cursor points to the item we want to insert _after_, so we have
++ * to update the cursor to point to the end of the list we are splicing
++ * in so that it points to the correct location for the next splice.
++ * i.e. before the splice
++ *
++ * lsn -> lsn -> lsn + x -> lsn + x ...
++ * ^
++ * | cursor points here
++ *
++ * After the splice we have:
++ *
++ * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
++ * ^ ^
++ * | cursor points here | needs to move here
++ *
++ * So we set the cursor to the last item in the list to be spliced
++ * before we execute the splice, resulting in the cursor pointing to
++ * the correct item after the splice occurs.
++ */
++ if (cur) {
++ next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
++ cur->item = next_lip;
+ }
+-
+- ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+- XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+-
+- list_splice_init(list, &next_lip->li_ail);
++ list_splice(list, &lip->li_ail);
+ }
+
+ /*
+@@ -340,16 +404,10 @@ xfs_ail_delete(
+ xfs_trans_ail_cursor_clear(ailp, lip);
+ }
+
+-/*
+- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+- * to run at a later time if there is more work to do to complete the push.
+- */
+-STATIC void
+-xfs_ail_worker(
+- struct work_struct *work)
++static long
++xfsaild_push(
++ struct xfs_ail *ailp)
+ {
+- struct xfs_ail *ailp = container_of(to_delayed_work(work),
+- struct xfs_ail, xa_work);
+ xfs_mount_t *mp = ailp->xa_mount;
+ struct xfs_ail_cursor *cur = &ailp->xa_cursors;
+ xfs_log_item_t *lip;
+@@ -412,8 +470,13 @@ xfs_ail_worker(
+
+ case XFS_ITEM_PUSHBUF:
+ XFS_STATS_INC(xs_push_ail_pushbuf);
+- IOP_PUSHBUF(lip);
+- ailp->xa_last_pushed_lsn = lsn;
++
++ if (!IOP_PUSHBUF(lip)) {
++ stuck++;
++ flush_log = 1;
++ } else {
++ ailp->xa_last_pushed_lsn = lsn;
++ }
+ push_xfsbufd = 1;
+ break;
+
+@@ -425,7 +488,6 @@ xfs_ail_worker(
+
+ case XFS_ITEM_LOCKED:
+ XFS_STATS_INC(xs_push_ail_locked);
+- ailp->xa_last_pushed_lsn = lsn;
+ stuck++;
+ break;
+
+@@ -486,20 +548,6 @@ out_done:
+ /* We're past our target or empty, so idle */
+ ailp->xa_last_pushed_lsn = 0;
+
+- /*
+- * We clear the XFS_AIL_PUSHING_BIT first before checking
+- * whether the target has changed. If the target has changed,
+- * this pushes the requeue race directly onto the result of the
+- * atomic test/set bit, so we are guaranteed that either the
+- * the pusher that changed the target or ourselves will requeue
+- * the work (but not both).
+- */
+- clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+- smp_rmb();
+- if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
+- test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+- return;
+-
+ tout = 50;
+ } else if (XFS_LSN_CMP(lsn, target) >= 0) {
+ /*
+@@ -522,9 +570,30 @@ out_done:
+ tout = 20;
+ }
+
+- /* There is more to do, requeue us. */
+- queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+- msecs_to_jiffies(tout));
++ return tout;
++}
++
++static int
++xfsaild(
++ void *data)
++{
++ struct xfs_ail *ailp = data;
++ long tout = 0; /* milliseconds */
++
++ while (!kthread_should_stop()) {
++ if (tout && tout <= 20)
++ __set_current_state(TASK_KILLABLE);
++ else
++ __set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(tout ?
++ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
++
++ try_to_freeze();
++
++ tout = xfsaild_push(ailp);
++ }
++
++ return 0;
+ }
+
+ /*
+@@ -559,8 +628,9 @@ xfs_ail_push(
+ */
+ smp_wmb();
+ xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
+- if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+- queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
++ smp_wmb();
++
++ wake_up_process(ailp->xa_task);
+ }
+
+ /*
+@@ -645,6 +715,7 @@ xfs_trans_unlocked_item(
+ void
+ xfs_trans_ail_update_bulk(
+ struct xfs_ail *ailp,
++ struct xfs_ail_cursor *cur,
+ struct xfs_log_item **log_items,
+ int nr_items,
+ xfs_lsn_t lsn) __releases(ailp->xa_lock)
+@@ -674,7 +745,7 @@ xfs_trans_ail_update_bulk(
+ list_add(&lip->li_ail, &tmp);
+ }
+
+- xfs_ail_splice(ailp, &tmp, lsn);
++ xfs_ail_splice(ailp, cur, &tmp, lsn);
+
+ if (!mlip_changed) {
+ spin_unlock(&ailp->xa_lock);
+@@ -794,9 +865,18 @@ xfs_trans_ail_init(
+ ailp->xa_mount = mp;
+ INIT_LIST_HEAD(&ailp->xa_ail);
+ spin_lock_init(&ailp->xa_lock);
+- INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
++
++ ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
++ ailp->xa_mount->m_fsname);
++ if (IS_ERR(ailp->xa_task))
++ goto out_free_ailp;
++
+ mp->m_ail = ailp;
+ return 0;
++
++out_free_ailp:
++ kmem_free(ailp);
++ return ENOMEM;
+ }
+
+ void
+@@ -805,6 +885,6 @@ xfs_trans_ail_destroy(
+ {
+ struct xfs_ail *ailp = mp->m_ail;
+
+- cancel_delayed_work_sync(&ailp->xa_work);
++ kthread_stop(ailp->xa_task);
+ kmem_free(ailp);
+ }
+diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
+index 6b164e9..fe2e3cb 100644
+--- a/fs/xfs/xfs_trans_priv.h
++++ b/fs/xfs/xfs_trans_priv.h
+@@ -64,24 +64,19 @@ struct xfs_ail_cursor {
+ */
+ struct xfs_ail {
+ struct xfs_mount *xa_mount;
++ struct task_struct *xa_task;
+ struct list_head xa_ail;
+ xfs_lsn_t xa_target;
+ struct xfs_ail_cursor xa_cursors;
+ spinlock_t xa_lock;
+- struct delayed_work xa_work;
+ xfs_lsn_t xa_last_pushed_lsn;
+- unsigned long xa_flags;
+ };
+
+-#define XFS_AIL_PUSHING_BIT 0
+-
+ /*
+ * From xfs_trans_ail.c
+ */
+-
+-extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
+-
+ void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
++ struct xfs_ail_cursor *cur,
+ struct xfs_log_item **log_items, int nr_items,
+ xfs_lsn_t lsn) __releases(ailp->xa_lock);
+ static inline void
+@@ -90,7 +85,7 @@ xfs_trans_ail_update(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn) __releases(ailp->xa_lock)
+ {
+- xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
++ xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
+ }
+
+ void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
+@@ -111,10 +106,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
+ void xfs_trans_unlocked_item(struct xfs_ail *,
+ xfs_log_item_t *);
+
+-struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
++struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
++ struct xfs_ail_cursor *cur,
++ xfs_lsn_t lsn);
++struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
+ xfs_lsn_t lsn);
+-struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
++struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur);
+ void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur);
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index c8008dd..640ded8 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+ struct task_cputime sum;
+ unsigned long flags;
+
+- spin_lock_irqsave(&cputimer->lock, flags);
+ if (!cputimer->running) {
+- cputimer->running = 1;
+ /*
+ * The POSIX timer interface allows for absolute time expiry
+ * values through the TIMER_ABSTIME flag, therefore we have
+@@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+ * it.
+ */
+ thread_group_cputime(tsk, &sum);
++ spin_lock_irqsave(&cputimer->lock, flags);
++ cputimer->running = 1;
+ update_gt_cputime(&cputimer->cputime, &sum);
+- }
++ } else
++ spin_lock_irqsave(&cputimer->lock, flags);
+ *times = cputimer->cputime;
+ spin_unlock_irqrestore(&cputimer->lock, flags);
+ }
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 5c942cf..f88dadc 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1135,7 +1135,7 @@ DECLARE_RWSEM(uts_sem);
+ static int override_release(char __user *release, int len)
+ {
+ int ret = 0;
+- char buf[len];
++ char buf[65];
+
+ if (current->personality & UNAME26) {
+ char *rest = UTS_RELEASE;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 666e4e6..14d0a6a 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+
+ ptep = pte_offset_map(pmd, addr);
+
+- if (!is_swap_pte(*ptep)) {
+- pte_unmap(ptep);
+- goto out;
+- }
++ /*
++ * Peek to check is_swap_pte() before taking ptlock? No, we
++ * can race mremap's move_ptes(), which skips anon_vma lock.
++ */
+
+ ptl = pte_lockptr(mm, pmd);
+ }
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 4680b1e..373e14f 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
+ * Found a listening socket, now check the incoming
+ * call user data vs this sockets call user data
+ */
+- if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
++ if (x25_sk(s)->cudmatchlength > 0 &&
++ skb->len >= x25_sk(s)->cudmatchlength) {
+ if((memcmp(x25_sk(s)->calluserdata.cuddata,
+ skb->data,
+ x25_sk(s)->cudmatchlength)) == 0) {
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 486f6de..981b6fd 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2352,6 +2352,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
++ SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 7bbc5f2..cf1fa36 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3097,6 +3097,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
++ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
+ SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
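
The subtle change in this batch is the hfsplus one: hfsplus_submit_bio() now rounds the requested 512-byte sector down to the device's minimum I/O size and hands the caller, via @data, a pointer at an offset into the aligned buffer. A minimal standalone sketch of that alignment arithmetic, for reference only (the 4096-byte logical block size and the harness below are illustrative assumptions, not part of the patch):

/*
 * Sketch of the rounding done in hfsplus_submit_bio() above. Assumes
 * HFSPLUS_SECTOR_SIZE is 512 (shift of 9) and a power-of-two logical
 * block size; the 4096-byte device is a made-up example.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9 /* log2(HFSPLUS_SECTOR_SIZE) */

int main(void)
{
	uint64_t sector = 10;     /* 512-byte sector asked for by the fs */
	uint64_t io_size = 4096;  /* hypothetical bdev_logical_block_size() */

	uint64_t start = sector << SECTOR_SHIFT;   /* byte address: 5120 */
	uint64_t offset = start & (io_size - 1);   /* 5120 mod 4096 = 1024 */

	/* Round the sector down to an io_size boundary, as the patch does. */
	sector &= ~((io_size >> SECTOR_SHIFT) - 1); /* 10 & ~7 = 8 */

	/* Prints: submit at sector 8, data at buffer offset 1024 */
	printf("submit at sector %llu, data at buffer offset %llu\n",
	       (unsigned long long)sector, (unsigned long long)offset);
	return 0;
}

This is also why hfsplus_read_wrapper() now keeps two pointers per volume header: s_vhdr_buf (the aligned I/O buffer that gets kmalloc'd and kfree'd) and s_vhdr (the interior pointer returned through @data).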