
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1556 - genpatches-2.6/trunk/2.6.29
Date: Sat, 09 May 2009 01:04:21
Message-Id: E1M2az8-0003qR-J6@stork.gentoo.org
Author: mpagano
Date: 2009-05-09 01:04:14 +0000 (Sat, 09 May 2009)
New Revision: 1556

Added:
genpatches-2.6/trunk/2.6.29/1002_linux-2.6.29.3.patch
Modified:
genpatches-2.6/trunk/2.6.29/0000_README
Log:
Linux patch version 2.6.29.3.

Modified: genpatches-2.6/trunk/2.6.29/0000_README
===================================================================
--- genpatches-2.6/trunk/2.6.29/0000_README 2009-05-05 15:31:02 UTC (rev 1555)
+++ genpatches-2.6/trunk/2.6.29/0000_README 2009-05-09 01:04:14 UTC (rev 1556)
@@ -47,6 +47,10 @@
From: http://www.kernel.org
Desc: Linux 2.6.29.2

+Patch: 1002_linux-2.6.29.3.patch
+From: http://www.kernel.org
+Desc: Linux 2.6.29.3
+
Patch: 1915_ext4-automatically-allocate-delay-allocated-blocks-on-rename.patch
From: Theodore Ts'o <tytso@×××.edu>
Desc: ext4: Automatically allocate delay allocated blocks on rename

Added: genpatches-2.6/trunk/2.6.29/1002_linux-2.6.29.3.patch
===================================================================
--- genpatches-2.6/trunk/2.6.29/1002_linux-2.6.29.3.patch (rev 0)
+++ genpatches-2.6/trunk/2.6.29/1002_linux-2.6.29.3.patch 2009-05-09 01:04:14 UTC (rev 1556)
@@ -0,0 +1,2934 @@
+diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
+index d346649..9eed29e 100644
+--- a/arch/powerpc/include/asm/processor.h
++++ b/arch/powerpc/include/asm/processor.h
+@@ -313,6 +313,25 @@ static inline void prefetchw(const void *x)
+ #define HAVE_ARCH_PICK_MMAP_LAYOUT
+ #endif
+
++#ifdef CONFIG_PPC64
++static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
++{
++ unsigned long sp;
++
++ if (is_32)
++ sp = regs->gpr[1] & 0x0ffffffffUL;
++ else
++ sp = regs->gpr[1];
++
++ return sp;
++}
++#else
++static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
++{
++ return regs->gpr[1];
++}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* __ASSEMBLY__ */
+ #endif /* _ASM_POWERPC_PROCESSOR_H */
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index a54405e..00b5078 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -26,12 +26,12 @@ int show_unhandled_signals = 0;
+ * Allocate space for the signal frame
+ */
+ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+- size_t frame_size)
++ size_t frame_size, int is_32)
+ {
+ unsigned long oldsp, newsp;
+
+ /* Default to using normal stack */
+- oldsp = regs->gpr[1];
++ oldsp = get_clean_sp(regs, is_32);
+
+ /* Check for alt stack */
+ if ((ka->sa.sa_flags & SA_ONSTACK) &&
+diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
+index b427bf8..95e1b14 100644
+--- a/arch/powerpc/kernel/signal.h
++++ b/arch/powerpc/kernel/signal.h
+@@ -15,7 +15,7 @@
+ extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags);
+
+ extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+- size_t frame_size);
++ size_t frame_size, int is_32);
+ extern void restore_sigmask(sigset_t *set);
+
+ extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index b13abf3..d670429 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -836,7 +836,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+
+ /* Set up Signal Frame */
+ /* Put a Real Time Context onto stack */
+- rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
++ rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+ addr = rt_sf;
+ if (unlikely(rt_sf == NULL))
+ goto badframe;
+@@ -1182,7 +1182,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+ unsigned long newsp = 0;
+
+ /* Set up Signal Frame */
+- frame = get_sigframe(ka, regs, sizeof(*frame));
++ frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+ if (unlikely(frame == NULL))
+ goto badframe;
+ sc = (struct sigcontext __user *) &frame->sctx;
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index e132891..2fe6fc6 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -402,7 +402,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+ unsigned long newsp = 0;
+ long err = 0;
+
+- frame = get_sigframe(ka, regs, sizeof(*frame));
++ frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+ if (unlikely(frame == NULL))
+ goto badframe;
+
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index 2b54fe0..aa8bc45 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
+
+ if (!used_math())
+ return 0;
+- clear_used_math(); /* trigger finit */
+
+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
+ /*
+ * Start with clearing the user buffer. This will present a
+@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
+ return -1;
+ }
+
++ clear_used_math(); /* trigger finit */
++
+ if (task_thread_info(tsk)->status & TS_XSAVE) {
+ struct _fpstate __user *fx = buf;
+ struct _xstate __user *x = buf;
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 2d4477c..8005da2 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -797,7 +797,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+ ASSERT(is_empty_shadow_page(sp->spt));
+ bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+ sp->multimapped = 0;
+- sp->global = 1;
++ sp->global = 0;
+ sp->parent_pte = parent_pte;
+ --vcpu->kvm->arch.n_free_mmu_pages;
+ return sp;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 758b7a1..425423e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3962,6 +3962,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ {
++ if (vcpu->arch.time_page) {
++ kvm_release_page_dirty(vcpu->arch.time_page);
++ vcpu->arch.time_page = NULL;
++ }
++
+ kvm_x86_ops->vcpu_free(vcpu);
+ }
+
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 6a518dd..4a68571 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ {
+ struct kmmio_probe *p;
+ list_for_each_entry_rcu(p, &kmmio_probes, list) {
+- if (addr >= p->addr && addr <= (p->addr + p->len))
++ if (addr >= p->addr && addr < (p->addr + p->len))
+ return p;
+ }
+ return NULL;
+diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
+index 89bf924..9136946 100644
+--- a/arch/x86/pci/mmconfig-shared.c
++++ b/arch/x86/pci/mmconfig-shared.c
+@@ -254,7 +254,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
+ if (!fixmem32)
+ return AE_OK;
+ if ((mcfg_res->start >= fixmem32->address) &&
+- (mcfg_res->end < (fixmem32->address +
++ (mcfg_res->end <= (fixmem32->address +
+ fixmem32->address_length))) {
+ mcfg_res->flags = 1;
+ return AE_CTRL_TERMINATE;
+@@ -271,7 +271,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
+ return AE_OK;
+
+ if ((mcfg_res->start >= address.minimum) &&
+- (mcfg_res->end < (address.minimum + address.address_length))) {
++ (mcfg_res->end <= (address.minimum + address.address_length))) {
+ mcfg_res->flags = 1;
+ return AE_CTRL_TERMINATE;
+ }
+@@ -318,7 +318,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
+ u64 old_size = size;
+ int valid = 0;
+
+- while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) {
++ while (!is_reserved(addr, addr + size, E820_RESERVED)) {
+ size >>= 1;
+ if (size < (16UL<<20))
+ break;
+diff --git a/block/genhd.c b/block/genhd.c
+index a9ec910..1a4916e 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -98,7 +98,7 @@ void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
+
+ if (flags & DISK_PITER_REVERSE)
+ piter->idx = ptbl->len - 1;
+- else if (flags & DISK_PITER_INCL_PART0)
++ else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
+ piter->idx = 0;
+ else
+ piter->idx = 1;
+@@ -134,7 +134,8 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+ /* determine iteration parameters */
+ if (piter->flags & DISK_PITER_REVERSE) {
+ inc = -1;
+- if (piter->flags & DISK_PITER_INCL_PART0)
++ if (piter->flags & (DISK_PITER_INCL_PART0 |
++ DISK_PITER_INCL_EMPTY_PART0))
+ end = -1;
+ else
+ end = 0;
+@@ -150,7 +151,10 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+ part = rcu_dereference(ptbl->part[piter->idx]);
+ if (!part)
+ continue;
+- if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
++ if (!part->nr_sects &&
++ !(piter->flags & DISK_PITER_INCL_EMPTY) &&
++ !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
++ piter->idx == 0))
+ continue;
+
+ get_device(part_to_dev(part));
+@@ -1011,7 +1015,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
+ "\n\n");
+ */
+
+- disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
++ disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
+ while ((hd = disk_part_iter_next(&piter))) {
+ cpu = part_stat_lock();
+ part_round_stats(cpu, hd);
+diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
+index 61566b1..2b60413 100644
+--- a/drivers/acpi/acpica/rscreate.c
++++ b/drivers/acpi/acpica/rscreate.c
+@@ -191,8 +191,6 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+ user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
+
+ for (index = 0; index < number_of_elements; index++) {
+- int source_name_index = 2;
+- int source_index_index = 3;
+
+ /*
+ * Point user_prt past this current structure
+@@ -261,27 +259,6 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+ return_ACPI_STATUS(AE_BAD_DATA);
+ }
+
+- /*
+- * If BIOS erroneously reversed the _PRT source_name and source_index,
+- * then reverse them back.
+- */
+- if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
+- ACPI_TYPE_INTEGER) {
+- if (acpi_gbl_enable_interpreter_slack) {
+- source_name_index = 3;
+- source_index_index = 2;
+- printk(KERN_WARNING
+- "ACPI: Handling Garbled _PRT entry\n");
+- } else {
+- ACPI_ERROR((AE_INFO,
+- "(PRT[%X].source_index) Need Integer, found %s",
+- index,
+- acpi_ut_get_object_type_name
+- (sub_object_list[3])));
+- return_ACPI_STATUS(AE_BAD_DATA);
+- }
+- }
+-
+ user_prt->pin = (u32) obj_desc->integer.value;
+
+ /*
+@@ -305,7 +282,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+ * 3) Third subobject: Dereference the PRT.source_name
+ * The name may be unresolved (slack mode), so allow a null object
+ */
+- obj_desc = sub_object_list[source_name_index];
++ obj_desc = sub_object_list[2];
+ if (obj_desc) {
+ switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+@@ -379,7 +356,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+
+ /* 4) Fourth subobject: Dereference the PRT.source_index */
+
+- obj_desc = sub_object_list[source_index_index];
++ obj_desc = sub_object_list[3];
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO,
+ "(PRT[%X].SourceIndex) Need Integer, found %s",
+diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
+index d0e563e..86e83f8 100644
+--- a/drivers/char/hw_random/virtio-rng.c
++++ b/drivers/char/hw_random/virtio-rng.c
+@@ -37,9 +37,9 @@ static void random_recv_done(struct virtqueue *vq)
+ {
+ int len;
+
+- /* We never get spurious callbacks. */
++ /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+ if (!vq->vq_ops->get_buf(vq, &len))
+- BUG();
++ return;
+
+ data_left = len / sizeof(random_data[0]);
+ complete(&have_data);
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index d9e751b..af9761c 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -101,6 +101,7 @@ struct buffer_desc {
+ u32 phys_addr;
+ u32 __reserved[4];
+ struct buffer_desc *next;
++ enum dma_data_direction dir;
+ };
+
+ struct crypt_ctl {
+@@ -132,14 +133,10 @@ struct crypt_ctl {
+ struct ablk_ctx {
+ struct buffer_desc *src;
+ struct buffer_desc *dst;
+- unsigned src_nents;
+- unsigned dst_nents;
+ };
+
+ struct aead_ctx {
+ struct buffer_desc *buffer;
+- unsigned short assoc_nents;
+- unsigned short src_nents;
+ struct scatterlist ivlist;
+ /* used when the hmac is not on one sg entry */
+ u8 *hmac_virt;
+@@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
+ }
+ }
+
+-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
++static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+ {
+ while (buf) {
+ struct buffer_desc *buf1;
+ u32 phys1;
+
+ buf1 = buf->next;
+ phys1 = buf->phys_next;
++ dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
+ dma_pool_free(buffer_pool, buf, phys);
+ buf = buf1;
+ phys = phys1;
+@@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys)
+ struct crypt_ctl *crypt;
+ struct ixp_ctx *ctx;
+ int failed;
+- enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+
+ failed = phys & 0x1 ? -EBADMSG : 0;
+ phys &= ~0x3;
+@@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys)
+ case CTL_FLAG_PERFORM_AEAD: {
+ struct aead_request *req = crypt->data.aead_req;
+ struct aead_ctx *req_ctx = aead_request_ctx(req);
+- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
+- DMA_TO_DEVICE);
+- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+- dma_unmap_sg(dev, req->src, req_ctx->src_nents,
+- DMA_BIDIRECTIONAL);
+
+- free_buf_chain(req_ctx->buffer, crypt->src_buf);
++ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+ if (req_ctx->hmac_virt) {
+ finish_scattered_hmac(crypt);
+ }
+@@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys)
+ case CTL_FLAG_PERFORM_ABLK: {
+ struct ablkcipher_request *req = crypt->data.ablk_req;
+ struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+- int nents;
++
+ if (req_ctx->dst) {
+- nents = req_ctx->dst_nents;
+- dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
+- free_buf_chain(req_ctx->dst, crypt->dst_buf);
+- src_direction = DMA_TO_DEVICE;
++ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+ }
+- nents = req_ctx->src_nents;
+- dma_unmap_sg(dev, req->src, nents, src_direction);
+- free_buf_chain(req_ctx->src, crypt->src_buf);
++ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ req->base.complete(&req->base, failed);
+ break;
+ }
+@@ -750,56 +737,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
+ return 0;
+ }
+
+-static int count_sg(struct scatterlist *sg, int nbytes)
++static struct buffer_desc *chainup_buffers(struct device *dev,
++ struct scatterlist *sg, unsigned nbytes,
++ struct buffer_desc *buf, gfp_t flags,
++ enum dma_data_direction dir)
+ {
+- int i;
+- for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+- nbytes -= sg->length;
+- return i;
+-}
+-
+-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
+- unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
+-{
+- int nents = 0;
+-
+- while (nbytes > 0) {
++ for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
++ unsigned len = min(nbytes, sg->length);
+ struct buffer_desc *next_buf;
+ u32 next_buf_phys;
+- unsigned len = min(nbytes, sg_dma_len(sg));
++ void *ptr;
+
+- nents++;
+ nbytes -= len;
+- if (!buf->phys_addr) {
+- buf->phys_addr = sg_dma_address(sg);
+- buf->buf_len = len;
+- buf->next = NULL;
+- buf->phys_next = 0;
+- goto next;
+- }
+- /* Two consecutive chunks on one page may be handled by the old
+- * buffer descriptor, increased by the length of the new one
+- */
+- if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
+- buf->buf_len += len;
+- goto next;
+- }
++ ptr = page_address(sg_page(sg)) + sg->offset;
+ next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+- if (!next_buf)
+- return NULL;
++ if (!next_buf) {
++ buf = NULL;
++ break;
++ }
++ sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+ buf->next = next_buf;
+ buf->phys_next = next_buf_phys;
+-
+ buf = next_buf;
+- buf->next = NULL;
+- buf->phys_next = 0;
++
+ buf->phys_addr = sg_dma_address(sg);
+ buf->buf_len = len;
+-next:
+- if (nbytes > 0) {
+- sg = sg_next(sg);
+- }
++ buf->dir = dir;
+ }
++ buf->next = NULL;
++ buf->phys_next = 0;
+ return buf;
+ }
+
+@@ -860,12 +826,12 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+- int ret = -ENOMEM;
+ struct ix_sa_dir *dir;
+ struct crypt_ctl *crypt;
+- unsigned int nbytes = req->nbytes, nents;
++ unsigned int nbytes = req->nbytes;
+ enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+ struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
++ struct buffer_desc src_hook;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+@@ -878,7 +844,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+
+ crypt = get_crypt_desc();
+ if (!crypt)
+- return ret;
++ return -ENOMEM;
+
+ crypt->data.ablk_req = req;
+ crypt->crypto_ctx = dir->npe_ctx_phys;
+@@ -891,53 +857,41 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+ BUG_ON(ivsize && !req->info);
+ memcpy(crypt->iv, req->info, ivsize);
+ if (req->src != req->dst) {
++ struct buffer_desc dst_hook;
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+- nents = count_sg(req->dst, nbytes);
+ /* This was never tested by Intel
+ * for more than one dst buffer, I think. */
+- BUG_ON(nents != 1);
+- req_ctx->dst_nents = nents;
+- dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
+- req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
+- if (!req_ctx->dst)
+- goto unmap_sg_dest;
+- req_ctx->dst->phys_addr = 0;
+- if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
++ BUG_ON(req->dst->length < nbytes);
++ req_ctx->dst = NULL;
++ if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
++ flags, DMA_FROM_DEVICE))
+ goto free_buf_dest;
+ src_direction = DMA_TO_DEVICE;
++ req_ctx->dst = dst_hook.next;
++ crypt->dst_buf = dst_hook.phys_next;
+ } else {
+ req_ctx->dst = NULL;
+- req_ctx->dst_nents = 0;
+ }
+- nents = count_sg(req->src, nbytes);
+- req_ctx->src_nents = nents;
+- dma_map_sg(dev, req->src, nents, src_direction);
+-
+- req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
+- if (!req_ctx->src)
+- goto unmap_sg_src;
+- req_ctx->src->phys_addr = 0;
+- if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
++ req_ctx->src = NULL;
++ if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
++ flags, src_direction))
+ goto free_buf_src;
+
++ req_ctx->src = src_hook.next;
++ crypt->src_buf = src_hook.phys_next;
+ crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+ qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(SEND_QID));
+ return -EINPROGRESS;
+
+ free_buf_src:
+- free_buf_chain(req_ctx->src, crypt->src_buf);
+-unmap_sg_src:
+- dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
++ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dest:
+ if (req->src != req->dst) {
+- free_buf_chain(req_ctx->dst, crypt->dst_buf);
+-unmap_sg_dest:
+- dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
+- DMA_FROM_DEVICE);
++ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+ }
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+- return ret;
++ return -ENOMEM;
+ }
+
+ static int ablk_encrypt(struct ablkcipher_request *req)
+@@ -985,7 +939,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
+ break;
+
+ offset += sg->length;
+- sg = sg_next(sg);
++ sg = scatterwalk_sg_next(sg);
+ }
+ return (start + nbytes > offset + sg->length);
+ }
+@@ -997,11 +951,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned ivsize = crypto_aead_ivsize(tfm);
+ unsigned authsize = crypto_aead_authsize(tfm);
+- int ret = -ENOMEM;
+ struct ix_sa_dir *dir;
+ struct crypt_ctl *crypt;
+- unsigned int cryptlen, nents;
+- struct buffer_desc *buf;
++ unsigned int cryptlen;
++ struct buffer_desc *buf, src_hook;
+ struct aead_ctx *req_ctx = aead_request_ctx(req);
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+@@ -1022,7 +975,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ }
+ crypt = get_crypt_desc();
+ if (!crypt)
+- return ret;
++ return -ENOMEM;
+
+ crypt->data.aead_req = req;
+ crypt->crypto_ctx = dir->npe_ctx_phys;
+@@ -1041,31 +994,27 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ BUG(); /* -ENOTSUP because of my lazyness */
+ }
+
+- req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
+- if (!req_ctx->buffer)
+- goto out;
+- req_ctx->buffer->phys_addr = 0;
+ /* ASSOC data */
+- nents = count_sg(req->assoc, req->assoclen);
+- req_ctx->assoc_nents = nents;
+- dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
+- buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
++ buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
++ flags, DMA_TO_DEVICE);
++ req_ctx->buffer = src_hook.next;
++ crypt->src_buf = src_hook.phys_next;
+ if (!buf)
+- goto unmap_sg_assoc;
++ goto out;
+ /* IV */
+ sg_init_table(&req_ctx->ivlist, 1);
+ sg_set_buf(&req_ctx->ivlist, iv, ivsize);
+- dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+- buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
++ buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
++ DMA_BIDIRECTIONAL);
+ if (!buf)
+- goto unmap_sg_iv;
++ goto free_chain;
+ if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+ /* The 12 hmac bytes are scattered,
+ * we need to copy them into a safe buffer */
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+ &crypt->icv_rev_aes);
+ if (unlikely(!req_ctx->hmac_virt))
+- goto unmap_sg_iv;
++ goto free_chain;
+ if (!encrypt) {
+ scatterwalk_map_and_copy(req_ctx->hmac_virt,
+ req->src, cryptlen, authsize, 0);
+@@ -1075,33 +1024,28 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ req_ctx->hmac_virt = NULL;
+ }
+ /* Crypt */
+- nents = count_sg(req->src, cryptlen + authsize);
+- req_ctx->src_nents = nents;
+- dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+- buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
++ buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
++ DMA_BIDIRECTIONAL);
+ if (!buf)
+- goto unmap_sg_src;
++ goto free_hmac_virt;
+ if (!req_ctx->hmac_virt) {
+ crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
+ }
++
+ crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+ qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(SEND_QID));
+ return -EINPROGRESS;
+-unmap_sg_src:
+- dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
++free_hmac_virt:
+ if (req_ctx->hmac_virt) {
+ dma_pool_free(buffer_pool, req_ctx->hmac_virt,
+ crypt->icv_rev_aes);
+ }
+-unmap_sg_iv:
+- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-unmap_sg_assoc:
+- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
+- free_buf_chain(req_ctx->buffer, crypt->src_buf);
++free_chain:
++ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+ out:
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+- return ret;
++ return -ENOMEM;
+ }
+
+ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index d6cc986..9239747 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -773,7 +773,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+- (dev)->pci_device == 0x2E22)
++ (dev)->pci_device == 0x2E22 || \
++ (dev)->pci_device == 0x2E32)
+
+ #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+@@ -782,6 +783,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+ #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
++ (dev)->pci_device == 0x2E32 || \
+ IS_GM45(dev))
+
+ #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index cc2938d..a787fb8 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1431,6 +1431,7 @@
+ #define DISPPLANE_NO_LINE_DOUBLE 0
+ #define DISPPLANE_STEREO_POLARITY_FIRST 0
+ #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++#define DISPPLANE_TILED (1<<10)
+ #define DSPAADDR 0x70184
+ #define DSPASTRIDE 0x70188
+ #define DSPAPOS 0x7018C /* reserved */
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 601a76f..254c5ca 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -338,6 +338,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++ int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr, alignment;
+ int ret;
+@@ -414,6 +415,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
++ if (IS_I965G(dev)) {
++ if (obj_priv->tiling_mode != I915_TILING_NONE)
++ dspcntr |= DISPPLANE_TILED;
++ else
++ dspcntr &= ~DISPPLANE_TILED;
++ }
++
+ I915_WRITE(dspcntr_reg, dspcntr);
+
+ Start = obj_priv->gtt_offset;
+@@ -426,6 +434,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ I915_READ(dspbase);
+ I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
++ I915_WRITE(dsptileoff, (y << 16) | x);
+ } else {
+ I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
+diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
+index 7a62db7..dc89bc2 100644
+--- a/drivers/ide/cs5536.c
++++ b/drivers/ide/cs5536.c
+@@ -237,6 +237,7 @@ static const struct ide_dma_ops cs5536_dma_ops = {
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
++ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+ static const struct ide_port_info cs5536_info = {
+diff --git a/drivers/net/b44.c b/drivers/net/b44.c
+index dc5f051..c2ffa8c 100644
+--- a/drivers/net/b44.c
++++ b/drivers/net/b44.c
+@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+ dest_idx * sizeof(dest_desc),
+ DMA_BIDIRECTIONAL);
+
+- ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
++ ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
+index b8251e8..df0794e 100644
+--- a/drivers/net/forcedeth.c
++++ b/drivers/net/forcedeth.c
+@@ -5995,6 +5995,9 @@ static int nv_resume(struct pci_dev *pdev)
+ for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ writel(np->saved_config_space[i], base+i*sizeof(u32));
+
++ /* restore phy state, including autoneg */
++ phy_init(dev);
++
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ rc = nv_open(dev);
+diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
+index b0bc3bc..67bb769 100644
+--- a/drivers/net/mv643xx_eth.c
++++ b/drivers/net/mv643xx_eth.c
+@@ -372,12 +372,12 @@ struct mv643xx_eth_private {
+ struct work_struct tx_timeout_task;
+
+ struct napi_struct napi;
++ u8 oom;
+ u8 work_link;
+ u8 work_tx;
+ u8 work_tx_end;
+ u8 work_rx;
+ u8 work_rx_refill;
+- u8 work_rx_oom;
+
+ int skb_size;
+ struct sk_buff_head rx_recycle;
+@@ -603,7 +603,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
+ dma_get_cache_alignment() - 1);
+
+ if (skb == NULL) {
+- mp->work_rx_oom |= 1 << rxq->index;
++ mp->oom = 1;
+ goto oom;
+ }
+
+@@ -1177,7 +1177,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
+
+ spin_lock_bh(&mp->mib_counters_lock);
+ p->good_octets_received += mib_read(mp, 0x00);
+- p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
+ p->bad_octets_received += mib_read(mp, 0x08);
+ p->internal_mac_transmit_err += mib_read(mp, 0x0c);
+ p->good_frames_received += mib_read(mp, 0x10);
+@@ -1191,7 +1190,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
+ p->frames_512_to_1023_octets += mib_read(mp, 0x30);
+ p->frames_1024_to_max_octets += mib_read(mp, 0x34);
+ p->good_octets_sent += mib_read(mp, 0x38);
+- p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
+ p->good_frames_sent += mib_read(mp, 0x40);
+ p->excessive_collision += mib_read(mp, 0x44);
+ p->multicast_frames_sent += mib_read(mp, 0x48);
+@@ -1908,8 +1906,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+
+ mp = container_of(napi, struct mv643xx_eth_private, napi);
+
+- mp->work_rx_refill |= mp->work_rx_oom;
+- mp->work_rx_oom = 0;
++ if (unlikely(mp->oom)) {
++ mp->oom = 0;
++ del_timer(&mp->rx_oom);
++ }
+
+ work_done = 0;
+ while (work_done < budget) {
+@@ -1923,8 +1923,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+ continue;
+ }
+
+- queue_mask = mp->work_tx | mp->work_tx_end |
+- mp->work_rx | mp->work_rx_refill;
++ queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
++ if (likely(!mp->oom))
++ queue_mask |= mp->work_rx_refill;
++
+ if (!queue_mask) {
+ if (mv643xx_eth_collect_events(mp))
+ continue;
+@@ -1945,7 +1947,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+ txq_maybe_wake(mp->txq + queue);
+ } else if (mp->work_rx & queue_mask) {
+ work_done += rxq_process(mp->rxq + queue, work_tbd);
+- } else if (mp->work_rx_refill & queue_mask) {
++ } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
+ work_done += rxq_refill(mp->rxq + queue, work_tbd);
+ } else {
+ BUG();
+@@ -1953,7 +1955,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+ }
+
+ if (work_done < budget) {
+- if (mp->work_rx_oom)
++ if (mp->oom)
+ mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+ napi_complete(napi);
+ wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+@@ -2145,7 +2147,7 @@ static int mv643xx_eth_open(struct net_device *dev)
+ rxq_refill(mp->rxq + i, INT_MAX);
+ }
+
+- if (mp->work_rx_oom) {
++ if (mp->oom) {
+ mp->rx_oom.expires = jiffies + (HZ / 10);
+ add_timer(&mp->rx_oom);
+ }
+diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
+index ccaeb5c..9347a3c 100644
+--- a/drivers/net/wireless/ath5k/debug.c
++++ b/drivers/net/wireless/ath5k/debug.c
+@@ -465,7 +465,7 @@ ath5k_debug_dump_bands(struct ath5k_softc *sc)
+
+ for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ struct ieee80211_supported_band *band = &sc->sbands[b];
+- char bname[5];
++ char bname[6];
+ switch (band->band) {
+ case IEEE80211_BAND_2GHZ:
+ strcpy(bname, "2 GHz");
+diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
+index 3c04044..1cc826b 100644
+--- a/drivers/net/wireless/ath9k/main.c
++++ b/drivers/net/wireless/ath9k/main.c
+@@ -2300,11 +2300,6 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
+
+- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+- if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+- ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0);
+- }
+-
+ DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
+ }
+
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 6d65a02..dbae617 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -551,11 +551,32 @@ address_error:
+ return 1;
+ }
+
++static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
++{
++ unsigned char *f = skb->data + ring->frameoffset;
++
++ return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
++}
++
++static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
++{
++ struct b43_rxhdr_fw4 *rxhdr;
++ unsigned char *frame;
++
++ /* This poisons the RX buffer to detect DMA failures. */
++
++ rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
++ rxhdr->frame_len = 0;
++
++ B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
++ frame = skb->data + ring->frameoffset;
++ memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
++}
++
+ static int setup_rx_descbuffer(struct b43_dmaring *ring,
+ struct b43_dmadesc_generic *desc,
+ struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
+ {
+- struct b43_rxhdr_fw4 *rxhdr;
+ dma_addr_t dmaaddr;
+ struct sk_buff *skb;
+
+@@ -564,6 +585,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
+ skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
+ if (unlikely(!skb))
+ return -ENOMEM;
++ b43_poison_rx_buffer(ring, skb);
+ dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
+ if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
+ /* ugh. try to realloc in zone_dma */
+@@ -574,6 +596,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
+ skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
994 + return -ENOMEM;
995 ++ b43_poison_rx_buffer(ring, skb);
996 + dmaaddr = map_descbuffer(ring, skb->data,
997 + ring->rx_buffersize, 0);
998 + }
999 +@@ -589,9 +612,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
1000 + ring->ops->fill_descriptor(ring, desc, dmaaddr,
1001 + ring->rx_buffersize, 0, 0, 0);
1002 +
1003 +- rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
1004 +- rxhdr->frame_len = 0;
1005 +-
1006 + return 0;
1007 + }
1008 +
1009 +@@ -1476,12 +1496,17 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
1010 + len = le16_to_cpu(rxhdr->frame_len);
1011 + } while (len == 0 && i++ < 5);
1012 + if (unlikely(len == 0)) {
1013 +- /* recycle the descriptor buffer. */
1014 +- sync_descbuffer_for_device(ring, meta->dmaaddr,
1015 +- ring->rx_buffersize);
1016 +- goto drop;
1017 ++ dmaaddr = meta->dmaaddr;
1018 ++ goto drop_recycle_buffer;
1019 + }
1020 + }
1021 ++ if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
1022 ++ /* Something went wrong with the DMA.
1023 ++ * The device did not touch the buffer and did not overwrite the poison. */
1024 ++ b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
1025 ++ dmaaddr = meta->dmaaddr;
1026 ++ goto drop_recycle_buffer;
1027 ++ }
1028 + if (unlikely(len > ring->rx_buffersize)) {
1029 + /* The data did not fit into one descriptor buffer
1030 + * and is split over multiple buffers.
1031 +@@ -1494,6 +1519,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
1032 + while (1) {
1033 + desc = ops->idx2desc(ring, *slot, &meta);
1034 + /* recycle the descriptor buffer. */
1035 ++ b43_poison_rx_buffer(ring, meta->skb);
1036 + sync_descbuffer_for_device(ring, meta->dmaaddr,
1037 + ring->rx_buffersize);
1038 + *slot = next_slot(ring, *slot);
1039 +@@ -1512,8 +1538,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
1040 + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1041 + if (unlikely(err)) {
1042 + b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
1043 +- sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1044 +- goto drop;
1045 ++ goto drop_recycle_buffer;
1046 + }
1047 +
1048 + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1049 +@@ -1523,6 +1548,11 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
1050 + b43_rx(ring->dev, skb, rxhdr);
1051 + drop:
1052 + return;
1053 ++
1054 ++drop_recycle_buffer:
1055 ++ /* Poison and recycle the RX buffer. */
1056 ++ b43_poison_rx_buffer(ring, skb);
1057 ++ sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1058 + }
1059 +
1060 + void b43_dma_rx(struct b43_dmaring *ring)
1061 +diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
1062 +index ed93ac4..f6a9388 100644
1063 +--- a/drivers/net/wireless/rndis_wlan.c
1064 ++++ b/drivers/net/wireless/rndis_wlan.c
1065 +@@ -2550,6 +2550,11 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
1066 + mutex_init(&priv->command_lock);
1067 + spin_lock_init(&priv->stats_lock);
1068 +
1069 ++ /* because rndis_command() sleeps we need to use workqueue */
1070 ++ priv->workqueue = create_singlethread_workqueue("rndis_wlan");
1071 ++ INIT_WORK(&priv->work, rndis_wext_worker);
1072 ++ INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats);
1073 ++
1074 + /* try bind rndis_host */
1075 + retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS);
1076 + if (retval < 0)
1077 +@@ -2594,16 +2599,17 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
1078 + disassociate(usbdev, 1);
1079 + netif_carrier_off(usbdev->net);
1080 +
1081 +- /* because rndis_command() sleeps we need to use workqueue */
1082 +- priv->workqueue = create_singlethread_workqueue("rndis_wlan");
1083 +- INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats);
1084 + queue_delayed_work(priv->workqueue, &priv->stats_work,
1085 + round_jiffies_relative(STATS_UPDATE_JIFFIES));
1086 +- INIT_WORK(&priv->work, rndis_wext_worker);
1087 +
1088 + return 0;
1089 +
1090 + fail:
1091 ++ cancel_delayed_work_sync(&priv->stats_work);
1092 ++ cancel_work_sync(&priv->work);
1093 ++ flush_workqueue(priv->workqueue);
1094 ++ destroy_workqueue(priv->workqueue);
1095 ++
1096 + kfree(priv);
1097 + return retval;
1098 + }
1099 +diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
1100 +index 26c536b..8a01120 100644
1101 +--- a/drivers/pci/dmar.c
1102 ++++ b/drivers/pci/dmar.c
1103 +@@ -170,12 +170,21 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
1104 + struct dmar_drhd_unit *dmaru;
1105 + int ret = 0;
1106 +
1107 ++ drhd = (struct acpi_dmar_hardware_unit *)header;
1108 ++ if (!drhd->address) {
1109 ++ /* Promote an attitude of violence to a BIOS engineer today */
1110 ++ WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
1111 ++ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1112 ++ dmi_get_system_info(DMI_BIOS_VENDOR),
1113 ++ dmi_get_system_info(DMI_BIOS_VERSION),
1114 ++ dmi_get_system_info(DMI_PRODUCT_VERSION));
1115 ++ return -ENODEV;
1116 ++ }
1117 + dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
1118 + if (!dmaru)
1119 + return -ENOMEM;
1120 +
1121 + dmaru->hdr = header;
1122 +- drhd = (struct acpi_dmar_hardware_unit *)header;
1123 + dmaru->reg_base_addr = drhd->address;
1124 + dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
1125 +
1126 +diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
1127 +index f3f6865..7e4f9e6 100644
1128 +--- a/drivers/pci/intel-iommu.c
1129 ++++ b/drivers/pci/intel-iommu.c
1130 +@@ -447,11 +447,17 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
1131 + if (drhd->ignored)
1132 + continue;
1133 +
1134 +- for (i = 0; i < drhd->devices_cnt; i++)
1135 ++ for (i = 0; i < drhd->devices_cnt; i++) {
1136 + if (drhd->devices[i] &&
1137 + drhd->devices[i]->bus->number == bus &&
1138 + drhd->devices[i]->devfn == devfn)
1139 + return drhd->iommu;
1140 ++ if (drhd->devices[i] &&
1141 ++ drhd->devices[i]->subordinate &&
1142 ++ drhd->devices[i]->subordinate->number <= bus &&
1143 ++ drhd->devices[i]->subordinate->subordinate >= bus)
1144 ++ return drhd->iommu;
1145 ++ }
1146 +
1147 + if (drhd->include_all)
1148 + return drhd->iommu;
1149 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1150 +index 92b9efe..c65c2f4 100644
1151 +--- a/drivers/pci/quirks.c
1152 ++++ b/drivers/pci/quirks.c
1153 +@@ -1960,6 +1960,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_di
1154 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
1155 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
1156 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
1157 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
1158 +
1159 + /* Disable MSI on chipsets that are known to not support it */
1160 + static void __devinit quirk_disable_msi(struct pci_dev *dev)
1161 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
1162 +index d243320..99d32f7 100644
1163 +--- a/drivers/platform/x86/thinkpad_acpi.c
1164 ++++ b/drivers/platform/x86/thinkpad_acpi.c
1165 +@@ -306,11 +306,17 @@ static u32 dbg_level;
1166 +
1167 + static struct workqueue_struct *tpacpi_wq;
1168 +
1169 ++enum led_status_t {
1170 ++ TPACPI_LED_OFF = 0,
1171 ++ TPACPI_LED_ON,
1172 ++ TPACPI_LED_BLINK,
1173 ++};
1174 ++
1175 + /* Special LED class that can defer work */
1176 + struct tpacpi_led_classdev {
1177 + struct led_classdev led_classdev;
1178 + struct work_struct work;
1179 +- enum led_brightness new_brightness;
1180 ++ enum led_status_t new_state;
1181 + unsigned int led;
1182 + };
1183 +
1184 +@@ -4057,7 +4063,7 @@ static void light_set_status_worker(struct work_struct *work)
1185 + container_of(work, struct tpacpi_led_classdev, work);
1186 +
1187 + if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
1188 +- light_set_status((data->new_brightness != LED_OFF));
1189 ++ light_set_status((data->new_state != TPACPI_LED_OFF));
1190 + }
1191 +
1192 + static void light_sysfs_set(struct led_classdev *led_cdev,
1193 +@@ -4067,7 +4073,8 @@ static void light_sysfs_set(struct led_classdev *led_cdev,
1194 + container_of(led_cdev,
1195 + struct tpacpi_led_classdev,
1196 + led_classdev);
1197 +- data->new_brightness = brightness;
1198 ++ data->new_state = (brightness != LED_OFF) ?
1199 ++ TPACPI_LED_ON : TPACPI_LED_OFF;
1200 + queue_work(tpacpi_wq, &data->work);
1201 + }
1202 +
1203 +@@ -4574,12 +4581,6 @@ enum { /* For TPACPI_LED_OLD */
1204 + TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
1205 + };
1206 +
1207 +-enum led_status_t {
1208 +- TPACPI_LED_OFF = 0,
1209 +- TPACPI_LED_ON,
1210 +- TPACPI_LED_BLINK,
1211 +-};
1212 +-
1213 + static enum led_access_mode led_supported;
1214 +
1215 + TPACPI_HANDLE(led, ec, "SLED", /* 570 */
1216 +@@ -4673,23 +4674,13 @@ static int led_set_status(const unsigned int led,
1217 + return rc;
1218 + }
1219 +
1220 +-static void led_sysfs_set_status(unsigned int led,
1221 +- enum led_brightness brightness)
1222 +-{
1223 +- led_set_status(led,
1224 +- (brightness == LED_OFF) ?
1225 +- TPACPI_LED_OFF :
1226 +- (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
1227 +- TPACPI_LED_BLINK : TPACPI_LED_ON);
1228 +-}
1229 +-
1230 + static void led_set_status_worker(struct work_struct *work)
1231 + {
1232 + struct tpacpi_led_classdev *data =
1233 + container_of(work, struct tpacpi_led_classdev, work);
1234 +
1235 + if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
1236 +- led_sysfs_set_status(data->led, data->new_brightness);
1237 ++ led_set_status(data->led, data->new_state);
1238 + }
1239 +
1240 + static void led_sysfs_set(struct led_classdev *led_cdev,
1241 +@@ -4698,7 +4689,13 @@ static void led_sysfs_set(struct led_classdev *led_cdev,
1242 + struct tpacpi_led_classdev *data = container_of(led_cdev,
1243 + struct tpacpi_led_classdev, led_classdev);
1244 +
1245 +- data->new_brightness = brightness;
1246 ++ if (brightness == LED_OFF)
1247 ++ data->new_state = TPACPI_LED_OFF;
1248 ++ else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK)
1249 ++ data->new_state = TPACPI_LED_ON;
1250 ++ else
1251 ++ data->new_state = TPACPI_LED_BLINK;
1252 ++
1253 + queue_work(tpacpi_wq, &data->work);
1254 + }
1255 +
1256 +@@ -4716,7 +4713,7 @@ static int led_sysfs_blink_set(struct led_classdev *led_cdev,
1257 + } else if ((*delay_on != 500) || (*delay_off != 500))
1258 + return -EINVAL;
1259 +
1260 +- data->new_brightness = TPACPI_LED_BLINK;
1261 ++ data->new_state = TPACPI_LED_BLINK;
1262 + queue_work(tpacpi_wq, &data->work);
1263 +
1264 + return 0;
1265 +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1266 +index cfcfd5a..4b36d88 100644
1267 +--- a/drivers/usb/serial/usb-serial.c
1268 ++++ b/drivers/usb/serial/usb-serial.c
1269 +@@ -136,22 +136,10 @@ static void destroy_serial(struct kref *kref)
1270 +
1271 + dbg("%s - %s", __func__, serial->type->description);
1272 +
1273 +- serial->type->shutdown(serial);
1274 +-
1275 + /* return the minor range that this device had */
1276 + if (serial->minor != SERIAL_TTY_NO_MINOR)
1277 + return_serial(serial);
1278 +
1279 +- for (i = 0; i < serial->num_ports; ++i)
1280 +- serial->port[i]->port.count = 0;
1281 +-
1282 +- /* the ports are cleaned up and released in port_release() */
1283 +- for (i = 0; i < serial->num_ports; ++i)
1284 +- if (serial->port[i]->dev.parent != NULL) {
1285 +- device_unregister(&serial->port[i]->dev);
1286 +- serial->port[i] = NULL;
1287 +- }
1288 +-
1289 + /* If this is a "fake" port, we have to clean it up here, as it will
1290 + * not get cleaned up in port_release() as it was never registered with
1291 + * the driver core */
1292 +@@ -186,7 +174,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
1293 + struct usb_serial *serial;
1294 + struct usb_serial_port *port;
1295 + unsigned int portNumber;
1296 +- int retval;
1297 ++ int retval = 0;
1298 +
1299 + dbg("%s", __func__);
1300 +
1301 +@@ -197,16 +185,24 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
1302 + return -ENODEV;
1303 + }
1304 +
1305 ++ mutex_lock(&serial->disc_mutex);
1306 + portNumber = tty->index - serial->minor;
1307 + port = serial->port[portNumber];
1308 +- if (!port) {
1309 ++ if (!port || serial->disconnected)
1310 + retval = -ENODEV;
1311 +- goto bailout_kref_put;
1312 +- }
1313 ++ else
1314 ++ get_device(&port->dev);
1315 ++ /*
1316 ++ * Note: Our locking order requirement does not allow port->mutex
1317 ++ * to be acquired while serial->disc_mutex is held.
1318 ++ */
1319 ++ mutex_unlock(&serial->disc_mutex);
1320 ++ if (retval)
1321 ++ goto bailout_serial_put;
1322 +
1323 + if (mutex_lock_interruptible(&port->mutex)) {
1324 + retval = -ERESTARTSYS;
1325 +- goto bailout_kref_put;
1326 ++ goto bailout_port_put;
1327 + }
1328 +
1329 + ++port->port.count;
1330 +@@ -226,14 +222,20 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
1331 + goto bailout_mutex_unlock;
1332 + }
1333 +
1334 +- retval = usb_autopm_get_interface(serial->interface);
1335 ++ mutex_lock(&serial->disc_mutex);
1336 ++ if (serial->disconnected)
1337 ++ retval = -ENODEV;
1338 ++ else
1339 ++ retval = usb_autopm_get_interface(serial->interface);
1340 + if (retval)
1341 + goto bailout_module_put;
1342 ++
1343 + /* only call the device specific open if this
1344 + * is the first time the port is opened */
1345 + retval = serial->type->open(tty, port, filp);
1346 + if (retval)
1347 + goto bailout_interface_put;
1348 ++ mutex_unlock(&serial->disc_mutex);
1349 + }
1350 +
1351 + mutex_unlock(&port->mutex);
1352 +@@ -242,13 +244,16 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
1353 + bailout_interface_put:
1354 + usb_autopm_put_interface(serial->interface);
1355 + bailout_module_put:
1356 ++ mutex_unlock(&serial->disc_mutex);
1357 + module_put(serial->type->driver.owner);
1358 + bailout_mutex_unlock:
1359 + port->port.count = 0;
1360 + tty->driver_data = NULL;
1361 + tty_port_tty_set(&port->port, NULL);
1362 + mutex_unlock(&port->mutex);
1363 +-bailout_kref_put:
1364 ++bailout_port_put:
1365 ++ put_device(&port->dev);
1366 ++bailout_serial_put:
1367 + usb_serial_put(serial);
1368 + return retval;
1369 + }
1370 +@@ -256,6 +261,9 @@ bailout_kref_put:
1371 + static void serial_close(struct tty_struct *tty, struct file *filp)
1372 + {
1373 + struct usb_serial_port *port = tty->driver_data;
1374 ++ struct usb_serial *serial;
1375 ++ struct module *owner;
1376 ++ int count;
1377 +
1378 + if (!port)
1379 + return;
1380 +@@ -263,6 +271,8 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
1381 + dbg("%s - port %d", __func__, port->number);
1382 +
1383 + mutex_lock(&port->mutex);
1384 ++ serial = port->serial;
1385 ++ owner = serial->type->driver.owner;
1386 +
1387 + if (port->port.count == 0) {
1388 + mutex_unlock(&port->mutex);
1389 +@@ -275,7 +285,7 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
1390 + * this before we drop the port count. The call is protected
1391 + * by the port mutex
1392 + */
1393 +- port->serial->type->close(tty, port, filp);
1394 ++ serial->type->close(tty, port, filp);
1395 +
1396 + if (port->port.count == (port->console ? 2 : 1)) {
1397 + struct tty_struct *tty = tty_port_tty_get(&port->port);
1398 +@@ -289,17 +299,23 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
1399 + }
1400 + }
1401 +
1402 +- if (port->port.count == 1) {
1403 +- mutex_lock(&port->serial->disc_mutex);
1404 +- if (!port->serial->disconnected)
1405 +- usb_autopm_put_interface(port->serial->interface);
1406 +- mutex_unlock(&port->serial->disc_mutex);
1407 +- module_put(port->serial->type->driver.owner);
1408 +- }
1409 + --port->port.count;
1410 +-
1411 ++ count = port->port.count;
1412 + mutex_unlock(&port->mutex);
1413 +- usb_serial_put(port->serial);
1414 ++ put_device(&port->dev);
1415 ++
1416 ++ /* Mustn't dereference port any more */
1417 ++ if (count == 0) {
1418 ++ mutex_lock(&serial->disc_mutex);
1419 ++ if (!serial->disconnected)
1420 ++ usb_autopm_put_interface(serial->interface);
1421 ++ mutex_unlock(&serial->disc_mutex);
1422 ++ }
1423 ++ usb_serial_put(serial);
1424 ++
1425 ++ /* Mustn't dereference serial any more */
1426 ++ if (count == 0)
1427 ++ module_put(owner);
1428 + }
1429 +
1430 + static int serial_write(struct tty_struct *tty, const unsigned char *buf,
1431 +@@ -548,7 +564,13 @@ static void kill_traffic(struct usb_serial_port *port)
1432 +
1433 + static void port_free(struct usb_serial_port *port)
1434 + {
1435 ++ /*
1436 ++ * Stop all the traffic before cancelling the work, so that
1437 ++ * nobody will restart it by calling usb_serial_port_softint.
1438 ++ */
1439 + kill_traffic(port);
1440 ++ cancel_work_sync(&port->work);
1441 ++
1442 + usb_free_urb(port->read_urb);
1443 + usb_free_urb(port->write_urb);
1444 + usb_free_urb(port->interrupt_in_urb);
1445 +@@ -557,7 +579,6 @@ static void port_free(struct usb_serial_port *port)
1446 + kfree(port->bulk_out_buffer);
1447 + kfree(port->interrupt_in_buffer);
1448 + kfree(port->interrupt_out_buffer);
1449 +- flush_scheduled_work(); /* port->work */
1450 + kfree(port);
1451 + }
1452 +
1453 +@@ -1042,6 +1063,12 @@ void usb_serial_disconnect(struct usb_interface *interface)
1454 + usb_set_intfdata(interface, NULL);
1455 + /* must set a flag, to signal subdrivers */
1456 + serial->disconnected = 1;
1457 ++ mutex_unlock(&serial->disc_mutex);
1458 ++
1459 ++ /* Unfortunately, many of the sub-drivers expect the port structures
1460 ++ * to exist when their shutdown method is called, so we have to go
1461 ++ * through this awkward two-step unregistration procedure.
1462 ++ */
1463 + for (i = 0; i < serial->num_ports; ++i) {
1464 + port = serial->port[i];
1465 + if (port) {
1466 +@@ -1051,11 +1078,21 @@ void usb_serial_disconnect(struct usb_interface *interface)
1467 + tty_kref_put(tty);
1468 + }
1469 + kill_traffic(port);
1470 ++ cancel_work_sync(&port->work);
1471 ++ device_del(&port->dev);
1472 ++ }
1473 ++ }
1474 ++ serial->type->shutdown(serial);
1475 ++ for (i = 0; i < serial->num_ports; ++i) {
1476 ++ port = serial->port[i];
1477 ++ if (port) {
1478 ++ put_device(&port->dev);
1479 ++ serial->port[i] = NULL;
1480 + }
1481 + }
1482 ++
1483 + /* let the last holder of this object
1484 + * cause it to be cleaned up */
1485 +- mutex_unlock(&serial->disc_mutex);
1486 + usb_serial_put(serial);
1487 + dev_info(dev, "device disconnected\n");
1488 + }
1489 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1490 +index 0f54399..af39dec 100644
1491 +--- a/drivers/usb/storage/unusual_devs.h
1492 ++++ b/drivers/usb/storage/unusual_devs.h
1493 +@@ -2134,6 +2134,12 @@ UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
1494 + US_SC_DEVICE, US_PR_DEVICE, NULL,
1495 + US_FL_CAPACITY_HEURISTICS),
1496 +
1497 ++/* Reported by Alessio Treglia <quadrispro@××××××.com> */
1498 ++UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001,
1499 ++ "TGE",
1500 ++ "Digital MP3 Audio Player",
1501 ++ US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
1502 ++
1503 + /* Control/Bulk transport for all SubClass values */
1504 + USUAL_DEV(US_SC_RBC, US_PR_CB, USB_US_TYPE_STOR),
1505 + USUAL_DEV(US_SC_8020, US_PR_CB, USB_US_TYPE_STOR),
1506 +diff --git a/fs/Makefile b/fs/Makefile
1507 +index dc20db3..0cd7097 100644
1508 +--- a/fs/Makefile
1509 ++++ b/fs/Makefile
1510 +@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \
1511 + attr.o bad_inode.o file.o filesystems.o namespace.o \
1512 + seq_file.o xattr.o libfs.o fs-writeback.o \
1513 + pnode.o drop_caches.o splice.o sync.o utimes.o \
1514 +- stack.o
1515 ++ stack.o fs_struct.o
1516 +
1517 + ifeq ($(CONFIG_BLOCK),y)
1518 + obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
1519 +diff --git a/fs/bio.c b/fs/bio.c
1520 +index d4f0632..bfdfe57 100644
1521 +--- a/fs/bio.c
1522 ++++ b/fs/bio.c
1523 +@@ -806,6 +806,9 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1524 + len += iov[i].iov_len;
1525 + }
1526 +
1527 ++ if (offset)
1528 ++ nr_pages++;
1529 ++
1530 + bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
1531 + if (!bmd)
1532 + return ERR_PTR(-ENOMEM);
1533 +diff --git a/fs/compat.c b/fs/compat.c
1534 +index d0145ca..1df8926 100644
1535 +--- a/fs/compat.c
1536 ++++ b/fs/compat.c
1537 +@@ -51,6 +51,7 @@
1538 + #include <linux/poll.h>
1539 + #include <linux/mm.h>
1540 + #include <linux/eventpoll.h>
1541 ++#include <linux/fs_struct.h>
1542 +
1543 + #include <asm/uaccess.h>
1544 + #include <asm/mmu_context.h>
1545 +@@ -1392,12 +1393,18 @@ int compat_do_execve(char * filename,
1546 + {
1547 + struct linux_binprm *bprm;
1548 + struct file *file;
1549 ++ struct files_struct *displaced;
1550 ++ bool clear_in_exec;
1551 + int retval;
1552 +
1553 ++ retval = unshare_files(&displaced);
1554 ++ if (retval)
1555 ++ goto out_ret;
1556 ++
1557 + retval = -ENOMEM;
1558 + bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1559 + if (!bprm)
1560 +- goto out_ret;
1561 ++ goto out_files;
1562 +
1563 + retval = mutex_lock_interruptible(&current->cred_exec_mutex);
1564 + if (retval < 0)
1565 +@@ -1407,12 +1414,16 @@ int compat_do_execve(char * filename,
1566 + bprm->cred = prepare_exec_creds();
1567 + if (!bprm->cred)
1568 + goto out_unlock;
1569 +- check_unsafe_exec(bprm, current->files);
1570 ++
1571 ++ retval = check_unsafe_exec(bprm);
1572 ++ if (retval < 0)
1573 ++ goto out_unlock;
1574 ++ clear_in_exec = retval;
1575 +
1576 + file = open_exec(filename);
1577 + retval = PTR_ERR(file);
1578 + if (IS_ERR(file))
1579 +- goto out_unlock;
1580 ++ goto out_unmark;
1581 +
1582 + sched_exec();
1583 +
1584 +@@ -1454,9 +1465,12 @@ int compat_do_execve(char * filename,
1585 + goto out;
1586 +
1587 + /* execve succeeded */
1588 ++ current->fs->in_exec = 0;
1589 + mutex_unlock(&current->cred_exec_mutex);
1590 + acct_update_integrals(current);
1591 + free_bprm(bprm);
1592 ++ if (displaced)
1593 ++ put_files_struct(displaced);
1594 + return retval;
1595 +
1596 + out:
1597 +@@ -1469,12 +1483,19 @@ out_file:
1598 + fput(bprm->file);
1599 + }
1600 +
1601 ++out_unmark:
1602 ++ if (clear_in_exec)
1603 ++ current->fs->in_exec = 0;
1604 ++
1605 + out_unlock:
1606 + mutex_unlock(&current->cred_exec_mutex);
1607 +
1608 + out_free:
1609 + free_bprm(bprm);
1610 +
1611 ++out_files:
1612 ++ if (displaced)
1613 ++ reset_files_struct(displaced);
1614 + out_ret:
1615 + return retval;
1616 + }
1617 +diff --git a/fs/exec.c b/fs/exec.c
1618 +index 929b580..3b36c69 100644
1619 +--- a/fs/exec.c
1620 ++++ b/fs/exec.c
1621 +@@ -1049,32 +1049,35 @@ EXPORT_SYMBOL(install_exec_creds);
1622 + * - the caller must hold current->cred_exec_mutex to protect against
1623 + * PTRACE_ATTACH
1624 + */
1625 +-void check_unsafe_exec(struct linux_binprm *bprm, struct files_struct *files)
1626 ++int check_unsafe_exec(struct linux_binprm *bprm)
1627 + {
1628 + struct task_struct *p = current, *t;
1629 +- unsigned long flags;
1630 +- unsigned n_fs, n_files, n_sighand;
1631 ++ unsigned n_fs;
1632 ++ int res = 0;
1633 +
1634 + bprm->unsafe = tracehook_unsafe_exec(p);
1635 +
1636 + n_fs = 1;
1637 +- n_files = 1;
1638 +- n_sighand = 1;
1639 +- lock_task_sighand(p, &flags);
1640 ++ write_lock(&p->fs->lock);
1641 ++ rcu_read_lock();
1642 + for (t = next_thread(p); t != p; t = next_thread(t)) {
1643 + if (t->fs == p->fs)
1644 + n_fs++;
1645 +- if (t->files == files)
1646 +- n_files++;
1647 +- n_sighand++;
1648 + }
1649 ++ rcu_read_unlock();
1650 +
1651 +- if (atomic_read(&p->fs->count) > n_fs ||
1652 +- atomic_read(&p->files->count) > n_files ||
1653 +- atomic_read(&p->sighand->count) > n_sighand)
1654 ++ if (p->fs->users > n_fs) {
1655 + bprm->unsafe |= LSM_UNSAFE_SHARE;
1656 ++ } else {
1657 ++ res = -EAGAIN;
1658 ++ if (!p->fs->in_exec) {
1659 ++ p->fs->in_exec = 1;
1660 ++ res = 1;
1661 ++ }
1662 ++ }
1663 ++ write_unlock(&p->fs->lock);
1664 +
1665 +- unlock_task_sighand(p, &flags);
1666 ++ return res;
1667 + }
1668 +
1669 + /*
1670 +@@ -1270,6 +1273,7 @@ int do_execve(char * filename,
1671 + struct linux_binprm *bprm;
1672 + struct file *file;
1673 + struct files_struct *displaced;
1674 ++ bool clear_in_exec;
1675 + int retval;
1676 +
1677 + retval = unshare_files(&displaced);
1678 +@@ -1289,12 +1293,16 @@ int do_execve(char * filename,
1679 + bprm->cred = prepare_exec_creds();
1680 + if (!bprm->cred)
1681 + goto out_unlock;
1682 +- check_unsafe_exec(bprm, displaced);
1683 ++
1684 ++ retval = check_unsafe_exec(bprm);
1685 ++ if (retval < 0)
1686 ++ goto out_unlock;
1687 ++ clear_in_exec = retval;
1688 +
1689 + file = open_exec(filename);
1690 + retval = PTR_ERR(file);
1691 + if (IS_ERR(file))
1692 +- goto out_unlock;
1693 ++ goto out_unmark;
1694 +
1695 + sched_exec();
1696 +
1697 +@@ -1337,6 +1345,7 @@ int do_execve(char * filename,
1698 + goto out;
1699 +
1700 + /* execve succeeded */
1701 ++ current->fs->in_exec = 0;
1702 + mutex_unlock(&current->cred_exec_mutex);
1703 + acct_update_integrals(current);
1704 + free_bprm(bprm);
1705 +@@ -1354,6 +1363,10 @@ out_file:
1706 + fput(bprm->file);
1707 + }
1708 +
1709 ++out_unmark:
1710 ++ if (clear_in_exec)
1711 ++ current->fs->in_exec = 0;
1712 ++
1713 + out_unlock:
1714 + mutex_unlock(&current->cred_exec_mutex);
1715 +
1716 +diff --git a/fs/fs_struct.c b/fs/fs_struct.c
1717 +new file mode 100644
1718 +index 0000000..41cff72
1719 +--- /dev/null
1720 ++++ b/fs/fs_struct.c
1721 +@@ -0,0 +1,170 @@
1722 ++#include <linux/module.h>
1723 ++#include <linux/sched.h>
1724 ++#include <linux/fs.h>
1725 ++#include <linux/path.h>
1726 ++#include <linux/slab.h>
1727 ++
1728 ++/*
1729 ++ * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1730 ++ * It can block.
1731 ++ */
1732 ++void set_fs_root(struct fs_struct *fs, struct path *path)
1733 ++{
1734 ++ struct path old_root;
1735 ++
1736 ++ write_lock(&fs->lock);
1737 ++ old_root = fs->root;
1738 ++ fs->root = *path;
1739 ++ path_get(path);
1740 ++ write_unlock(&fs->lock);
1741 ++ if (old_root.dentry)
1742 ++ path_put(&old_root);
1743 ++}
1744 ++
1745 ++/*
1746 ++ * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
1747 ++ * It can block.
1748 ++ */
1749 ++void set_fs_pwd(struct fs_struct *fs, struct path *path)
1750 ++{
1751 ++ struct path old_pwd;
1752 ++
1753 ++ write_lock(&fs->lock);
1754 ++ old_pwd = fs->pwd;
1755 ++ fs->pwd = *path;
1756 ++ path_get(path);
1757 ++ write_unlock(&fs->lock);
1758 ++
1759 ++ if (old_pwd.dentry)
1760 ++ path_put(&old_pwd);
1761 ++}
1762 ++
1763 ++void chroot_fs_refs(struct path *old_root, struct path *new_root)
1764 ++{
1765 ++ struct task_struct *g, *p;
1766 ++ struct fs_struct *fs;
1767 ++ int count = 0;
1768 ++
1769 ++ read_lock(&tasklist_lock);
1770 ++ do_each_thread(g, p) {
1771 ++ task_lock(p);
1772 ++ fs = p->fs;
1773 ++ if (fs) {
1774 ++ write_lock(&fs->lock);
1775 ++ if (fs->root.dentry == old_root->dentry
1776 ++ && fs->root.mnt == old_root->mnt) {
1777 ++ path_get(new_root);
1778 ++ fs->root = *new_root;
1779 ++ count++;
1780 ++ }
1781 ++ if (fs->pwd.dentry == old_root->dentry
1782 ++ && fs->pwd.mnt == old_root->mnt) {
1783 ++ path_get(new_root);
1784 ++ fs->pwd = *new_root;
1785 ++ count++;
1786 ++ }
1787 ++ write_unlock(&fs->lock);
1788 ++ }
1789 ++ task_unlock(p);
1790 ++ } while_each_thread(g, p);
1791 ++ read_unlock(&tasklist_lock);
1792 ++ while (count--)
1793 ++ path_put(old_root);
1794 ++}
1795 ++
1796 ++void free_fs_struct(struct fs_struct *fs)
1797 ++{
1798 ++ path_put(&fs->root);
1799 ++ path_put(&fs->pwd);
1800 ++ kmem_cache_free(fs_cachep, fs);
1801 ++}
1802 ++
1803 ++void exit_fs(struct task_struct *tsk)
1804 ++{
1805 ++ struct fs_struct *fs = tsk->fs;
1806 ++
1807 ++ if (fs) {
1808 ++ int kill;
1809 ++ task_lock(tsk);
1810 ++ write_lock(&fs->lock);
1811 ++ tsk->fs = NULL;
1812 ++ kill = !--fs->users;
1813 ++ write_unlock(&fs->lock);
1814 ++ task_unlock(tsk);
1815 ++ if (kill)
1816 ++ free_fs_struct(fs);
1817 ++ }
1818 ++}
1819 ++
1820 ++struct fs_struct *copy_fs_struct(struct fs_struct *old)
1821 ++{
1822 ++ struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
1823 ++ /* We don't need to lock fs - think why ;-) */
1824 ++ if (fs) {
1825 ++ fs->users = 1;
1826 ++ fs->in_exec = 0;
1827 ++ rwlock_init(&fs->lock);
1828 ++ fs->umask = old->umask;
1829 ++ read_lock(&old->lock);
1830 ++ fs->root = old->root;
1831 ++ path_get(&old->root);
1832 ++ fs->pwd = old->pwd;
1833 ++ path_get(&old->pwd);
1834 ++ read_unlock(&old->lock);
1835 ++ }
1836 ++ return fs;
1837 ++}
1838 ++
1839 ++int unshare_fs_struct(void)
1840 ++{
1841 ++ struct fs_struct *fs = current->fs;
1842 ++ struct fs_struct *new_fs = copy_fs_struct(fs);
1843 ++ int kill;
1844 ++
1845 ++ if (!new_fs)
1846 ++ return -ENOMEM;
1847 ++
1848 ++ task_lock(current);
1849 ++ write_lock(&fs->lock);
1850 ++ kill = !--fs->users;
1851 ++ current->fs = new_fs;
1852 ++ write_unlock(&fs->lock);
1853 ++ task_unlock(current);
1854 ++
1855 ++ if (kill)
1856 ++ free_fs_struct(fs);
1857 ++
1858 ++ return 0;
1859 ++}
1860 ++EXPORT_SYMBOL_GPL(unshare_fs_struct);
1861 ++
1862 ++/* to be mentioned only in INIT_TASK */
1863 ++struct fs_struct init_fs = {
1864 ++ .users = 1,
1865 ++ .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
1866 ++ .umask = 0022,
1867 ++};
1868 ++
1869 ++void daemonize_fs_struct(void)
1870 ++{
1871 ++ struct fs_struct *fs = current->fs;
1872 ++
1873 ++ if (fs) {
1874 ++ int kill;
1875 ++
1876 ++ task_lock(current);
1877 ++
1878 ++ write_lock(&init_fs.lock);
1879 ++ init_fs.users++;
1880 ++ write_unlock(&init_fs.lock);
1881 ++
1882 ++ write_lock(&fs->lock);
1883 ++ current->fs = &init_fs;
1884 ++ kill = !--fs->users;
1885 ++ write_unlock(&fs->lock);
1886 ++
1887 ++ task_unlock(current);
1888 ++ if (kill)
1889 ++ free_fs_struct(fs);
1890 ++ }
1891 ++}
1892 +diff --git a/fs/internal.h b/fs/internal.h
1893 +index 0d8ac49..b4dac4f 100644
1894 +--- a/fs/internal.h
1895 ++++ b/fs/internal.h
1896 +@@ -11,6 +11,7 @@
1897 +
1898 + struct super_block;
1899 + struct linux_binprm;
1900 ++struct path;
1901 +
1902 + /*
1903 + * block_dev.c
1904 +@@ -43,7 +44,7 @@ extern void __init chrdev_init(void);
1905 + /*
1906 + * exec.c
1907 + */
1908 +-extern void check_unsafe_exec(struct linux_binprm *, struct files_struct *);
1909 ++extern int check_unsafe_exec(struct linux_binprm *);
1910 +
1911 + /*
1912 + * namespace.c
1913 +@@ -60,3 +61,8 @@ extern void umount_tree(struct vfsmount *, int, struct list_head *);
1914 + extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
1915 +
1916 + extern void __init mnt_init(void);
1917 ++
1918 ++/*
1919 ++ * fs_struct.c
1920 ++ */
1921 ++extern void chroot_fs_refs(struct path *, struct path *);
1922 +diff --git a/fs/namei.c b/fs/namei.c
1923 +index bbc15c2..2389dda 100644
1924 +--- a/fs/namei.c
1925 ++++ b/fs/namei.c
1926 +@@ -2891,10 +2891,3 @@ EXPORT_SYMBOL(vfs_symlink);
1927 + EXPORT_SYMBOL(vfs_unlink);
1928 + EXPORT_SYMBOL(dentry_unhash);
1929 + EXPORT_SYMBOL(generic_readlink);
1930 +-
1931 +-/* to be mentioned only in INIT_TASK */
1932 +-struct fs_struct init_fs = {
1933 +- .count = ATOMIC_INIT(1),
1934 +- .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
1935 +- .umask = 0022,
1936 +-};
1937 +diff --git a/fs/namespace.c b/fs/namespace.c
1938 +index 06f8e63..685e354 100644
1939 +--- a/fs/namespace.c
1940 ++++ b/fs/namespace.c
1941 +@@ -2089,66 +2089,6 @@ out1:
1942 + }
1943 +
1944 + /*
1945 +- * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1946 +- * It can block. Requires the big lock held.
1947 +- */
1948 +-void set_fs_root(struct fs_struct *fs, struct path *path)
1949 +-{
1950 +- struct path old_root;
1951 +-
1952 +- write_lock(&fs->lock);
1953 +- old_root = fs->root;
1954 +- fs->root = *path;
1955 +- path_get(path);
1956 +- write_unlock(&fs->lock);
1957 +- if (old_root.dentry)
1958 +- path_put(&old_root);
1959 +-}
1960 +-
1961 +-/*
1962 +- * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
1963 +- * It can block. Requires the big lock held.
1964 +- */
1965 +-void set_fs_pwd(struct fs_struct *fs, struct path *path)
1966 +-{
1967 +- struct path old_pwd;
1968 +-
1969 +- write_lock(&fs->lock);
1970 +- old_pwd = fs->pwd;
1971 +- fs->pwd = *path;
1972 +- path_get(path);
1973 +- write_unlock(&fs->lock);
1974 +-
1975 +- if (old_pwd.dentry)
1976 +- path_put(&old_pwd);
1977 +-}
1978 +-
1979 +-static void chroot_fs_refs(struct path *old_root, struct path *new_root)
1980 +-{
1981 +- struct task_struct *g, *p;
1982 +- struct fs_struct *fs;
1983 +-
1984 +- read_lock(&tasklist_lock);
1985 +- do_each_thread(g, p) {
1986 +- task_lock(p);
1987 +- fs = p->fs;
1988 +- if (fs) {
1989 +- atomic_inc(&fs->count);
1990 +- task_unlock(p);
1991 +- if (fs->root.dentry == old_root->dentry
1992 +- && fs->root.mnt == old_root->mnt)
1993 +- set_fs_root(fs, new_root);
1994 +- if (fs->pwd.dentry == old_root->dentry
1995 +- && fs->pwd.mnt == old_root->mnt)
1996 +- set_fs_pwd(fs, new_root);
1997 +- put_fs_struct(fs);
1998 +- } else
1999 +- task_unlock(p);
2000 +- } while_each_thread(g, p);
2001 +- read_unlock(&tasklist_lock);
2002 +-}
2003 +-
2004 +-/*
2005 + * pivot_root Semantics:
2006 + * Moves the root file system of the current process to the directory put_old,
2007 + * makes new_root as the new root file system of the current process, and sets
2008 +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
2009 +index 07e4f5d..144d699 100644
2010 +--- a/fs/nfsd/nfssvc.c
2011 ++++ b/fs/nfsd/nfssvc.c
2012 +@@ -404,7 +404,6 @@ static int
2013 + nfsd(void *vrqstp)
2014 + {
2015 + struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
2016 +- struct fs_struct *fsp;
2017 + int err, preverr = 0;
2018 +
2019 + /* Lock module and set up kernel thread */
2020 +@@ -413,13 +412,11 @@ nfsd(void *vrqstp)
2021 + /* At this point, the thread shares current->fs
2022 + * with the init process. We need to create files with a
2023 + * umask of 0 instead of init's umask. */
2024 +- fsp = copy_fs_struct(current->fs);
2025 +- if (!fsp) {
2026 ++ if (unshare_fs_struct() < 0) {
2027 + printk("Unable to start nfsd thread: out of memory\n");
2028 + goto out;
2029 + }
2030 +- exit_fs(current);
2031 +- current->fs = fsp;
2032 ++
2033 + current->fs->umask = 0;
2034 +
2035 + /*
2036 +diff --git a/fs/proc/array.c b/fs/proc/array.c
2037 +index 7e4877d..725a650 100644
2038 +--- a/fs/proc/array.c
2039 ++++ b/fs/proc/array.c
2040 +@@ -80,6 +80,7 @@
2041 + #include <linux/delayacct.h>
2042 + #include <linux/seq_file.h>
2043 + #include <linux/pid_namespace.h>
2044 ++#include <linux/ptrace.h>
2045 + #include <linux/tracehook.h>
2046 +
2047 + #include <asm/pgtable.h>
2048 +@@ -352,6 +353,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
2049 + char state;
2050 + pid_t ppid = 0, pgid = -1, sid = -1;
2051 + int num_threads = 0;
2052 ++ int permitted;
2053 + struct mm_struct *mm;
2054 + unsigned long long start_time;
2055 + unsigned long cmin_flt = 0, cmaj_flt = 0;
2056 +@@ -364,11 +366,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
2057 +
2058 + state = *get_task_state(task);
2059 + vsize = eip = esp = 0;
2060 ++ permitted = ptrace_may_access(task, PTRACE_MODE_READ);
2061 + mm = get_task_mm(task);
2062 + if (mm) {
2063 + vsize = task_vsize(mm);
2064 +- eip = KSTK_EIP(task);
2065 +- esp = KSTK_ESP(task);
2066 ++ if (permitted) {
2067 ++ eip = KSTK_EIP(task);
2068 ++ esp = KSTK_ESP(task);
2069 ++ }
2070 + }
2071 +
2072 + get_task_comm(tcomm, task);
2073 +@@ -424,7 +429,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
2074 + unlock_task_sighand(task, &flags);
2075 + }
2076 +
2077 +- if (!whole || num_threads < 2)
2078 ++ if (permitted && (!whole || num_threads < 2))
2079 + wchan = get_wchan(task);
2080 + if (!whole) {
2081 + min_flt = task->min_flt;
2082 +@@ -476,7 +481,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
2083 + rsslim,
2084 + mm ? mm->start_code : 0,
2085 + mm ? mm->end_code : 0,
2086 +- mm ? mm->start_stack : 0,
2087 ++ (permitted && mm) ? mm->start_stack : 0,
2088 + esp,
2089 + eip,
2090 + /* The signal information here is obsolete.
2091 +diff --git a/fs/proc/base.c b/fs/proc/base.c
2092 +index beaa0ce..74e83e7 100644
2093 +--- a/fs/proc/base.c
2094 ++++ b/fs/proc/base.c
2095 +@@ -146,15 +146,22 @@ static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
2096 + return count;
2097 + }
2098 +
2099 +-static struct fs_struct *get_fs_struct(struct task_struct *task)
2100 ++static int get_fs_path(struct task_struct *task, struct path *path, bool root)
2101 + {
2102 + struct fs_struct *fs;
2103 ++ int result = -ENOENT;
2104 ++
2105 + task_lock(task);
2106 + fs = task->fs;
2107 +- if(fs)
2108 +- atomic_inc(&fs->count);
2109 ++ if (fs) {
2110 ++ read_lock(&fs->lock);
2111 ++ *path = root ? fs->root : fs->pwd;
2112 ++ path_get(path);
2113 ++ read_unlock(&fs->lock);
2114 ++ result = 0;
2115 ++ }
2116 + task_unlock(task);
2117 +- return fs;
2118 ++ return result;
2119 + }
2120 +
2121 + static int get_nr_threads(struct task_struct *tsk)
2122 +@@ -172,42 +179,24 @@ static int get_nr_threads(struct task_struct *tsk)
2123 + static int proc_cwd_link(struct inode *inode, struct path *path)
2124 + {
2125 + struct task_struct *task = get_proc_task(inode);
2126 +- struct fs_struct *fs = NULL;
2127 + int result = -ENOENT;
2128 +
2129 + if (task) {
2130 +- fs = get_fs_struct(task);
2131 ++ result = get_fs_path(task, path, 0);
2132 + put_task_struct(task);
2133 + }
2134 +- if (fs) {
2135 +- read_lock(&fs->lock);
2136 +- *path = fs->pwd;
2137 +- path_get(&fs->pwd);
2138 +- read_unlock(&fs->lock);
2139 +- result = 0;
2140 +- put_fs_struct(fs);
2141 +- }
2142 + return result;
2143 + }
2144 +
2145 + static int proc_root_link(struct inode *inode, struct path *path)
2146 + {
2147 + struct task_struct *task = get_proc_task(inode);
2148 +- struct fs_struct *fs = NULL;
2149 + int result = -ENOENT;
2150 +
2151 + if (task) {
2152 +- fs = get_fs_struct(task);
2153 ++ result = get_fs_path(task, path, 1);
2154 + put_task_struct(task);
2155 + }
2156 +- if (fs) {
2157 +- read_lock(&fs->lock);
2158 +- *path = fs->root;
2159 +- path_get(&fs->root);
2160 +- read_unlock(&fs->lock);
2161 +- result = 0;
2162 +- put_fs_struct(fs);
2163 +- }
2164 + return result;
2165 + }
2166 +
2167 +@@ -332,7 +321,10 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
2168 + wchan = get_wchan(task);
2169 +
2170 + if (lookup_symbol_name(wchan, symname) < 0)
2171 +- return sprintf(buffer, "%lu", wchan);
2172 ++ if (!ptrace_may_access(task, PTRACE_MODE_READ))
2173 ++ return 0;
2174 ++ else
2175 ++ return sprintf(buffer, "%lu", wchan);
2176 + else
2177 + return sprintf(buffer, "%s", symname);
2178 + }
2179 +@@ -596,7 +588,6 @@ static int mounts_open_common(struct inode *inode, struct file *file,
2180 + struct task_struct *task = get_proc_task(inode);
2181 + struct nsproxy *nsp;
2182 + struct mnt_namespace *ns = NULL;
2183 +- struct fs_struct *fs = NULL;
2184 + struct path root;
2185 + struct proc_mounts *p;
2186 + int ret = -EINVAL;
2187 +@@ -610,22 +601,16 @@ static int mounts_open_common(struct inode *inode, struct file *file,
2188 + get_mnt_ns(ns);
2189 + }
2190 + rcu_read_unlock();
2191 +- if (ns)
2192 +- fs = get_fs_struct(task);
2193 ++ if (ns && get_fs_path(task, &root, 1) == 0)
2194 ++ ret = 0;
2195 + put_task_struct(task);
2196 + }
2197 +
2198 + if (!ns)
2199 + goto err;
2200 +- if (!fs)
2201 ++ if (ret)
2202 + goto err_put_ns;
2203 +
2204 +- read_lock(&fs->lock);
2205 +- root = fs->root;
2206 +- path_get(&root);
2207 +- read_unlock(&fs->lock);
2208 +- put_fs_struct(fs);
2209 +-
2210 + ret = -ENOMEM;
2211 + p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
2212 + if (!p)
2213 +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
2214 +index 43d2394..52981cd 100644
2215 +--- a/fs/proc/meminfo.c
2216 ++++ b/fs/proc/meminfo.c
2217 +@@ -35,7 +35,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
2218 + #define K(x) ((x) << (PAGE_SHIFT - 10))
2219 + si_meminfo(&i);
2220 + si_swapinfo(&i);
2221 +- committed = atomic_long_read(&vm_committed_space);
2222 ++ committed = percpu_counter_read_positive(&vm_committed_as);
2223 + allowed = ((totalram_pages - hugetlb_total_pages())
2224 + * sysctl_overcommit_ratio / 100) + total_swap_pages;
2225 +
2226 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2227 +index 9406384..c93ed2d 100644
2228 +--- a/fs/proc/task_mmu.c
2229 ++++ b/fs/proc/task_mmu.c
2230 +@@ -663,6 +663,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
2231 + goto out_task;
2232 +
2233 + ret = 0;
2234 ++
2235 ++ if (!count)
2236 ++ goto out_task;
2237 ++
2238 + mm = get_task_mm(task);
2239 + if (!mm)
2240 + goto out_task;
2241 +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
2242 +index 343ea12..6ca0105 100644
2243 +--- a/fs/proc/task_nommu.c
2244 ++++ b/fs/proc/task_nommu.c
2245 +@@ -49,7 +49,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
2246 + else
2247 + bytes += kobjsize(mm);
2248 +
2249 +- if (current->fs && atomic_read(&current->fs->count) > 1)
2250 ++ if (current->fs && current->fs->users > 1)
2251 + sbytes += kobjsize(current->fs);
2252 + else
2253 + bytes += kobjsize(current->fs);
2254 +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2255 +index 5165f24..671fab3 100644
2256 +--- a/include/drm/drm_pciids.h
2257 ++++ b/include/drm/drm_pciids.h
2258 +@@ -418,4 +418,5 @@
2259 + {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
2260 + {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
2261 + {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
2262 ++ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
2263 + {0, 0, 0}
2264 +diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
2265 +index a97c053..78a05bf 100644
2266 +--- a/include/linux/fs_struct.h
2267 ++++ b/include/linux/fs_struct.h
2268 +@@ -4,9 +4,10 @@
2269 + #include <linux/path.h>
2270 +
2271 + struct fs_struct {
2272 +- atomic_t count;
2273 ++ int users;
2274 + rwlock_t lock;
2275 + int umask;
2276 ++ int in_exec;
2277 + struct path root, pwd;
2278 + };
2279 +
2280 +@@ -16,6 +17,8 @@ extern void exit_fs(struct task_struct *);
2281 + extern void set_fs_root(struct fs_struct *, struct path *);
2282 + extern void set_fs_pwd(struct fs_struct *, struct path *);
2283 + extern struct fs_struct *copy_fs_struct(struct fs_struct *);
2284 +-extern void put_fs_struct(struct fs_struct *);
2285 ++extern void free_fs_struct(struct fs_struct *);
2286 ++extern void daemonize_fs_struct(void);
2287 ++extern int unshare_fs_struct(void);
2288 +
2289 + #endif /* _LINUX_FS_STRUCT_H */
2290 +diff --git a/include/linux/genhd.h b/include/linux/genhd.h
2291 +index 16948ea..102d9e9 100644
2292 +--- a/include/linux/genhd.h
2293 ++++ b/include/linux/genhd.h
2294 +@@ -214,6 +214,7 @@ static inline void disk_put_part(struct hd_struct *part)
2295 + #define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
2296 + #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
2297 + #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
2298 ++#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
2299 +
2300 + struct disk_part_iter {
2301 + struct gendisk *disk;
2302 +diff --git a/include/linux/kvm.h b/include/linux/kvm.h
2303 +index 0424326..c344599 100644
2304 +--- a/include/linux/kvm.h
2305 ++++ b/include/linux/kvm.h
2306 +@@ -396,6 +396,8 @@ struct kvm_trace_rec {
2307 + #ifdef __KVM_HAVE_USER_NMI
2308 + #define KVM_CAP_USER_NMI 22
2309 + #endif
2310 ++/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
2311 ++#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
2312 +
2313 + /*
2314 + * ioctls for VM fds
2315 +diff --git a/include/linux/mman.h b/include/linux/mman.h
2316 +index 30d1073..9872d6c 100644
2317 +--- a/include/linux/mman.h
2318 ++++ b/include/linux/mman.h
2319 +@@ -12,21 +12,18 @@
2320 +
2321 + #ifdef __KERNEL__
2322 + #include <linux/mm.h>
2323 ++#include <linux/percpu_counter.h>
2324 +
2325 + #include <asm/atomic.h>
2326 +
2327 + extern int sysctl_overcommit_memory;
2328 + extern int sysctl_overcommit_ratio;
2329 +-extern atomic_long_t vm_committed_space;
2330 ++extern struct percpu_counter vm_committed_as;
2331 +
2332 +-#ifdef CONFIG_SMP
2333 +-extern void vm_acct_memory(long pages);
2334 +-#else
2335 + static inline void vm_acct_memory(long pages)
2336 + {
2337 +- atomic_long_add(pages, &vm_committed_space);
2338 ++ percpu_counter_add(&vm_committed_as, pages);
2339 + }
2340 +-#endif
2341 +
2342 + static inline void vm_unacct_memory(long pages)
2343 + {
2344 +diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
2345 +index 027815b..b647a4d 100644
2346 +--- a/include/linux/pci_regs.h
2347 ++++ b/include/linux/pci_regs.h
2348 +@@ -235,7 +235,7 @@
2349 + #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
2350 + #define PCI_PM_CTRL 4 /* PM control and status register */
2351 + #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
2352 +-#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
2353 ++#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
2354 + #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
2355 + #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
2356 + #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
2357 +diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
2358 +index 667c841..cb8e962 100644
2359 +--- a/kernel/exec_domain.c
2360 ++++ b/kernel/exec_domain.c
2361 +@@ -145,28 +145,6 @@ __set_personality(u_long personality)
2362 + return 0;
2363 + }
2364 +
2365 +- if (atomic_read(&current->fs->count) != 1) {
2366 +- struct fs_struct *fsp, *ofsp;
2367 +-
2368 +- fsp = copy_fs_struct(current->fs);
2369 +- if (fsp == NULL) {
2370 +- module_put(ep->module);
2371 +- return -ENOMEM;
2372 +- }
2373 +-
2374 +- task_lock(current);
2375 +- ofsp = current->fs;
2376 +- current->fs = fsp;
2377 +- task_unlock(current);
2378 +-
2379 +- put_fs_struct(ofsp);
2380 +- }
2381 +-
2382 +- /*
2383 +- * At that point we are guaranteed to be the sole owner of
2384 +- * current->fs.
2385 +- */
2386 +-
2387 + current->personality = personality;
2388 + oep = current_thread_info()->exec_domain;
2389 + current_thread_info()->exec_domain = ep;
2390 +diff --git a/kernel/exit.c b/kernel/exit.c
2391 +index efd30cc..467ffcd 100644
2392 +--- a/kernel/exit.c
2393 ++++ b/kernel/exit.c
2394 +@@ -429,7 +429,6 @@ EXPORT_SYMBOL(disallow_signal);
2395 + void daemonize(const char *name, ...)
2396 + {
2397 + va_list args;
2398 +- struct fs_struct *fs;
2399 + sigset_t blocked;
2400 +
2401 + va_start(args, name);
2402 +@@ -462,11 +461,7 @@ void daemonize(const char *name, ...)
2403 +
2404 + /* Become as one with the init task */
2405 +
2406 +- exit_fs(current); /* current->fs->count--; */
2407 +- fs = init_task.fs;
2408 +- current->fs = fs;
2409 +- atomic_inc(&fs->count);
2410 +-
2411 ++ daemonize_fs_struct();
2412 + exit_files(current);
2413 + current->files = init_task.files;
2414 + atomic_inc(&current->files->count);
2415 +@@ -565,30 +560,6 @@ void exit_files(struct task_struct *tsk)
2416 + }
2417 + }
2418 +
2419 +-void put_fs_struct(struct fs_struct *fs)
2420 +-{
2421 +- /* No need to hold fs->lock if we are killing it */
2422 +- if (atomic_dec_and_test(&fs->count)) {
2423 +- path_put(&fs->root);
2424 +- path_put(&fs->pwd);
2425 +- kmem_cache_free(fs_cachep, fs);
2426 +- }
2427 +-}
2428 +-
2429 +-void exit_fs(struct task_struct *tsk)
2430 +-{
2431 +- struct fs_struct * fs = tsk->fs;
2432 +-
2433 +- if (fs) {
2434 +- task_lock(tsk);
2435 +- tsk->fs = NULL;
2436 +- task_unlock(tsk);
2437 +- put_fs_struct(fs);
2438 +- }
2439 +-}
2440 +-
2441 +-EXPORT_SYMBOL_GPL(exit_fs);
2442 +-
2443 + #ifdef CONFIG_MM_OWNER
2444 + /*
2445 + * Task p is exiting and it owned mm, lets find a new owner for it
2446 +@@ -950,8 +921,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
2447 + */
2448 + if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
2449 + (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
2450 +- tsk->self_exec_id != tsk->parent_exec_id) &&
2451 +- !capable(CAP_KILL))
2452 ++ tsk->self_exec_id != tsk->parent_exec_id))
2453 + tsk->exit_signal = SIGCHLD;
2454 +
2455 + signal = tracehook_notify_death(tsk, &cookie, group_dead);
2456 +diff --git a/kernel/fork.c b/kernel/fork.c
2457 +index 9b51a1b..8727a5a 100644
2458 +--- a/kernel/fork.c
2459 ++++ b/kernel/fork.c
2460 +@@ -676,38 +676,21 @@ fail_nomem:
2461 + return retval;
2462 + }
2463 +
2464 +-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
2465 +-{
2466 +- struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
2467 +- /* We don't need to lock fs - think why ;-) */
2468 +- if (fs) {
2469 +- atomic_set(&fs->count, 1);
2470 +- rwlock_init(&fs->lock);
2471 +- fs->umask = old->umask;
2472 +- read_lock(&old->lock);
2473 +- fs->root = old->root;
2474 +- path_get(&old->root);
2475 +- fs->pwd = old->pwd;
2476 +- path_get(&old->pwd);
2477 +- read_unlock(&old->lock);
2478 +- }
2479 +- return fs;
2480 +-}
2481 +-
2482 +-struct fs_struct *copy_fs_struct(struct fs_struct *old)
2483 +-{
2484 +- return __copy_fs_struct(old);
2485 +-}
2486 +-
2487 +-EXPORT_SYMBOL_GPL(copy_fs_struct);
2488 +-
2489 + static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
2490 + {
2491 ++ struct fs_struct *fs = current->fs;
2492 + if (clone_flags & CLONE_FS) {
2493 +- atomic_inc(&current->fs->count);
2494 ++ /* tsk->fs is already what we want */
2495 ++ write_lock(&fs->lock);
2496 ++ if (fs->in_exec) {
2497 ++ write_unlock(&fs->lock);
2498 ++ return -EAGAIN;
2499 ++ }
2500 ++ fs->users++;
2501 ++ write_unlock(&fs->lock);
2502 + return 0;
2503 + }
2504 +- tsk->fs = __copy_fs_struct(current->fs);
2505 ++ tsk->fs = copy_fs_struct(fs);
2506 + if (!tsk->fs)
2507 + return -ENOMEM;
2508 + return 0;
2509 +@@ -1543,12 +1526,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
2510 + {
2511 + struct fs_struct *fs = current->fs;
2512 +
2513 +- if ((unshare_flags & CLONE_FS) &&
2514 +- (fs && atomic_read(&fs->count) > 1)) {
2515 +- *new_fsp = __copy_fs_struct(current->fs);
2516 +- if (!*new_fsp)
2517 +- return -ENOMEM;
2518 +- }
2519 ++ if (!(unshare_flags & CLONE_FS) || !fs)
2520 ++ return 0;
2521 ++
2522 ++ /* don't need the lock here; in the worst case we'll do a useless copy */
2523 ++ if (fs->users == 1)
2524 ++ return 0;
2525 ++
2526 ++ *new_fsp = copy_fs_struct(fs);
2527 ++ if (!*new_fsp)
2528 ++ return -ENOMEM;
2529 +
2530 + return 0;
2531 + }
2532 +@@ -1664,8 +1651,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
2533 +
2534 + if (new_fs) {
2535 + fs = current->fs;
2536 ++ write_lock(&fs->lock);
2537 + current->fs = new_fs;
2538 +- new_fs = fs;
2539 ++ if (--fs->users)
2540 ++ new_fs = NULL;
2541 ++ else
2542 ++ new_fs = fs;
2543 ++ write_unlock(&fs->lock);
2544 + }
2545 +
2546 + if (new_mm) {
2547 +@@ -1704,7 +1696,7 @@ bad_unshare_cleanup_sigh:
2548 +
2549 + bad_unshare_cleanup_fs:
2550 + if (new_fs)
2551 +- put_fs_struct(new_fs);
2552 ++ free_fs_struct(new_fs);
2553 +
2554 + bad_unshare_cleanup_thread:
2555 + bad_unshare_out:
2556 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2557 +index c9cf48b..dc3b98e 100644
2558 +--- a/kernel/ptrace.c
2559 ++++ b/kernel/ptrace.c
2560 +@@ -186,7 +186,7 @@ int ptrace_attach(struct task_struct *task)
2561 + /* Protect exec's credential calculations against our interference;
2562 + * SUID, SGID and LSM creds get determined differently under ptrace.
2563 + */
2564 +- retval = mutex_lock_interruptible(&current->cred_exec_mutex);
2565 ++ retval = mutex_lock_interruptible(&task->cred_exec_mutex);
2566 + if (retval < 0)
2567 + goto out;
2568 +
2569 +@@ -230,7 +230,7 @@ repeat:
2570 + bad:
2571 + write_unlock_irqrestore(&tasklist_lock, flags);
2572 + task_unlock(task);
2573 +- mutex_unlock(&current->cred_exec_mutex);
2574 ++ mutex_unlock(&task->cred_exec_mutex);
2575 + out:
2576 + return retval;
2577 + }
2578 +diff --git a/kernel/sched.c b/kernel/sched.c
2579 +index 5e80629..7d13deb 100644
2580 +--- a/kernel/sched.c
2581 ++++ b/kernel/sched.c
2582 +@@ -4347,7 +4347,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
2583 +
2584 + if (user_tick)
2585 + account_user_time(p, one_jiffy, one_jiffy_scaled);
2586 +- else if (p != rq->idle)
2587 ++ else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
2588 + account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
2589 + one_jiffy_scaled);
2590 + else
2591 +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
2592 +index 21a5ca8..83c4417 100644
2593 +--- a/kernel/time/tick-common.c
2594 ++++ b/kernel/time/tick-common.c
2595 +@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
2596 + for (;;) {
2597 + if (!clockevents_program_event(dev, next, ktime_get()))
2598 + return;
2599 +- tick_periodic(cpu);
2600 ++ /*
2601 ++ * Have to be careful here. If we're in oneshot mode,
2602 ++ * before we call tick_periodic() in a loop, we need
2603 ++ * to be sure we're using a real hardware clocksource.
2604 ++ * Otherwise we could get trapped in an infinite
2605 ++ * loop, as tick_periodic() increments jiffies,
2606 ++ * which then will increment time, possibly causing
2607 ++ * the loop to trigger again and again.
2608 ++ */
2609 ++ if (timekeeping_valid_for_hres())
2610 ++ tick_periodic(cpu);
2611 + next = ktime_add(next, tick_period);
2612 + }
2613 + }
2614 +diff --git a/mm/madvise.c b/mm/madvise.c
2615 +index b9ce574..36d6ea2 100644
2616 +--- a/mm/madvise.c
2617 ++++ b/mm/madvise.c
2618 +@@ -112,6 +112,14 @@ static long madvise_willneed(struct vm_area_struct * vma,
2619 + if (!file)
2620 + return -EBADF;
2621 +
2622 ++ /*
2623 ++ * Page cache readahead assumes page cache pages are order-0, which
2624 ++ * is not the case for hugetlbfs. Do not give a bad return value
2625 ++ * but ignore the advice.
2626 ++ */
2627 ++ if (vma->vm_flags & VM_HUGETLB)
2628 ++ return 0;
2629 ++
2630 + if (file->f_mapping->a_ops->get_xip_mem) {
2631 + /* no bad return value, but ignore advice */
2632 + return 0;
2633 +diff --git a/mm/mmap.c b/mm/mmap.c
2634 +index f1aa6f9..efff81b 100644
2635 +--- a/mm/mmap.c
2636 ++++ b/mm/mmap.c
2637 +@@ -84,7 +84,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
2638 + int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
2639 + int sysctl_overcommit_ratio = 50; /* default is 50% */
2640 + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
2641 +-atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
2642 ++struct percpu_counter vm_committed_as;
2643 +
2644 + /*
2645 + * Check that a process has enough memory to allocate a new virtual
2646 +@@ -178,11 +178,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
2647 + if (mm)
2648 + allowed -= mm->total_vm / 32;
2649 +
2650 +- /*
2651 +- * cast `allowed' as a signed long because vm_committed_space
2652 +- * sometimes has a negative value
2653 +- */
2654 +- if (atomic_long_read(&vm_committed_space) < (long)allowed)
2655 ++ if (percpu_counter_read_positive(&vm_committed_as) < allowed)
2656 + return 0;
2657 + error:
2658 + vm_unacct_memory(pages);
2659 +@@ -2477,6 +2473,10 @@ void mm_drop_all_locks(struct mm_struct *mm)
2660 + */
2661 + void __init mmap_init(void)
2662 + {
2663 ++ int ret;
2664 ++
2665 ++ ret = percpu_counter_init(&vm_committed_as, 0);
2666 ++ VM_BUG_ON(ret);
2667 + vm_area_cachep = kmem_cache_create("vm_area_struct",
2668 + sizeof(struct vm_area_struct), 0,
2669 + SLAB_PANIC, NULL);
2670 +diff --git a/mm/nommu.c b/mm/nommu.c
2671 +index 2fcf47d..ee955bc 100644
2672 +--- a/mm/nommu.c
2673 ++++ b/mm/nommu.c
2674 +@@ -62,7 +62,7 @@ void *high_memory;
2675 + struct page *mem_map;
2676 + unsigned long max_mapnr;
2677 + unsigned long num_physpages;
2678 +-atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
2679 ++struct percpu_counter vm_committed_as;
2680 + int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
2681 + int sysctl_overcommit_ratio = 50; /* default is 50% */
2682 + int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
2683 +@@ -463,6 +463,10 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
2684 + */
2685 + void __init mmap_init(void)
2686 + {
2687 ++ int ret;
2688 ++
2689 ++ ret = percpu_counter_init(&vm_committed_as, 0);
2690 ++ VM_BUG_ON(ret);
2691 + vm_region_jar = kmem_cache_create("vm_region_jar",
2692 + sizeof(struct vm_region), 0,
2693 + SLAB_PANIC, NULL);
2694 +@@ -1849,12 +1853,9 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
2695 + if (mm)
2696 + allowed -= mm->total_vm / 32;
2697 +
2698 +- /*
2699 +- * cast `allowed' as a signed long because vm_committed_space
2700 +- * sometimes has a negative value
2701 +- */
2702 +- if (atomic_long_read(&vm_committed_space) < (long)allowed)
2703 ++ if (percpu_counter_read_positive(&vm_committed_as) < allowed)
2704 + return 0;
2705 ++
2706 + error:
2707 + vm_unacct_memory(pages);
2708 +
2709 +diff --git a/mm/swap.c b/mm/swap.c
2710 +index 8adb9fe..2460f7d 100644
2711 +--- a/mm/swap.c
2712 ++++ b/mm/swap.c
2713 +@@ -514,49 +514,6 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
2714 +
2715 + EXPORT_SYMBOL(pagevec_lookup_tag);
2716 +
2717 +-#ifdef CONFIG_SMP
2718 +-/*
2719 +- * We tolerate a little inaccuracy to avoid ping-ponging the counter between
2720 +- * CPUs
2721 +- */
2722 +-#define ACCT_THRESHOLD max(16, NR_CPUS * 2)
2723 +-
2724 +-static DEFINE_PER_CPU(long, committed_space);
2725 +-
2726 +-void vm_acct_memory(long pages)
2727 +-{
2728 +- long *local;
2729 +-
2730 +- preempt_disable();
2731 +- local = &__get_cpu_var(committed_space);
2732 +- *local += pages;
2733 +- if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
2734 +- atomic_long_add(*local, &vm_committed_space);
2735 +- *local = 0;
2736 +- }
2737 +- preempt_enable();
2738 +-}
2739 +-
2740 +-#ifdef CONFIG_HOTPLUG_CPU
2741 +-
2742 +-/* Drop the CPU's cached committed space back into the central pool. */
2743 +-static int cpu_swap_callback(struct notifier_block *nfb,
2744 +- unsigned long action,
2745 +- void *hcpu)
2746 +-{
2747 +- long *committed;
2748 +-
2749 +- committed = &per_cpu(committed_space, (long)hcpu);
2750 +- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
2751 +- atomic_long_add(*committed, &vm_committed_space);
2752 +- *committed = 0;
2753 +- drain_cpu_pagevecs((long)hcpu);
2754 +- }
2755 +- return NOTIFY_OK;
2756 +-}
2757 +-#endif /* CONFIG_HOTPLUG_CPU */
2758 +-#endif /* CONFIG_SMP */
2759 +-
2760 + /*
2761 + * Perform any setup for the swap system
2762 + */
2763 +@@ -577,7 +534,4 @@ void __init swap_setup(void)
2764 + * Right now other parts of the system means that we
2765 + * _really_ don't want to cluster much more
2766 + */
2767 +-#ifdef CONFIG_HOTPLUG_CPU
2768 +- hotcpu_notifier(cpu_swap_callback, 0);
2769 +-#endif
2770 + }
2771 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2772 +index 2b890af..4a78c17 100644
2773 +--- a/net/mac80211/mlme.c
2774 ++++ b/net/mac80211/mlme.c
2775 +@@ -1342,7 +1342,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2776 +
2777 + for (i = 0; i < elems.ext_supp_rates_len; i++) {
2778 + int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
2779 +- bool is_basic = !!(elems.supp_rates[i] & 0x80);
2780 ++ bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
2781 +
2782 + if (rate > 110)
2783 + have_higher_than_11mbit = true;
2784 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2785 +index 7175ae8..75837ca 100644
2786 +--- a/net/mac80211/rx.c
2787 ++++ b/net/mac80211/rx.c
2788 +@@ -29,6 +29,7 @@
2789 + static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2790 + struct tid_ampdu_rx *tid_agg_rx,
2791 + struct sk_buff *skb,
2792 ++ struct ieee80211_rx_status *status,
2793 + u16 mpdu_seq_num,
2794 + int bar_req);
2795 + /*
2796 +@@ -1538,7 +1539,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2797 + /* manage reordering buffer according to requested */
2798 + /* sequence number */
2799 + rcu_read_lock();
2800 +- ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
2801 ++ ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
2802 + start_seq_num, 1);
2803 + rcu_read_unlock();
2804 + return RX_DROP_UNUSABLE;
2805 +@@ -2034,6 +2035,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
2806 + static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2807 + struct tid_ampdu_rx *tid_agg_rx,
2808 + struct sk_buff *skb,
2809 ++ struct ieee80211_rx_status *rxstatus,
2810 + u16 mpdu_seq_num,
2811 + int bar_req)
2812 + {
2813 +@@ -2115,6 +2117,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2814 +
2815 + /* put the frame in the reordering buffer */
2816 + tid_agg_rx->reorder_buf[index] = skb;
2817 ++ memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2818 ++ sizeof(*rxstatus));
2819 + tid_agg_rx->stored_mpdu_num++;
2820 + /* release the buffer until next missing frame */
2821 + index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2822 +@@ -2140,7 +2144,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2823 + }
2824 +
2825 + static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2826 +- struct sk_buff *skb)
2827 ++ struct sk_buff *skb,
2828 ++ struct ieee80211_rx_status *status)
2829 + {
2830 + struct ieee80211_hw *hw = &local->hw;
2831 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2832 +@@ -2191,7 +2196,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2833 +
2834 + /* according to mpdu sequence number deal with reordering buffer */
2835 + mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2836 +- ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2837 ++ ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
2838 + mpdu_seq_num, 0);
2839 + end_reorder:
2840 + return ret;
2841 +@@ -2255,7 +2260,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2842 + return;
2843 + }
2844 +
2845 +- if (!ieee80211_rx_reorder_ampdu(local, skb))
2846 ++ if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2847 + __ieee80211_rx_handle_packet(hw, skb, status, rate);
2848 +
2849 + rcu_read_unlock();
2850 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
2851 +index 8892161..723b647 100644
2852 +--- a/scripts/mod/modpost.c
2853 ++++ b/scripts/mod/modpost.c
2854 +@@ -2005,6 +2005,7 @@ static void read_markers(const char *fname)
2855 + if (!mod->skip)
2856 + add_marker(mod, marker, fmt);
2857 + }
2858 ++ release_file(file, size);
2859 + return;
2860 + fail:
2861 + fatal("parse error in markers list file\n");
2862 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2863 +index 0081597..e210b21 100644
2864 +--- a/security/selinux/hooks.c
2865 ++++ b/security/selinux/hooks.c
2866 +@@ -4661,6 +4661,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
2867 + if (err)
2868 + return err;
2869 + err = avc_has_perm(sk_sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
2870 ++ if (err)
2871 + return err;
2872 +
2873 + err = sel_netnode_sid(addrp, family, &node_sid);
2874 +diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
2875 +index d004e58..f3d15d5 100644
2876 +--- a/sound/soc/codecs/wm8580.c
2877 ++++ b/sound/soc/codecs/wm8580.c
2878 +@@ -533,7 +533,7 @@ static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai,
2879 + reg = wm8580_read(codec, WM8580_PLLA4 + offset);
2880 + reg &= ~0x3f;
2881 + reg |= pll_div.prescale | pll_div.postscale << 1 |
2882 +- pll_div.freqmode << 4;
2883 ++ pll_div.freqmode << 3;
2884 +
2885 + wm8580_write(codec, WM8580_PLLA4 + offset, reg);
2886 +
2887 +diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
2888 +index 73e59f4..9ce1c59 100644
2889 +--- a/sound/usb/usx2y/us122l.c
2890 ++++ b/sound/usb/usx2y/us122l.c
2891 +@@ -478,6 +478,14 @@ static bool us122l_create_card(struct snd_card *card)
2892 + return true;
2893 + }
2894 +
2895 ++static void snd_us122l_free(struct snd_card *card)
2896 ++{
2897 ++ struct us122l *us122l = US122L(card);
2898 ++ int index = us122l->chip.index;
2899 ++ if (index >= 0 && index < SNDRV_CARDS)
2900 ++ snd_us122l_card_used[index] = 0;
2901 ++}
2902 ++
2903 + static struct snd_card *usx2y_create_card(struct usb_device *device)
2904 + {
2905 + int dev;
2906 +@@ -492,7 +500,7 @@ static struct snd_card *usx2y_create_card(struct usb_device *device)
2907 + if (!card)
2908 + return NULL;
2909 + snd_us122l_card_used[US122L(card)->chip.index = dev] = 1;
2910 +-
2911 ++ card->private_free = snd_us122l_free;
2912 + US122L(card)->chip.dev = device;
2913 + US122L(card)->chip.card = card;
2914 + mutex_init(&US122L(card)->mutex);
2915 +@@ -575,7 +583,7 @@ static void snd_us122l_disconnect(struct usb_interface *intf)
2916 + }
2917 +
2918 + usb_put_intf(intf);
2919 +- usb_put_dev(US122L(card)->chip.dev);
2920 ++ usb_put_dev(us122l->chip.dev);
2921 +
2922 + while (atomic_read(&us122l->mmap_count))
2923 + msleep(500);
2924 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2925 +index 6723411..d85642e 100644
2926 +--- a/virt/kvm/kvm_main.c
2927 ++++ b/virt/kvm/kvm_main.c
2928 +@@ -964,6 +964,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
2929 + int r;
2930 + gfn_t base_gfn;
2931 + unsigned long npages;
2932 ++ int largepages;
2933 + unsigned long i;
2934 + struct kvm_memory_slot *memslot;
2935 + struct kvm_memory_slot old, new;
2936 +@@ -1004,7 +1005,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
2937 + for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2938 + struct kvm_memory_slot *s = &kvm->memslots[i];
2939 +
2940 +- if (s == memslot)
2941 ++ if (s == memslot || !s->npages)
2942 + continue;
2943 + if (!((base_gfn + npages <= s->base_gfn) ||
2944 + (base_gfn >= s->base_gfn + s->npages)))
2945 +@@ -1039,11 +1040,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
2946 + new.userspace_addr = 0;
2947 + }
2948 + if (npages && !new.lpage_info) {
2949 +- int largepages = npages / KVM_PAGES_PER_HPAGE;
2950 +- if (npages % KVM_PAGES_PER_HPAGE)
2951 +- largepages++;
2952 +- if (base_gfn % KVM_PAGES_PER_HPAGE)
2953 +- largepages++;
2954 ++ largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
2955 ++ largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
2956 +
2957 + new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
2958 +
2959 +@@ -1999,6 +1997,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
2960 + switch (arg) {
2961 + case KVM_CAP_USER_MEMORY:
2962 + case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2963 ++ case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2964 + return 1;
2965 + default:
2966 + break;