
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2290 - genpatches-2.6/trunk/3.8
Date: Sat, 23 Feb 2013 19:08:30
Message-Id: 20130223190825.8BC122171D@flycatcher.gentoo.org
Author: mpagano
Date: 2013-02-23 19:07:21 +0000 (Sat, 23 Feb 2013)
New Revision: 2290

Modified:
   genpatches-2.6/trunk/3.8/2400_kcopy-patch-for-infiniband-driver.patch
Log:
Update the kcopy patch for 3.8 kernels: switch the uid/euid checks in kcopy_validate_task() to uid_eq(), since in-kernel uids are now wrapped in kuid_t and can no longer be compared with ==, and clean up the patch's indentation. Fixes bug #458426; thanks to Yvan Royon.

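The functional change in this revision is the credential check in kcopy_validate_task(). Since the kernel's user-namespace work, uids handled inside the kernel are wrapped in the kuid_t struct type, which cannot be compared with ==, so the check must go through uid_eq() from <linux/uidgid.h>. A minimal sketch of the updated pattern, with identifiers taken from the patch below:

    /* Allow access only if the caller's effective uid matches the
     * target task's effective or real uid. kuid_t wraps the raw id,
     * so uid_eq() replaces the old == comparison.
     */
    static int kcopy_validate_task(struct task_struct *p)
    {
            return p && (uid_eq(current_euid(), task_euid(p)) ||
                         uid_eq(current_euid(), task_uid(p)));
    }
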
Modified: genpatches-2.6/trunk/3.8/2400_kcopy-patch-for-infiniband-driver.patch
===================================================================
--- genpatches-2.6/trunk/3.8/2400_kcopy-patch-for-infiniband-driver.patch 2013-02-19 21:26:22 UTC (rev 2289)
+++ genpatches-2.6/trunk/3.8/2400_kcopy-patch-for-infiniband-driver.patch 2013-02-23 19:07:21 UTC (rev 2290)
@@ -29,18 +29,18 @@
+source "drivers/char/kcopy/Kconfig"
+
config DEVKMEM
- bool "/dev/kmem virtual device support"
- default y
+ bool "/dev/kmem virtual device support"
+ default y
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 0dc5d7c..be519d6 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
-@@ -64,3 +64,5 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
+@@ -64,3 +64,5 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o

- obj-$(CONFIG_TILE_SROM) += tile-srom.o
+ obj-$(CONFIG_TILE_SROM) += tile-srom.o
+
-+obj-$(CONFIG_KCOPY) += kcopy/
++obj-$(CONFIG_KCOPY) += kcopy/
diff --git a/drivers/char/kcopy/Kconfig b/drivers/char/kcopy/Kconfig
new file mode 100644
index 0000000..453ae52
@@ -54,13 +54,13 @@
+menu "KCopy"
+
+config KCOPY
-+ tristate "Memory-to-memory copies using kernel assist"
-+ default m
-+ ---help---
-+ High-performance inter-process memory copies. Can often save a
-+ memory copy to shared memory in the application. Useful at least
-+ for MPI applications where the point-to-point nature of vmsplice
-+ and pipes can be a limiting factor in performance.
++ tristate "Memory-to-memory copies using kernel assist"
++ default m
++ ---help---
++ High-performance inter-process memory copies. Can often save a
++ memory copy to shared memory in the application. Useful at least
++ for MPI applications where the point-to-point nature of vmsplice
++ and pipes can be a limiting factor in performance.
+
+endmenu
+
@@ -73,7 +73,7 @@
+#
+# Makefile for the kernel character device drivers.
+#
-+obj-$(CONFIG_KCOPY) += kcopy.o
++obj-$(CONFIG_KCOPY) += kcopy.o
diff --git a/drivers/char/kcopy/kcopy.c b/drivers/char/kcopy/kcopy.c
new file mode 100644
index 0000000..a9f915c
@@ -98,39 +98,39 @@
+MODULE_AUTHOR("Arthur Jones <arthur.jones@××××××.com>");
+MODULE_DESCRIPTION("QLogic kcopy driver");
+
-+#define KCOPY_ABI 1
-+#define KCOPY_MAX_MINORS 64
++#define KCOPY_ABI 1
++#define KCOPY_MAX_MINORS 64
+
+struct kcopy_device {
-+ struct cdev cdev;
-+ struct class *class;
-+ struct device *devp[KCOPY_MAX_MINORS];
-+ dev_t dev;
++ struct cdev cdev;
++ struct class *class;
++ struct device *devp[KCOPY_MAX_MINORS];
++ dev_t dev;
+
-+ struct kcopy_file *kf[KCOPY_MAX_MINORS];
-+ struct mutex open_lock;
++ struct kcopy_file *kf[KCOPY_MAX_MINORS];
++ struct mutex open_lock;
+};
+
+static struct kcopy_device kcopy_dev;
+
+/* per file data / one of these is shared per minor */
+struct kcopy_file {
-+ int count;
++ int count;
+
-+ /* pid indexed */
-+ struct rb_root live_map_tree;
++ /* pid indexed */
++ struct rb_root live_map_tree;
+
-+ struct mutex map_lock;
++ struct mutex map_lock;
+};
+
+struct kcopy_map_entry {
-+ int count;
-+ struct task_struct *task;
-+ pid_t pid;
-+ struct kcopy_file *file; /* file backpointer */
++ int count;
++ struct task_struct *task;
++ pid_t pid;
++ struct kcopy_file *file; /* file backpointer */
+
-+ struct list_head list; /* free map list */
-+ struct rb_node node; /* live map tree */
++ struct list_head list; /* free map list */
++ struct rb_node node; /* live map tree */
+};
+
+#define KCOPY_GET_SYSCALL 1
@@ -138,590 +138,590 @@
+#define KCOPY_ABI_SYSCALL 3
+
+struct kcopy_syscall {
-+ __u32 tag;
-+ pid_t pid;
-+ __u64 n;
-+ __u64 src;
-+ __u64 dst;
++ __u32 tag;
++ pid_t pid;
++ __u64 n;
++ __u64 src;
++ __u64 dst;
+};
+
+static const void __user *kcopy_syscall_src(const struct kcopy_syscall *ks)
+{
-+ return (const void __user *) (unsigned long) ks->src;
++ return (const void __user *) (unsigned long) ks->src;
+}
+
+static void __user *kcopy_syscall_dst(const struct kcopy_syscall *ks)
+{
-+ return (void __user *) (unsigned long) ks->dst;
++ return (void __user *) (unsigned long) ks->dst;
+}
+
+static unsigned long kcopy_syscall_n(const struct kcopy_syscall *ks)
+{
-+ return (unsigned long) ks->n;
++ return (unsigned long) ks->n;
+}
+
+static struct kcopy_map_entry *kcopy_create_entry(struct kcopy_file *file)
+{
-+ struct kcopy_map_entry *kme =
-+ kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
++ struct kcopy_map_entry *kme =
++ kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
+
-+ if (!kme)
-+ return NULL;
++ if (!kme)
++ return NULL;
+
-+ kme->count = 1;
-+ kme->file = file;
-+ kme->task = current;
-+ kme->pid = current->tgid;
-+ INIT_LIST_HEAD(&kme->list);
++ kme->count = 1;
++ kme->file = file;
++ kme->task = current;
++ kme->pid = current->tgid;
++ INIT_LIST_HEAD(&kme->list);
+
-+ return kme;
++ return kme;
+}
+
+static struct kcopy_map_entry *
+kcopy_lookup_pid(struct rb_root *root, pid_t pid)
+{
-+ struct rb_node *node = root->rb_node;
++ struct rb_node *node = root->rb_node;
+
-+ while (node) {
-+ struct kcopy_map_entry *kme =
-+ container_of(node, struct kcopy_map_entry, node);
++ while (node) {
++ struct kcopy_map_entry *kme =
++ container_of(node, struct kcopy_map_entry, node);
+
-+ if (pid < kme->pid)
-+ node = node->rb_left;
-+ else if (pid > kme->pid)
-+ node = node->rb_right;
-+ else
-+ return kme;
-+ }
++ if (pid < kme->pid)
++ node = node->rb_left;
++ else if (pid > kme->pid)
++ node = node->rb_right;
++ else
++ return kme;
++ }
+
-+ return NULL;
++ return NULL;
+}
+
+static int kcopy_insert(struct rb_root *root, struct kcopy_map_entry *kme)
+{
-+ struct rb_node **new = &(root->rb_node);
-+ struct rb_node *parent = NULL;
++ struct rb_node **new = &(root->rb_node);
++ struct rb_node *parent = NULL;
+
-+ while (*new) {
-+ struct kcopy_map_entry *tkme =
-+ container_of(*new, struct kcopy_map_entry, node);
++ while (*new) {
++ struct kcopy_map_entry *tkme =
++ container_of(*new, struct kcopy_map_entry, node);
+
-+ parent = *new;
-+ if (kme->pid < tkme->pid)
-+ new = &((*new)->rb_left);
-+ else if (kme->pid > tkme->pid)
-+ new = &((*new)->rb_right);
-+ else {
-+ printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
-+ return -EINVAL;
-+ }
-+ }
++ parent = *new;
++ if (kme->pid < tkme->pid)
++ new = &((*new)->rb_left);
++ else if (kme->pid > tkme->pid)
++ new = &((*new)->rb_right);
++ else {
++ printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
++ return -EINVAL;
++ }
++ }
+
-+ rb_link_node(&kme->node, parent, new);
-+ rb_insert_color(&kme->node, root);
++ rb_link_node(&kme->node, parent, new);
++ rb_insert_color(&kme->node, root);
+
-+ return 0;
++ return 0;
+}
+
+static int kcopy_open(struct inode *inode, struct file *filp)
+{
-+ int ret;
-+ const int minor = iminor(inode);
-+ struct kcopy_file *kf = NULL;
-+ struct kcopy_map_entry *kme;
-+ struct kcopy_map_entry *okme;
++ int ret;
++ const int minor = iminor(inode);
++ struct kcopy_file *kf = NULL;
++ struct kcopy_map_entry *kme;
++ struct kcopy_map_entry *okme;
+
-+ if (minor < 0 || minor >= KCOPY_MAX_MINORS)
-+ return -ENODEV;
++ if (minor < 0 || minor >= KCOPY_MAX_MINORS)
++ return -ENODEV;
+
-+ mutex_lock(&kcopy_dev.open_lock);
++ mutex_lock(&kcopy_dev.open_lock);
+
-+ if (!kcopy_dev.kf[minor]) {
-+ kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
++ if (!kcopy_dev.kf[minor]) {
++ kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
+
-+ if (!kf) {
-+ ret = -ENOMEM;
-+ goto bail;
-+ }
++ if (!kf) {
++ ret = -ENOMEM;
++ goto bail;
++ }
+
-+ kf->count = 1;
-+ kf->live_map_tree = RB_ROOT;
-+ mutex_init(&kf->map_lock);
-+ kcopy_dev.kf[minor] = kf;
-+ } else {
-+ if (filp->f_flags & O_EXCL) {
-+ ret = -EBUSY;
-+ goto bail;
-+ }
-+ kcopy_dev.kf[minor]->count++;
-+ }
++ kf->count = 1;
++ kf->live_map_tree = RB_ROOT;
++ mutex_init(&kf->map_lock);
++ kcopy_dev.kf[minor] = kf;
++ } else {
++ if (filp->f_flags & O_EXCL) {
++ ret = -EBUSY;
++ goto bail;
++ }
++ kcopy_dev.kf[minor]->count++;
++ }
+
-+ kme = kcopy_create_entry(kcopy_dev.kf[minor]);
-+ if (!kme) {
-+ ret = -ENOMEM;
-+ goto err_free_kf;
-+ }
++ kme = kcopy_create_entry(kcopy_dev.kf[minor]);
++ if (!kme) {
++ ret = -ENOMEM;
++ goto err_free_kf;
++ }
+
-+ kf = kcopy_dev.kf[minor];
++ kf = kcopy_dev.kf[minor];
+
-+ mutex_lock(&kf->map_lock);
++ mutex_lock(&kf->map_lock);
+
-+ okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
-+ if (okme) {
-+ /* pid already exists... */
-+ okme->count++;
-+ kfree(kme);
-+ kme = okme;
-+ } else
-+ ret = kcopy_insert(&kf->live_map_tree, kme);
++ okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
++ if (okme) {
++ /* pid already exists... */
++ okme->count++;
++ kfree(kme);
++ kme = okme;
++ } else
++ ret = kcopy_insert(&kf->live_map_tree, kme);
+
-+ mutex_unlock(&kf->map_lock);
++ mutex_unlock(&kf->map_lock);
+
-+ filp->private_data = kme;
++ filp->private_data = kme;
+
-+ ret = 0;
-+ goto bail;
++ ret = 0;
++ goto bail;
+
+err_free_kf:
-+ if (kf) {
-+ kcopy_dev.kf[minor] = NULL;
-+ kfree(kf);
-+ }
++ if (kf) {
++ kcopy_dev.kf[minor] = NULL;
++ kfree(kf);
++ }
+bail:
-+ mutex_unlock(&kcopy_dev.open_lock);
-+ return ret;
++ mutex_unlock(&kcopy_dev.open_lock);
++ return ret;
+}
+
+static int kcopy_flush(struct file *filp, fl_owner_t id)
+{
-+ struct kcopy_map_entry *kme = filp->private_data;
-+ struct kcopy_file *kf = kme->file;
++ struct kcopy_map_entry *kme = filp->private_data;
++ struct kcopy_file *kf = kme->file;
+
-+ if (file_count(filp) == 1) {
-+ mutex_lock(&kf->map_lock);
-+ kme->count--;
++ if (file_count(filp) == 1) {
++ mutex_lock(&kf->map_lock);
++ kme->count--;
+
-+ if (!kme->count) {
-+ rb_erase(&kme->node, &kf->live_map_tree);
-+ kfree(kme);
-+ }
-+ mutex_unlock(&kf->map_lock);
-+ }
++ if (!kme->count) {
++ rb_erase(&kme->node, &kf->live_map_tree);
++ kfree(kme);
++ }
++ mutex_unlock(&kf->map_lock);
++ }
+
-+ return 0;
++ return 0;
+}
+
+static int kcopy_release(struct inode *inode, struct file *filp)
+{
-+ const int minor = iminor(inode);
++ const int minor = iminor(inode);
+
-+ mutex_lock(&kcopy_dev.open_lock);
-+ kcopy_dev.kf[minor]->count--;
-+ if (!kcopy_dev.kf[minor]->count) {
-+ kfree(kcopy_dev.kf[minor]);
-+ kcopy_dev.kf[minor] = NULL;
-+ }
-+ mutex_unlock(&kcopy_dev.open_lock);
++ mutex_lock(&kcopy_dev.open_lock);
++ kcopy_dev.kf[minor]->count--;
++ if (!kcopy_dev.kf[minor]->count) {
++ kfree(kcopy_dev.kf[minor]);
++ kcopy_dev.kf[minor] = NULL;
++ }
++ mutex_unlock(&kcopy_dev.open_lock);
+
-+ return 0;
++ return 0;
+}
+
+static void kcopy_put_pages(struct page **pages, int npages)
+{
-+ int j;
++ int j;
+
-+ for (j = 0; j < npages; j++)
-+ put_page(pages[j]);
++ for (j = 0; j < npages; j++)
++ put_page(pages[j]);
+}
+
+static int kcopy_validate_task(struct task_struct *p)
+{
-+ return p && ((current_euid() == task_euid(p)) || (current_euid() == task_uid(p)));
++ return p && (uid_eq(current_euid(), task_euid(p)) || uid_eq(current_euid(), task_uid(p)));
+}
+
+static int kcopy_get_pages(struct kcopy_file *kf, pid_t pid,
-+ struct page **pages, void __user *addr,
-+ int write, size_t npages)
++ struct page **pages, void __user *addr,
++ int write, size_t npages)
+{
-+ int err;
-+ struct mm_struct *mm;
-+ struct kcopy_map_entry *rkme;
++ int err;
++ struct mm_struct *mm;
++ struct kcopy_map_entry *rkme;
+
-+ mutex_lock(&kf->map_lock);
++ mutex_lock(&kf->map_lock);
+
-+ rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
-+ if (!rkme || !kcopy_validate_task(rkme->task)) {
-+ err = -EINVAL;
-+ goto bail_unlock;
-+ }
++ rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
++ if (!rkme || !kcopy_validate_task(rkme->task)) {
++ err = -EINVAL;
++ goto bail_unlock;
++ }
+
-+ mm = get_task_mm(rkme->task);
-+ if (unlikely(!mm)) {
-+ err = -ENOMEM;
-+ goto bail_unlock;
-+ }
++ mm = get_task_mm(rkme->task);
++ if (unlikely(!mm)) {
++ err = -ENOMEM;
++ goto bail_unlock;
++ }
+
-+ down_read(&mm->mmap_sem);
-+ err = get_user_pages(rkme->task, mm,
-+ (unsigned long) addr, npages, write, 0,
-+ pages, NULL);
++ down_read(&mm->mmap_sem);
++ err = get_user_pages(rkme->task, mm,
++ (unsigned long) addr, npages, write, 0,
++ pages, NULL);
+
-+ if (err < npages && err > 0) {
-+ kcopy_put_pages(pages, err);
-+ err = -ENOMEM;
-+ } else if (err == npages)
-+ err = 0;
++ if (err < npages && err > 0) {
++ kcopy_put_pages(pages, err);
++ err = -ENOMEM;
++ } else if (err == npages)
++ err = 0;
+
-+ up_read(&mm->mmap_sem);
++ up_read(&mm->mmap_sem);
+
-+ mmput(mm);
++ mmput(mm);
+
+bail_unlock:
-+ mutex_unlock(&kf->map_lock);
++ mutex_unlock(&kf->map_lock);
+
-+ return err;
++ return err;
+}
+
+static unsigned long kcopy_copy_pages_from_user(void __user *src,
-+ struct page **dpages,
-+ unsigned doff,
-+ unsigned long n)
++ struct page **dpages,
++ unsigned doff,
++ unsigned long n)
+{
-+ struct page *dpage = *dpages;
-+ char *daddr = kmap(dpage);
-+ int ret = 0;
++ struct page *dpage = *dpages;
++ char *daddr = kmap(dpage);
++ int ret = 0;
+
-+ while (1) {
-+ const unsigned long nleft = PAGE_SIZE - doff;
-+ const unsigned long nc = (n < nleft) ? n : nleft;
++ while (1) {
++ const unsigned long nleft = PAGE_SIZE - doff;
++ const unsigned long nc = (n < nleft) ? n : nleft;
+
-+ /* if (copy_from_user(daddr + doff, src, nc)) { */
-+ if (__copy_from_user_nocache(daddr + doff, src, nc)) {
-+ ret = -EFAULT;
-+ goto bail;
-+ }
++ /* if (copy_from_user(daddr + doff, src, nc)) { */
++ if (__copy_from_user_nocache(daddr + doff, src, nc)) {
++ ret = -EFAULT;
++ goto bail;
++ }
+
-+ n -= nc;
-+ if (n == 0)
-+ break;
++ n -= nc;
++ if (n == 0)
++ break;
+
-+ doff += nc;
-+ doff &= ~PAGE_MASK;
-+ if (doff == 0) {
-+ kunmap(dpage);
-+ dpages++;
-+ dpage = *dpages;
-+ daddr = kmap(dpage);
-+ }
++ doff += nc;
++ doff &= ~PAGE_MASK;
++ if (doff == 0) {
++ kunmap(dpage);
++ dpages++;
++ dpage = *dpages;
++ daddr = kmap(dpage);
++ }
+
-+ src += nc;
-+ }
++ src += nc;
++ }
+
+bail:
-+ kunmap(dpage);
++ kunmap(dpage);
+
-+ return ret;
++ return ret;
+}
+
+static unsigned long kcopy_copy_pages_to_user(void __user *dst,
-+ struct page **spages,
-+ unsigned soff,
-+ unsigned long n)
++ struct page **spages,
++ unsigned soff,
++ unsigned long n)
+{
-+ struct page *spage = *spages;
-+ const char *saddr = kmap(spage);
-+ int ret = 0;
++ struct page *spage = *spages;
++ const char *saddr = kmap(spage);
++ int ret = 0;
+
-+ while (1) {
-+ const unsigned long nleft = PAGE_SIZE - soff;
-+ const unsigned long nc = (n < nleft) ? n : nleft;
++ while (1) {
++ const unsigned long nleft = PAGE_SIZE - soff;
++ const unsigned long nc = (n < nleft) ? n : nleft;
+
-+ if (copy_to_user(dst, saddr + soff, nc)) {
-+ ret = -EFAULT;
-+ goto bail;
-+ }
++ if (copy_to_user(dst, saddr + soff, nc)) {
++ ret = -EFAULT;
++ goto bail;
++ }
+
-+ n -= nc;
-+ if (n == 0)
-+ break;
++ n -= nc;
++ if (n == 0)
++ break;
+
-+ soff += nc;
-+ soff &= ~PAGE_MASK;
-+ if (soff == 0) {
-+ kunmap(spage);
-+ spages++;
-+ spage = *spages;
-+ saddr = kmap(spage);
-+ }
++ soff += nc;
++ soff &= ~PAGE_MASK;
++ if (soff == 0) {
++ kunmap(spage);
++ spages++;
++ spage = *spages;
++ saddr = kmap(spage);
++ }
+
-+ dst += nc;
-+ }
++ dst += nc;
++ }
+
+bail:
-+ kunmap(spage);
++ kunmap(spage);
+
-+ return ret;
++ return ret;
+}
+
+static unsigned long kcopy_copy_to_user(void __user *dst,
-+ struct kcopy_file *kf, pid_t pid,
-+ void __user *src,
-+ unsigned long n)
++ struct kcopy_file *kf, pid_t pid,
++ void __user *src,
++ unsigned long n)
+{
-+ struct page **pages;
-+ const int pages_len = PAGE_SIZE / sizeof(struct page *);
-+ int ret = 0;
++ struct page **pages;
++ const int pages_len = PAGE_SIZE / sizeof(struct page *);
++ int ret = 0;
+
-+ pages = (struct page **) __get_free_page(GFP_KERNEL);
-+ if (!pages) {
-+ ret = -ENOMEM;
-+ goto bail;
-+ }
++ pages = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!pages) {
++ ret = -ENOMEM;
++ goto bail;
++ }
+
-+ while (n) {
-+ const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
-+ const unsigned long spages_left =
-+ (soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ const unsigned long spages_cp =
-+ min_t(unsigned long, spages_left, pages_len);
-+ const unsigned long sbytes =
-+ PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
-+ const unsigned long nbytes = min_t(unsigned long, sbytes, n);
++ while (n) {
++ const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
++ const unsigned long spages_left =
++ (soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ const unsigned long spages_cp =
++ min_t(unsigned long, spages_left, pages_len);
++ const unsigned long sbytes =
++ PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
++ const unsigned long nbytes = min_t(unsigned long, sbytes, n);
+
-+ ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
-+ if (unlikely(ret))
-+ goto bail_free;
++ ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
++ if (unlikely(ret))
++ goto bail_free;
+
-+ ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
-+ kcopy_put_pages(pages, spages_cp);
-+ if (ret)
-+ goto bail_free;
-+ dst = (char *) dst + nbytes;
-+ src = (char *) src + nbytes;
++ ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
++ kcopy_put_pages(pages, spages_cp);
++ if (ret)
++ goto bail_free;
++ dst = (char *) dst + nbytes;
++ src = (char *) src + nbytes;
+
-+ n -= nbytes;
-+ }
++ n -= nbytes;
++ }
+
+bail_free:
-+ free_page((unsigned long) pages);
++ free_page((unsigned long) pages);
+bail:
-+ return ret;
++ return ret;
+}
+
+static unsigned long kcopy_copy_from_user(const void __user *src,
-+ struct kcopy_file *kf, pid_t pid,
-+ void __user *dst,
-+ unsigned long n)
++ struct kcopy_file *kf, pid_t pid,
++ void __user *dst,
++ unsigned long n)
+{
-+ struct page **pages;
-+ const int pages_len = PAGE_SIZE / sizeof(struct page *);
-+ int ret = 0;
++ struct page **pages;
++ const int pages_len = PAGE_SIZE / sizeof(struct page *);
++ int ret = 0;
+
-+ pages = (struct page **) __get_free_page(GFP_KERNEL);
-+ if (!pages) {
-+ ret = -ENOMEM;
-+ goto bail;
-+ }
++ pages = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!pages) {
++ ret = -ENOMEM;
++ goto bail;
++ }
+
-+ while (n) {
-+ const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
-+ const unsigned long dpages_left =
-+ (doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ const unsigned long dpages_cp =
-+ min_t(unsigned long, dpages_left, pages_len);
-+ const unsigned long dbytes =
-+ PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
-+ const unsigned long nbytes = min_t(unsigned long, dbytes, n);
++ while (n) {
++ const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
++ const unsigned long dpages_left =
++ (doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ const unsigned long dpages_cp =
++ min_t(unsigned long, dpages_left, pages_len);
++ const unsigned long dbytes =
++ PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
++ const unsigned long nbytes = min_t(unsigned long, dbytes, n);
+
-+ ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
-+ if (unlikely(ret))
-+ goto bail_free;
++ ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
++ if (unlikely(ret))
++ goto bail_free;
+
-+ ret = kcopy_copy_pages_from_user((void __user *) src,
-+ pages, doff, nbytes);
-+ kcopy_put_pages(pages, dpages_cp);
-+ if (ret)
-+ goto bail_free;
++ ret = kcopy_copy_pages_from_user((void __user *) src,
++ pages, doff, nbytes);
++ kcopy_put_pages(pages, dpages_cp);
++ if (ret)
++ goto bail_free;
+
-+ dst = (char *) dst + nbytes;
-+ src = (char *) src + nbytes;
++ dst = (char *) dst + nbytes;
++ src = (char *) src + nbytes;
+
-+ n -= nbytes;
-+ }
++ n -= nbytes;
++ }
+
+bail_free:
-+ free_page((unsigned long) pages);
++ free_page((unsigned long) pages);
+bail:
-+ return ret;
++ return ret;
+}
+
+static int kcopy_do_get(struct kcopy_map_entry *kme, pid_t pid,
-+ const void __user *src, void __user *dst,
-+ unsigned long n)
++ const void __user *src, void __user *dst,
++ unsigned long n)
+{
-+ struct kcopy_file *kf = kme->file;
-+ int ret = 0;
++ struct kcopy_file *kf = kme->file;
++ int ret = 0;
+
-+ if (n == 0) {
-+ ret = -EINVAL;
-+ goto bail;
-+ }
++ if (n == 0) {
++ ret = -EINVAL;
++ goto bail;
++ }
+
-+ ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
++ ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
+
+bail:
-+ return ret;
++ return ret;
+}
+
+static int kcopy_do_put(struct kcopy_map_entry *kme, const void __user *src,
-+ pid_t pid, void __user *dst,
-+ unsigned long n)
++ pid_t pid, void __user *dst,
++ unsigned long n)
+{
-+ struct kcopy_file *kf = kme->file;
-+ int ret = 0;
++ struct kcopy_file *kf = kme->file;
++ int ret = 0;
+
-+ if (n == 0) {
-+ ret = -EINVAL;
-+ goto bail;
-+ }
++ if (n == 0) {
++ ret = -EINVAL;
++ goto bail;
++ }
+
-+ ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
++ ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
+
+bail:
-+ return ret;
++ return ret;
+}
+
+static int kcopy_do_abi(u32 __user *dst)
+{
-+ u32 val = KCOPY_ABI;
-+ int err;
++ u32 val = KCOPY_ABI;
++ int err;
+
-+ err = put_user(val, dst);
-+ if (err)
-+ return -EFAULT;
++ err = put_user(val, dst);
++ if (err)
++ return -EFAULT;
+
-+ return 0;
++ return 0;
+}
+
+ssize_t kcopy_write(struct file *filp, const char __user *data, size_t cnt,
-+ loff_t *o)
++ loff_t *o)
+{
-+ struct kcopy_map_entry *kme = filp->private_data;
-+ struct kcopy_syscall ks;
-+ int err = 0;
-+ const void __user *src;
-+ void __user *dst;
-+ unsigned long n;
++ struct kcopy_map_entry *kme = filp->private_data;
++ struct kcopy_syscall ks;
++ int err = 0;
++ const void __user *src;
++ void __user *dst;
++ unsigned long n;
+
-+ if (cnt != sizeof(struct kcopy_syscall)) {
-+ err = -EINVAL;
-+ goto bail;
-+ }
++ if (cnt != sizeof(struct kcopy_syscall)) {
++ err = -EINVAL;
++ goto bail;
++ }
+
-+ err = copy_from_user(&ks, data, cnt);
-+ if (unlikely(err))
-+ goto bail;
++ err = copy_from_user(&ks, data, cnt);
++ if (unlikely(err))
++ goto bail;
+
-+ src = kcopy_syscall_src(&ks);
-+ dst = kcopy_syscall_dst(&ks);
-+ n = kcopy_syscall_n(&ks);
-+ if (ks.tag == KCOPY_GET_SYSCALL)
-+ err = kcopy_do_get(kme, ks.pid, src, dst, n);
-+ else if (ks.tag == KCOPY_PUT_SYSCALL)
-+ err = kcopy_do_put(kme, src, ks.pid, dst, n);
-+ else if (ks.tag == KCOPY_ABI_SYSCALL)
-+ err = kcopy_do_abi(dst);
-+ else
-+ err = -EINVAL;
++ src = kcopy_syscall_src(&ks);
++ dst = kcopy_syscall_dst(&ks);
++ n = kcopy_syscall_n(&ks);
++ if (ks.tag == KCOPY_GET_SYSCALL)
++ err = kcopy_do_get(kme, ks.pid, src, dst, n);
++ else if (ks.tag == KCOPY_PUT_SYSCALL)
++ err = kcopy_do_put(kme, src, ks.pid, dst, n);
++ else if (ks.tag == KCOPY_ABI_SYSCALL)
++ err = kcopy_do_abi(dst);
++ else
++ err = -EINVAL;
+
+bail:
-+ return err ? err : cnt;
++ return err ? err : cnt;
+}
+
+static const struct file_operations kcopy_fops = {
-+ .owner = THIS_MODULE,
-+ .open = kcopy_open,
-+ .release = kcopy_release,
-+ .flush = kcopy_flush,
-+ .write = kcopy_write,
++ .owner = THIS_MODULE,
++ .open = kcopy_open,
++ .release = kcopy_release,
++ .flush = kcopy_flush,
++ .write = kcopy_write,
+};
+
+static int __init kcopy_init(void)
+{
-+ int ret;
-+ const char *name = "kcopy";
-+ int i;
-+ int ninit = 0;
++ int ret;
++ const char *name = "kcopy";
++ int i;
++ int ninit = 0;
+
-+ mutex_init(&kcopy_dev.open_lock);
++ mutex_init(&kcopy_dev.open_lock);
+
-+ ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
-+ if (ret)
-+ goto bail;
++ ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
++ if (ret)
++ goto bail;
+
-+ kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
++ kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
+
-+ if (IS_ERR(kcopy_dev.class)) {
-+ ret = PTR_ERR(kcopy_dev.class);
-+ printk(KERN_ERR "kcopy: Could not create "
-+ "device class (err %d)\n", -ret);
-+ goto bail_chrdev;
-+ }
++ if (IS_ERR(kcopy_dev.class)) {
++ ret = PTR_ERR(kcopy_dev.class);
++ printk(KERN_ERR "kcopy: Could not create "
++ "device class (err %d)\n", -ret);
++ goto bail_chrdev;
++ }
+
-+ cdev_init(&kcopy_dev.cdev, &kcopy_fops);
-+ ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
-+ if (ret < 0) {
-+ printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
-+ -ret);
-+ goto bail_class;
-+ }
++ cdev_init(&kcopy_dev.cdev, &kcopy_fops);
++ ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
++ if (ret < 0) {
++ printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
++ -ret);
++ goto bail_class;
++ }
+
-+ for (i = 0; i < KCOPY_MAX_MINORS; i++) {
-+ char devname[8];
-+ const int minor = MINOR(kcopy_dev.dev) + i;
-+ const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
++ for (i = 0; i < KCOPY_MAX_MINORS; i++) {
++ char devname[8];
++ const int minor = MINOR(kcopy_dev.dev) + i;
++ const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
+
-+ snprintf(devname, sizeof(devname), "kcopy%02d", i);
-+ kcopy_dev.devp[i] =
-+ device_create(kcopy_dev.class, NULL,
-+ dev, NULL, devname);
++ snprintf(devname, sizeof(devname), "kcopy%02d", i);
++ kcopy_dev.devp[i] =
++ device_create(kcopy_dev.class, NULL,
++ dev, NULL, devname);
+
-+ if (IS_ERR(kcopy_dev.devp[i])) {
-+ ret = PTR_ERR(kcopy_dev.devp[i]);
-+ printk(KERN_ERR "kcopy: Could not create "
-+ "devp %d (err %d)\n", i, -ret);
-+ goto bail_cdev_add;
-+ }
++ if (IS_ERR(kcopy_dev.devp[i])) {
++ ret = PTR_ERR(kcopy_dev.devp[i]);
++ printk(KERN_ERR "kcopy: Could not create "
++ "devp %d (err %d)\n", i, -ret);
++ goto bail_cdev_add;
++ }
+
-+ ninit++;
-+ }
++ ninit++;
++ }
+
-+ ret = 0;
-+ goto bail;
++ ret = 0;
++ goto bail;
+
+bail_cdev_add:
-+ for (i = 0; i < ninit; i++)
-+ device_unregister(kcopy_dev.devp[i]);
++ for (i = 0; i < ninit; i++)
++ device_unregister(kcopy_dev.devp[i]);
+
-+ cdev_del(&kcopy_dev.cdev);
++ cdev_del(&kcopy_dev.cdev);
+bail_class:
-+ class_destroy(kcopy_dev.class);
++ class_destroy(kcopy_dev.class);
+bail_chrdev:
-+ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
++ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
+bail:
-+ return ret;
++ return ret;
+}
+
+static void __exit kcopy_fini(void)
+{
-+ int i;
++ int i;
+
-+ for (i = 0; i < KCOPY_MAX_MINORS; i++)
-+ device_unregister(kcopy_dev.devp[i]);
++ for (i = 0; i < KCOPY_MAX_MINORS; i++)
++ device_unregister(kcopy_dev.devp[i]);
+
-+ cdev_del(&kcopy_dev.cdev);
-+ class_destroy(kcopy_dev.class);
-+ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
++ cdev_del(&kcopy_dev.cdev);
++ class_destroy(kcopy_dev.class);
++ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
+}
+
+module_init(kcopy_init);
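
For readers following along, everything the driver exposes goes through write(): userspace submits exactly one struct kcopy_syscall per call, and the tag field selects a get (1), put (2), or ABI query (3), with src, dst, and n interpreted accordingly. A minimal, untested userspace sketch of an ABI probe against this driver (the /dev/kcopy01 path assumes the usual udev node for the "kcopy%02d" devices created above; the struct simply mirrors the kernel layout):

    /* Probe the kcopy ABI version: write a kcopy_syscall tagged
     * KCOPY_ABI_SYSCALL (3); the driver put_user()s KCOPY_ABI (1)
     * into the u32 pointed to by dst and returns sizeof(ks).
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    struct kcopy_syscall {      /* must match the kernel's layout */
            uint32_t tag;
            pid_t pid;          /* unused for the ABI query */
            uint64_t n;
            uint64_t src;
            uint64_t dst;
    };

    int main(void)
    {
            uint32_t abi = 0;
            struct kcopy_syscall ks = {
                    .tag = 3,   /* KCOPY_ABI_SYSCALL */
                    .dst = (uint64_t) (uintptr_t) &abi,
            };
            int fd = open("/dev/kcopy01", O_WRONLY);

            if (fd < 0 || write(fd, &ks, sizeof(ks)) != sizeof(ks))
                    return 1;
            printf("kcopy ABI version %u\n", abi); /* expect 1 */
            close(fd);
            return 0;
    }

A get or put transfer works the same way: both processes open the same kcopy minor, and the caller passes the peer's pid plus the peer-side address in src (KCOPY_GET_SYSCALL) or dst (KCOPY_PUT_SYSCALL), which the driver resolves through the pid-indexed rb-tree and get_user_pages() shown above.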