Gentoo Archives: gentoo-commits

From: "Mike Frysinger (vapier)" <vapier@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] gentoo commit in src/patchsets/glibc/2.9: 1020_all_glibc-2.9-strlen-hack.patch 1060_all_glibc-nss-deepbind.patch 5021_all_2.9-fnmatch.patch 6120_all_ppc-glibc-2.9-atomic.patch README.history
Date: Wed, 24 Dec 2008 19:52:03
Message-Id: E1LFZlv-0002KO-M5@stork.gentoo.org
1 vapier 08/12/24 19:51:59
2
3 Modified: README.history
4 Added: 1020_all_glibc-2.9-strlen-hack.patch
5 1060_all_glibc-nss-deepbind.patch
6 5021_all_2.9-fnmatch.patch
7 6120_all_ppc-glibc-2.9-atomic.patch
8 Log:
9 add some patches from suse
10
11 Revision Changes Path
12 1.2 src/patchsets/glibc/2.9/README.history
13
14 file : http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/README.history?rev=1.2&view=markup
15 plain: http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/README.history?rev=1.2&content-type=text/plain
16 diff : http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/README.history?r1=1.1&r2=1.2
17
18 Index: README.history
19 ===================================================================
20 RCS file: /var/cvsroot/gentoo/src/patchsets/glibc/2.9/README.history,v
21 retrieving revision 1.1
22 retrieving revision 1.2
23 diff -u -r1.1 -r1.2
24 --- README.history 8 Dec 2008 03:28:12 -0000 1.1
25 +++ README.history 24 Dec 2008 19:51:59 -0000 1.2
26 @@ -1,3 +1,9 @@
27 +2 [pending]
28 + + 1020_all_glibc-2.9-strlen-hack.patch
29 + + 1060_all_glibc-nss-deepbind.patch
30 + + 5021_all_2.9-fnmatch.patch
31 + + 6120_all_ppc-glibc-2.9-atomic.patch
32 +
33 1 08.12.2008
34 + 0010_all_glibc-2.7-ssp-compat.patch
35 + 0030_all_glibc-respect-env-CPPFLAGS.patch
36
37
38
39 1.1 src/patchsets/glibc/2.9/1020_all_glibc-2.9-strlen-hack.patch
40
41 file : http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/1020_all_glibc-2.9-strlen-hack.patch?rev=1.1&view=markup
42 plain: http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/1020_all_glibc-2.9-strlen-hack.patch?rev=1.1&content-type=text/plain
43
44 Index: 1020_all_glibc-2.9-strlen-hack.patch
45 ===================================================================
46 http://sourceware.org/bugzilla/show_bug.cgi?id=5807
47 http://www.cl.cam.ac.uk/~am21/progtricks.html
48
49 --- libc/string/strlen.c
50 +++ libc/string/strlen.c
51 @@ -32,7 +32,7 @@
52 {
53 const char *char_ptr;
54 const unsigned long int *longword_ptr;
55 - unsigned long int longword, magic_bits, himagic, lomagic;
56 + unsigned long int longword, himagic, lomagic;
57
58 /* Handle the first few characters by reading one character at a time.
59 Do this until CHAR_PTR is aligned on a longword boundary. */
60 @@ -42,28 +42,14 @@
61 if (*char_ptr == '\0')
62 return char_ptr - str;
63
64 - /* All these elucidatory comments refer to 4-byte longwords,
65 - but the theory applies equally well to 8-byte longwords. */
66 -
67 longword_ptr = (unsigned long int *) char_ptr;
68
69 - /* Bits 31, 24, 16, and 8 of this number are zero. Call these bits
70 - the "holes." Note that there is a hole just to the left of
71 - each byte, with an extra at the end:
72 -
73 - bits: 01111110 11111110 11111110 11111111
74 - bytes: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
75 -
76 - The 1-bits make sure that carries propagate to the next 0-bit.
77 - The 0-bits provide holes for carries to fall into. */
78 - magic_bits = 0x7efefeffL;
79 himagic = 0x80808080L;
80 lomagic = 0x01010101L;
81 if (sizeof (longword) > 4)
82 {
83 /* 64-bit version of the magic. */
84 /* Do the shift in two steps to avoid a warning if long has 32 bits. */
85 - magic_bits = ((0x7efefefeL << 16) << 16) | 0xfefefeffL;
86 himagic = ((himagic << 16) << 16) | himagic;
87 lomagic = ((lomagic << 16) << 16) | lomagic;
88 }
89 @@ -75,56 +61,12 @@
90 if *any of the four* bytes in the longword in question are zero. */
91 for (;;)
92 {
93 - /* We tentatively exit the loop if adding MAGIC_BITS to
94 - LONGWORD fails to change any of the hole bits of LONGWORD.
95 -
96 - 1) Is this safe? Will it catch all the zero bytes?
97 - Suppose there is a byte with all zeros. Any carry bits
98 - propagating from its left will fall into the hole at its
99 - least significant bit and stop. Since there will be no
100 - carry from its most significant bit, the LSB of the
101 - byte to the left will be unchanged, and the zero will be
102 - detected.
103 -
104 - 2) Is this worthwhile? Will it ignore everything except
105 - zero bytes? Suppose every byte of LONGWORD has a bit set
106 - somewhere. There will be a carry into bit 8. If bit 8
107 - is set, this will carry into bit 16. If bit 8 is clear,
108 - one of bits 9-15 must be set, so there will be a carry
109 - into bit 16. Similarly, there will be a carry into bit
110 - 24. If one of bits 24-30 is set, there will be a carry
111 - into bit 31, so all of the hole bits will be changed.
112 -
113 - The one misfire occurs when bits 24-30 are clear and bit
114 - 31 is set; in this case, the hole at bit 31 is not
115 - changed. If we had access to the processor carry flag,
116 - we could close this loophole by putting the fourth hole
117 - at bit 32!
118 -
119 - So it ignores everything except 128's, when they're aligned
120 - properly. */
121 -
122 longword = *longword_ptr++;
123
124 - if (
125 -#if 0
126 - /* Add MAGIC_BITS to LONGWORD. */
127 - (((longword + magic_bits)
128 -
129 - /* Set those bits that were unchanged by the addition. */
130 - ^ ~longword)
131 -
132 - /* Look at only the hole bits. If any of the hole bits
133 - are unchanged, most likely one of the bytes was a
134 - zero. */
135 - & ~magic_bits)
136 -#else
137 - ((longword - lomagic) & himagic)
138 -#endif
139 - != 0)
140 + /* This hack taken from Alan Mycroft's HAKMEMC postings.
141 + See: http://www.cl.cam.ac.uk/~am21/progtricks.html */
142 + if (((longword - lomagic) & ~longword & himagic) != 0)
143 {
144 - /* Which of the bytes was the zero? If none of them were, it was
145 - a misfire; continue the search. */
146
147 const char *cp = (const char *) (longword_ptr - 1);
148
149
150
151
152 1.1 src/patchsets/glibc/2.9/1060_all_glibc-nss-deepbind.patch
153
154 file : http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/1060_all_glibc-nss-deepbind.patch?rev=1.1&view=markup
155 plain: http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/1060_all_glibc-nss-deepbind.patch?rev=1.1&content-type=text/plain
156
157 Index: 1060_all_glibc-nss-deepbind.patch
158 ===================================================================
159 Use DEEPBIND to load the nss modules. This helps thunderbird (linked against its
160 own version of the ldap libs) when using nss_ldap (linked against the system
161 libldap), which otherwise crashes due to incompatibilities.
162
163 See https://bugzilla.novell.com/show_bug.cgi?id=157078 and
164 http://sourceware.org/bugzilla/show_bug.cgi?id=6610
165
166 Index: nss/nsswitch.c
167 ===================================================================
168 --- nss/nsswitch.c.orig
169 +++ nss/nsswitch.c
170 @@ -358,7 +358,9 @@ __nss_lookup_function (service_user *ni,
171 ".so"),
172 __nss_shlib_revision);
173
174 - ni->library->lib_handle = __libc_dlopen (shlib_name);
175 + ni->library->lib_handle
176 + = __libc_dlopen_mode (shlib_name,
177 + RTLD_LAZY | __RTLD_DLOPEN | RTLD_DEEPBIND);
178 if (ni->library->lib_handle == NULL)
179 {
180 /* Failed to load the library. */
181
182
183
184 1.1 src/patchsets/glibc/2.9/5021_all_2.9-fnmatch.patch
185
186 file : http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/5021_all_2.9-fnmatch.patch?rev=1.1&view=markup
187 plain: http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/5021_all_2.9-fnmatch.patch?rev=1.1&content-type=text/plain
188
189 Index: 5021_all_2.9-fnmatch.patch
190 ===================================================================
191 http://sourceware.org/ml/libc-hacker/2002-11/msg00071.html
192
193 When fnmatch detects an invalid multibyte character it should fall back to
194 single byte matching, so that "*" has a chance to match such a string.
195
196 Andreas.
197
198 2005-04-12 Andreas Schwab <schwab@××××.de>
199
200 * posix/fnmatch.c (fnmatch): If conversion to wide character
201 fails fall back to single byte matching.
202
203 Index: posix/fnmatch.c
204 ===================================================================
205 --- posix/fnmatch.c.orig 2007-05-18 10:40:34.000000000 +0200
206 +++ posix/fnmatch.c 2007-05-18 13:21:47.199478000 +0200
207 @@ -327,6 +327,7 @@
208 # if HANDLE_MULTIBYTE
209 if (__builtin_expect (MB_CUR_MAX, 1) != 1)
210 {
211 + const char *orig_pattern = pattern;
212 mbstate_t ps;
213 size_t n;
214 const char *p;
215 @@ -378,10 +379,8 @@
216 wstring = (wchar_t *) alloca ((n + 1) * sizeof (wchar_t));
217 n = mbsrtowcs (wstring, &p, n + 1, &ps);
218 if (__builtin_expect (n == (size_t) -1, 0))
219 - /* Something wrong.
220 - XXX Do we have to set `errno' to something which mbsrtows hasn't
221 - already done? */
222 - return -1;
223 + /* Something wrong. Fall back to single byte matching. */
224 + goto try_singlebyte;
225 if (p)
226 memset (&ps, '\0', sizeof (ps));
227 }
228 @@ -389,10 +388,8 @@
229 {
230 n = mbsrtowcs (NULL, &string, 0, &ps);
231 if (__builtin_expect (n == (size_t) -1, 0))
232 - /* Something wrong.
233 - XXX Do we have to set `errno' to something which mbsrtows hasn't
234 - already done? */
235 - return -1;
236 + /* Something wrong. Fall back to single byte matching. */
237 + goto try_singlebyte;
238 wstring = (wchar_t *) alloca ((n + 1) * sizeof (wchar_t));
239 assert (mbsinit (&ps));
240 (void) mbsrtowcs (wstring, &string, n + 1, &ps);
241 @@ -400,6 +397,9 @@
242
243 return internal_fnwmatch (wpattern, wstring, wstring + n,
244 flags & FNM_PERIOD, flags, NULL);
245 +
246 + try_singlebyte:
247 + pattern = orig_pattern;
248 }
249 # endif /* mbstate_t and mbsrtowcs or _LIBC. */
250
251
252
253
254 1.1 src/patchsets/glibc/2.9/6120_all_ppc-glibc-2.9-atomic.patch
255
256 file : http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/6120_all_ppc-glibc-2.9-atomic.patch?rev=1.1&view=markup
257 plain: http://sources.gentoo.org/viewcvs.py/gentoo/src/patchsets/glibc/2.9/6120_all_ppc-glibc-2.9-atomic.patch?rev=1.1&content-type=text/plain
258
259 Index: 6120_all_ppc-glibc-2.9-atomic.patch
260 ===================================================================
261 snipped from suse
262
263 Index: sysdeps/powerpc/bits/atomic.h
264 ===================================================================
265 RCS file: /cvs/glibc/libc/sysdeps/powerpc/bits/atomic.h,v
266 retrieving revision 1.17
267 diff -u -a -p -r1.17 atomic.h
268 --- sysdeps/powerpc/bits/atomic.h 26 Mar 2007 20:15:28 -0000 1.17
269 +++ sysdeps/powerpc/bits/atomic.h 31 May 2008 08:50:56 -0000
270 @@ -85,14 +85,14 @@ typedef uintmax_t uatomic_max_t;
271 __typeof (*(mem)) __tmp; \
272 __typeof (mem) __memp = (mem); \
273 __asm __volatile ( \
274 - "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
275 + "1: lwarx %0,%y1" MUTEX_HINT_ACQ "\n" \
276 " cmpw %0,%2\n" \
277 " bne 2f\n" \
278 - " stwcx. %3,0,%1\n" \
279 + " stwcx. %3,%y1\n" \
280 " bne- 1b\n" \
281 "2: " __ARCH_ACQ_INSTR \
282 - : "=&r" (__tmp) \
283 - : "b" (__memp), "r" (oldval), "r" (newval) \
284 + : "=&r" (__tmp), "+Z" (*__memp) \
285 + : "r" (oldval), "r" (newval) \
286 : "cr0", "memory"); \
287 __tmp; \
288 })
289 @@ -102,14 +102,14 @@ typedef uintmax_t uatomic_max_t;
290 __typeof (*(mem)) __tmp; \
291 __typeof (mem) __memp = (mem); \
292 __asm __volatile (__ARCH_REL_INSTR "\n" \
293 - "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \
294 + "1: lwarx %0,%y1" MUTEX_HINT_REL "\n" \
295 " cmpw %0,%2\n" \
296 " bne 2f\n" \
297 - " stwcx. %3,0,%1\n" \
298 + " stwcx. %3,%y1\n" \
299 " bne- 1b\n" \
300 "2: " \
301 - : "=&r" (__tmp) \
302 - : "b" (__memp), "r" (oldval), "r" (newval) \
303 + : "=&r" (__tmp), "+Z" (__memp) \
304 + : "r" (oldval), "r" (newval) \
305 : "cr0", "memory"); \
306 __tmp; \
307 })
308 @@ -118,12 +118,12 @@ typedef uintmax_t uatomic_max_t;
309 ({ \
310 __typeof (*mem) __val; \
311 __asm __volatile ( \
312 - "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
313 - " stwcx. %3,0,%2\n" \
314 + "1: lwarx %0,%y1" MUTEX_HINT_ACQ "\n" \
315 + " stwcx. %2,%y1\n" \
316 " bne- 1b\n" \
317 " " __ARCH_ACQ_INSTR \
318 - : "=&r" (__val), "=m" (*mem) \
319 - : "b" (mem), "r" (value), "m" (*mem) \
320 + : "=&r" (__val), "+Z" (*mem) \
321 + : "r" (value) \
322 : "cr0", "memory"); \
323 __val; \
324 })
325 @@ -132,11 +132,11 @@ typedef uintmax_t uatomic_max_t;
326 ({ \
327 __typeof (*mem) __val; \
328 __asm __volatile (__ARCH_REL_INSTR "\n" \
329 - "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \
330 - " stwcx. %3,0,%2\n" \
331 + "1: lwarx %0,%y1" MUTEX_HINT_REL "\n" \
332 + " stwcx. %2,%y1\n" \
333 " bne- 1b" \
334 - : "=&r" (__val), "=m" (*mem) \
335 - : "b" (mem), "r" (value), "m" (*mem) \
336 + : "=&r" (__val), "+Z" (*mem) \
337 + : "r" (value) \
338 : "cr0", "memory"); \
339 __val; \
340 })
341 @@ -144,12 +144,12 @@ typedef uintmax_t uatomic_max_t;
342 #define __arch_atomic_exchange_and_add_32(mem, value) \
343 ({ \
344 __typeof (*mem) __val, __tmp; \
345 - __asm __volatile ("1: lwarx %0,0,%3\n" \
346 - " add %1,%0,%4\n" \
347 - " stwcx. %1,0,%3\n" \
348 + __asm __volatile ("1: lwarx %0,%y2\n" \
349 + " add %1,%0,%3\n" \
350 + " stwcx. %1,%y2\n" \
351 " bne- 1b" \
352 - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
353 - : "b" (mem), "r" (value), "m" (*mem) \
354 + : "=&b" (__val), "=&r" (__tmp), "+Z" (*mem) \
355 + : "r" (value) \
356 : "cr0", "memory"); \
357 __val; \
358 })
359 @@ -157,12 +157,12 @@ typedef uintmax_t uatomic_max_t;
360 #define __arch_atomic_increment_val_32(mem) \
361 ({ \
362 __typeof (*(mem)) __val; \
363 - __asm __volatile ("1: lwarx %0,0,%2\n" \
364 + __asm __volatile ("1: lwarx %0,%y1\n" \
365 " addi %0,%0,1\n" \
366 - " stwcx. %0,0,%2\n" \
367 + " stwcx. %0,%y1\n" \
368 " bne- 1b" \
369 - : "=&b" (__val), "=m" (*mem) \
370 - : "b" (mem), "m" (*mem) \
371 + : "=&b" (__val), "+Z" (*mem) \
372 + : \
373 : "cr0", "memory"); \
374 __val; \
375 })
376 @@ -170,27 +170,27 @@ typedef uintmax_t uatomic_max_t;
377 #define __arch_atomic_decrement_val_32(mem) \
378 ({ \
379 __typeof (*(mem)) __val; \
380 - __asm __volatile ("1: lwarx %0,0,%2\n" \
381 + __asm __volatile ("1: lwarx %0,%y1\n" \
382 " subi %0,%0,1\n" \
383 - " stwcx. %0,0,%2\n" \
384 + " stwcx. %0,%y1\n" \
385 " bne- 1b" \
386 - : "=&b" (__val), "=m" (*mem) \
387 - : "b" (mem), "m" (*mem) \
388 + : "=&b" (__val), "+Z" (*mem) \
389 + : \
390 : "cr0", "memory"); \
391 __val; \
392 })
393
394 #define __arch_atomic_decrement_if_positive_32(mem) \
395 ({ int __val, __tmp; \
396 - __asm __volatile ("1: lwarx %0,0,%3\n" \
397 + __asm __volatile ("1: lwarx %0,%y2\n" \
398 " cmpwi 0,%0,0\n" \
399 " addi %1,%0,-1\n" \
400 " ble 2f\n" \
401 - " stwcx. %1,0,%3\n" \
402 + " stwcx. %1,%y2\n" \
403 " bne- 1b\n" \
404 "2: " __ARCH_ACQ_INSTR \
405 - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
406 - : "b" (mem), "m" (*mem) \
407 + : "=&b" (__val), "=&r" (__tmp), "+Z" (*mem) \
408 + : \
409 : "cr0", "memory"); \
410 __val; \
411 })
412 Index: sysdeps/powerpc/powerpc32/bits/atomic.h
413 ===================================================================
414 RCS file: /cvs/glibc/libc/sysdeps/powerpc/powerpc32/bits/atomic.h,v
415 retrieving revision 1.6
416 diff -u -a -p -r1.6 atomic.h
417 --- sysdeps/powerpc/powerpc32/bits/atomic.h 26 Mar 2007 20:15:45 -0000 1.6
418 +++ sysdeps/powerpc/powerpc32/bits/atomic.h 31 May 2008 08:50:56 -0000
419 @@ -44,14 +44,14 @@
420 ({ \
421 unsigned int __tmp; \
422 __asm __volatile ( \
423 - "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
424 + "1: lwarx %0,%y1" MUTEX_HINT_ACQ "\n" \
425 " subf. %0,%2,%0\n" \
426 " bne 2f\n" \
427 - " stwcx. %3,0,%1\n" \
428 + " stwcx. %3,%y1\n" \
429 " bne- 1b\n" \
430 "2: " __ARCH_ACQ_INSTR \
431 - : "=&r" (__tmp) \
432 - : "b" (mem), "r" (oldval), "r" (newval) \
433 + : "=&r" (__tmp), "+Z" (*(mem)) \
434 + : "r" (oldval), "r" (newval) \
435 : "cr0", "memory"); \
436 __tmp != 0; \
437 })
438 @@ -60,14 +60,14 @@
439 ({ \
440 unsigned int __tmp; \
441 __asm __volatile (__ARCH_REL_INSTR "\n" \
442 - "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \
443 + "1: lwarx %0,%y1" MUTEX_HINT_REL "\n" \
444 " subf. %0,%2,%0\n" \
445 " bne 2f\n" \
446 - " stwcx. %3,0,%1\n" \
447 + " stwcx. %3,%y1\n" \
448 " bne- 1b\n" \
449 "2: " \
450 - : "=&r" (__tmp) \
451 - : "b" (mem), "r" (oldval), "r" (newval) \
452 + : "=&r" (__tmp), "+Z" (*(mem)) \
453 + : "r" (oldval), "r" (newval) \
454 : "cr0", "memory"); \
455 __tmp != 0; \
456 })
457 Index: sysdeps/powerpc/powerpc64/bits/atomic.h
458 ===================================================================
459 RCS file: /cvs/glibc/libc/sysdeps/powerpc/powerpc64/bits/atomic.h,v
460 retrieving revision 1.8
461 diff -u -a -p -r1.8 atomic.h
462 --- sysdeps/powerpc/powerpc64/bits/atomic.h 26 Mar 2007 20:16:03 -0000 1.8
463 +++ sysdeps/powerpc/powerpc64/bits/atomic.h 31 May 2008 08:50:56 -0000
464 @@ -44,14 +44,14 @@
465 ({ \
466 unsigned int __tmp, __tmp2; \
467 __asm __volatile (" clrldi %1,%1,32\n" \
468 - "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
469 + "1: lwarx %0,%y2" MUTEX_HINT_ACQ "\n" \
470 " subf. %0,%1,%0\n" \
471 " bne 2f\n" \
472 - " stwcx. %4,0,%2\n" \
473 + " stwcx. %4,%y2\n" \
474 " bne- 1b\n" \
475 "2: " __ARCH_ACQ_INSTR \
476 - : "=&r" (__tmp), "=r" (__tmp2) \
477 - : "b" (mem), "1" (oldval), "r" (newval) \
478 + : "=&r" (__tmp), "=r" (__tmp2), "+Z" (*(mem)) \
479 + : "1" (oldval), "r" (newval) \
480 : "cr0", "memory"); \
481 __tmp != 0; \
482 })
483 @@ -61,14 +61,14 @@
484 unsigned int __tmp, __tmp2; \
485 __asm __volatile (__ARCH_REL_INSTR "\n" \
486 " clrldi %1,%1,32\n" \
487 - "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \
488 + "1: lwarx %0,%y2" MUTEX_HINT_REL "\n" \
489 " subf. %0,%1,%0\n" \
490 " bne 2f\n" \
491 - " stwcx. %4,0,%2\n" \
492 + " stwcx. %4,%y2\n" \
493 " bne- 1b\n" \
494 "2: " \
495 - : "=&r" (__tmp), "=r" (__tmp2) \
496 - : "b" (mem), "1" (oldval), "r" (newval) \
497 + : "=&r" (__tmp), "=r" (__tmp2), "+Z" (*(mem)) \
498 + : "1" (oldval), "r" (newval) \
499 : "cr0", "memory"); \
500 __tmp != 0; \
501 })
502 @@ -82,14 +82,14 @@
503 ({ \
504 unsigned long __tmp; \
505 __asm __volatile ( \
506 - "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
507 + "1: ldarx %0,%y1" MUTEX_HINT_ACQ "\n" \
508 " subf. %0,%2,%0\n" \
509 " bne 2f\n" \
510 - " stdcx. %3,0,%1\n" \
511 + " stdcx. %3,%y1\n" \
512 " bne- 1b\n" \
513 "2: " __ARCH_ACQ_INSTR \
514 - : "=&r" (__tmp) \
515 - : "b" (mem), "r" (oldval), "r" (newval) \
516 + : "=&r" (__tmp), "+Z" (*(mem)) \
517 + : "r" (oldval), "r" (newval) \
518 : "cr0", "memory"); \
519 __tmp != 0; \
520 })
521 @@ -98,14 +98,14 @@
522 ({ \
523 unsigned long __tmp; \
524 __asm __volatile (__ARCH_REL_INSTR "\n" \
525 - "1: ldarx %0,0,%2" MUTEX_HINT_REL "\n" \
526 + "1: ldarx %0,%y1" MUTEX_HINT_REL "\n" \
527 " subf. %0,%2,%0\n" \
528 " bne 2f\n" \
529 - " stdcx. %3,0,%1\n" \
530 + " stdcx. %3,%y1\n" \
531 " bne- 1b\n" \
532 "2: " \
533 - : "=&r" (__tmp) \
534 - : "b" (mem), "r" (oldval), "r" (newval) \
535 + : "=&r" (__tmp), "+Z" (*(mem)) \
536 + : "r" (oldval), "r" (newval) \
537 : "cr0", "memory"); \
538 __tmp != 0; \
539 })
540 @@ -115,14 +115,14 @@
541 __typeof (*(mem)) __tmp; \
542 __typeof (mem) __memp = (mem); \
543 __asm __volatile ( \
544 - "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
545 + "1: ldarx %0,%y1" MUTEX_HINT_ACQ "\n" \
546 " cmpd %0,%2\n" \
547 " bne 2f\n" \
548 - " stdcx. %3,0,%1\n" \
549 + " stdcx. %3,%y1\n" \
550 " bne- 1b\n" \
551 "2: " __ARCH_ACQ_INSTR \
552 - : "=&r" (__tmp) \
553 - : "b" (__memp), "r" (oldval), "r" (newval) \
554 + : "=&r" (__tmp), "+Z" (*__memp) \
555 + : "r" (oldval), "r" (newval) \
556 : "cr0", "memory"); \
557 __tmp; \
558 })
559 @@ -132,14 +132,14 @@
560 __typeof (*(mem)) __tmp; \
561 __typeof (mem) __memp = (mem); \
562 __asm __volatile (__ARCH_REL_INSTR "\n" \
563 - "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \
564 + "1: ldarx %0,%y1" MUTEX_HINT_REL "\n" \
565 " cmpd %0,%2\n" \
566 " bne 2f\n" \
567 - " stdcx. %3,0,%1\n" \
568 + " stdcx. %3,%y1\n" \
569 " bne- 1b\n" \
570 "2: " \
571 - : "=&r" (__tmp) \
572 - : "b" (__memp), "r" (oldval), "r" (newval) \
573 + : "=&r" (__tmp), "+Z" (*__memp) \
574 + : "r" (oldval), "r" (newval) \
575 : "cr0", "memory"); \
576 __tmp; \
577 })
578 @@ -148,12 +148,12 @@
579 ({ \
580 __typeof (*mem) __val; \
581 __asm __volatile (__ARCH_REL_INSTR "\n" \
582 - "1: ldarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
583 - " stdcx. %3,0,%2\n" \
584 + "1: ldarx %0,%y1" MUTEX_HINT_ACQ "\n" \
585 + " stdcx. %2,%y1\n" \
586 " bne- 1b\n" \
587 " " __ARCH_ACQ_INSTR \
588 - : "=&r" (__val), "=m" (*mem) \
589 - : "b" (mem), "r" (value), "m" (*mem) \
590 + : "=&r" (__val), "+Z" (*(mem)) \
591 + : "r" (value) \
592 : "cr0", "memory"); \
593 __val; \
594 })
595 @@ -162,11 +162,11 @@
596 ({ \
597 __typeof (*mem) __val; \
598 __asm __volatile (__ARCH_REL_INSTR "\n" \
599 - "1: ldarx %0,0,%2" MUTEX_HINT_REL "\n" \
600 - " stdcx. %3,0,%2\n" \
601 + "1: ldarx %0,%y1" MUTEX_HINT_REL "\n" \
602 + " stdcx. %2,%y1\n" \
603 " bne- 1b" \
604 - : "=&r" (__val), "=m" (*mem) \
605 - : "b" (mem), "r" (value), "m" (*mem) \
606 + : "=&r" (__val), "+Z" (*(mem)) \
607 + : "r" (value) \
608 : "cr0", "memory"); \
609 __val; \
610 })
611 @@ -174,12 +174,12 @@
612 #define __arch_atomic_exchange_and_add_64(mem, value) \
613 ({ \
614 __typeof (*mem) __val, __tmp; \
615 - __asm __volatile ("1: ldarx %0,0,%3\n" \
616 - " add %1,%0,%4\n" \
617 - " stdcx. %1,0,%3\n" \
618 + __asm __volatile ("1: ldarx %0,%y2\n" \
619 + " add %1,%0,%3\n" \
620 + " stdcx. %1,%y2\n" \
621 " bne- 1b" \
622 - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
623 - : "b" (mem), "r" (value), "m" (*mem) \
624 + : "=&b" (__val), "=&r" (__tmp), "+Z" (*(mem)) \
625 + : "r" (value) \
626 : "cr0", "memory"); \
627 __val; \
628 })
629 @@ -187,12 +187,12 @@
630 #define __arch_atomic_increment_val_64(mem) \
631 ({ \
632 __typeof (*(mem)) __val; \
633 - __asm __volatile ("1: ldarx %0,0,%2\n" \
634 + __asm __volatile ("1: ldarx %0,%y1\n" \
635 " addi %0,%0,1\n" \
636 - " stdcx. %0,0,%2\n" \
637 + " stdcx. %0,%y1\n" \
638 " bne- 1b" \
639 - : "=&b" (__val), "=m" (*mem) \
640 - : "b" (mem), "m" (*mem) \
641 + : "=&b" (__val), "+Z" (*(mem)) \
642 + : \
643 : "cr0", "memory"); \
644 __val; \
645 })
646 @@ -200,27 +200,27 @@
647 #define __arch_atomic_decrement_val_64(mem) \
648 ({ \
649 __typeof (*(mem)) __val; \
650 - __asm __volatile ("1: ldarx %0,0,%2\n" \
651 + __asm __volatile ("1: ldarx %0,%y1\n" \
652 " subi %0,%0,1\n" \
653 - " stdcx. %0,0,%2\n" \
654 + " stdcx. %0,%y1\n" \
655 " bne- 1b" \
656 - : "=&b" (__val), "=m" (*mem) \
657 - : "b" (mem), "m" (*mem) \
658 + : "=&b" (__val), "+Z" (*(mem)) \
659 + : \
660 : "cr0", "memory"); \
661 __val; \
662 })
663
664 #define __arch_atomic_decrement_if_positive_64(mem) \
665 ({ int __val, __tmp; \
666 - __asm __volatile ("1: ldarx %0,0,%3\n" \
667 + __asm __volatile ("1: ldarx %0,%y2\n" \
668 " cmpdi 0,%0,0\n" \
669 " addi %1,%0,-1\n" \
670 " ble 2f\n" \
671 - " stdcx. %1,0,%3\n" \
672 + " stdcx. %1,%y2\n" \
673 " bne- 1b\n" \
674 "2: " __ARCH_ACQ_INSTR \
675 - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
676 - : "b" (mem), "m" (*mem) \
677 + : "=&b" (__val), "=&r" (__tmp), "+Z" (*(mem)) \
678 + : \
679 : "cr0", "memory"); \
680 __val; \
681 })