Commit | Line | Data |
---|---|---|
0114ba7f MD |
1 | #ifndef _ARCH_ATOMIC_X86_H |
2 | #define _ARCH_ATOMIC_X86_H | |
3 | ||
4 | /* | |
5 | * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. | |
6 | * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. | |
7 | * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. | |
8 | * Copyright (c) 2009 Mathieu Desnoyers | |
9 | * | |
10 | * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
11 | * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
12 | * | |
13 | * Permission is hereby granted to use or copy this program | |
14 | * for any purpose, provided the above notices are retained on all copies. | |
15 | * Permission to modify the code and to distribute modified code is granted, | |
16 | * provided the above notices are retained, and a notice that the code was | |
17 | * modified is included with the above copyright notice. | |
18 | * | |
19 | * Code inspired from libatomic_ops-1.2, inherited in part from the | |
20 | * Boehm-Demers-Weiser conservative garbage collector. | |
21 | */ | |
22 | ||
/*
 * Number of value bits in an unsigned long. Derived from the compiler's
 * __SIZEOF_LONG__ (GCC/Clang predefined macro); assumes 8-bit bytes.
 * Left overridable so a build system that already defines BITS_PER_LONG wins.
 */
#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif
26 | ||
27 | #ifndef _INCLUDE_API_H | |
28 | ||
29 | /* | |
0114ba7f MD |
30 | * Derived from AO_compare_and_swap() and AO_test_and_set_full(). |
31 | */ | |
32 | ||
/*
 * Atomically swap the 32-bit word at *addr with val.
 *
 * @addr: location to exchange; must be naturally aligned.
 * @val:  new value to store.
 * Returns the previous contents of *addr.
 *
 * Acts as a full memory barrier (the "memory" clobber keeps the compiler
 * from reordering accesses across it; xchg is serializing on x86).
 */
static __attribute__((always_inline))
unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
{
	unsigned int old;

	/*
	 * On x86, xchg with a memory operand is implicitly locked,
	 * so no explicit "lock" prefix is needed.
	 */
	__asm__ __volatile__(
		"xchgl %0, %1"
		: "=&r"(old), "=m"(*addr)
		: "0" (val), "m"(*addr)
		: "memory");

	return old;
}
47 | ||
48 | #if (BITS_PER_LONG == 64) | |
49 | ||
/*
 * Atomically swap the 64-bit word at *addr with val (64-bit builds only;
 * the enclosing BITS_PER_LONG guard keeps this out of 32-bit compiles).
 *
 * @addr: location to exchange; must be naturally aligned.
 * @val:  new value to store.
 * Returns the previous contents of *addr.
 */
static __attribute__((always_inline))
unsigned long atomic_exchange_64(volatile unsigned long *addr,
				 unsigned long val)
{
	unsigned long old;

	/*
	 * On x86, xchg with a memory operand is implicitly locked,
	 * so no explicit "lock" prefix is needed.
	 */
	__asm__ __volatile__(
		"xchgq %0, %1"
		: "=&r"(old), "=m"(*addr)
		: "0" (val), "m"(*addr)
		: "memory");

	return old;
}
65 | ||
66 | #endif | |
67 | ||
/*
 * Size-dispatching atomic exchange: routes to the 4- or 8-byte primitive
 * based on len (the operand size in bytes, normally sizeof(*addr) supplied
 * by the xchg() macro).
 *
 * @addr: location to exchange (type-erased; must be suitably aligned).
 * @val:  new value, widened to unsigned long.
 * @len:  operand size in bytes; only 4 (and 8 on 64-bit builds) supported.
 * Returns the previous value at *addr, zero-extended to unsigned long.
 *
 * An unsupported len traps at runtime via ud2: since these helpers are
 * always inlined, a bad size cannot be caught with linker tricks when
 * optimizations are disabled, so an illegal instruction is generated instead.
 */
static __attribute__((always_inline))
unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
{
	if (len == 4)
		return atomic_exchange_32(addr, val);
#if (BITS_PER_LONG == 64)
	if (len == 8)
		return atomic_exchange_64(addr, val);
#endif
	__asm__ __volatile__("ud2");
	return 0;	/* not reached; satisfies the return-type requirement */
}
82 | ||
/*
 * xchg() - type-generic atomic exchange.
 * Stores v into *addr and returns the previous value, cast back to the
 * pointee's type via __typeof__. Dispatches on sizeof(*(addr)), so only
 * 4-byte (and, on 64-bit builds, 8-byte) objects are supported; other
 * sizes trap at runtime in _atomic_exchange().
 * Note: addr is evaluated more than once — avoid side effects in it.
 */
#define xchg(addr, v) \
	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
86 | ||
87 | #endif /* #ifndef _INCLUDE_API_H */ | |
88 | ||
#endif /* _ARCH_ATOMIC_X86_H */