 use crate::ffi::CStr;
 use crate::marker::PhantomData;
 use crate::mem;
-use crate::sync::atomic::{self, AtomicUsize, Ordering};
+use crate::ptr;
+use crate::sync::atomic::{self, AtomicPtr, Ordering};
 
 // We can use true weak linkage on ELF targets.
 #[cfg(not(any(target_os = "macos", target_os = "ios")))]
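A note on the import change above: `ptr::invalid_mut` builds a raw pointer from a bare address without attaching any provenance, which suits a sentinel value that is only ever stored and compared, never dereferenced. The following standalone sketch (not part of this patch) illustrates the idea; it assumes a nightly toolchain with the `strict_provenance` feature enabled, since `invalid_mut` and `addr` were unstable APIs when this change was written:

    #![feature(strict_provenance)]

    use std::ptr;

    fn main() {
        // A sentinel pointer carrying only the address 1 and no provenance.
        // It is never dereferenced, only stored and compared.
        let sentinel: *mut u8 = ptr::invalid_mut(1);
        assert_eq!(sentinel.addr(), 1);
        assert!(!sentinel.is_null());

        // The older spelling via an integer-to-pointer cast still compiles,
        // but strict-provenance tooling (e.g. Miri's strict mode) flags it.
        let legacy: *mut u8 = 1 as *mut u8;
        assert_eq!(legacy.addr(), sentinel.addr());
    }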
@@ -83,25 +84,25 @@ pub(crate) macro dlsym {
 }
 pub(crate) struct DlsymWeak<F> {
     name: &'static str,
-    addr: AtomicUsize,
+    func: AtomicPtr<libc::c_void>,
     _marker: PhantomData<F>,
 }
 
 impl<F> DlsymWeak<F> {
     pub(crate) const fn new(name: &'static str) -> Self {
-        DlsymWeak { name, addr: AtomicUsize::new(1), _marker: PhantomData }
+        DlsymWeak { name, func: AtomicPtr::new(ptr::invalid_mut(1)), _marker: PhantomData }
     }
 
     #[inline]
     pub(crate) fn get(&self) -> Option<F> {
         unsafe {
             // Relaxed is fine here because we fence before reading through the
             // pointer (see the comment below).
-            match self.addr.load(Ordering::Relaxed) {
-                1 => self.initialize(),
-                0 => None,
-                addr => {
-                    let func = mem::transmute_copy::<usize, F>(&addr);
+            match self.func.load(Ordering::Relaxed) {
+                func if func.addr() == 1 => self.initialize(),
+                func if func.is_null() => None,
+                func => {
+                    let func = mem::transmute_copy::<*mut libc::c_void, F>(&func);
                     // The caller is presumably going to read through this value
                     // (by calling the function we've dlsymed). This means we'd
                     // need to have loaded it with at least C11's consume
@@ -129,25 +130,22 @@ impl<F> DlsymWeak<F> {
     // Cold because it should only happen during first-time initialization.
     #[cold]
     unsafe fn initialize(&self) -> Option<F> {
-        assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
+        assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut libc::c_void>());
 
         let val = fetch(self.name);
         // This synchronizes with the acquire fence in `get`.
-        self.addr.store(val, Ordering::Release);
+        self.func.store(val, Ordering::Release);
 
-        match val {
-            0 => None,
-            addr => Some(mem::transmute_copy::<usize, F>(&addr)),
-        }
+        if val.is_null() { None } else { Some(mem::transmute_copy::<*mut libc::c_void, F>(&val)) }
     }
 }
 
-unsafe fn fetch(name: &str) -> usize {
+unsafe fn fetch(name: &str) -> *mut libc::c_void {
     let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
         Ok(cstr) => cstr,
-        Err(..) => return 0,
+        Err(..) => return ptr::null_mut(),
     };
-    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
+    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr())
 }
 
 #[cfg(not(any(target_os = "linux", target_os = "android")))]
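Taken together, the struct now implements the following caching scheme: the `AtomicPtr` starts at a sentinel address meaning "not looked up yet", `dlsym` fills it in once, and a null result is remembered as "symbol absent". Below is a rough standalone sketch of that scheme, not code from this patch: it assumes the `libc` crate, uses Acquire/Release instead of std's Relaxed-plus-fence, and uses a plain integer-to-pointer cast for the sentinel so it builds on stable; the names `UNRESOLVED`, `GETPID`, and `lookup_getpid` are illustrative only.

    use std::ffi::CString;
    use std::mem;
    use std::sync::atomic::{AtomicPtr, Ordering};

    // Sentinel address meaning "dlsym has not been attempted yet"; dlsym can
    // never return a pointer with address 1.
    const UNRESOLVED: *mut libc::c_void = 1 as *mut libc::c_void;

    static GETPID: AtomicPtr<libc::c_void> = AtomicPtr::new(UNRESOLVED);

    type GetPidFn = unsafe extern "C" fn() -> libc::pid_t;

    unsafe fn lookup_getpid() -> Option<GetPidFn> {
        let cached = GETPID.load(Ordering::Acquire);
        let resolved = if cached == UNRESOLVED {
            // First call: ask the dynamic linker and cache whatever comes back,
            // including null if the symbol does not exist.
            let name = CString::new("getpid").unwrap();
            let p = libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr());
            GETPID.store(p, Ordering::Release);
            p
        } else {
            cached
        };
        if resolved.is_null() {
            None
        } else {
            // Data and function pointers have the same size on the targets this
            // pattern is used for, which is what the size assert above checks.
            Some(mem::transmute::<*mut libc::c_void, GetPidFn>(resolved))
        }
    }

    fn main() {
        match unsafe { lookup_getpid() } {
            Some(getpid) => println!("getpid() via dlsym: {}", unsafe { getpid() }),
            None => println!("getpid symbol not found"),
        }
    }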