// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for __int128 aligned to 16 bytes, which should be
// expanded to LLVM IR by the front end.

#include <stdatomic.h>
#include <stdint.h>

__int128 Ptr __attribute__((aligned(16)));
__int128 Ret __attribute__((aligned(16)));
__int128 Val __attribute__((aligned(16)));
__int128 Exp __attribute__((aligned(16)));
__int128 Des __attribute__((aligned(16)));

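// Atomic load: f1 uses the value-returning __atomic_load_n, f2 the generic
// __atomic_load that writes through the Ret pointer.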
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: ret void
//
__int128 f1() {
  return __atomic_load_n(&Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr @Ret, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f2() {
  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
  return Ret;
}

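// Atomic store: f3 uses __atomic_store_n, f4 the generic __atomic_store; both
// should lower to a single 16-byte-aligned seq_cst atomic store.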
// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
// CHECK-NEXT: ret void
//
void f3() {
  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
// CHECK-NEXT: ret void
//
void f4() {
  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
}

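// Atomic exchange: f5 (__atomic_exchange_n) and f6 (generic __atomic_exchange)
// should both lower to an atomicrmw xchg.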
// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f5() {
  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr @Ret, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f6() {
  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
  return Ret;
}

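// Compare-and-exchange: f7 (__atomic_compare_exchange_n) and f8 (generic
// __atomic_compare_exchange) should lower to a cmpxchg plus the branch that
// writes the old value back to Exp on failure.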
// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Exp, align 16
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f7() {
  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Exp, align 16
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Des, align 16
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8() {
  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}

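// The op-and-fetch builtins (f9-f14) return the new value, so each atomicrmw
// is followed by scalar instructions recomputing the result (e.g. an add of
// the fetched value and the operand).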
// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f9() {
  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f10() {
  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f11() {
  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f12() {
  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f13() {
  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f14() {
  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
}

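// The fetch-and-op builtins (f15-f20) return the old value, so the atomicrmw
// result is stored directly with no extra arithmetic.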
// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f15() {
  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f16() {
  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f17() {
  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f18() {
  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f19() {
  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f20() {
  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
}