// RUN: %clang_cc1 -triple riscv32 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple riscv32 -emit-llvm -fforce-enable-int128 %s -o - \
// RUN:   | FileCheck %s -check-prefixes=CHECK,CHECK-FORCEINT128

#include <stddef.h>
#include <stdint.h>

// CHECK-LABEL: define void @f_void()
void f_void(void) {}

// Scalar arguments and return values smaller than the word size are extended
// according to the sign of their type, up to 32 bits.

// CHECK-LABEL: define zeroext i1 @f_scalar_0(i1 zeroext %x)
_Bool f_scalar_0(_Bool x) { return x; }

// CHECK-LABEL: define signext i8 @f_scalar_1(i8 signext %x)
int8_t f_scalar_1(int8_t x) { return x; }

// CHECK-LABEL: define zeroext i8 @f_scalar_2(i8 zeroext %x)
uint8_t f_scalar_2(uint8_t x) { return x; }
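
// A hedged extra example (not part of the original test): 16-bit scalars
// follow the same extension rule, so a uint16_t should be zero-extended on
// both sides, e.g. "define zeroext i16 @f_scalar_u16(i16 zeroext %x)". The
// expected IR and the function name are assumptions; no CHECK prefix is
// used, so FileCheck ignores this addition.
uint16_t f_scalar_u16(uint16_t x) { return x; }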

// CHECK-LABEL: define i32 @f_scalar_3(i32 %x)
int32_t f_scalar_3(int32_t x) { return x; }

// CHECK-LABEL: define i64 @f_scalar_4(i64 %x)
int64_t f_scalar_4(int64_t x) { return x; }

#ifdef __SIZEOF_INT128__
// CHECK-FORCEINT128-LABEL: define i128 @f_scalar_5(i128 %x)
__int128_t f_scalar_5(__int128_t x) { return x; }
#endif

// CHECK-LABEL: define float @f_fp_scalar_1(float %x)
float f_fp_scalar_1(float x) { return x; }

// CHECK-LABEL: define double @f_fp_scalar_2(double %x)
double f_fp_scalar_2(double x) { return x; }

// Scalars larger than 2*xlen are passed/returned indirectly. However, the
// RISC-V LLVM backend can handle this fine, so the function doesn't need to
// be modified.

// CHECK-LABEL: define fp128 @f_fp_scalar_3(fp128 %x)
long double f_fp_scalar_3(long double x) { return x; }

// Empty structs or unions are ignored.

struct empty_s {};

// CHECK-LABEL: define void @f_agg_empty_struct()
struct empty_s f_agg_empty_struct(struct empty_s x) {
  return x;
}

union empty_u {};

// CHECK-LABEL: define void @f_agg_empty_union()
union empty_u f_agg_empty_union(union empty_u x) {
  return x;
}

// Aggregates <= 2*xlen may be passed in registers, so will be coerced to
// integer arguments. The rules for return are the same.

struct tiny {
  uint8_t a, b, c, d;
};

// CHECK-LABEL: define void @f_agg_tiny(i32 %x.coerce)
void f_agg_tiny(struct tiny x) {
  x.a += x.b;
  x.c += x.d;
}

// CHECK-LABEL: define i32 @f_agg_tiny_ret()
struct tiny f_agg_tiny_ret() {
  return (struct tiny){1, 2, 3, 4};
}

typedef uint8_t v4i8 __attribute__((vector_size(4)));
typedef int32_t v1i32 __attribute__((vector_size(4)));

// CHECK-LABEL: define void @f_vec_tiny_v4i8(i32 %x.coerce)
void f_vec_tiny_v4i8(v4i8 x) {
  x = x + x;
}

// CHECK-LABEL: define i32 @f_vec_tiny_v4i8_ret()
v4i8 f_vec_tiny_v4i8_ret() {
  return (v4i8){1, 2, 3, 4};
}

// CHECK-LABEL: define void @f_vec_tiny_v1i32(i32 %x.coerce)
void f_vec_tiny_v1i32(v1i32 x) {
  x = x + x;
}

// CHECK-LABEL: define i32 @f_vec_tiny_v1i32_ret()
v1i32 f_vec_tiny_v1i32_ret() {
  return (v1i32){1};
}

struct small {
  int32_t a, *b;
};

// CHECK-LABEL: define void @f_agg_small([2 x i32] %x.coerce)
void f_agg_small(struct small x) {
  x.a += *x.b;
  x.b = &x.a;
}

// CHECK-LABEL: define [2 x i32] @f_agg_small_ret()
struct small f_agg_small_ret() {
  return (struct small){1, 0};
}

typedef uint8_t v8i8 __attribute__((vector_size(8)));
typedef int64_t v1i64 __attribute__((vector_size(8)));

// CHECK-LABEL: define void @f_vec_small_v8i8(i64 %x.coerce)
void f_vec_small_v8i8(v8i8 x) {
  x = x + x;
}

// CHECK-LABEL: define i64 @f_vec_small_v8i8_ret()
v8i8 f_vec_small_v8i8_ret() {
  return (v8i8){1, 2, 3, 4, 5, 6, 7, 8};
}

// CHECK-LABEL: define void @f_vec_small_v1i64(i64 %x.coerce)
void f_vec_small_v1i64(v1i64 x) {
  x = x + x;
}

// CHECK-LABEL: define i64 @f_vec_small_v1i64_ret()
v1i64 f_vec_small_v1i64_ret() {
  return (v1i64){1};
}

// Aggregates of 2*xlen size and 2*xlen alignment should be coerced to a
// single 2*xlen-sized argument, to ensure that alignment can be maintained if
// passed on the stack.

struct small_aligned {
  int64_t a;
};

// CHECK-LABEL: define void @f_agg_small_aligned(i64 %x.coerce)
void f_agg_small_aligned(struct small_aligned x) {
  x.a += x.a;
}

// CHECK-LABEL: define i64 @f_agg_small_aligned_ret(i64 %x.coerce)
struct small_aligned f_agg_small_aligned_ret(struct small_aligned x) {
  return (struct small_aligned){10};
}

// Aggregates greater than 2*xlen will be passed and returned indirectly.

struct large {
  int32_t a, b, c, d;
};

// CHECK-LABEL: define void @f_agg_large(%struct.large* %x)
void f_agg_large(struct large x) {
  x.a = x.b + x.c + x.d;
}

// The address where the struct should be written to will be the first
// argument.

// CHECK-LABEL: define void @f_agg_large_ret(%struct.large* noalias sret %agg.result, i32 %i, i8 signext %j)
struct large f_agg_large_ret(int32_t i, int8_t j) {
  return (struct large){1, 2, 3, 4};
}

typedef unsigned char v16i8 __attribute__((vector_size(16)));

// CHECK-LABEL: define void @f_vec_large_v16i8(<16 x i8>*)
void f_vec_large_v16i8(v16i8 x) {
  x = x + x;
}

// CHECK-LABEL: define void @f_vec_large_v16i8_ret(<16 x i8>* noalias sret %agg.result)
v16i8 f_vec_large_v16i8_ret() {
  return (v16i8){1, 2, 3, 4, 5, 6, 7, 8};
}

// Scalars passed on the stack should have signext/zeroext attributes (they
// would also be extended if passed in registers).

// CHECK-LABEL: define i32 @f_scalar_stack_1(i32 %a.coerce, [2 x i32] %b.coerce, i64 %c.coerce, %struct.large* %d, i8 zeroext %e, i8 signext %f, i8 %g, i8 %h)
int f_scalar_stack_1(struct tiny a, struct small b, struct small_aligned c,
                     struct large d, uint8_t e, int8_t f, uint8_t g, int8_t h) {
  return g + h;
}

// CHECK-LABEL: define i32 @f_scalar_stack_2(i32 %a, i64 %b, float %c, double %d, fp128 %e, i8 zeroext %f, i8 %g, i8 %h)
int f_scalar_stack_2(int32_t a, int64_t b, float c, double d, long double e,
                     uint8_t f, int8_t g, uint8_t h) {
  return g + h;
}

// Ensure that scalars passed on the stack are still determined correctly in
// the presence of large return values that consume a register due to the need
// to pass a pointer.

// CHECK-LABEL: define void @f_scalar_stack_3(%struct.large* noalias sret %agg.result, i32 %a, i64 %b, double %c, fp128 %d, i8 zeroext %e, i8 %f, i8 %g)
struct large f_scalar_stack_3(int32_t a, int64_t b, double c, long double d,
                              uint8_t e, int8_t f, uint8_t g) {
  return (struct large){a, e, f, g};
}

// CHECK-LABEL: define fp128 @f_scalar_stack_4(i32 %a, i64 %b, double %c, fp128 %d, i8 zeroext %e, i8 %f, i8 %g)
long double f_scalar_stack_4(int32_t a, int64_t b, double c, long double d,
                             uint8_t e, int8_t f, uint8_t g) {
  return d;
}

// Aggregates and >=XLen scalars passed on the stack should be lowered just as
// they would be if passed via registers.

// CHECK-LABEL: define void @f_scalar_stack_5(double %a, i64 %b, double %c, i64 %d, i32 %e, i64 %f, float %g, double %h, fp128 %i)
void f_scalar_stack_5(double a, int64_t b, double c, int64_t d, int e,
                      int64_t f, float g, double h, long double i) {}

// CHECK-LABEL: define void @f_agg_stack(double %a, i64 %b, double %c, i64 %d, i32 %e.coerce, [2 x i32] %f.coerce, i64 %g.coerce, %struct.large* %h)
void f_agg_stack(double a, int64_t b, double c, int64_t d, struct tiny e,
                 struct small f, struct small_aligned g, struct large h) {}
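
// A hedged caller sketch (not part of the original test): under the ilp32
// calling convention the doubles and int64_ts above occupy all eight
// argument registers (a0-a7, in aligned pairs), so e, f, g, and h are passed
// on the stack with the lowering shown in the CHECK line above.
// f_agg_stack_caller is a hypothetical name with no CHECK lines.
void f_agg_stack_caller(void) {
  f_agg_stack(1.0, 2, 3.0, 4, (struct tiny){1, 2, 3, 4},
              (struct small){5, NULL}, (struct small_aligned){6},
              (struct large){7, 8, 9, 10});
}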

// Ensure that ABI lowering happens as expected for vararg calls. For RV32
// with the base integer calling convention there will be no observable
// differences in the lowered IR for a call with varargs vs without.

int f_va_callee(int, ...);

// CHECK-LABEL: define void @f_va_caller()
// CHECK: call i32 (i32, ...) @f_va_callee(i32 1, i32 2, i64 3, double 4.000000e+00, double 5.000000e+00, i32 {{%.*}}, [2 x i32] {{%.*}}, i64 {{%.*}}, %struct.large* {{%.*}})
void f_va_caller() {
  f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct tiny){6, 7, 8, 9},
              (struct small){10, NULL}, (struct small_aligned){11},
              (struct large){12, 13, 14, 15});
}
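
// A hedged companion sketch (not part of the original test): a fixed-argument
// prototype whose parameter types match the promoted vararg types should
// produce a call with the same lowered operand types, illustrating the
// comment above. f_fixed_callee and f_fixed_caller are hypothetical names
// with no CHECK lines.
int f_fixed_callee(int, int, long long, double, double);

void f_fixed_caller(void) {
  f_fixed_callee(1, 2, 3LL, 4.0, 5.0);
}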

// CHECK-LABEL: define i32 @f_va_1(i8* %fmt, ...) {{.*}} {
// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
// CHECK: [[VA:%.*]] = alloca i8*, align 4
// CHECK: [[V:%.*]] = alloca i32, align 4
// CHECK: store i8* %fmt, i8** [[FMT_ADDR]], align 4
// CHECK: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK: call void @llvm.va_start(i8* [[VA1]])
// CHECK: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 4
// CHECK: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32*
// CHECK: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK: store i32 [[TMP1]], i32* [[V]], align 4
// CHECK: [[VA2:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK: call void @llvm.va_end(i8* [[VA2]])
// CHECK: [[TMP2:%.*]] = load i32, i32* [[V]], align 4
// CHECK: ret i32 [[TMP2]]
int f_va_1(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  __builtin_va_end(va);

  return v;
}
274 // An "aligned" register pair (where the first register is even-numbered) is
275 // used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the
276 // correct offsets are used.

// CHECK-LABEL: @f_va_2(
// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 4
// CHECK-NEXT: [[V:%.*]] = alloca double, align 8
// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 4
// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 7
// CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -8
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i32 [[TMP2]] to i8*
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i32 8
// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to double*
// CHECK-NEXT: [[TMP4:%.*]] = load double, double* [[TMP3]], align 8
// CHECK-NEXT: store double [[TMP4]], double* [[V]], align 8
// CHECK-NEXT: [[VA2:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT: call void @llvm.va_end(i8* [[VA2]])
// CHECK-NEXT: [[TMP5:%.*]] = load double, double* [[V]], align 8
// CHECK-NEXT: ret double [[TMP5]]
double f_va_2(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  double v = __builtin_va_arg(va, double);
  __builtin_va_end(va);

  return v;
}
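
// A hedged caller sketch (not part of the original test): fmt travels in a0,
// and since the double vararg requires an "aligned" pair whose first register
// is even-numbered, a1 is skipped and the value is passed in a2-a3.
// f_va_2_caller is a hypothetical name with no CHECK lines.
void f_va_2_caller(void) {
  f_va_2("d", 1.0);
}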
309 // Two "aligned" register pairs.

// CHECK-LABEL: @f_va_3(
// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 4
// CHECK-NEXT: [[V:%.*]] = alloca double, align 8
// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[X:%.*]] = alloca double, align 8
// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 4
// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[ARGP_CUR]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 7
// CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -8
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = inttoptr i32 [[TMP2]] to i8*
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i32 8
// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to double*
// CHECK-NEXT: [[TMP4:%.*]] = load double, double* [[TMP3]], align 8
// CHECK-NEXT: store double [[TMP4]], double* [[V]], align 8
// CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i32 4
// CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR2]] to i32*
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK-NEXT: store i32 [[TMP6]], i32* [[W]], align 4
// CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARGP_CUR4]] to i32
// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 7
// CHECK-NEXT: [[TMP9:%.*]] = and i32 [[TMP8]], -8
// CHECK-NEXT: [[ARGP_CUR4_ALIGNED:%.*]] = inttoptr i32 [[TMP9]] to i8*
// CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4_ALIGNED]], i32 8
// CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8* [[ARGP_CUR4_ALIGNED]] to double*
// CHECK-NEXT: [[TMP11:%.*]] = load double, double* [[TMP10]], align 8
// CHECK-NEXT: store double [[TMP11]], double* [[X]], align 8
// CHECK-NEXT: [[VA6:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT: call void @llvm.va_end(i8* [[VA6]])
// CHECK-NEXT: [[TMP12:%.*]] = load double, double* [[V]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load double, double* [[X]], align 8
// CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
// CHECK-NEXT: ret double [[ADD]]
double f_va_3(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  double v = __builtin_va_arg(va, double);
  int w = __builtin_va_arg(va, int);
  double x = __builtin_va_arg(va, double);
  __builtin_va_end(va);

  return v + x;
}
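
// A hedged caller sketch (not part of the original test): v takes the aligned
// pair a2-a3, w takes a4, and x needs another aligned pair, so a5 is skipped
// and x is passed in a6-a7 (the "two aligned register pairs" above).
// f_va_3_caller is a hypothetical name with no CHECK lines.
void f_va_3_caller(void) {
  f_va_3("dd", 1.0, 2, 3.0);
}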

// CHECK-LABEL: define i32 @f_va_4(i8* %fmt, ...) {{.*}} {
// CHECK: [[FMT_ADDR:%.*]] = alloca i8*, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca i8*, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[LD:%.*]] = alloca fp128, align 16
// CHECK-NEXT: [[TS:%.*]] = alloca [[STRUCT_TINY:%.*]], align 1
// CHECK-NEXT: [[SS:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 4
// CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i8* [[FMT:%.*]], i8** [[FMT_ADDR]], align 4
// CHECK-NEXT: [[VA1:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT: call void @llvm.va_start(i8* [[VA1]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 4
// CHECK-NEXT: store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[ARGP_CUR]] to i32*
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK-NEXT: store i32 [[TMP1]], i32* [[V]], align 4
// CHECK-NEXT: [[ARGP_CUR2:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i32 4
// CHECK-NEXT: store i8* [[ARGP_NEXT3]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[ARGP_CUR2]] to fp128**
// CHECK-NEXT: [[TMP3:%.*]] = load fp128*, fp128** [[TMP2]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load fp128, fp128* [[TMP3]], align 16
// CHECK-NEXT: store fp128 [[TMP4]], fp128* [[LD]], align 16
// CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4]], i32 4
// CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR4]] to %struct.tiny*
// CHECK-NEXT: [[TMP6:%.*]] = bitcast %struct.tiny* [[TS]] to i8*
// CHECK-NEXT: [[TMP7:%.*]] = bitcast %struct.tiny* [[TMP5]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[TMP6]], i8* align 4 [[TMP7]], i32 4, i1 false)
// CHECK-NEXT: [[ARGP_CUR6:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT7:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR6]], i32 8
// CHECK-NEXT: store i8* [[ARGP_NEXT7]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[ARGP_CUR6]] to %struct.small*
// CHECK-NEXT: [[TMP9:%.*]] = bitcast %struct.small* [[SS]] to i8*
// CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct.small* [[TMP8]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP9]], i8* align 4 [[TMP10]], i32 8, i1 false)
// CHECK-NEXT: [[ARGP_CUR8:%.*]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT9:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR8]], i32 4
// CHECK-NEXT: store i8* [[ARGP_NEXT9]], i8** [[VA]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = bitcast i8* [[ARGP_CUR8]] to %struct.large**
// CHECK-NEXT: [[TMP12:%.*]] = load %struct.large*, %struct.large** [[TMP11]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = bitcast %struct.large* [[LS]] to i8*
// CHECK-NEXT: [[TMP14:%.*]] = bitcast %struct.large* [[TMP12]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i32 16, i1 false)
// CHECK-NEXT: [[VA10:%.*]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT: call void @llvm.va_end(i8* [[VA10]])
int f_va_4(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  long double ld = __builtin_va_arg(va, long double);
  struct tiny ts = __builtin_va_arg(va, struct tiny);
  struct small ss = __builtin_va_arg(va, struct small);
  struct large ls = __builtin_va_arg(va, struct large);
  __builtin_va_end(va);

  int ret = (int)((long double)v + ld);
  ret = ret + ts.a + ts.b + ts.c + ts.d;
  ret = ret + ss.a + (int)ss.b;
  ret = ret + ls.a + ls.b + ls.c + ls.d;

  return ret;
}