svn commit: r255063 - in projects/arm_eabi_vfp/lib/libc/arm: aeabi gen

Andrew Turner andrew at FreeBSD.org
Fri Aug 30 08:38:06 UTC 2013


Author: andrew
Date: Fri Aug 30 08:38:04 2013
New Revision: 255063
URL: http://svnweb.freebsd.org/changeset/base/255063

Log:
  Add the start of the libc __aeabi_* functions. These are written so
  they can also be used with the soft-float ABI.
  
  They are not yet fully working; for example:
  printf("%f", 0.22);
  
  will print:
  0.0F0000

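The C helper macros in the new aeabi_vfp.h generate this dispatch: under
the soft-float ABI the VFP assembly entry points are given a _softfp
suffix (AEABI_ENTRY(dadd) expands to ENTRY(__aeabi_dadd_softfp)) and a C
wrapper chooses between them and the softfloat routines at run time,
while under __ARM_PCS_VFP the assembly defines the bare __aeabi_* names
directly. A minimal usage sketch, assuming a _libc_arm_fpu_present flag
set during libc startup and __adddf3 as the softfloat fallback name (the
matching aeabi_double.c change is not part of this revision):

  #include "aeabi_vfp.h"

  extern int _libc_arm_fpu_present;	/* assumed: set during libc startup */

  double __aeabi_dadd_softfp(double, double);	/* VFP path, aeabi_vfp_double.S */
  double __adddf3(double, double);		/* softfloat fallback (assumed name) */

  /* AEABI_FUNC2 expands to: double __aeabi_dadd(double a, double b) { ... } */
  double
  AEABI_FUNC2(dadd, double, __adddf3)
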
Added:
  projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp.h   (contents, props changed)
  projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp_double.S   (contents, props changed)
  projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp_float.S   (contents, props changed)
Modified:
  projects/arm_eabi_vfp/lib/libc/arm/aeabi/Makefile.inc
  projects/arm_eabi_vfp/lib/libc/arm/gen/flt_rounds.c

Modified: projects/arm_eabi_vfp/lib/libc/arm/aeabi/Makefile.inc
==============================================================================
--- projects/arm_eabi_vfp/lib/libc/arm/aeabi/Makefile.inc	Fri Aug 30 08:29:23 2013	(r255062)
+++ projects/arm_eabi_vfp/lib/libc/arm/aeabi/Makefile.inc	Fri Aug 30 08:38:04 2013	(r255063)
@@ -3,9 +3,13 @@
 .PATH: ${.CURDIR}/arm/aeabi
 
 SRCS+=	aeabi_atexit.c		\
-	aeabi_double.c		\
-	aeabi_float.c		\
-	aeabi_unwind_cpp.c
+	aeabi_unwind_cpp.c	\
+	aeabi_vfp_double.S	\
+	aeabi_vfp_float.S
+.if ${MACHINE_ARCH} != "armv6hf"
+SRCS+=	aeabi_double.c		\
+	aeabi_float.c
+.endif
 
 # Add the aeabi_mem* functions. While they live in compiler-rt they call into
 # libc. This causes issues when other parts of libc call these functions.

Added: projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp.h	Fri Aug 30 08:38:04 2013	(r255063)
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef AEABI_VFP_H
+#define	AEABI_VFP_H
+
+/*
+ * ASM helper macros. These allow the function names and register
+ * moves to be adjusted when building for the hard-float ABI.
+ */
+
+#ifndef __ARM_PCS_VFP
+/* Define a standard name for the function */
+#define	AEABI_ENTRY(x)	ENTRY(__aeabi_ ## x ## _softfp)
+#define	AEABI_END(x)	END(__aeabi_ ## x ## _softfp)
+
+/*
+ * These should be used when a function takes or returns a floating
+ * point value. They move the data from ARM register(s) to a VFP
+ * register, or from a VFP register back to ARM register(s).
+ */
+#ifdef __ARMEB__
+#define	LOAD_DREG(vreg, reg0, reg1)   vmov vreg, reg1, reg0
+#define	UNLOAD_DREG(reg0, reg1, vreg) vmov reg1, reg0, vreg
+#else
+#define	LOAD_DREG(vreg, reg0, reg1)   vmov vreg, reg0, reg1
+#define	UNLOAD_DREG(reg0, reg1, vreg) vmov reg0, reg1, vreg
+#endif
+
+#define	LOAD_SREGS(vreg0, vreg1, reg0, reg1) vmov vreg0, vreg1, reg0, reg1
+#define	LOAD_SREG(vreg, reg)                 vmov vreg, reg
+#define	UNLOAD_SREG(reg, vreg)               vmov reg, vreg
+#else
+#define	AEABI_ENTRY(x)	ENTRY(__aeabi_ ## x)
+#define	AEABI_END(x)	END(__aeabi_ ## x)
+
+/*
+ * On ARM Hard-Float we don't need these as the data
+ * is already in the VFP registers.
+ */
+#define	LOAD_DREG(vreg, reg0, reg1)
+#define	UNLOAD_DREG(reg0, reg1, vreg)
+
+#define	LOAD_SREGS(vreg0, vreg1, reg0, reg1)
+#define	LOAD_SREG(vreg, reg)
+#define	UNLOAD_SREG(reg, vreg)
+#endif
+
+/*
+ * C Helper macros
+ */
+
+/*
+ * Generate a function that calls either the VFP implementation or the
+ * soft-float version of a given __aeabi_* helper, depending on whether
+ * an FPU is present. It takes a single argument of type in_type.
+ */
+#define	AEABI_FUNC(name, in_type, soft_func)			\
+__aeabi_ ## name(in_type a)					\
+{								\
+	if (_libc_arm_fpu_present)				\
+		return __aeabi_ ## name ## _softfp(a);		\
+	else							\
+		return soft_func (a);				\
+}
+
+/* As above, but takes two arguments of the same type */
+#define	AEABI_FUNC2(name, in_type, soft_func)			\
+__aeabi_ ## name(in_type a, in_type b)				\
+{								\
+	if (_libc_arm_fpu_present)				\
+		return __aeabi_ ## name ## _softfp(a, b);	\
+	else							\
+		return soft_func (a, b);			\
+}
+
+/* As above, but with the soft float arguments reversed */
+#define	AEABI_FUNC2_REV(name, in_type, soft_func)		\
+__aeabi_ ## name(in_type a, in_type b)				\
+{								\
+	if (_libc_arm_fpu_present)				\
+		return __aeabi_ ## name ## _softfp(a, b);	\
+	else							\
+		return soft_func (b, a);			\
+}
+
+#endif
+

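One detail worth noting in the header above: LOAD_DREG/UNLOAD_DREG swap
the core registers on big-endian (__ARMEB__) builds because the
soft-float ABI passes a double in r0/r1 in memory word order, while
vmov dN, rLo, rHi always takes the low word first. A small stand-alone
C illustration of the word order (not part of the commit):

  #include <stdint.h>
  #include <stdio.h>

  int
  main(void)
  {
  	/* 1.0 is 0x3ff0000000000000; the 0x3ff00000 half is the high word. */
  	union { double d; uint32_t w[2]; } u = { .d = 1.0 };

  	/*
  	 * Little-endian ARM: w[0] (r0) = 00000000, w[1] (r1) = 3ff00000.
  	 * Big-endian ARM:    w[0] (r0) = 3ff00000, w[1] (r1) = 00000000.
  	 */
  	printf("w[0]=%08x w[1]=%08x\n", (unsigned)u.w[0], (unsigned)u.w[1]);
  	return (0);
  }
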
Added: projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp_double.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp_double.S	Fri Aug 30 08:38:04 2013	(r255063)
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include "aeabi_vfp.h"
+
+.fpu	vfp
+.syntax	unified
+
+/* int __aeabi_dcmpeq(double, double) */
+AEABI_ENTRY(dcmpeq)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vcmp.f64 d0, d1
+	vmrs     APSR_nzcv, fpscr
+	movne    r0, #0
+	moveq    r0, #1
+	RET
+AEABI_END(dcmpeq)
+
+/* int __aeabi_dcmplt(double, double) */
+AEABI_ENTRY(dcmplt)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vcmp.f64 d0, d1
+	vmrs     APSR_nzcv, fpscr
+	movpl    r0, #0
+	movmi    r0, #1
+	RET
+AEABI_END(dcmplt)
+
+/* int __aeabi_dcmple(double, double) */
+AEABI_ENTRY(dcmple)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vcmp.f64 d0, d1
+	vmrs     APSR_nzcv, fpscr
+	movhi    r0, #0
+	movls    r0, #1
+	RET
+AEABI_END(dcmple)
+
+/* int __aeabi_dcmpge(double, double) */
+AEABI_ENTRY(dcmpge)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vcmp.f64 d0, d1
+	vmrs     APSR_nzcv, fpscr
+	movlt    r0, #0
+	movge    r0, #1
+	RET
+AEABI_END(dcmpge)
+
+/* int __aeabi_dcmpgt(double, double) */
+AEABI_ENTRY(dcmpgt)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vcmp.f64 d0, d1
+	vmrs     APSR_nzcv, fpscr
+	movle    r0, #0
+	movgt    r0, #1
+	RET
+AEABI_END(dcmpgt)
+
+/* int __aeabi_dcmpun(double, double) */
+AEABI_ENTRY(dcmpun)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vcmp.f64 d0, d1
+	vmrs     APSR_nzcv, fpscr
+	movvc    r0, #0
+	movvs    r0, #1
+	RET
+AEABI_END(dcmpun)
+
+/* int __aeabi_d2iz(double) */
+AEABI_ENTRY(d2iz)
+	LOAD_DREG(d0, r0, r1)
+#if 0
+	/*
+	 * This should be the correct instruction, but binutils incorrectly
+	 * encodes it as the version that uses the FPSCR to determine the
+	 * rounding mode. When binutils is fixed we can use this again.
+	 */
+	vcvt.s32.f64 s0, d0
+#else
+	ftosizd s0, d0
+#endif
+	vmov         r0, s0
+	RET
+AEABI_END(d2iz)
+
+/* float __aeabi_d2f(double) */
+AEABI_ENTRY(d2f)
+	LOAD_DREG(d0, r0, r1)
+	vcvt.f32.f64 s0, d0
+	UNLOAD_SREG(r0, s0)
+	RET
+AEABI_END(d2f)
+
+/* double __aeabi_i2d(int) */
+AEABI_ENTRY(i2d)
+	vmov         s0, r0
+	vcvt.f64.s32 d0, s0
+	UNLOAD_DREG(r0, r1, d0)
+	RET
+AEABI_END(i2d)
+
+/* double __aeabi_dadd(double, double) */
+AEABI_ENTRY(dadd)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vadd.f64 d0, d0, d1
+	UNLOAD_DREG(r0, r1, d0)
+	RET
+AEABI_END(dadd)
+
+/* double __aeabi_ddiv(double, double) */
+AEABI_ENTRY(ddiv)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vdiv.f64 d0, d0, d1
+	UNLOAD_DREG(r0, r1, d0)
+	RET
+AEABI_END(ddiv)
+
+/* double __aeabi_dmul(double, double) */
+AEABI_ENTRY(dmul)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vmul.f64 d0, d0, d1
+	UNLOAD_DREG(r0, r1, d0)
+	RET
+AEABI_END(dmul)
+
+/* double __aeabi_dsub(double, double) */
+AEABI_ENTRY(dsub)
+	LOAD_DREG(d0, r0, r1)
+	LOAD_DREG(d1, r2, r3)
+	vsub.f64 d0, d0, d1
+	UNLOAD_DREG(r0, r1, d0)
+	RET
+AEABI_END(dsub)
+

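The condition codes tested after vmrs encode the AEABI comparison
semantics: each predicate must return 1 only when its ordered relation
holds and 0 when the operands are unordered (any NaN), except dcmpun,
which reports exactly the unordered case. The strict less-than helpers
use pl/mi because the cs and lt conditions are also true for unordered
operands. A hypothetical smoke test, assuming the helpers can be called
directly from C (not part of the commit):

  #include <assert.h>

  int __aeabi_dcmplt(double, double);
  int __aeabi_dcmpun(double, double);

  int
  main(void)
  {
  	volatile double zero = 0.0;
  	double nan = zero / zero;	/* quiet NaN */

  	assert(__aeabi_dcmplt(1.0, 2.0) == 1);
  	assert(__aeabi_dcmplt(2.0, 1.0) == 0);
  	assert(__aeabi_dcmplt(nan, 1.0) == 0);	/* unordered is not less-than */

  	assert(__aeabi_dcmpun(nan, 1.0) == 1);	/* unordered */
  	assert(__aeabi_dcmpun(1.0, 2.0) == 0);
  	return (0);
  }
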
Added: projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp_float.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm_eabi_vfp/lib/libc/arm/aeabi/aeabi_vfp_float.S	Fri Aug 30 08:38:04 2013	(r255063)
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include "aeabi_vfp.h"
+
+.fpu	vfp
+.syntax	unified
+
+/* int __aeabi_fcmpeq(float, float) */
+AEABI_ENTRY(fcmpeq)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vcmp.f32 s0, s1
+	vmrs     APSR_nzcv, fpscr
+	movne    r0, #0
+	moveq    r0, #1
+	RET
+AEABI_END(fcmpeq)
+
+/* int __aeabi_fcmplt(float, float) */
+AEABI_ENTRY(fcmplt)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vcmp.f32 s0, s1
+	vmrs     APSR_nzcv, fpscr
+	movpl    r0, #0
+	movmi    r0, #1
+	RET
+AEABI_END(fcmplt)
+
+/* int __aeabi_fcmple(float, float) */
+AEABI_ENTRY(fcmple)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vcmp.f32 s0, s1
+	vmrs     APSR_nzcv, fpscr
+	movhi    r0, #0
+	movls    r0, #1
+	RET
+AEABI_END(fcmple)
+
+/* int __aeabi_fcmpge(float, float) */
+AEABI_ENTRY(fcmpge)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vcmp.f32 s0, s1
+	vmrs     APSR_nzcv, fpscr
+	movlt    r0, #0
+	movge    r0, #1
+	RET
+AEABI_END(fcmpge)
+
+/* int __aeabi_fcmpgt(float, float) */
+AEABI_ENTRY(fcmpgt)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vcmp.f32 s0, s1
+	vmrs     APSR_nzcv, fpscr
+	movle    r0, #0
+	movgt    r0, #1
+	RET
+AEABI_END(fcmpgt)
+
+/* int __aeabi_fcmpun(float, float) */
+AEABI_ENTRY(fcmpun)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vcmp.f32 s0, s1
+	vmrs     APSR_nzcv, fpscr
+	movvc    r0, #0
+	movvs    r0, #1
+	RET
+AEABI_END(fcmpun)
+
+/* int __aeabi_f2iz(float) */
+AEABI_ENTRY(f2iz)
+	LOAD_SREG(s0, r0)
+#if 0
+	/*
+	 * This should be the correct instruction, but binutils incorrectly
+	 * encodes it as the version that uses the FPSCR to determine the
+	 * rounding mode. When binutils is fixed we can use this again.
+	 */
+	vcvt.s32.f32 s0, s0
+#else
+	ftosizs      s0, s0
+#endif
+	vmov         r0, s0
+	RET
+AEABI_END(f2iz)
+
+/* double __aeabi_f2d(float) */
+AEABI_ENTRY(f2d)
+	LOAD_SREG(s0, r0)
+	vcvt.f64.f32 d0, s0
+	UNLOAD_DREG(r0, r1, d0)
+	RET
+AEABI_END(f2d)
+
+/* float __aeabi_i2f(int) */
+AEABI_ENTRY(i2f)
+	vmov         s0, r0
+	vcvt.f32.s32 s0, s0
+	UNLOAD_SREG(r0, s0)
+	RET
+AEABI_END(i2f)
+
+/* float __aeabi_fadd(float, float) */
+AEABI_ENTRY(fadd)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vadd.f32 s0, s0, s1
+	UNLOAD_SREG(r0, s0)
+	RET
+AEABI_END(fadd)
+
+/* float __aeabi_fdiv(float, float) */
+AEABI_ENTRY(fdiv)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vdiv.f32 s0, s0, s1
+	UNLOAD_SREG(r0, s0)
+	RET
+AEABI_END(fdiv)
+
+/* float __aeabi_fmul(float, float) */
+AEABI_ENTRY(fmul)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vmul.f32 s0, s0, s1
+	UNLOAD_SREG(r0, s0)
+	RET
+AEABI_END(fmul)
+
+/* float __aeabi_fsub(float, float) */
+AEABI_ENTRY(fsub)
+	LOAD_SREGS(s0, s1, r0, r1)
+	vsub.f32 s0, s0, s1
+	UNLOAD_SREG(r0, s0)
+	RET
+AEABI_END(fsub)
+

Modified: projects/arm_eabi_vfp/lib/libc/arm/gen/flt_rounds.c
==============================================================================
--- projects/arm_eabi_vfp/lib/libc/arm/gen/flt_rounds.c	Fri Aug 30 08:29:23 2013	(r255062)
+++ projects/arm_eabi_vfp/lib/libc/arm/gen/flt_rounds.c	Fri Aug 30 08:38:04 2013	(r255063)
@@ -30,15 +30,17 @@ __FBSDID("$FreeBSD$");
 #include <fenv.h>
 #include <float.h>
 
+#ifndef __ARM_PCS_VFP
 #include "softfloat-for-gcc.h"
 #include "milieu.h"
 #include "softfloat.h"
+#endif
 
 int
 __flt_rounds(void)
 {
 
-#ifndef ARM_HARD_FLOAT
+#ifndef __ARM_PCS_VFP
 	/*
 	 * Translate our rounding modes to the unnamed
 	 * manifest constants required by C99 et. al.

