svn commit: r261969 - in projects/arm64/sys: arm64/arm64 conf

Andrew Turner andrew at FreeBSD.org
Sun Feb 16 10:59:03 UTC 2014


Author: andrew
Date: Sun Feb 16 10:59:01 2014
New Revision: 261969
URL: http://svnweb.freebsd.org/changeset/base/261969

Log:
  Add a stub locore.S and pmap, and hook them to the build.

Added:
  projects/arm64/sys/arm64/arm64/locore.S   (contents, props changed)
  projects/arm64/sys/arm64/arm64/pmap.c   (contents, props changed)
  projects/arm64/sys/conf/Makefile.arm64   (contents, props changed)
  projects/arm64/sys/conf/files.arm64
  projects/arm64/sys/conf/ldscript.arm64
  projects/arm64/sys/conf/options.arm64

Added: projects/arm64/sys/arm64/arm64/locore.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/arm64/arm64/locore.S	Sun Feb 16 10:59:01 2014	(r261969)
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "assym.s"
+
+	.globl	kernbase
+	.set	kernbase, KERNBASE
+
+	.globl	_start
+_start:
+	/* Load the address of the FVP model's PL011 UART */
+	mov	x0, 0x1c090000
+	/* Load 'A' */
+	mov	x1, 0x41
+	/* Print 'A' with a 32-bit store to the UART data register */
+	str	w1, [x0]
+
+1:	b	1b
+
+
+	.globl abort
+abort:
+	b abort
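
For readers following along: the fragment above boils down to a single
memory-mapped I/O store.  A rough C equivalent is sketched below, assuming
the ARM FVP model's PL011 UART at physical address 0x1c090000 (the MMU is
still off at this point, so the physical address is used directly); the
name early_putc is hypothetical:

    #include <stdint.h>

    #define FVP_UART_BASE   0x1c090000UL    /* PL011; UARTDR at offset 0 */

    static void
    early_putc(char c)
    {
            volatile uint32_t *uartdr = (volatile uint32_t *)FVP_UART_BASE;

            /* A store to the data register transmits one byte. */
            *uartdr = (uint32_t)c;
    }

On the model the UART transmits without any prior initialization, which is
what lets this stub print before any console driver exists.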

Added: projects/arm64/sys/arm64/arm64/pmap.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/arm64/arm64/pmap.c	Sun Feb 16 10:59:01 2014	(r261969)
@@ -0,0 +1,625 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#if !defined(DIAGNOSTIC)
+#ifdef __GNUC_GNU_INLINE__
+#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
+#else
+#define PMAP_INLINE	extern inline
+#endif
+#else
+#define PMAP_INLINE
+#endif
+
+vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
+vm_offset_t kernel_vm_end = 0;
+vm_offset_t vm_max_kernel_address;
+
+int unmapped_buf_allowed = 0;
+
+struct pmap kernel_pmap_store;
+
+struct msgbuf *msgbufp = NULL;
+
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+	panic("pmap_page_init");
+}
+
+/*
+ *	Initialize the pmap module.
+ *	Called by vm_init, to initialize any structures that the pmap
+ *	system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+
+	panic("pmap_init");
+}
+
+/***************************************************
+ * Low level helper routines.....
+ ***************************************************/
+
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+
+	panic("pmap_kextract");
+}
+
+/*
+ *	Routine:	pmap_extract
+ *	Function:
+ *		Extract the physical page address associated
+ *		with the given map/virtual_address pair.
+ */
+vm_paddr_t 
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+
+	panic("pmap_extract");
+}
+
+/*
+ *	Routine:	pmap_extract_and_hold
+ *	Function:
+ *		Atomically extract and hold the physical page
+ *		with the given pmap and virtual address pair
+ *		if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+
+	panic("pmap_extract_and_hold");
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * Add a wired page to the kva.
+ * Note: not SMP coherent.
+ *
+ * This function may be used before pmap_bootstrap() is called.
+ */
+PMAP_INLINE void 
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+{
+
+	panic("pmap_kenter");
+}
+
+/*
+ * Remove a page from the kernel pagetables.
+ * Note: not SMP coherent.
+ *
+ * This function may be used before pmap_bootstrap() is called.
+ */
+PMAP_INLINE void
+pmap_kremove(vm_offset_t va)
+{
+
+	panic("pmap_kremove");
+}
+
+/*
+ *	Used to map a range of physical addresses into kernel
+ *	virtual address space.
+ *
+ *	The value passed in '*virt' is a suggested virtual address for
+ *	the mapping. Architectures which can support a direct-mapped
+ *	physical to virtual region can return the appropriate address
+ *	within that region, leaving '*virt' unchanged. Other
+ *	architectures should map the pages starting at '*virt' and
+ *	update '*virt' with the first usable address after the mapped
+ *	region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
+{
+
+	panic("pmap_map");
+}
+
+/*
+ * Add a list of wired pages to the kva.  This
+ * routine is only used for temporary kernel
+ * mappings that do not need to have page
+ * modification or references recorded.  Note
+ * that old mappings are simply written over.
+ * The page *must* be wired.
+ * Note: SMP coherent.  Uses a ranged shootdown IPI.
+ */
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+
+	panic("pmap_qenter");
+}
+
+/*
+ * This routine tears out page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ * Note: SMP coherent.  Uses a ranged shootdown IPI.
+ */
+void
+pmap_qremove(vm_offset_t sva, int count)
+{
+
+	panic("pmap_qremove");
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+
+/*
+ * Initialize the pmap for the swapper process.
+ */
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+	panic("pmap_pinit0");
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+int
+pmap_pinit(pmap_t pmap)
+{
+
+	panic("pmap_pinit");
+}
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+
+	panic("pmap_release");
+}
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+
+	panic("pmap_growkernel");
+}
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+/*
+ *	Remove the given range of addresses from the specified map.
+ *
+ *	It is assumed that the start and end are properly
+ *	rounded to the page size.
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+
+	panic("pmap_remove");
+}
+
+/*
+ *	Routine:	pmap_remove_all
+ *	Function:
+ *		Removes this physical page from
+ *		all physical maps in which it resides.
+ *		Reflects back modify bits to the pager.
+ *
+ *	Notes:
+ *		Original versions of this routine were very
+ *		inefficient because they iteratively called
+ *		pmap_remove (slow...)
+ */
+void
+pmap_remove_all(vm_page_t m)
+{
+
+	panic("pmap_remove_all");
+}
+
+/*
+ *	Set the physical protection on the
+ *	specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+
+	panic("pmap_protect");
+}
+
+/*
+ *	Insert the given physical page (p) at
+ *	the specified virtual address (v) in the
+ *	target physical map with the protection requested.
+ *
+ *	If specified, the page will be wired down, meaning
+ *	that the related pte can not be reclaimed.
+ *
+ *	NB:  This is the only routine which MAY NOT lazy-evaluate
+ *	or lose information.  That is, this routine must actually
+ *	insert this page into the given map NOW.
+ */
+void
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
+{
+
+	panic("pmap_enter");
+}
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start.  This page is
+ * mapped at the given virtual address start.  Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object.  The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end.  Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+    vm_page_t m_start, vm_prot_t prot)
+{
+
+	panic("pmap_enter_object");
+}
+
+/*
+ * This code makes some *MAJOR* assumptions:
+ * 1. The current pmap and the given pmap exist.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+
+	panic("pmap_enter_quick");
+}
+
+/*
+ * This code maps large physical mmap regions into the
+ * processor address space.  Note that some shortcuts
+ * are taken, but the code works.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+    vm_pindex_t pindex, vm_size_t size)
+{
+
+	panic("pmap_object_init_pt");
+}
+
+/*
+ *	Routine:	pmap_change_wiring
+ *	Function:	Change the wiring attribute for a map/virtual-address
+ *			pair.
+ *	In/out conditions:
+ *			The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+{
+
+	panic("pmap_change_wiring");
+}
+
+/*
+ *	Copy the range specified by src_addr/len
+ *	from the source map to the range dst_addr/len
+ *	in the destination map.
+ *
+ *	This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
+    vm_offset_t src_addr)
+{
+
+	panic("pmap_copy");
+}
+
+/*
+ *	pmap_zero_page zeros the specified hardware page by mapping 
+ *	the page into KVM and using bzero to clear its contents.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+
+	panic("pmap_zero_page");
+}
+
+/*
+ *	pmap_zero_page_area zeros the specified hardware page by mapping 
+ *	the page into KVM and using bzero to clear its contents.
+ *
+ *	off and size may not cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+
+	panic("pmap_zero_page_area");
+}
+
+/*
+ *	pmap_zero_page_idle zeros the specified hardware page by mapping 
+ *	the page into KVM and using bzero to clear its contents.  This
+ *	is intended to be called from the vm_pagezero process only and
+ *	outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+	panic("pmap_zero_page_idle");
+}
+
+/*
+ *	pmap_copy_page copies the specified (machine independent)
+ *	page by mapping the page into virtual memory and using
+ *	bcopy to copy the page, one machine dependent page at a
+ *	time.
+ */
+void
+pmap_copy_page(vm_page_t src, vm_page_t dst)
+{
+
+	panic("pmap_copy_page");
+}
+
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+
+	panic("pmap_copy_pages");
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page.  This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+
+	panic("pmap_page_exists_quick");
+}
+
+/*
+ *	pmap_page_wired_mappings:
+ *
+ *	Return the number of managed mappings to the given physical page
+ *	that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+
+	panic("pmap_page_wired_mappings");
+}
+
+/*
+ * Remove all pages from the specified address space;
+ * this aids process exit speeds.  Also, this code
+ * is special cased for current process only, but
+ * can have the more generic (and slightly slower)
+ * mode enabled.  This is much faster than pmap_remove
+ * in the case of running down an entire address space.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+
+	panic("pmap_remove_pages");
+}
+
+/*
+ *	pmap_is_modified:
+ *
+ *	Return whether or not the specified physical page was modified
+ *	in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+
+	panic("pmap_is_modified");
+}
+
+/*
+ *	pmap_is_prefaultable:
+ *
+ *	Return whether or not the specified virtual address is eligible
+ *	for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+
+	panic("pmap_is_prefaultable");
+}
+
+/*
+ *	pmap_is_referenced:
+ *
+ *	Return whether or not the specified physical page was referenced
+ *	in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+	panic("pmap_is_referenced");
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+
+	panic("pmap_remove_write");
+}
+
+/*
+ *	pmap_ts_referenced:
+ *
+ *	Return a count of reference bits for a page, clearing those bits.
+ *	It is not necessary for every reference bit to be cleared, but it
+ *	is necessary that 0 only be returned when there are truly no
+ *	reference bits set.
+ *
+ *	XXX: The exact number of bits to check and clear is a matter that
+ *	should be tested and standardized at some point in the future for
+ *	optimal aging of shared pages.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+
+	panic("pmap_ts_referenced");
+}
+
+/*
+ *	Apply the given advice to the specified range of addresses within the
+ *	given pmap.  Depending on the advice, clear the referenced and/or
+ *	modified flags in each mapping and set the mapped page's dirty field.
+ */
+void
+pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
+{
+
+	panic("pmap_advise");
+}
+
+/*
+ *	Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+
+	panic("pmap_clear_modify");
+}
+
+/*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+	panic("pmap_page_set_memattr");
+}
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+{
+
+	panic("pmap_mincore");
+}
+
+void
+pmap_activate(struct thread *td)
+{
+
+	panic("pmap_activate");
+}
+
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+
+	panic("pmap_sync_icache");
+}
+
+/*
+ * Increase the starting virtual address of the given mapping if a
+ * different alignment might result in more superpage mappings.
+ */
+void
+pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t *addr, vm_size_t size)
+{
+
+}
+
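
As an illustration of the pmap_map() contract described in its comment
above, here is a minimal sketch of the non-direct-map strategy: map each
physical page at *virt with pmap_kenter() and advance *virt past the
region.  This is an illustration only, not the arm64 implementation
(which does not exist yet; pmap_kenter() above still panics):

    vm_offset_t
    pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
    {
            vm_offset_t sva, va;

            sva = va = *virt;
            while (start < end) {
                    /* Wire each physical page into the kernel map. */
                    pmap_kenter(va, start);
                    va += PAGE_SIZE;
                    start += PAGE_SIZE;
            }
            /* Report the first usable address after the mapped region. */
            *virt = va;
            return (sva);
    }

An architecture with a direct map could instead return an address within
that region and leave *virt untouched, the other behaviour the contract
allows.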

Added: projects/arm64/sys/conf/Makefile.arm64
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/conf/Makefile.arm64	Sun Feb 16 10:59:01 2014	(r261969)
@@ -0,0 +1,50 @@
+# Makefile.arm64 -- with config changes.
+# Copyright 1990 W. Jolitz
+#	from: @(#)Makefile.i386	7.1 5/10/91
+#	from FreeBSD: src/sys/conf/Makefile.i386,v 1.255 2002/02/20 23:35:49
+# $FreeBSD$
+#
+# Makefile for FreeBSD
+#
+# This makefile is constructed from a machine description:
+#	config machineid
+# Most changes should be made in the machine description
+#	/sys/arm64/conf/``machineid''
+# after which you should do
+#	 config machineid
+# Generic makefile changes should be made in
+#	/sys/conf/Makefile.arm64
+# after which config should be rerun for all machines.
+#
+
+# Which version of config(8) is required.
+%VERSREQ=	600012
+
+STD8X16FONT?=	iso
+
+.if !defined(S)
+.if exists(./@/.)
+S=	./@
+.else
+S=	../../..
+.endif
+.endif
+.include "$S/conf/kern.pre.mk"
+
+INCLUDES+= -I$S/contrib/libfdt
+
+%BEFORE_DEPEND
+
+%OBJS
+
+%FILES.c
+
+%FILES.s
+
+%FILES.m
+
+%CLEAN
+
+%RULES
+
+.include "$S/conf/kern.post.mk"

Added: projects/arm64/sys/conf/files.arm64
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/conf/files.arm64	Sun Feb 16 10:59:01 2014	(r261969)
@@ -0,0 +1,31 @@
+
+arm64/arm64/bcopy.c		standard
+arm64/arm64/busdma_machdep.c	standard
+arm64/arm64/clock.c		standard
+arm64/arm64/copyinout.c		standard
+arm64/arm64/copystr.c		standard
+arm64/arm64/dump_machdep.c	standard
+arm64/arm64/elf_machdep.c	standard
+arm64/arm64/exception.c		standard
+arm64/arm64/identcpu.c		standard
+arm64/arm64/in_cksum.c		optional	inet | inet6
+arm64/arm64/locore.S		standard	no-obj
+arm64/arm64/machdep.c		standard
+arm64/arm64/mem.c		standard
+arm64/arm64/pmap.c		standard
+arm64/arm64/stack_machdep.c	standard
+arm64/arm64/support.c		standard
+arm64/arm64/swtch.c		standard
+arm64/arm64/sys_machdep.c	standard
+arm64/arm64/trap.c		standard
+arm64/arm64/uio_machdep.c	standard
+arm64/arm64/vm_machdep.c	standard
+kern/kern_clocksource.c		standard
+kern/subr_dummy_vdso_tc.c	standard
+libkern/bcmp.c			standard
+libkern/ffs.c			standard
+libkern/ffsl.c			standard
+libkern/fls.c			standard
+libkern/flsl.c			standard
+libkern/memmove.c		standard
+libkern/memset.c		standard

Added: projects/arm64/sys/conf/ldscript.arm64
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/conf/ldscript.arm64	Sun Feb 16 10:59:01 2014	(r261969)
@@ -0,0 +1,148 @@
+/* $FreeBSD$ */
+OUTPUT_ARCH(aarch64)
+ENTRY(_start)
+
+SEARCH_DIR(/usr/lib);
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  . = kernbase + SIZEOF_HEADERS;
+  .text      :
+  {
+    *(.text)
+    *(.stub)
+    /* .gnu.warning sections are handled specially by elf32.em.  */
+    *(.gnu.warning)
+    *(.gnu.linkonce.t*)
+  } =0x9090
+  _etext = .;
+  PROVIDE (etext = .);
+  .fini      : { *(.fini)    } =0x9090
+  .rodata    : { *(.rodata) *(.gnu.linkonce.r*) }
+  .rodata1   : { *(.rodata1) }
+   .interp     : { *(.interp) 	}
+  .hash          : { *(.hash)		}
+  .dynsym        : { *(.dynsym)		}
+  .dynstr        : { *(.dynstr)		}
+  .gnu.version   : { *(.gnu.version)	}
+  .gnu.version_d   : { *(.gnu.version_d)	}
+  .gnu.version_r   : { *(.gnu.version_r)	}
+  .rel.text      :
+    { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+  .rela.text     :
+    { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+  .rel.data      :
+    { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+  .rela.data     :
+    { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+  .rel.rodata    :
+    { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+  .rela.rodata   :
+    { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+  .rel.got       : { *(.rel.got)		}
+  .rela.got      : { *(.rela.got)		}
+  .rel.ctors     : { *(.rel.ctors)	}
+  .rela.ctors    : { *(.rela.ctors)	}
+  .rel.dtors     : { *(.rel.dtors)	}
+  .rela.dtors    : { *(.rela.dtors)	}
+  .rel.init      : { *(.rel.init)	}
+  .rela.init     : { *(.rela.init)	}
+  .rel.fini      : { *(.rel.fini)	}
+  .rela.fini     : { *(.rela.fini)	}
+  .rel.bss       : { *(.rel.bss)		}
+  .rela.bss      : { *(.rela.bss)		}
+  .rel.plt       : { *(.rel.plt)		}
+  .rela.plt      : { *(.rela.plt)		}
+  .init          : { *(.init)	} =0x9090
+  .plt      : { *(.plt)	}
+
+  . = ALIGN(4);
+  _extab_start = .;
+  PROVIDE(extab_start = .);
+  .ARM.extab : { *(.ARM.extab) }
+  _extab_end = .;
+  PROVIDE(extab_end = .);
+
+  _exidx_start = .;
+  PROVIDE(exidx_start = .);
+  .ARM.exidx : { *(.ARM.exidx) }
+  _exidx_end = .;
+  PROVIDE(exidx_end = .);
+
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up.  */
+  . = ALIGN(0x1000) + (. & (0x1000 - 1)) ; 
+  .data    :
+  {
+    *(.data)
+    *(.gnu.linkonce.d*)
+    CONSTRUCTORS
+  }
+  .data1   : { *(.data1) }
+  . = ALIGN(32 / 8);
+  _start_ctors = .;
+  PROVIDE (start_ctors = .);
+  .ctors         :
+  {
+    *(.ctors)
+  }
+  _stop_ctors = .;
+  PROVIDE (stop_ctors = .);
+  .dtors         :
+  {
+    *(.dtors)
+  }
+  .got           : { *(.got.plt) *(.got) }
+  .dynamic       : { *(.dynamic) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata     : { *(.sdata) }
+  _edata  =  .;
+  PROVIDE (edata = .);
+  __bss_start = .;
+  .sbss      : { *(.sbss) *(.scommon) }
+  .bss       :
+  {
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  . = ALIGN(32 / 8);
+  _end = . ;
+  PROVIDE (end = .);
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /* These must appear regardless of the current location counter.  */
+}
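
The data-segment expression ". = ALIGN(0x1000) + (. & (0x1000 - 1))" is
worth a worked example: it rounds the location counter up to the next page
boundary and then restores the original offset within the page, keeping
the virtual address and file offset congruent modulo the page size.  The
same arithmetic in C, with a made-up starting address:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uintptr_t dot = 0x40212345;     /* example location counter */
            /* ALIGN(0x1000): round up to the next 4KiB boundary. */
            uintptr_t aligned = (dot + 0xfff) & ~(uintptr_t)0xfff;
            /* Add back the old offset within the page. */
            uintptr_t newdot = aligned + (dot & 0xfff);

            printf("%#lx\n", (unsigned long)newdot);  /* 0x40213345 */
            return (0);
    }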

Added: projects/arm64/sys/conf/options.arm64
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/arm64/sys/conf/options.arm64	Sun Feb 16 10:59:01 2014	(r261969)
@@ -0,0 +1 @@
+ARM64	opt_global.h
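
A note on the single entry above: an option listed against opt_global.h is
written by config(8) into that header, which the kernel makefiles pass to
every compilation unit (via -include), so kernel sources can test it
without an explicit include.  A sketch of the intended use:

    /* No #include "opt_global.h" needed; it is force-included. */
    #ifdef ARM64
            /* arm64-only code path */
    #endif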

