Skip to content
This repository has been archived by the owner on Jan 24, 2022. It is now read-only.

link.x.in: put most __[se] symbols back into sections #323

Merged
Merged 2 commits on Apr 15, 2021
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 33 additions & 24 deletions link.x.in
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ SECTIONS
/* ### .text */
.text _stext :
{
__stext = .;
*(.Reset);

*(.text .text.*);
Expand All @@ -96,34 +97,22 @@ SECTIONS
*(.HardFault.*);

. = ALIGN(4); /* Pad .text to the alignment to workaround overlapping load section bug in old lld */
__etext = .;
} > FLASH
. = ALIGN(4); /* Ensure __etext is aligned if something unaligned is inserted after .text */
__etext = .; /* Define outside of .text to allow using INSERT AFTER .text */

/* ### .rodata */
.rodata __etext : ALIGN(4)
.rodata : ALIGN(4)
{
. = ALIGN(4);
__srodata = .;
*(.rodata .rodata.*);

/* 4-byte align the end (VMA) of this section.
This is required by LLD to ensure the LMA of the following .data
section will have the correct alignment. */
. = ALIGN(4);
__erodata = .;
} > FLASH
. = ALIGN(4); /* Ensure __erodata is aligned if something unaligned is inserted after .rodata */
__erodata = .;

/* ### .gnu.sgstubs
This section contains the TrustZone-M veneers put there by the Arm GNU linker. */
. = ALIGN(32); /* Security Attribution Unit blocks must be 32 bytes aligned. */
__veneer_base = ALIGN(4);
.gnu.sgstubs : ALIGN(4)
{
*(.gnu.sgstubs*)
. = ALIGN(4); /* 4-byte align the end (VMA) of this section */
} > FLASH
. = ALIGN(4); /* Ensure __veneer_limit is aligned if something unaligned is inserted after .gnu.sgstubs */
__veneer_limit = .;

/* ## Sections in RAM */
/* ### .data */
Expand All @@ -134,35 +123,55 @@ SECTIONS
*(.data .data.*);
. = ALIGN(4); /* 4-byte align the end (VMA) of this section */
} > RAM AT>FLASH
. = ALIGN(4); /* Ensure __edata is aligned if something unaligned is inserted after .data */
/* Allow sections from user `memory.x` injected using `INSERT AFTER .data` to
* use the .data loading mechanism by pushing __edata. Note: do not change
* output region or load region in those user sections! */
. = ALIGN(4);
__edata = .;

/* LMA of .data */
__sidata = LOADADDR(.data);

/* ### .gnu.sgstubs
This section contains the TrustZone-M veneers put there by the Arm GNU linker. */
/* Security Attribution Unit blocks must be 32 bytes aligned. */
/* Note that this pads the FLASH usage to 32 byte alignment. */
.gnu.sgstubs : ALIGN(32)
{
. = ALIGN(32);
__veneer_base = .;
*(.gnu.sgstubs*)
. = ALIGN(32);
__veneer_limit = .;
} > FLASH

/* ### .bss */
. = ALIGN(4);
__sbss = .; /* Define outside of section to include INSERT BEFORE/AFTER symbols */
.bss (NOLOAD) : ALIGN(4)
{
. = ALIGN(4);
__sbss = .;
*(.bss .bss.*);
*(COMMON); /* Uninitialized C statics */
. = ALIGN(4); /* 4-byte align the end (VMA) of this section */
} > RAM
. = ALIGN(4); /* Ensure __ebss is aligned if something unaligned is inserted after .bss */
/* Allow sections from user `memory.x` injected using `INSERT AFTER .bss` to
* use the .bss zeroing mechanism by pushing __ebss. Note: do not change
* output region or load region in those user sections! */
. = ALIGN(4);
__ebss = .;

/* ### .uninit */
.uninit (NOLOAD) : ALIGN(4)
{
. = ALIGN(4);
__suninit = .;
*(.uninit .uninit.*);
. = ALIGN(4);
__euninit = .;
} > RAM

/* Place the heap right after `.uninit` */
. = ALIGN(4);
__sheap = .;
/* Place the heap right after `.uninit` in RAM */
PROVIDE(__sheap = __euninit);

/* ## .got */
/* Dynamic relocations are unsupported. This section is only used to detect relocatable code in
Expand Down