diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index 92a539afcc8..4dce36d8148 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -3315,6 +3315,9 @@ struct elf_aarch64_link_hash_table
   /* Linker call-backs.  */
   asection *(*add_stub_section) (const char *, asection *);
   void (*layout_sections_again) (void);
+  c64_section_padding_setter_t c64_set_section_padding;
+  c64_section_padding_getter_t c64_get_section_padding;
+  c64_pad_after_section_t c64_pad_after_section;
 
   /* Array to keep track of which stub sections have been created, and
      information on stub grouping.  */
@@ -3349,6 +3352,11 @@ struct elf_aarch64_link_hash_table
   bfd_boolean c64_output;
   htab_t c64_tls_data_stub_hash_table;
   void * tls_data_stub_memory;
+
+  /* Bookkeeping for padding we add to sections in C64.  */
+  void **c64_sec_info;
+  unsigned min_output_section_id;
+  unsigned max_output_section_id;
 };
 
 /* Create an entry in an AArch64 ELF linker hash table.  */
@@ -4322,6 +4330,12 @@ elfNN_aarch64_setup_section_lists (bfd *output_bfd,
 
   struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
 
+  /* XXX: in practice, we believe this branch should never be taken.
+     This function is only ever called from gld*_after_allocation
+     (defined in ld/emultempl/aarch64elf.em), which can only ever be
+     called if the LD emulation is an AArch64 ELF emulation.  The branch
+     can only be taken if the BFD backend is not an ELF backend.  In
+     practice, this isn't supported, see PR29649 for details.  */
   if (!is_elf_hash_table (htab))
     return 0;
 
@@ -5184,25 +5198,86 @@ c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp, unsigned *alignmentp)
   return FALSE;
 }
 
+static bfd_boolean
+c64_get_section_padding_info (struct elf_aarch64_link_hash_table *htab,
+                              asection *sec, void ***info)
+{
+  if (!htab->c64_sec_info)
+    {
+      unsigned min_os_id, max_os_id;
+      min_os_id = max_os_id = sec->id;
+      asection *iter;
+      bfd *output_bfd = sec->owner;
+      for (iter = output_bfd->sections; iter; iter = iter->next)
+        {
+          BFD_ASSERT (iter->output_section == iter);
+          if (iter->id < min_os_id)
+            min_os_id = iter->id;
+          if (iter->id > max_os_id)
+            max_os_id = iter->id;
+        }
+
+      /* Create a sparse array mapping section IDs onto cookies.  */
+      const int num_slots = max_os_id - min_os_id + 1;
+      htab->min_output_section_id = min_os_id;
+      htab->max_output_section_id = max_os_id;
+      const size_t mem_sz = num_slots * sizeof (void *);
+      htab->c64_sec_info = bfd_zalloc (output_bfd, mem_sz);
+      if (!htab->c64_sec_info)
+        {
+          _bfd_error_handler (_("out of memory allocating padding info"));
+          bfd_set_error (bfd_error_no_memory);
+          return FALSE;
+        }
+    }
+
+  BFD_ASSERT (sec->id >= htab->min_output_section_id);
+  BFD_ASSERT (sec->id <= htab->max_output_section_id);
+  *info = htab->c64_sec_info + (sec->id - htab->min_output_section_id);
+  return TRUE;
+}
+
 /* Check if the bounds of section SEC will get rounded off in the Morello
    capability format and if it would, adjust the section to ensure any
    capability spanning this section would have its bounds precise.  */
-static inline void
+static bfd_boolean
 ensure_precisely_bounded_section (asection *sec,
                                   struct elf_aarch64_link_hash_table *htab,
-                                  void (*c64_pad_section) (asection *, bfd_vma))
+                                  bfd_boolean *changed_layout)
 {
   bfd_vma low = sec->vma;
   bfd_vma high = sec->vma + sec->size;
   unsigned alignment;
 
+  void **info;
+  if (!c64_get_section_padding_info (htab, sec, &info))
+    return FALSE;
+
+  bfd_vma old_padding = htab->c64_get_section_padding (*info);
+
+  /* Ignore any existing padding when calculating bounds validity.  */
+  high -= old_padding;
+
   bfd_boolean did_change = FALSE;
   if (!c64_valid_cap_range (&low, &high, &alignment))
     {
-      bfd_vma padding = high - low - sec->size;
-      c64_pad_section (sec, padding);
+      bfd_vma padding = high - low - (sec->size - old_padding);
+      if (padding != old_padding)
+        {
+          htab->c64_set_section_padding (sec, padding, info);
+          did_change = TRUE;
+        }
+    }
+  else if (old_padding)
+    {
+      /* If the range without the padding is valid, then
+         we can drop the padding.  This may happen e.g. if a stub gets
+         added that happens to take up the space occupied by the
+         padding.  */
+      htab->c64_set_section_padding (sec, 0, info);
       did_change = TRUE;
     }
+
   if (sec->alignment_power < alignment)
     {
       sec->alignment_power = alignment;
@@ -5210,7 +5285,13 @@ ensure_precisely_bounded_section (asection *sec,
     }
 
   if (did_change)
-    (*htab->layout_sections_again) ();
+    {
+      (*htab->layout_sections_again) ();
+      if (changed_layout)
+        *changed_layout = TRUE;
+    }
+
+  return TRUE;
 }
 
 /* Make sure that all capabilities that refer to sections have bounds that
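
[Reviewer note] A standalone sketch of the rounding rule this hunk compensates for: capability bounds compress the length into a limited mantissa, so a large enough range forces base and limit to a power-of-two alignment, and the moved-up limit is exactly the padding recorded above.  MANTISSA_BITS and round_cap_range here are illustrative stand-ins, not the architected Morello encoding nor the real c64_valid_cap_range.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MANTISSA_BITS 14	/* placeholder value, illustration only */

/* Round *BASE down and *LIMIT up until the length, scaled by the
   exponent E, fits in the mantissa.  Returns E, which is also the
   power-of-two alignment the range needs, mirroring the ALIGNMENTP
   out-parameter of c64_valid_cap_range.  */
static unsigned
round_cap_range (uint64_t *base, uint64_t *limit)
{
  for (unsigned e = 0; ; e++)
    {
      uint64_t mask = (((uint64_t) 1) << e) - 1;
      uint64_t b = *base & ~mask;
      uint64_t l = (*limit + mask) & ~mask;
      if (((l - b) >> e) < ((uint64_t) 1 << MANTISSA_BITS))
	{
	  *base = b;
	  *limit = l;
	  return e;
	}
    }
}

int
main (void)
{
  /* A section too large for exact bounds: the limit moves up, and the
     difference is the padding the code above would record.  */
  uint64_t base = 0x1a0, limit = 0x1a0 + 0x1234567;
  unsigned align = round_cap_range (&base, &limit);
  printf ("base %#" PRIx64 ", limit %#" PRIx64 ", align 2**%u\n",
	  base, limit, align);
  return 0;
}
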
@@ -5226,18 +5307,18 @@ ensure_precisely_bounded_section (asection *sec,
 static bfd_vma pcc_low;
 static bfd_vma pcc_high;
 
-void
-elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
-                           void (*c64_pad_section) (asection *, bfd_vma),
-                           void (*layout_sections_again) (void))
+static asection *pcc_low_sec;
+static asection *pcc_high_sec;
+
+static bfd_boolean
+c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
+                     bfd_boolean *changed_layout)
 {
   asection *sec;
   struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
   bfd *input_bfd;
   unsigned align = 0;
 
-  htab->layout_sections_again = layout_sections_again;
-
   /* If this is not a PURECAP binary, and has no C64 code in it, then this is
      just a stock AArch64 binary and the section padding is not necessary.  We
      can have PURECAP shared libraries that are data-only, so just checking
@@ -5245,7 +5326,7 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
      binaries, so just checking for PURECAP is not enough.  */
   if (!(htab->c64_output
         || (elf_elfheader (output_bfd)->e_flags & EF_AARCH64_CHERI_PURECAP)))
-    return;
+    return TRUE;
 
   /* First, walk through all the relocations to find those referring to linker
      defined and ldscript defined symbols since we set their range to their
@@ -5308,8 +5389,25 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
 
               os = h->root.u.def.section->output_section;
 
-              if (h->root.linker_def)
-                ensure_precisely_bounded_section (os, htab, c64_pad_section);
+              /* XXX: we may see some ldscript-defined
+                 symbols here whose output section is set to *ABS*.
+                 These symbols should get their output section resolved
+                 to a real output section in ldexp_finalize_syms, but
+                 because we are running earlier in the lang_process
+                 flow (specifically in ldemul_after_allocation here) we
+                 see these symbols as pointing into the *ABS* section.
+
+                 For now, we just skip such symbols, but this should be
+                 fixed properly later on.  */
+              if (os == bfd_abs_section_ptr)
+                continue;
+
+              if (h->root.linker_def || h->start_stop)
+                {
+                  if (!ensure_precisely_bounded_section (os, htab,
+                                                         changed_layout))
+                    return FALSE;
+                }
               else if (h->root.ldscript_def)
                 {
                   const char *name = h->root.root.string;
@@ -5323,12 +5421,16 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
                       && ((altos = bfd_sections_find_if (info->output_bfd,
                                                          section_start_symbol,
                                                          &value)) != NULL))
-                    ensure_precisely_bounded_section (altos, htab,
-                                                      c64_pad_section);
+                    if (!ensure_precisely_bounded_section (altos, htab,
+                                                           changed_layout))
+                      return FALSE;
+
                   /* XXX We're overfitting here because the offset of H within
                      the output section is not yet resolved and ldscript
                      defined symbols do not have input section information.  */
-                  ensure_precisely_bounded_section (os, htab, c64_pad_section);
+                  if (!ensure_precisely_bounded_section (os, htab,
+                                                         changed_layout))
+                    return FALSE;
                 }
             }
         }
@@ -5350,10 +5452,10 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
      While that seems unlikely to happen, having no relocations in a file also
      seems quite unlikely, so we may as well play it safe.  */
   if (!htab->c64_output)
-    return;
+    return TRUE;
 
   bfd_vma low = (bfd_vma) -1, high = 0;
-  asection *pcc_low_sec = NULL, *pcc_high_sec = NULL;
+  pcc_low_sec = pcc_high_sec = NULL;
   for (sec = output_bfd->sections; sec != NULL; sec = sec->next)
     {
       /* XXX This is a good place to figure out if there are any readable or
@@ -5404,40 +5506,52 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
         different sections with their own alignment requirements can easily
         change the length of the region we want the PCC to span.  Also, that
         change in length could change the alignment we want.  We
-        don't proove that the alignment requirement converges, but believe
+        don't prove that the alignment requirement converges, but believe
         that it should (there is only so much space that existing alignment
         requirements could trigger to be added -- a section with an alignment
         requirement of 16 can only really add 15 bytes to the length).  */
-      bfd_boolean valid_range = FALSE;
       while (TRUE)
        {
          pcc_low_tmp = pcc_low_sec->vma;
          pcc_high_tmp = pcc_high_sec->vma + pcc_high_sec->size;
-         valid_range =
-           c64_valid_cap_range (&pcc_low_tmp, &pcc_high_tmp, &align);
+         c64_valid_cap_range (&pcc_low_tmp, &pcc_high_tmp, &align);
          if (pcc_low_sec->alignment_power >= align)
           break;
          pcc_low_sec->alignment_power = align;
          (*htab->layout_sections_again) ();
+         if (changed_layout)
+           *changed_layout = TRUE;
       }
 
-      /* We have calculated the bottom and top address that we want in the
-        above call to c64_valid_cap_range.  We have also aligned the lowest
-        section in the PCC range to where we want it.  Just have to add the
-        padding remaining if needs be.  */
-      if (!valid_range)
-       {
-         BFD_ASSERT (pcc_low_tmp == pcc_low_sec->vma);
-         bfd_vma current_length =
-           (pcc_high_sec->vma + pcc_high_sec->size) - pcc_low_sec->vma;
-         bfd_vma desired_length = (pcc_high_tmp - pcc_low_tmp);
-         bfd_vma padding = desired_length - current_length;
-         c64_pad_section (pcc_high_sec, padding);
-         (*htab->layout_sections_again) ();
-       }
-
-      pcc_low = pcc_low_sec->vma;
-      pcc_high = pcc_high_sec->vma + pcc_high_sec->size;
+      /* We have aligned the base section appropriately, but we may
+        still (later) need to add padding after pcc_high_sec to
+        get representable PCC bounds.  */
+      BFD_ASSERT (pcc_low_tmp == pcc_low_sec->vma);
+      pcc_low = pcc_low_tmp;
+      pcc_high = pcc_high_tmp;
     }
+
+  return TRUE;
+}
+
+bfd_boolean
+elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
+                           c64_section_padding_setter_t set_padding,
+                           c64_section_padding_getter_t get_padding,
+                           c64_pad_after_section_t pad_after_section,
+                           void (*layout_sections_again) (void))
+{
+  struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+  htab->c64_set_section_padding = set_padding;
+  htab->c64_get_section_padding = get_padding;
+  htab->c64_pad_after_section = pad_after_section;
+  htab->layout_sections_again = layout_sections_again;
+
+  /* We assert this here since we require elfNN_aarch64_size_stubs to be
+     called to complete setting the PCC bounds, see the comment at the
+     top of elfNN_aarch64_setup_section_lists.  */
+  BFD_ASSERT (is_elf_hash_table (htab));
+
+  return c64_resize_sections (output_bfd, info, NULL);
 }
 
 /* Determine and set the size of the stub section for a final link.
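
[Reviewer note] A toy model of the two-step scheme this hunk sets up: align the bottom section now and remember the rounded pcc_low/pcc_high, then leave the tail padding to elfNN_aarch64_size_stubs once stub sizing has settled.  needed_align, the mantissa width and all constants are illustrative stand-ins; the real layout is of course driven by the linker, not by this arithmetic.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MANTISSA_BITS 14	/* placeholder value, illustration only */

/* Smallest exponent at which LENGTH fits the mantissa.  */
static unsigned
needed_align (uint64_t length)
{
  unsigned e = 0;
  while ((length >> e) >= ((uint64_t) 1 << MANTISSA_BITS))
    e++;
  return e;
}

int
main (void)
{
  uint64_t vma = 0x10c0;	/* unaligned bottom of the PCC span */
  uint64_t span = 0x2345678;	/* distance to the top of the span  */

  /* Step 1 (c64_resize_sections): bump the bottom section's alignment,
     as "pcc_low_sec->alignment_power = align" does, and remember the
     rounded bounds in pcc_low/pcc_high.  */
  unsigned e = needed_align (span);
  uint64_t mask = (((uint64_t) 1) << e) - 1;
  uint64_t pcc_low = (vma + mask) & ~mask;
  uint64_t pcc_high = (pcc_low + span + mask) & ~mask;

  /* Step 2 (end of elfNN_aarch64_size_stubs): once stub sizing has
     settled, pad from the current end of the span up to pcc_high.  */
  uint64_t padding = pcc_high - (pcc_low + span);
  printf ("PCC %#" PRIx64 "..%#" PRIx64 ", pad after top section %#"
	  PRIx64 "\n", pcc_low, pcc_high, padding);
  return 0;
}
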
@@ -5447,45 +5561,14 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
    C64 or vice versa) or in case of unconditional branches (B/BL), is
    unreachable.  */
 
-bfd_boolean
-elfNN_aarch64_size_stubs (bfd *output_bfd,
-                          bfd *stub_bfd,
-                          struct bfd_link_info *info,
-                          bfd_signed_vma group_size,
-                          asection * (*add_stub_section) (const char *,
-                                                          asection *))
+static bfd_boolean
+aarch64_size_stubs (bfd *output_bfd,
+                    struct bfd_link_info *info)
 {
-  bfd_size_type stub_group_size;
-  bfd_boolean stubs_always_before_branch;
   bfd_boolean stub_changed = FALSE;
   struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
   unsigned int num_erratum_835769_fixes = 0;
 
-  /* Propagate mach to stub bfd, because it may not have been
-     finalized when we created stub_bfd.  */
-  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
-                     bfd_get_mach (output_bfd));
-
-  /* Stash our params away.  */
-  htab->stub_bfd = stub_bfd;
-  htab->add_stub_section = add_stub_section;
-  stubs_always_before_branch = group_size < 0;
-  if (group_size < 0)
-    stub_group_size = -group_size;
-  else
-    stub_group_size = group_size;
-
-  if (stub_group_size == 1)
-    {
-      /* Default values.  */
-      /* AArch64 branch range is +-128MB.  The value used is 1MB less.  */
-      stub_group_size = 127 * 1024 * 1024;
-    }
-
-  group_sections (htab, stub_group_size, stubs_always_before_branch);
-
-  (*htab->layout_sections_again) ();
-
   if (htab->fix_erratum_835769)
     {
       bfd *input_bfd;
@@ -5832,6 +5915,89 @@ elfNN_aarch64_size_stubs (bfd *output_bfd,
   return FALSE;
 }
 
+bfd_boolean
+elfNN_aarch64_size_stubs (bfd *output_bfd,
+                          bfd *stub_bfd,
+                          struct bfd_link_info *info,
+                          bfd_signed_vma group_size,
+                          asection * (*add_stub_section) (const char *,
+                                                          asection *))
+{
+  bfd_size_type stub_group_size;
+  bfd_boolean stubs_always_before_branch;
+  struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+
+  /* Propagate mach to stub bfd, because it may not have been
+     finalized when we created stub_bfd.  */
+  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
+                     bfd_get_mach (output_bfd));
+
+  /* Stash our params away.  */
+  htab->stub_bfd = stub_bfd;
+  htab->add_stub_section = add_stub_section;
+
+  stubs_always_before_branch = group_size < 0;
+  if (group_size < 0)
+    stub_group_size = -group_size;
+  else
+    stub_group_size = group_size;
+
+  if (stub_group_size == 1)
+    {
+      /* Default values.  */
+      /* AArch64 branch range is +-128MB.  The value used is 1MB less.  */
+      stub_group_size = 127 * 1024 * 1024;
+    }
+
+  group_sections (htab, stub_group_size, stubs_always_before_branch);
+
+  (*htab->layout_sections_again) ();
+
+  /* The layout changes we do in aarch64_size_stubs may mean we need to
+     adjust the PCC base alignment (or individual section bounds) again,
+     and vice versa.  Here we run both in a loop until we no longer make
+     further adjustments.  */
+  enum { max_tries = 10 };
+  int tries;
+  for (tries = 0; tries < max_tries; tries++)
+    {
+      if (!aarch64_size_stubs (output_bfd, info))
+        return FALSE;
+
+      bfd_boolean changed_layout = FALSE;
+      if (!c64_resize_sections (output_bfd, info, &changed_layout))
+        return FALSE;
+
+      if (!changed_layout)
+        break;
+    }
+
+  if (tries >= max_tries)
+    {
+      _bfd_error_handler (_("looping in elfNN_aarch64_size_stubs"));
+      abort ();
+    }
+
+  if (pcc_high_sec)
+    {
+      /* Now that we've sized everything up, add any additional
+         padding *after* the last section needed to get exact
+         PCC bounds.  */
+      bfd_vma current_high = pcc_high_sec->vma + pcc_high_sec->size;
+      bfd_vma desired_high = pcc_high;
+      BFD_ASSERT (desired_high >= current_high);
+      bfd_vma padding = desired_high - current_high;
+      if (padding)
+        {
+          htab->c64_pad_after_section (pcc_high_sec, padding);
+          htab->layout_sections_again ();
+        }
+    }
+
+  return TRUE;
+}
+
+
 /* Build all the stubs associated with the current output file.  The
    stubs are kept in a hash table attached to the main linker hash
    table.  We also set up the .plt entries for statically linked PIC
diff --git a/bfd/elfxx-aarch64.h b/bfd/elfxx-aarch64.h
index 86b5ed2de37..aa742d51795 100644
--- a/bfd/elfxx-aarch64.h
+++ b/bfd/elfxx-aarch64.h
@@ -82,9 +82,15 @@ extern bfd_boolean elf64_aarch64_size_stubs
   (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma,
    struct bfd_section * (*) (const char *, struct bfd_section *));
 
-extern void elf64_c64_resize_sections (bfd *, struct bfd_link_info *,
-                                       void (*) (asection *, bfd_vma),
-                                       void (*) (void));
+typedef void (*c64_section_padding_setter_t)(asection *, bfd_vma, void **);
+typedef bfd_vma (*c64_section_padding_getter_t)(void *);
+typedef void (*c64_pad_after_section_t)(asection *, bfd_vma);
+
+extern bfd_boolean elf64_c64_resize_sections (bfd *, struct bfd_link_info *,
+                                              c64_section_padding_setter_t,
+                                              c64_section_padding_getter_t,
+                                              c64_pad_after_section_t,
+                                              void (*) (void));
 
 extern bfd_boolean elf64_aarch64_build_stubs (struct bfd_link_info *);
 
@@ -99,9 +105,11 @@ extern bfd_boolean elf32_aarch64_size_stubs
 
 extern bfd_boolean elf32_aarch64_build_stubs (struct bfd_link_info *);
 
-extern void elf32_c64_resize_sections (bfd *, struct bfd_link_info *,
-                                       void (*) (asection *, bfd_vma),
-                                       void (*) (void));
+extern bfd_boolean elf32_c64_resize_sections (bfd *, struct bfd_link_info *,
+                                              c64_section_padding_setter_t,
+                                              c64_section_padding_getter_t,
+                                              c64_pad_after_section_t,
+                                              void (*) (void));
 
 /* Take the PAGE component of an address or offset.  */
 #define PG(x)	((x) & ~ (bfd_vma) 0xfff)
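
[Reviewer note] The control flow the new elfNN_aarch64_size_stubs wrapper adds boils down to the following fixed-point loop.  size_stubs_pass and resize_sections_pass are stand-ins for aarch64_size_stubs and c64_resize_sections (here they pretend layout settles after a few rounds); the iteration cap and abort-on-non-convergence mirror the real code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum { MAX_TRIES = 10 };

/* Stand-in for aarch64_size_stubs: returns success/failure.  */
static bool
size_stubs_pass (void)
{
  return true;
}

/* Stand-in for c64_resize_sections: reports whether it changed the
   layout, pretending things settle after three rounds.  */
static bool
resize_sections_pass (bool *changed_layout)
{
  static int rounds = 3;
  *changed_layout = --rounds > 0;
  return true;
}

int
main (void)
{
  int tries;
  for (tries = 0; tries < MAX_TRIES; tries++)
    {
      if (!size_stubs_pass ())
	return EXIT_FAILURE;

      bool changed_layout = false;
      if (!resize_sections_pass (&changed_layout))
	return EXIT_FAILURE;

      if (!changed_layout)
	break;			/* reached a fixed point */
    }

  if (tries >= MAX_TRIES)
    {
      fprintf (stderr, "layout failed to converge\n");
      abort ();
    }

  printf ("layout settled after %d iteration(s)\n", tries + 1);
  return 0;
}
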
diff --git a/ld/emultempl/aarch64elf.em b/ld/emultempl/aarch64elf.em
index df4d33f3ab4..eb227c085f1 100644
--- a/ld/emultempl/aarch64elf.em
+++ b/ld/emultempl/aarch64elf.em
@@ -205,31 +205,76 @@ elf${ELFSIZE}_aarch64_add_stub_section (const char *stub_sec_name,
   return NULL;
 }
 
-/* Insert a pad immediately after OUTPUT_SECTION.  */
+/* Insert a pad of PADDING bytes immediately after the output section
+   OSEC.  If *COOKIE is null, it is set to an opaque pointer which can be
+   re-used to adjust the amount of padding inserted by re-invoking the
+   function with *COOKIE set to the same pointer.  */
 
 static void
-elf64_c64_pad_section (asection *osec, bfd_vma padding)
+c64_set_section_padding (asection *osec, bfd_vma padding, void **cookie)
 {
-  if (padding > 0)
+  etree_type *pad_exp;
+
+  if (*cookie)
+    {
+      pad_exp = *cookie;
+      if (pad_exp->type.node_code != INT)
+        abort ();
+      pad_exp->value.value = padding;
+    }
+  else if (padding > 0)
     {
-      lang_statement_list_type list;
       lang_output_section_statement_type *os = lang_output_section_get (osec);
-
-      lang_list_init (&list);
-      lang_add_assignment_internal (&list,
+      pad_exp = exp_intop (padding);
+      *cookie = pad_exp;
+      lang_add_assignment_internal (&os->children,
                                     exp_assign (".",
-                                                exp_binop ('+', exp_nameop (NAME, "."), exp_intop (padding)),
+                                                exp_binop ('+', exp_nameop (NAME, "."), pad_exp),
                                                 FALSE));
+    }
+}
 
-      if (list.head == NULL)
-        {
-          einfo (_("%X%P: can not make padding section: %E\n"));
-          return;
-        }
+/* Insert PADDING bytes of padding after the output section OSEC.
 
-      *(list.tail) = NULL;
-      *(os->children.tail) = list.head;
-    }
+   Unlike c64_set_section_padding, the padding cannot be adjusted after
+   it has been applied, and the padding does not affect the size of the
+   output section itself, but instead just changes where the next output
+   section starts.  */
+
+static void
+c64_pad_after_section (asection *osec, bfd_vma padding)
+{
+  if (!padding)
+    return;
+
+  lang_output_section_statement_type *os = lang_output_section_get (osec);
+
+  etree_type *assign_exp
+    = exp_assign (".",
+                  exp_binop ('+',
+                             exp_nameop (NAME, "."),
+                             exp_intop (padding)), FALSE);
+
+  lang_statement_list_type dummy;
+  lang_list_init (&dummy);
+  lang_add_assignment_internal (&dummy, assign_exp);
+  lang_statement_union_type *assign = dummy.head;
+
+  lang_statement_union_type *next = os->header.next;
+  os->header.next = (lang_statement_union_type *)assign;
+  assign->header.next = next;
+}
+
+static bfd_vma
+c64_get_section_padding (void *cookie)
+{
+  if (!cookie)
+    return 0;
+
+  etree_type *exp = cookie;
+  if (exp->type.node_code != INT)
+    abort ();
+  return exp->value.value;
 }
 
 /* Another call-back for elf${ELFSIZE}_aarch64_size_stubs.  */
@@ -277,9 +322,15 @@ gld${EMULATION_NAME}_after_allocation (void)
   else if (ret > 0)
     need_laying_out = 1;
 
-  elf${ELFSIZE}_c64_resize_sections (link_info.output_bfd, & link_info,
-                                     & elf64_c64_pad_section,
-                                     & gldaarch64_layout_sections_again);
+  if (!elf${ELFSIZE}_c64_resize_sections (link_info.output_bfd, & link_info,
+                                          & c64_set_section_padding,
+                                          & c64_get_section_padding,
+                                          & c64_pad_after_section,
+                                          & gldaarch64_layout_sections_again))
+    {
+      einfo (_("%X%P: failed to resize sections for C64: %E\n"));
+      return;
+    }
 
   /* If generating a relocatable output file, then we don't have to examine
      the relocs.  */
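
[Reviewer note] A minimal standalone model of the padding-cookie protocol shared by the three callbacks above: the cookie is just a pointer to the integer node whose value the linker later evaluates, so the padding can be re-read and adjusted in place on later relaxation passes.  int_node and INT_NODE are stand-ins for ld's etree_type and its INT node code, not the real types.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long vma_t;

struct int_node { int node_code; vma_t value; };
enum { INT_NODE = 1 };

static void
set_padding (vma_t padding, void **cookie)
{
  if (*cookie)
    {
      struct int_node *n = *cookie;
      assert (n->node_code == INT_NODE);
      n->value = padding;	/* adjust in place on a later pass */
    }
  else if (padding > 0)
    {
      struct int_node *n = malloc (sizeof *n);
      if (!n)
	return;
      n->node_code = INT_NODE;
      n->value = padding;
      *cookie = n;		/* remember it for later adjustment */
    }
}

static vma_t
get_padding (void *cookie)
{
  if (!cookie)
    return 0;
  struct int_node *n = cookie;
  assert (n->node_code == INT_NODE);
  return n->value;
}

int
main (void)
{
  void *cookie = NULL;
  set_padding (0x40, &cookie);
  set_padding (0x20, &cookie);	/* shrink the pad on a later pass */
  printf ("padding now %#lx\n", get_padding (cookie));
  return 0;
}
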
diff --git a/ld/testsuite/ld-aarch64/aarch64-elf.exp b/ld/testsuite/ld-aarch64/aarch64-elf.exp
index c48164d9cb3..2c372e24138 100644
--- a/ld/testsuite/ld-aarch64/aarch64-elf.exp
+++ b/ld/testsuite/ld-aarch64/aarch64-elf.exp
@@ -338,6 +338,8 @@ run_dump_test_lp64 "morello-dt-init-fini"
 run_dump_test_lp64 "morello-capinit"
 run_dump_test_lp64 "morello-stubs"
 run_dump_test_lp64 "morello-stubs-static"
+run_dump_test_lp64 "morello-stubs-pcc-bounds"
+run_dump_test_lp64 "morello-stubs-sec-bounds"
 run_dump_test_lp64 "morello-sec-round"
 run_dump_test_lp64 "morello-sec-round-adjust"
 run_dump_test_lp64 "morello-sec-always-align"
diff --git a/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.d b/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.d
index 1aa3f8e202d..142d1a8ad3e 100644
--- a/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.d
+++ b/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.d
@@ -20,11 +20,9 @@ Sections:
 Idx Name          Size      VMA               LMA               File off  Algn
 .* \.text         00000010  00000000000001a0  00000000000001a0  000001a0  2\*\*5
-#record: LAST_RO_SIZE LAST_RO_VMA
+#record: FIRST_RW_VMA
 #...
- +[0-9]+ \.rela\.dyn +([0-9a-f]+) +([0-9a-f]+) .*
- +CONTENTS, ALLOC, LOAD, READONLY, DATA
- +[0-9]+ \.data .*
+ +[0-9]+ \.first_rw +[0-9a-f]+ +([0-9a-f]+) .*
 #...
 Disassembly of section \.text:
 
@@ -33,7 +31,7 @@ Disassembly of section \.text:
 #...
 Disassembly of section \.data:
 
-#check: PCC_SIZE format %08x [expr "0x$LAST_RO_VMA + 0x$LAST_RO_SIZE - 0x1a0"]
+#check: PCC_SIZE format %08x [expr "0x$FIRST_RW_VMA - 0x1a0"]
 0000000000014000 <obj>:
    [0-9a-f]+:	000001a0 .*
 			14000: R_MORELLO_RELATIVE	\*ABS\*
diff --git a/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.ld b/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.ld
index 53a94a79d68..3435fe280da 100644
--- a/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.ld
+++ b/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.ld
@@ -6,6 +6,7 @@ SECTIONS
   .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
   . = DATA_SEGMENT_RELRO_END (0, .);
   .othersection ALIGN(0x2000) : { *(.othersection*) }
+  .first_rw : { *(.first_rw) }
   .data ALIGN(0x2000) : { *(.data) }
   . = DATA_SEGMENT_END (.);
 }
diff --git a/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.s b/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.s
index 5727022ca9a..833adc54f6a 100644
--- a/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.s
+++ b/ld/testsuite/ld-aarch64/morello-pcc-bounds-include-readonly.s
@@ -16,3 +16,5 @@ obj:
 	ldr	c0, [c0]
 	ldr	c0, [c0]
 
+.section .first_rw,"aw"
+.xword 42
diff --git a/ld/testsuite/ld-aarch64/morello-sec-round-adjust.d b/ld/testsuite/ld-aarch64/morello-sec-round-adjust.d
index f00e6a02128..95e6e35f328 100644
--- a/ld/testsuite/ld-aarch64/morello-sec-round-adjust.d
+++ b/ld/testsuite/ld-aarch64/morello-sec-round-adjust.d
@@ -13,7 +13,8 @@ Idx Name          Size      VMA               LMA               File off  Algn
          CONTENTS, ALLOC, LOAD, DATA
   3 \.got\.plt     00000030  0000000000100630  0000000000100630  00100630  2\*\*4
          CONTENTS, ALLOC, LOAD, DATA
-  4 \.rela\.dyn    000001a0  0000000000100660  0000000000100660  00100660  2\*\*3
-         CONTENTS, ALLOC, LOAD, READONLY, DATA
+  4 \.tail_rw      00000008  0000000000100800  0000000000100800  00100800  2\*\*0
+         CONTENTS, ALLOC, LOAD, DATA
+  5 \.rela\.dyn    00000030  0000000000100660  0000000000100660  00100660  2\*\*3
 #pass
diff --git a/ld/testsuite/ld-aarch64/morello-sec-round-adjust.ld b/ld/testsuite/ld-aarch64/morello-sec-round-adjust.ld
index 87613d0f3b3..e700a9e043d 100644
--- a/ld/testsuite/ld-aarch64/morello-sec-round-adjust.ld
+++ b/ld/testsuite/ld-aarch64/morello-sec-round-adjust.ld
@@ -13,5 +13,6 @@ SECTIONS {
   .iplt : { *(.iplt) }
   .data : { *(.data) }
   .rela.dyn : { *(.rela.dyn) }
+  .tail_rw : { *(.tail_rw) }
   .interp : { *(.interp) }
 }
diff --git a/ld/testsuite/ld-aarch64/morello-sec-round-adjust.s b/ld/testsuite/ld-aarch64/morello-sec-round-adjust.s
index 09de3cf246a..71f78802f15 100644
--- a/ld/testsuite/ld-aarch64/morello-sec-round-adjust.s
+++ b/ld/testsuite/ld-aarch64/morello-sec-round-adjust.s
@@ -20,3 +20,6 @@ __start:
 	.capinit __data_rel_ro_startsym
 	.xword 0
 	.xword 0
+
+.section .tail_rw,"aw"
+.xword 42
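
[Reviewer note] The adjusted morello-sec-round-adjust expectations rely on padding being recomputed from scratch on every pass: ensure_precisely_bounded_section strips the padding recorded in the cookie before re-checking the range, so stale padding shrinks or disappears when other layout changes make it unnecessary.  A toy of that rule, with an illustrative mantissa width and alignment handling omitted:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MANTISSA_BITS 14	/* placeholder value, illustration only */

static uint64_t
repad (uint64_t vma, uint64_t size, uint64_t old_padding)
{
  /* Ignore any existing padding when re-checking the range.  */
  uint64_t high = vma + size - old_padding;

  if ((high - vma) < ((uint64_t) 1 << MANTISSA_BITS))
    return 0;			/* representable: drop stale padding */

  /* Otherwise round the limit up and pad out to it.  */
  unsigned e = 0;
  while (((high - vma) >> e) >= ((uint64_t) 1 << MANTISSA_BITS))
    e++;
  uint64_t mask = (((uint64_t) 1) << e) - 1;
  return ((high + mask) & ~mask) - high;
}

int
main (void)
{
  /* First pass: a big section needs padding.  */
  uint64_t pad = repad (0x0, 0x1234567, 0);
  printf ("pass 1 padding %#" PRIx64 "\n", pad);

  /* Later pass: the section shrank; the padding is recomputed from
     the unpadded range and here drops to zero.  */
  pad = repad (0x0, 0x3000 + pad, pad);
  printf ("pass 2 padding %#" PRIx64 "\n", pad);
  return 0;
}
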
diff --git a/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.d b/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.d
new file mode 100644
index 00000000000..63918065dd0
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.d
@@ -0,0 +1,32 @@
+# The purpose of this test is to check the interaction between the code
+# to ensure exact PCC bounds and the code that inserts stubs.  We want to
+# make sure we still get tight PCC bounds even when stubs are required.
+#
+# For the PCC bounds, we now add padding *after* the end of the last
+# section in PCC bounds (instead of padding the last section itself).
+# Hence we check that the PCC span ends where the .tail_relro section
+# starts, rather than checking where .text_high ends.
+#source: morello-stubs-pcc-bounds.s
+#as: -march=morello+c64
+#ld: -static -T morello-stubs-pcc-bounds.ld
+#objdump: -Dr --section-headers
+
+.*:     file format .*
+
+Sections:
+Idx Name          Size      VMA               LMA               File off  Algn
+  0 \.text_low    00000030  00000000000000c0  00000000000000c0  000000c0  2\*\*5
+#...
+[0-9a-f]+ <__baz_a64c64_veneer>:
+#...
+[0-9a-f]+ <__foo_c64a64_veneer>:
+#...
+Disassembly of section \.tail_relro:
+
+#record: TAIL_RELRO_START
+([0-9a-f]+) <ptr>:
+#check: PCC_SIZE format %08x [expr "0x$TAIL_RELRO_START - 0xc0"]
+   20040:	000000c0 .*
+   20044:	00000000 .*
+   20048:	PCC_SIZE .*
+   2004c:	04000000 .*
diff --git a/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.ld b/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.ld
new file mode 100644
index 00000000000..4ec7bd9f590
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.ld
@@ -0,0 +1,7 @@
+SECTIONS {
+  . = SIZEOF_HEADERS;
+  .text_low : { *(.text.1) *(.text.2) }
+  .relocs_mid : { *(.rela.dyn) }
+  .text_high 0x20000 : { *(.text.3) }
+  .tail_relro : { *(.data.rel.ro.local) }
+}
diff --git a/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.s b/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.s
new file mode 100644
index 00000000000..3edb06e87fa
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-stubs-pcc-bounds.s
@@ -0,0 +1,33 @@
+.arch morello
+.section .text.1
+.type foo, STT_FUNC
+foo:
+	bl baz
+	mov c1, c2
+	ret
+
+.arch morello+c64
+baz2:
+	b baz
+	ret
+
+.section .text.3
+.arch morello
+baz3:
+	b foo
+	ret
+
+.arch morello+c64
+
+.type baz, STT_FUNC
+.global baz
+baz:
+	b foo
+	ret
+
+.section .data.rel.ro.local,"aw"
+.align 4
+.type ptr, %object
+.size ptr, 16
+ptr:
+	.chericap baz
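
[Reviewer note] What the four expected data words in the .d file above decode to: the 16-byte fragment the linker writes for the .chericap capability, dumped as little-endian 32-bit words, carries the base in the first doubleword and the length (PCC_SIZE) in the low half of the second, with permission-related bits in the top word (the fixed 04000000 expectation).  The exact bit layout of the second doubleword is not modelled here, and the TAIL_RELRO_START value below is made up for the example; this just mirrors the arithmetic of the "[expr "0x$TAIL_RELRO_START - 0xc0"]" check.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* The four words objdump shows for the fragment, assuming
     TAIL_RELRO_START came out as 0x20060.  */
  uint32_t w[4] = { 0x000000c0, 0x00000000,
		    0x20060 - 0xc0 /* PCC_SIZE */, 0x04000000 };

  uint64_t base = (uint64_t) w[1] << 32 | w[0];
  uint32_t length = w[2];

  /* PCC_SIZE = start of .tail_relro minus the PCC base (0xc0), so
     base + length lands exactly on the .tail_relro boundary.  */
  printf ("base %#" PRIx64 ", limit %#" PRIx64 "\n",
	  base, base + length);
  return 0;
}
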
diff --git a/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.d b/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.d
new file mode 100644
index 00000000000..d26fc6cfddc
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.d
@@ -0,0 +1,33 @@
+# The purpose of this test is to check the interaction between the code
+# to ensure that an individual section gets precise, representable
+# bounds and the code that inserts stubs.
+#
+# In the case of this test, .text_low should get tight bounds, and
+# the .tail_relro section should start exactly where the PCC bounds
+# end.
+#source: morello-stubs-sec-bounds.s
+#as: -march=morello+c64
+#ld: -static -T morello-stubs-sec-bounds.ld
+#objdump: -Dr --section-headers
+
+.*:     file format .*
+
+Sections:
+Idx Name          Size      VMA               LMA               File off  Algn
+  0 \.text_low    00100200  0000000000000400  0000000000000400  00000400  2\*\*10
+#record: TAIL_RELRO_VMA
+#...
+  5 \.tail_relro  00000010  ([0-9a-f]+) .*
+#...
+[0-9a-f]+ <__baz_a64c64_veneer>:
+#...
+[0-9a-f]+ <__foo_c64a64_veneer>:
+#...
+Disassembly of section \.tail_relro:
+
+#check: PCC_SIZE format %08x [expr "0x$TAIL_RELRO_VMA - 0x400"]
+[0-9a-f]+ <ptr>:
+   [0-9a-f]+:	00000400 .*
+   [0-9a-f]+:	00000000 .*
+   [0-9a-f]+:	PCC_SIZE .*
+   [0-9a-f]+:	04000000 .*
diff --git a/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.ld b/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.ld
new file mode 100644
index 00000000000..63fcf0ff830
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.ld
@@ -0,0 +1,14 @@
+SECTIONS {
+  . = SIZEOF_HEADERS;
+  .text_low : {
+    *(.text.1)
+    text_sym = .;
+  }
+  .relocs_mid : { *(.rela.dyn) }
+  .text_high 0x200000 : {
+    *(.text.3)
+  }
+  .got : { *(.got) }
+  .got.plt : { *(.got.plt) }
+  .tail_relro : { *(.data.rel.ro.local) }
+}
diff --git a/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.s b/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.s
new file mode 100644
index 00000000000..37dccc5ca0b
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-stubs-sec-bounds.s
@@ -0,0 +1,37 @@
+.arch morello
+.section .text.1
+.type foo, STT_FUNC
+foo:
+	bl baz
+	mov c1, c2
+	ret
+
+.arch morello+c64
+baz2:
+	b baz
+	ret
+
+lots_of_zeros:
+.zero 0xffff0
+
+.section .text.3
+.arch morello
+baz3:
+	b foo
+	ret
+
+.arch morello+c64
+
+.type baz, STT_FUNC
+.global baz
+baz:
+	adrp c0, :got:text_sym
+	b foo
+	ret
+
+.section .data.rel.ro.local,"aw"
+.align 4
+.type ptr, %object
+.size ptr, 16
+ptr:
+	.chericap baz