/* AArch64-specific support for NN-bit ELF.
   Copyright (C) 2009-2023 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

/* Notes on implementation:

  Thread Local Store (TLS)

  Overview:

  The implementation currently supports both traditional TLS and TLS
  descriptors, but only general dynamic (GD).

  For traditional TLS the assembler will present us with code
  fragments of the form:

	adrp	x0, :tlsgd:foo
			   R_AARCH64_TLSGD_ADR_PAGE21(foo)
	add	x0, x0, #:tlsgd_lo12:foo
			   R_AARCH64_TLSGD_ADD_LO12_NC(foo)
	bl	__tls_get_addr
	nop

  For TLS descriptors the assembler will present us with code
  fragments of the form:

	adrp	x0, :tlsdesc:foo		R_AARCH64_TLSDESC_ADR_PAGE21(foo)
	ldr	x1, [x0, #:tlsdesc_lo12:foo]	R_AARCH64_TLSDESC_LD64_LO12(foo)
	add	x0, x0, #:tlsdesc_lo12:foo	R_AARCH64_TLSDESC_ADD_LO12(foo)
	.tlsdesccall foo
	blr	x1				R_AARCH64_TLSDESC_CALL(foo)

  The relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} against foo
  indicate that foo is thread local and should be accessed via the
  traditional TLS mechanism.

  The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12,ADD_LO12}
  against foo indicate that 'foo' is thread local and should be
  accessed via a TLS descriptor mechanism.

  The precise instruction sequence is only relevant from the
  perspective of linker relaxation, which is currently not implemented.

  The static linker must detect that 'foo' is a TLS object and
  allocate a double GOT entry.  The GOT entry must be created for both
  global and local TLS symbols.  Note that this is different from
  non-TLS local objects, which do not need a GOT entry.

  In the traditional TLS mechanism, the double GOT entry is used to
  provide the tls_index structure, containing module and offset
  entries.  The static linker places the relocation R_AARCH64_TLS_DTPMOD
  on the module entry.  The loader will subsequently fixup this
  relocation with the module identity.

  For global traditional TLS symbols the static linker places an
  R_AARCH64_TLS_DTPREL relocation on the offset entry.  The loader
  will subsequently fixup the offset.  For local TLS symbols the static
  linker fixes up the offset itself.

  In the TLS descriptor mechanism the double GOT entry is used to
  provide the descriptor.  The static linker places the relocation
  R_AARCH64_TLSDESC on the first GOT slot.  The loader will
  subsequently fix this up.
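
  For example (illustrative layout only, based on the description
  above), the pair of GOT slots for a GD-accessed symbol carries:

	GOT[n]   : 0	R_AARCH64_TLS_DTPMOD	module id, fixed up by
						the loader
	GOT[n+1] : 0	R_AARCH64_TLS_DTPREL	offset, fixed up by the
						loader (or by the static
						linker for local symbols)

  while for a TLSDESC-accessed symbol the pair forms the descriptor:

	GOT[n]   : 0	R_AARCH64_TLSDESC	resolver entry point,
	GOT[n+1] : 0				plus its argument slot,
						fixed up lazily by the
						loader.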

  Implementation:

  The handling of TLS symbols is implemented across a number of
  different backend functions.  The following is a top level view of
  what processing is performed where.

  The TLS implementation maintains state information for each TLS
  symbol.  The state information for local and global symbols is kept
  in different places.  Global symbols use generic BFD structures while
  local symbols use backend specific structures that are allocated and
  maintained entirely by the backend.

  The flow:

  elfNN_aarch64_check_relocs ()

  This function is invoked for each relocation.

  The TLS relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} and
  R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12,ADD_LO12} are spotted.  The
  local symbol data structures are created once, when the first local
  symbol is seen.

  The reference count for a symbol is incremented.  The GOT type for
  each symbol is marked as general dynamic.

  elfNN_aarch64_allocate_dynrelocs ()

  For each global with a positive reference count we allocate a double
  GOT slot.  For a traditional TLS symbol we allocate space for two
  relocation entries on the GOT; for a TLS descriptor symbol we
  allocate space for one relocation on the slot.  Record the GOT offset
  for this symbol.

  elfNN_aarch64_size_dynamic_sections ()

  Iterate over all input BFDs, look in the local symbol data structures
  constructed earlier for local TLS symbols and allocate them double
  GOT slots along with space for a single GOT relocation.  Update the
  local symbol structure to record the GOT offset allocated.

  elfNN_aarch64_relocate_section ()

  Calls elfNN_aarch64_final_link_relocate ()

  Emit the relevant TLS relocations against the GOT for each TLS
  symbol.  For local TLS symbols emit the GOT offset directly.  The GOT
  relocations are emitted once, the first time a TLS symbol is
  encountered.  The implementation uses the LSB of the GOT offset to
  flag that the relevant GOT relocations for a symbol have been
  emitted.  All of the TLS code that uses the GOT offset needs to take
  care to mask out this flag bit before using the offset.

  elfNN_aarch64_final_link_relocate ()

  Fixup the R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} relocations.  */

#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "bfdlink.h"
#include "objalloc.h"
#include "elf/aarch64.h"
#include "elfxx-aarch64.h"
#include "cpu-aarch64.h"

#define ARCH_SIZE	NN

#if ARCH_SIZE == 64
#define AARCH64_R(NAME)		R_AARCH64_ ## NAME
#define AARCH64_R_STR(NAME)	"R_AARCH64_" #NAME
#define HOWTO64(...)		HOWTO (__VA_ARGS__)
#define HOWTO32(...)		EMPTY_HOWTO (0)
#define LOG_FILE_ALIGN		3
#define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC	BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
#endif

#if ARCH_SIZE == 32
#define AARCH64_R(NAME)		R_AARCH64_P32_ ## NAME
#define AARCH64_R_STR(NAME)	"R_AARCH64_P32_" #NAME
#define HOWTO64(...)		EMPTY_HOWTO (0)
#define HOWTO32(...)		HOWTO (__VA_ARGS__)
#define LOG_FILE_ALIGN		2
#define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12	BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
#define R_AARCH64_P32_TLSDESC_ADD_LO12		R_AARCH64_P32_TLSDESC_ADD_LO12_NC
#endif
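
/* Illustration (not from the original source): with ARCH_SIZE == 64 the
   macros above make

     AARCH64_R (ABS64)      expand to  R_AARCH64_ABS64
     AARCH64_R_STR (ABS64)  expand to  "R_AARCH64_ABS64"

   while an ILP32 (ARCH_SIZE == 32) build of the same source line yields
   R_AARCH64_P32_ABS64 and "R_AARCH64_P32_ABS64".  HOWTO64/HOWTO32
   likewise keep only the howto table entries that exist for the
   selected ABI, leaving EMPTY_HOWTO (0) placeholders for the other.  */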

#define IS_AARCH64_TLS_RELOC(R_TYPE)				\
  ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL			\
   || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))

#define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE)			\
  ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)

#define IS_AARCH64_TLSDESC_RELOC(R_TYPE)			\
  ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
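
/* Usage sketch (illustrative only; the helper below is hypothetical and
   not part of this file).  IS_AARCH64_TLS_RELOC already folds in
   IS_AARCH64_TLSDESC_RELOC via its final clause, so one test covers
   both traditional TLS and TLSDESC accesses.  */
#if 0
static bool
example_reloc_needs_tls_handling (bfd_reloc_code_real_type r_type)
{
  /* True for every TLS-related relocation, descriptor based or not.  */
  return IS_AARCH64_TLS_RELOC (r_type);
}
#endif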

#define ELIMINATE_COPY_RELOCS 1

/* Return size of a relocation entry.  HTAB is the bfd's
   elf_aarch64_link_hash_table.  */
#define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))

/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
#define GOT_ENTRY_SIZE			(ARCH_SIZE / 8)
#define PLT_ENTRY_SIZE			(32)
#define PLT_SMALL_ENTRY_SIZE		(16)
#define PLT_TLSDESC_ENTRY_SIZE		(32)
/* PLT sizes with BTI insn.  */
#define PLT_BTI_SMALL_ENTRY_SIZE	(24)
/* PLT sizes with PAC insn.  */
#define PLT_PAC_SMALL_ENTRY_SIZE	(24)
/* PLT sizes with BTI and PAC insn.  */
#define PLT_BTI_PAC_SMALL_ENTRY_SIZE	(24)

/* Encoding of the nop instruction.  */
#define INSN_NOP 0xd503201f

#define aarch64_compute_jump_table_size(htab)		\
  (((htab)->root.srelplt == NULL) ? 0			\
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
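
/* A minimal sketch (not part of the port) of the GOT-offset LSB tagging
   scheme described in the notes at the top of this file.  The helper
   names are hypothetical; GOT entries are GOT_ENTRY_SIZE aligned, so
   bit 0 of an offset is otherwise always zero and is free to act as a
   "GOT relocations already emitted" flag.  */
#if 0
static bfd_vma
example_got_offset_mark_emitted (bfd_vma off)
{
  return off | 1;		/* Set the "emitted" flag bit.  */
}

static bfd_vma
example_got_offset_value (bfd_vma off)
{
  return off & ~(bfd_vma) 1;	/* Mask the flag out before use.  */
}
#endif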

/* The first entry in a procedure linkage table looks like this.
   These PLT entries are used when the distance between the PLTGOT
   and the PLT is < 4GB.  Note that the dynamic linker gets &PLTGOT[2]
   in x16 and needs to work out PLTGOT[1] by using an address of
   [x16,#-GOT_ENTRY_SIZE].  */
static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
{
  0xf0, 0x7b, 0xbf, 0xa9,	/* stp x16, x30, [sp, #-16]!  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, (GOT+16)  */
#if ARCH_SIZE == 64
  0x11, 0x0A, 0x40, 0xf9,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x10, 0x42, 0x00, 0x91,	/* add x16, x16,#PLT_GOT+0x10  */
#else
  0x11, 0x0A, 0x40, 0xb9,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x10, 0x22, 0x00, 0x11,	/* add w16, w16,#PLT_GOT+0x8  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};

static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0xf0, 0x7b, 0xbf, 0xa9,	/* stp x16, x30, [sp, #-16]!  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, (GOT+16)  */
#if ARCH_SIZE == 64
  0x11, 0x0A, 0x40, 0xf9,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x10, 0x42, 0x00, 0x91,	/* add x16, x16,#PLT_GOT+0x10  */
#else
  0x11, 0x0A, 0x40, 0xb9,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x10, 0x22, 0x00, 0x11,	/* add w16, w16,#PLT_GOT+0x8  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};

/* Each per-function entry in the procedure linkage table looks like
   this.  These PLT entries are used when the distance between the
   PLTGOT and the PLT is < 4GB.  Use BTI versions of the PLTs when
   enabled.  */
static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
{
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8]  */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4]  */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
};
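
/* Worked example (illustrative addresses only): if the PLT entry for
   `foo' is at 0x400570 and foo's .got.plt slot is at 0x411018, the
   template above is patched to

     adrp x16, 0x411000		; (PG(0x411018) - PG(0x400570)) >> 12
     ldr  x17, [x16, #0x18]	; low 12 bits of the slot address
     add  x16, x16, #0x18

   leaving x17 = the slot's contents (the branch target) and
   x16 = the slot's address, which is what the lazy resolver expects.  */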

static const bfd_byte
elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8]  */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4]  */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};

static const bfd_byte
elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
{
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8]  */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4]  */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x9f, 0x21, 0x03, 0xd5,	/* autia1716  */
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};

static const bfd_byte
elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8]  */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4]  */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x9f, 0x21, 0x03, 0xd5,	/* autia1716  */
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
};

static const bfd_byte
elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
{
  0xe2, 0x0f, 0xbf, 0xa9,	/* stp x2, x3, [sp, #-16]!  */
  0x02, 0x00, 0x00, 0x90,	/* adrp x2, 0  */
  0x03, 0x00, 0x00, 0x90,	/* adrp x3, 0  */
#if ARCH_SIZE == 64
  0x42, 0x00, 0x40, 0xf9,	/* ldr x2, [x2, #0]  */
  0x63, 0x00, 0x00, 0x91,	/* add x3, x3, 0  */
#else
  0x42, 0x00, 0x40, 0xb9,	/* ldr w2, [x2, #0]  */
  0x63, 0x00, 0x00, 0x11,	/* add w3, w3, 0  */
#endif
  0x40, 0x00, 0x1f, 0xd6,	/* br x2  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};

static const bfd_byte
elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0xe2, 0x0f, 0xbf, 0xa9,	/* stp x2, x3, [sp, #-16]!  */
  0x02, 0x00, 0x00, 0x90,	/* adrp x2, 0  */
  0x03, 0x00, 0x00, 0x90,	/* adrp x3, 0  */
#if ARCH_SIZE == 64
  0x42, 0x00, 0x40, 0xf9,	/* ldr x2, [x2, #0]  */
  0x63, 0x00, 0x00, 0x91,	/* add x3, x3, 0  */
#else
  0x42, 0x00, 0x40, 0xb9,	/* ldr w2, [x2, #0]  */
  0x63, 0x00, 0x00, 0x11,	/* add w3, w3, 0  */
#endif
  0x40, 0x00, 0x1f, 0xd6,	/* br x2  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};

#define elf_info_to_howto		elfNN_aarch64_info_to_howto
#define elf_info_to_howto_rel		elfNN_aarch64_info_to_howto

#define AARCH64_ELF_ABI_VERSION		0

/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
#define ALL_ONES (~ (bfd_vma) 0)

/* Indexed by the bfd internal reloc enumerators.
   Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
   in reloc.c.  */

static reloc_howto_type elfNN_aarch64_howto_table[] =
{
  EMPTY_HOWTO (0),

  /* Basic data relocations.  */

  /* Deprecated, but retained for backwards compatibility.  */
  HOWTO64 (R_AARCH64_NULL,	/* type */
	   0,			/* rightshift */
	   0,			/* size */
	   0,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   "R_AARCH64_NULL",	/* name */
	   false,		/* partial_inplace */
	   0,			/* src_mask */
	   0,			/* dst_mask */
	   false),		/* pcrel_offset */
  HOWTO (R_AARCH64_NONE,	/* type */
	 0,			/* rightshift */
	 0,			/* size */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_NONE",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* .xword: (S+A) */
  HOWTO64 (AARCH64_R (ABS64),	/* type */
	   0,			/* rightshift */
	   8,			/* size */
	   64,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (ABS64),	/* name */
	   false,		/* partial_inplace */
	   ALL_ONES,		/* src_mask */
	   ALL_ONES,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* .word: (S+A) */
  HOWTO (AARCH64_R (ABS32),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (ABS32),	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* .half: (S+A) */
  HOWTO (AARCH64_R (ABS16),	/* type */
	 0,			/* rightshift */
	 2,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (ABS16),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* .xword: (S+A-P) */
  HOWTO64 (AARCH64_R (PREL64),	/* type */
	   0,			/* rightshift */
	   8,			/* size */
	   64,			/* bitsize */
	   true,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_signed,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (PREL64),	/* name */
	   false,		/* partial_inplace */
	   ALL_ONES,		/* src_mask */
	   ALL_ONES,		/* dst_mask */
	   true),		/* pcrel_offset */

  /* .word: (S+A-P) */
  HOWTO (AARCH64_R (PREL32),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 32,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (PREL32),	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* .half: (S+A-P) */
  HOWTO (AARCH64_R (PREL16),	/* type */
	 0,			/* rightshift */
	 2,			/* size */
	 16,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (PREL16),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* Group relocations to create a 16, 32, 48 or 64 bit
     unsigned data or abs address inline.  */

  /* MOVZ: ((S+A) >> 0) & 0xffff */
  HOWTO (AARCH64_R (MOVW_UABS_G0),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_UABS_G0),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
  HOWTO (AARCH64_R (MOVW_UABS_G0_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_UABS_G0_NC),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* MOVZ: ((S+A) >> 16) & 0xffff */
  HOWTO (AARCH64_R (MOVW_UABS_G1),	/* type */
	 16,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_UABS_G1),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
  HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_UABS_G1_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* MOVZ: ((S+A) >> 32) & 0xffff */
  HOWTO64 (AARCH64_R (MOVW_UABS_G2),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_UABS_G2),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
  HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_UABS_G2_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* MOVZ: ((S+A) >> 48) & 0xffff */
  HOWTO64 (AARCH64_R (MOVW_UABS_G3),	/* type */
	   48,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_UABS_G3),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */
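
  /* For example (standard GAS syntax, illustrative): a full 64-bit
     absolute address is materialised with one MOVZ plus three MOVKs,
     each of the relocations above filling one 16-bit field:

	movz	x0, #:abs_g3:foo	R_AARCH64_MOVW_UABS_G3
	movk	x0, #:abs_g2_nc:foo	R_AARCH64_MOVW_UABS_G2_NC
	movk	x0, #:abs_g1_nc:foo	R_AARCH64_MOVW_UABS_G1_NC
	movk	x0, #:abs_g0_nc:foo	R_AARCH64_MOVW_UABS_G0_NC  */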

  /* Group relocations to create high part of a 16, 32, 48 or 64 bit
     signed data or abs address inline.  Will change instruction
     to MOVN or MOVZ depending on sign of calculated value.  */

  /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
  HOWTO (AARCH64_R (MOVW_SABS_G0),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 17,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_SABS_G0),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
  HOWTO64 (AARCH64_R (MOVW_SABS_G1),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   17,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_signed,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_SABS_G1),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
  HOWTO64 (AARCH64_R (MOVW_SABS_G2),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   17,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_signed,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_SABS_G2),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Group relocations to create a 16, 32, 48 or 64 bit
     PC relative address inline.  */

  /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */
  HOWTO (AARCH64_R (MOVW_PREL_G0),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 17,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_PREL_G0),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */
  HOWTO (AARCH64_R (MOVW_PREL_G0_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_PREL_G0_NC),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */
  HOWTO (AARCH64_R (MOVW_PREL_G1),	/* type */
	 16,			/* rightshift */
	 4,			/* size */
	 17,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (MOVW_PREL_G1),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */
  HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   true,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_PREL_G1_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   true),		/* pcrel_offset */

  /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */
  HOWTO64 (AARCH64_R (MOVW_PREL_G2),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   17,			/* bitsize */
	   true,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_signed,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_PREL_G2),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   true),		/* pcrel_offset */

  /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */
  HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   true,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_PREL_G2_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   true),		/* pcrel_offset */

  /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */
  HOWTO64 (AARCH64_R (MOVW_PREL_G3),	/* type */
	   48,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   true,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_PREL_G3),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   true),		/* pcrel_offset */

  /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
     addresses: PG(x) is (x & ~0xfff).  */

  /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
  HOWTO (AARCH64_R (LD_PREL_LO19),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 19,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (LD_PREL_LO19),	/* name */
	 false,			/* partial_inplace */
	 0x7ffff,		/* src_mask */
	 0x7ffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* ADR: (S+A-P) & 0x1fffff */
  HOWTO (AARCH64_R (ADR_PREL_LO21),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (ADR_PREL_LO21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
  HOWTO (AARCH64_R (ADR_PREL_PG_HI21),	/* type */
	 12,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (ADR_PREL_PG_HI21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
  HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC),	/* type */
	   12,			/* rightshift */
	   4,			/* size */
	   21,			/* bitsize */
	   true,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (ADR_PREL_PG_HI21_NC),	/* name */
	   false,		/* partial_inplace */
	   0x1fffff,		/* src_mask */
	   0x1fffff,		/* dst_mask */
	   true),		/* pcrel_offset */

  /* ADD: (S+A) & 0xfff [no overflow check] */
  HOWTO (AARCH64_R (ADD_ABS_LO12_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 10,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (ADD_ABS_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0x3ffc00,		/* src_mask */
	 0x3ffc00,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* LD/ST8: (S+A) & 0xfff */
  HOWTO (AARCH64_R (LDST8_ABS_LO12_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (LDST8_ABS_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* Relocations for control-flow instructions.  */

  /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
  HOWTO (AARCH64_R (TSTBR14),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 14,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TSTBR14),	/* name */
	 false,			/* partial_inplace */
	 0x3fff,		/* src_mask */
	 0x3fff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
  HOWTO (AARCH64_R (CONDBR19),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 19,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (CONDBR19),	/* name */
	 false,			/* partial_inplace */
	 0x7ffff,		/* src_mask */
	 0x7ffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* B: ((S+A-P) >> 2) & 0x3ffffff */
  HOWTO (AARCH64_R (JUMP26),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 26,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (JUMP26),	/* name */
	 false,			/* partial_inplace */
	 0x3ffffff,		/* src_mask */
	 0x3ffffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* BL: ((S+A-P) >> 2) & 0x3ffffff */
  HOWTO (AARCH64_R (CALL26),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 26,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (CALL26),	/* name */
	 false,			/* partial_inplace */
	 0x3ffffff,		/* src_mask */
	 0x3ffffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* LD/ST16: (S+A) & 0xffe */
  HOWTO (AARCH64_R (LDST16_ABS_LO12_NC),	/* type */
	 1,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (LDST16_ABS_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xffe,			/* src_mask */
	 0xffe,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* LD/ST32: (S+A) & 0xffc */
  HOWTO (AARCH64_R (LDST32_ABS_LO12_NC),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (LDST32_ABS_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xffc,			/* src_mask */
	 0xffc,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* LD/ST64: (S+A) & 0xff8 */
  HOWTO (AARCH64_R (LDST64_ABS_LO12_NC),	/* type */
	 3,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (LDST64_ABS_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xff8,			/* src_mask */
	 0xff8,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* LD/ST128: (S+A) & 0xff0 */
  HOWTO (AARCH64_R (LDST128_ABS_LO12_NC),	/* type */
	 4,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (LDST128_ABS_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xff0,			/* src_mask */
	 0xff0,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* Set a load-literal immediate field to bits
     0x1FFFFC of G(S)-P */
  HOWTO (AARCH64_R (GOT_LD_PREL19),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 19,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (GOT_LD_PREL19),	/* name */
	 false,			/* partial_inplace */
	 0xffffe0,		/* src_mask */
	 0xffffe0,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* Get to the page for the GOT entry for the symbol
     (G(S) - P) using an ADRP instruction.  */
  HOWTO (AARCH64_R (ADR_GOT_PAGE),	/* type */
	 12,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (ADR_GOT_PAGE),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* LD64: GOT offset G(S) & 0xff8 */
  HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC),	/* type */
	   3,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (LD64_GOT_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0xff8,		/* src_mask */
	   0xff8,		/* dst_mask */
	   false),		/* pcrel_offset */
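
  /* For example (illustrative): the canonical GOT-indirect load of a
     symbol's address pairs the two relocations above:

	adrp	x0, :got:foo			R_AARCH64_ADR_GOT_PAGE
	ldr	x0, [x0, #:got_lo12:foo]	R_AARCH64_LD64_GOT_LO12_NC  */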

  /* LD32: GOT offset G(S) & 0xffc */
  HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC),	/* type */
	   2,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (LD32_GOT_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffc,		/* src_mask */
	   0xffc,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Lower 16 bits of GOT offset for the symbol.  */
  HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC),	/* type */
	   0,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_GOTOFF_G0_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Higher 16 bits of GOT offset for the symbol.  */
  HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (MOVW_GOTOFF_G1),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* LD64: GOT offset for the symbol.  */
  HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15),	/* type */
	   3,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (LD64_GOTOFF_LO15),	/* name */
	   false,		/* partial_inplace */
	   0x7ff8,		/* src_mask */
	   0x7ff8,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* LD32: GOT offset to the page address of GOT table.
     (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc.  */
  HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14),	/* type */
	   2,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (LD32_GOTPAGE_LO14),	/* name */
	   false,		/* partial_inplace */
	   0x5ffc,		/* src_mask */
	   0x5ffc,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* LD64: GOT offset to the page address of GOT table.
     (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8.  */
  HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15),	/* type */
	   3,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (LD64_GOTPAGE_LO15),	/* name */
	   false,		/* partial_inplace */
	   0x7ff8,		/* src_mask */
	   0x7ff8,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Get to the page for the GOT entry for the symbol
     (G(S) - P) using an ADRP instruction.  */
  HOWTO (AARCH64_R (TLSGD_ADR_PAGE21),	/* type */
	 12,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSGD_ADR_PAGE21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  HOWTO (AARCH64_R (TLSGD_ADR_PREL21),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSGD_ADR_PREL21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
  HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSGD_ADD_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* Lower 16 bits of GOT offset to tls_index.  */
  HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC),	/* type */
	   0,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSGD_MOVW_G0_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Higher 16 bits of GOT offset to tls_index.  */
  HOWTO64 (AARCH64_R (TLSGD_MOVW_G1),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSGD_MOVW_G1),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21),	/* type */
	 12,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC),	/* type */
	   3,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0xff8,		/* src_mask */
	   0xff8,		/* dst_mask */
	   false),		/* pcrel_offset */

  HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC),	/* type */
	   2,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffc,		/* src_mask */
	   0xffc,		/* dst_mask */
	   false),		/* pcrel_offset */

  HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19),	/* type */
	 2,			/* rightshift */
	 4,			/* size */
	 19,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19),	/* name */
	 false,			/* partial_inplace */
	 0x1ffffc,		/* src_mask */
	 0x1ffffc,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC),	/* type */
	   0,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* ADD: bit[23:12] of byte offset to module TLS base address.  */
  HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12),	/* type */
	 12,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12),	/* name */
	 false,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* Unsigned 12 bit byte offset to module TLS base address.  */
  HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12),	/* name */
	 false,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12.  */
  HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
  HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 12,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_ADD_LO12_NC),	/* name */
	 false,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 false),		/* pcrel_offset */

  /* Get to the page for the GOT entry for the symbol
     (G(S) - P) using an ADRP instruction.  */
  HOWTO (AARCH64_R (TLSLD_ADR_PAGE21),	/* type */
	 12,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_ADR_PAGE21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  HOWTO (AARCH64_R (TLSLD_ADR_PREL21),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 21,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_ADR_PREL21),	/* name */
	 false,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* LD/ST16: bit[11:1] of byte offset to module TLS base address.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12),	/* type */
	   1,			/* rightshift */
	   4,			/* size */
	   11,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12),	/* name */
	   false,		/* partial_inplace */
	   0x1ffc00,		/* src_mask */
	   0x1ffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC),	/* type */
	   1,			/* rightshift */
	   4,			/* size */
	   11,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0x1ffc00,		/* src_mask */
	   0x1ffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* LD/ST32: bit[11:2] of byte offset to module TLS base address.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12),	/* type */
	   2,			/* rightshift */
	   4,			/* size */
	   10,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12),	/* name */
	   false,		/* partial_inplace */
	   0x3ffc00,		/* src_mask */
	   0x3ffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC),	/* type */
	   2,			/* rightshift */
	   4,			/* size */
	   10,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffc00,		/* src_mask */
	   0xffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* LD/ST64: bit[11:3] of byte offset to module TLS base address.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12),	/* type */
	   3,			/* rightshift */
	   4,			/* size */
	   9,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12),	/* name */
	   false,		/* partial_inplace */
	   0x3ffc00,		/* src_mask */
	   0x3ffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC),	/* type */
	   3,			/* rightshift */
	   4,			/* size */
	   9,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0x7fc00,		/* src_mask */
	   0x7fc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* LD/ST8: bit[11:0] of byte offset to module TLS base address.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12),	/* type */
	   0,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12),	/* name */
	   false,		/* partial_inplace */
	   0x3ffc00,		/* src_mask */
	   0x3ffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check.  */
  HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC),	/* type */
	   0,			/* rightshift */
	   4,			/* size */
	   12,			/* bitsize */
	   false,		/* pc_relative */
	   10,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC),	/* name */
	   false,		/* partial_inplace */
	   0x3ffc00,		/* src_mask */
	   0x3ffc00,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* MOVZ: bit[15:0] of byte offset to module TLS base address.  */
  HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* MOVZ: bit[31:16] of byte offset to module TLS base address.  */
  HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1),	/* type */
	 16,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  /* MOVZ: bit[47:32] of byte offset to module TLS base address.  */
  HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2),	/* type */
	   32,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_unsigned,	/* complain_on_overflow */
	   bfd_elf_generic_reloc,	/* special_function */
	   AARCH64_R_STR (TLSLE_MOVW_TPREL_G2),	/* name */
	   false,		/* partial_inplace */
	   0xffff,		/* src_mask */
	   0xffff,		/* dst_mask */
	   false),		/* pcrel_offset */

  HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1),	/* type */
	 16,			/* rightshift */
	 4,			/* size */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1),	/* name */
	 false,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC),	/* type */
	   16,			/* rightshift */
	   4,			/* size */
	   16,			/* bitsize */
	   false,		/* pc_relative */
	   0,			/* bitpos */
	   complain_overflow_dont,	/* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffff, /* src_mask */
9 kx 0xffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 16, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffff, /* src_mask */
9 kx 0xffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 16, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffff, /* src_mask */
9 kx 0xffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
9 kx 12, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
9 kx false, /* partial_inplace */
9 kx 0xfff, /* src_mask */
9 kx 0xfff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0xfff, /* src_mask */
9 kx 0xfff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0xfff, /* src_mask */
9 kx 0xfff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* LD/ST16: bit[11:1] of byte offset from the thread pointer. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */
9 kx 1, /* rightshift */
9 kx 4, /* size */
9 kx 11, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0x1ffc00, /* src_mask */
9 kx 0x1ffc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */
9 kx 1, /* rightshift */
9 kx 4, /* size */
9 kx 11, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0x1ffc00, /* src_mask */
9 kx 0x1ffc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* LD/ST32: bit[11:2] of byte offset from the thread pointer. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */
9 kx 2, /* rightshift */
9 kx 4, /* size */
9 kx 10, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffc00, /* src_mask */
9 kx 0xffc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */
9 kx 2, /* rightshift */
9 kx 4, /* size */
9 kx 10, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffc00, /* src_mask */
9 kx 0xffc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* LD/ST64: bit[11:3] of byte offset from the thread pointer. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */
9 kx 3, /* rightshift */
9 kx 4, /* size */
9 kx 9, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0x7fc00, /* src_mask */
9 kx 0x7fc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */
9 kx 3, /* rightshift */
9 kx 4, /* size */
9 kx 9, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0x7fc00, /* src_mask */
9 kx 0x7fc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* LD/ST8: bit[11:0] of byte offset from the thread pointer. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0x3ffc00, /* src_mask */
9 kx 0x3ffc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */
9 kx HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 10, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0x3ffc00, /* src_mask */
9 kx 0x3ffc00, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
9 kx 2, /* rightshift */
9 kx 4, /* size */
9 kx 19, /* bitsize */
9 kx true, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
9 kx false, /* partial_inplace */
9 kx 0x0ffffe0, /* src_mask */
9 kx 0x0ffffe0, /* dst_mask */
9 kx true), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 21, /* bitsize */
9 kx true, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
9 kx false, /* partial_inplace */
9 kx 0x1fffff, /* src_mask */
9 kx 0x1fffff, /* dst_mask */
9 kx true), /* pcrel_offset */
9 kx
9 kx /* Get to the page for the GOT entry for the symbol
9 kx (G(S) - P) using an ADRP instruction. */
9 kx HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
9 kx 12, /* rightshift */
9 kx 4, /* size */
9 kx 21, /* bitsize */
9 kx true, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
9 kx false, /* partial_inplace */
9 kx 0x1fffff, /* src_mask */
9 kx 0x1fffff, /* dst_mask */
9 kx true), /* pcrel_offset */
9 kx
9 kx /* LD64: GOT offset G(S) & 0xff8. */
9 kx HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */
9 kx 3, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0xff8, /* src_mask */
9 kx 0xff8, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* LD32: GOT offset G(S) & 0xffc. */
9 kx HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
9 kx 2, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffc, /* src_mask */
9 kx 0xffc, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx /* ADD: GOT offset G(S) & 0xfff. */
9 kx HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */
9 kx false, /* partial_inplace */
9 kx 0xfff, /* src_mask */
9 kx 0xfff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
9 kx 16, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_unsigned, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffff, /* src_mask */
9 kx 0xffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
9 kx false, /* partial_inplace */
9 kx 0xffff, /* src_mask */
9 kx 0xffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_LDR), /* name */
9 kx false, /* partial_inplace */
9 kx 0x0, /* src_mask */
9 kx 0x0, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 12, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_ADD), /* name */
9 kx false, /* partial_inplace */
9 kx 0x0, /* src_mask */
9 kx 0x0, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 0, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC_CALL), /* name */
9 kx false, /* partial_inplace */
9 kx 0x0, /* src_mask */
9 kx 0x0, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (COPY), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_bitfield, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (COPY), /* name */
9 kx true, /* partial_inplace */
9 kx 0xffffffff, /* src_mask */
9 kx 0xffffffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (GLOB_DAT), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_bitfield, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (GLOB_DAT), /* name */
9 kx true, /* partial_inplace */
9 kx 0xffffffff, /* src_mask */
9 kx 0xffffffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (JUMP_SLOT), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_bitfield, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (JUMP_SLOT), /* name */
9 kx true, /* partial_inplace */
9 kx 0xffffffff, /* src_mask */
9 kx 0xffffffff, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (RELATIVE), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_bitfield, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (RELATIVE), /* name */
9 kx true, /* partial_inplace */
9 kx ALL_ONES, /* src_mask */
9 kx ALL_ONES, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx #if ARCH_SIZE == 64
9 kx AARCH64_R_STR (TLS_DTPMOD64), /* name */
9 kx #else
9 kx AARCH64_R_STR (TLS_DTPMOD), /* name */
9 kx #endif
9 kx false, /* partial_inplace */
9 kx 0, /* src_mask */
9 kx ALL_ONES, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLS_DTPREL), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx #if ARCH_SIZE == 64
9 kx AARCH64_R_STR (TLS_DTPREL64), /* name */
9 kx #else
9 kx AARCH64_R_STR (TLS_DTPREL), /* name */
9 kx #endif
9 kx false, /* partial_inplace */
9 kx 0, /* src_mask */
9 kx ALL_ONES, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLS_TPREL), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx #if ARCH_SIZE == 64
9 kx AARCH64_R_STR (TLS_TPREL64), /* name */
9 kx #else
9 kx AARCH64_R_STR (TLS_TPREL), /* name */
9 kx #endif
9 kx false, /* partial_inplace */
9 kx 0, /* src_mask */
9 kx ALL_ONES, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (TLSDESC), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (TLSDESC), /* name */
9 kx false, /* partial_inplace */
9 kx 0, /* src_mask */
9 kx ALL_ONES, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx HOWTO (AARCH64_R (IRELATIVE), /* type */
9 kx 0, /* rightshift */
9 kx 4, /* size */
9 kx 64, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_bitfield, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx AARCH64_R_STR (IRELATIVE), /* name */
9 kx false, /* partial_inplace */
9 kx 0, /* src_mask */
9 kx ALL_ONES, /* dst_mask */
9 kx false), /* pcrel_offset */
9 kx
9 kx EMPTY_HOWTO (0),
9 kx };
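9 kx /* Worked example (illustrative sketch only, not part of the build):
9 kx applying a howto such as TLSLE_ADD_TPREL_HI12 above conceptually
9 kx extracts bits [23:12] of the TP-relative offset:
9 kx
9 kx bfd_vma offset = 0x12345; /* Hypothetical TP-relative offset. */
9 kx bfd_vma field = (offset >> 12) & 0xfff; /* rightshift / dst_mask. */
9 kx /* field == 0x12; it lands in the ADD immediate field. */
9 kx
9 kx The real field insertion is performed by _bfd_aarch64_elf_put_addend,
9 kx which knows the instruction-specific immediate positions. */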
9 kx
9 kx static reloc_howto_type elfNN_aarch64_howto_none =
9 kx HOWTO (R_AARCH64_NONE, /* type */
9 kx 0, /* rightshift */
9 kx 0, /* size */
9 kx 0, /* bitsize */
9 kx false, /* pc_relative */
9 kx 0, /* bitpos */
9 kx complain_overflow_dont, /* complain_on_overflow */
9 kx bfd_elf_generic_reloc, /* special_function */
9 kx "R_AARCH64_NONE", /* name */
9 kx false, /* partial_inplace */
9 kx 0, /* src_mask */
9 kx 0, /* dst_mask */
9 kx false); /* pcrel_offset */
9 kx
9 kx /* Given HOWTO, return the bfd internal relocation enumerator. */
9 kx
9 kx static bfd_reloc_code_real_type
9 kx elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
9 kx {
9 kx const int size
9 kx = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
9 kx const ptrdiff_t offset
9 kx = howto - elfNN_aarch64_howto_table;
9 kx
9 kx if (offset > 0 && offset < size - 1)
9 kx return BFD_RELOC_AARCH64_RELOC_START + offset;
9 kx
9 kx if (howto == &elfNN_aarch64_howto_none)
9 kx return BFD_RELOC_AARCH64_NONE;
9 kx
9 kx return BFD_RELOC_AARCH64_RELOC_START;
9 kx }
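9 kx /* For illustration (hypothetical index): if HOWTO points at entry 5 of
9 kx elfNN_aarch64_howto_table and that is a populated interior entry, then
9 kx OFFSET above is 5 and the function returns
9 kx BFD_RELOC_AARCH64_RELOC_START + 5. A pointer outside the table's
9 kx interior falls through to the BFD_RELOC_AARCH64_RELOC_START default. */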
9 kx
9 kx /* Given R_TYPE, return the bfd internal relocation enumerator. */
9 kx
9 kx static bfd_reloc_code_real_type
9 kx elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
9 kx {
9 kx static bool initialized_p = false;
9 kx /* Indexed by R_TYPE, values are offsets in the howto_table. */
9 kx static unsigned int offsets[R_AARCH64_end];
9 kx
9 kx if (!initialized_p)
9 kx {
9 kx unsigned int i;
9 kx
9 kx for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
9 kx if (elfNN_aarch64_howto_table[i].type != 0)
9 kx offsets[elfNN_aarch64_howto_table[i].type] = i;
9 kx
9 kx initialized_p = true;
9 kx }
9 kx
9 kx if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
9 kx return BFD_RELOC_AARCH64_NONE;
9 kx
9 kx /* PR 17512: file: b371e70a. */
9 kx if (r_type >= R_AARCH64_end)
9 kx {
9 kx _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
9 kx abfd, r_type);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return BFD_RELOC_AARCH64_NONE;
9 kx }
9 kx
9 kx return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
9 kx }
9 kx
9 kx struct elf_aarch64_reloc_map
9 kx {
9 kx bfd_reloc_code_real_type from;
9 kx bfd_reloc_code_real_type to;
9 kx };
9 kx
9 kx /* Map bfd generic reloc to AArch64-specific reloc. */
9 kx static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
9 kx {
9 kx {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
9 kx
9 kx /* Basic data relocations. */
9 kx {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
9 kx {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
9 kx {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
9 kx {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
9 kx {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
9 kx {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
9 kx {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
9 kx };
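9 kx /* Example of the mapping above (sketch): a call such as
9 kx
9 kx elfNN_aarch64_howto_from_bfd_reloc (BFD_RELOC_64)
9 kx
9 kx first rewrites BFD_RELOC_64 to BFD_RELOC_AARCH64_64 via this table and
9 kx then indexes the howto table with the rewritten code. */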
9 kx
9 kx /* Given the bfd internal relocation enumerator in CODE, return the
9 kx corresponding howto entry. */
9 kx
9 kx static reloc_howto_type *
9 kx elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
9 kx {
9 kx unsigned int i;
9 kx
9 kx /* Convert bfd generic reloc to AArch64-specific reloc. */
9 kx if (code < BFD_RELOC_AARCH64_RELOC_START
9 kx || code > BFD_RELOC_AARCH64_RELOC_END)
9 kx for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
9 kx if (elf_aarch64_reloc_map[i].from == code)
9 kx {
9 kx code = elf_aarch64_reloc_map[i].to;
9 kx break;
9 kx }
9 kx
9 kx if (code > BFD_RELOC_AARCH64_RELOC_START
9 kx && code < BFD_RELOC_AARCH64_RELOC_END)
9 kx if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
9 kx return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
9 kx
9 kx if (code == BFD_RELOC_AARCH64_NONE)
9 kx return &elfNN_aarch64_howto_none;
9 kx
9 kx return NULL;
9 kx }
9 kx
9 kx static reloc_howto_type *
9 kx elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
9 kx {
9 kx bfd_reloc_code_real_type val;
9 kx reloc_howto_type *howto;
9 kx
9 kx #if ARCH_SIZE == 32
9 kx if (r_type > 256)
9 kx {
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return NULL;
9 kx }
9 kx #endif
9 kx
9 kx if (r_type == R_AARCH64_NONE)
9 kx return &elfNN_aarch64_howto_none;
9 kx
9 kx val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
9 kx howto = elfNN_aarch64_howto_from_bfd_reloc (val);
9 kx
9 kx if (howto != NULL)
9 kx return howto;
9 kx
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return NULL;
9 kx }
9 kx
9 kx static bool
9 kx elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
9 kx Elf_Internal_Rela *elf_reloc)
9 kx {
9 kx unsigned int r_type;
9 kx
9 kx r_type = ELFNN_R_TYPE (elf_reloc->r_info);
9 kx bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);
9 kx
9 kx if (bfd_reloc->howto == NULL)
9 kx {
9 kx /* xgettext:c-format */
9 kx _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
9 kx return false;
9 kx }
9 kx return true;
9 kx }
9 kx
9 kx static reloc_howto_type *
9 kx elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
9 kx bfd_reloc_code_real_type code)
9 kx {
9 kx reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
9 kx
9 kx if (howto != NULL)
9 kx return howto;
9 kx
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return NULL;
9 kx }
9 kx
9 kx static reloc_howto_type *
9 kx elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
9 kx const char *r_name)
9 kx {
9 kx unsigned int i;
9 kx
9 kx for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
9 kx if (elfNN_aarch64_howto_table[i].name != NULL
9 kx && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
9 kx return &elfNN_aarch64_howto_table[i];
9 kx
9 kx return NULL;
9 kx }
9 kx
9 kx #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
9 kx #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
9 kx #define TARGET_BIG_SYM aarch64_elfNN_be_vec
9 kx #define TARGET_BIG_NAME "elfNN-bigaarch64"
9 kx
9 kx /* The linker script knows the section names for placement.
9 kx The entry_names are used to do simple name mangling on the stubs.
9 kx Given a function name, and its type, the stub can be found. The
9 kx name can be changed. The only requirement is that the %s be present. */
9 kx #define STUB_ENTRY_NAME "__%s_veneer"
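9 kx /* For example, a stub for a function named "foo" is given the local
9 kx symbol name "__foo_veneer" under the default template above. */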
9 kx
9 kx /* The name of the dynamic interpreter. This is put in the .interp
9 kx section. */
9 kx #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
9 kx
9 kx #define AARCH64_MAX_FWD_BRANCH_OFFSET \
9 kx (((1 << 25) - 1) << 2)
9 kx #define AARCH64_MAX_BWD_BRANCH_OFFSET \
9 kx (-((1 << 25) << 2))
9 kx
9 kx #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
9 kx #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
9 kx
9 kx static int
9 kx aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
9 kx {
9 kx bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
9 kx return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
9 kx }
9 kx
9 kx static int
9 kx aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
9 kx {
9 kx bfd_signed_vma offset = (bfd_signed_vma) (value - place);
9 kx return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
9 kx && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
9 kx }
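9 kx /* Worked example (illustrative): B and BL encode a signed 26-bit word
9 kx offset, so AARCH64_MAX_FWD_BRANCH_OFFSET is ((1 << 25) - 1) << 2 =
9 kx 0x7fffffc bytes and AARCH64_MAX_BWD_BRANCH_OFFSET is -(1 << 27) =
9 kx -0x8000000 bytes, i.e. roughly +/-128MiB. Similarly, ADRP spans a
9 kx signed 21-bit page offset, roughly +/-4GiB. */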
9 kx
9 kx static const uint32_t aarch64_adrp_branch_stub [] =
9 kx {
9 kx 0x90000010, /* adrp ip0, X */
9 kx /* R_AARCH64_ADR_HI21_PCREL(X) */
9 kx 0x91000210, /* add ip0, ip0, :lo12:X */
9 kx /* R_AARCH64_ADD_ABS_LO12_NC(X) */
9 kx 0xd61f0200, /* br ip0 */
9 kx };
9 kx
9 kx static const uint32_t aarch64_long_branch_stub[] =
9 kx {
9 kx #if ARCH_SIZE == 64
9 kx 0x58000090, /* ldr ip0, 1f */
9 kx #else
9 kx 0x18000090, /* ldr wip0, 1f */
9 kx #endif
9 kx 0x10000011, /* adr ip1, #0 */
9 kx 0x8b110210, /* add ip0, ip0, ip1 */
9 kx 0xd61f0200, /* br ip0 */
9 kx 0x00000000, /* 1: .xword or .word
9 kx R_AARCH64_PRELNN(X) + 12
9 kx */
9 kx 0x00000000,
9 kx };
9 kx
9 kx static const uint32_t aarch64_erratum_835769_stub[] =
9 kx {
9 kx 0x00000000, /* Placeholder for multiply accumulate. */
9 kx 0x14000000, /* b <label> */
9 kx };
9 kx
9 kx static const uint32_t aarch64_erratum_843419_stub[] =
9 kx {
9 kx 0x00000000, /* Placeholder for LDR instruction. */
9 kx 0x14000000, /* b <label> */
9 kx };
9 kx
9 kx /* Section name for stubs is the associated section name plus this
9 kx string. */
9 kx #define STUB_SUFFIX ".stub"
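9 kx /* For example, stubs attached to an input section named ".text" are
9 kx collected in a section named ".text.stub" (see
9 kx _bfd_aarch64_create_stub_section below). */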
9 kx
9 kx enum elf_aarch64_stub_type
9 kx {
9 kx aarch64_stub_none,
9 kx aarch64_stub_adrp_branch,
9 kx aarch64_stub_long_branch,
9 kx aarch64_stub_erratum_835769_veneer,
9 kx aarch64_stub_erratum_843419_veneer,
9 kx };
9 kx
9 kx struct elf_aarch64_stub_hash_entry
9 kx {
9 kx /* Base hash table entry structure. */
9 kx struct bfd_hash_entry root;
9 kx
9 kx /* The stub section. */
9 kx asection *stub_sec;
9 kx
9 kx /* Offset within stub_sec of the beginning of this stub. */
9 kx bfd_vma stub_offset;
9 kx
9 kx /* Given the symbol's value and its section we can determine its final
9 kx value when building the stubs (so the stub knows where to jump). */
9 kx bfd_vma target_value;
9 kx asection *target_section;
9 kx
9 kx enum elf_aarch64_stub_type stub_type;
9 kx
9 kx /* The symbol table entry, if any, that this was derived from. */
9 kx struct elf_aarch64_link_hash_entry *h;
9 kx
9 kx /* Destination symbol type. */
9 kx unsigned char st_type;
9 kx
9 kx /* Where this stub is being called from, or, in the case of combined
9 kx stub sections, the first input section in the group. */
9 kx asection *id_sec;
9 kx
9 kx /* The name for the local symbol at the start of this stub. The
9 kx stub name in the hash table has to be unique; this does not, so
9 kx it can be friendlier. */
9 kx char *output_name;
9 kx
9 kx /* The instruction which caused this stub to be generated (only valid for
9 kx erratum 835769 workaround stubs at present). */
9 kx uint32_t veneered_insn;
9 kx
9 kx /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
9 kx bfd_vma adrp_offset;
9 kx };
9 kx
9 kx /* Used to build a map of a section. This is required for mixed-endian
9 kx code/data. */
9 kx
9 kx typedef struct elf_elf_section_map
9 kx {
9 kx bfd_vma vma;
9 kx char type;
9 kx }
9 kx elf_aarch64_section_map;
9 kx
9 kx
9 kx typedef struct _aarch64_elf_section_data
9 kx {
9 kx struct bfd_elf_section_data elf;
9 kx unsigned int mapcount;
9 kx unsigned int mapsize;
9 kx elf_aarch64_section_map *map;
9 kx }
9 kx _aarch64_elf_section_data;
9 kx
9 kx #define elf_aarch64_section_data(sec) \
9 kx ((_aarch64_elf_section_data *) elf_section_data (sec))
9 kx
9 kx /* The size of the thread control block, which is defined to be two pointers. */
9 kx #define TCB_SIZE ((ARCH_SIZE / 8) * 2)
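9 kx /* E.g. TCB_SIZE is 16 bytes for ELF64 (ARCH_SIZE == 64) and 8 bytes
9 kx for ELF32 (ARCH_SIZE == 32). */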
9 kx
9 kx struct elf_aarch64_local_symbol
9 kx {
9 kx unsigned int got_type;
9 kx bfd_signed_vma got_refcount;
9 kx bfd_vma got_offset;
9 kx
9 kx /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
9 kx offset is from the end of the jump table and reserved entries
9 kx within the PLTGOT.
9 kx
9 kx The magic value (bfd_vma) -1 indicates that an offset has not been
9 kx allocated. */
9 kx bfd_vma tlsdesc_got_jump_table_offset;
9 kx };
9 kx
9 kx struct elf_aarch64_obj_tdata
9 kx {
9 kx struct elf_obj_tdata root;
9 kx
9 kx /* local symbol descriptors */
9 kx struct elf_aarch64_local_symbol *locals;
9 kx
9 kx /* Zero to warn when linking objects with incompatible enum sizes. */
9 kx int no_enum_size_warning;
9 kx
9 kx /* Zero to warn when linking objects with incompatible wchar_t sizes. */
9 kx int no_wchar_size_warning;
9 kx
9 kx /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */
9 kx uint32_t gnu_and_prop;
9 kx
9 kx /* Zero to warn when linking objects with incompatible
9 kx GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */
9 kx int no_bti_warn;
9 kx
9 kx /* PLT type based on security. */
9 kx aarch64_plt_type plt_type;
9 kx };
9 kx
9 kx #define elf_aarch64_tdata(bfd) \
9 kx ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
9 kx
9 kx #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
9 kx
9 kx #define is_aarch64_elf(bfd) \
9 kx (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
9 kx && elf_tdata (bfd) != NULL \
9 kx && elf_object_id (bfd) == AARCH64_ELF_DATA)
9 kx
9 kx static bool
9 kx elfNN_aarch64_mkobject (bfd *abfd)
9 kx {
9 kx return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
9 kx AARCH64_ELF_DATA);
9 kx }
9 kx
9 kx #define elf_aarch64_hash_entry(ent) \
9 kx ((struct elf_aarch64_link_hash_entry *)(ent))
9 kx
9 kx #define GOT_UNKNOWN 0
9 kx #define GOT_NORMAL 1
9 kx #define GOT_TLS_GD 2
9 kx #define GOT_TLS_IE 4
9 kx #define GOT_TLSDESC_GD 8
9 kx
9 kx #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
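9 kx /* Example: a symbol referenced by both traditional GD and TLSDESC
9 kx sequences may carry got_type == (GOT_TLS_GD | GOT_TLSDESC_GD), for
9 kx which GOT_TLS_GD_ANY_P above is true. */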
9 kx
9 kx /* AArch64 ELF linker hash entry. */
9 kx struct elf_aarch64_link_hash_entry
9 kx {
9 kx struct elf_link_hash_entry root;
9 kx
9 kx /* Since PLT entries have variable size, we need to record the
9 kx index into .got.plt instead of recomputing it from the PLT
9 kx offset. */
9 kx bfd_signed_vma plt_got_offset;
9 kx
9 kx /* Bit mask representing the type of GOT entry(s) if any required by
9 kx this symbol. */
9 kx unsigned int got_type;
9 kx
9 kx /* TRUE if symbol is defined as a protected symbol. */
9 kx unsigned int def_protected : 1;
9 kx
9 kx /* A pointer to the most recently used stub hash entry against this
9 kx symbol. */
9 kx struct elf_aarch64_stub_hash_entry *stub_cache;
9 kx
9 kx /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
9 kx is from the end of the jump table and reserved entries within the PLTGOT.
9 kx
9 kx The magic value (bfd_vma) -1 indicates that an offset has not
9 kx been allocated. */
9 kx bfd_vma tlsdesc_got_jump_table_offset;
9 kx };
9 kx
9 kx static unsigned int
9 kx elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
9 kx bfd *abfd,
9 kx unsigned long r_symndx)
9 kx {
9 kx if (h)
9 kx return elf_aarch64_hash_entry (h)->got_type;
9 kx
9 kx if (! elf_aarch64_locals (abfd))
9 kx return GOT_UNKNOWN;
9 kx
9 kx return elf_aarch64_locals (abfd)[r_symndx].got_type;
9 kx }
9 kx
9 kx /* Get the AArch64 elf linker hash table from a link_info structure. */
9 kx #define elf_aarch64_hash_table(info) \
9 kx ((struct elf_aarch64_link_hash_table *) ((info)->hash))
9 kx
9 kx #define aarch64_stub_hash_lookup(table, string, create, copy) \
9 kx ((struct elf_aarch64_stub_hash_entry *) \
9 kx bfd_hash_lookup ((table), (string), (create), (copy)))
9 kx
9 kx /* AArch64 ELF linker hash table. */
9 kx struct elf_aarch64_link_hash_table
9 kx {
9 kx /* The main hash table. */
9 kx struct elf_link_hash_table root;
9 kx
9 kx /* Nonzero to force PIC branch veneers. */
9 kx int pic_veneer;
9 kx
9 kx /* Fix erratum 835769. */
9 kx int fix_erratum_835769;
9 kx
9 kx /* Fix erratum 843419. */
9 kx erratum_84319_opts fix_erratum_843419;
9 kx
9 kx /* Don't apply link-time values for dynamic relocations. */
9 kx int no_apply_dynamic_relocs;
9 kx
9 kx /* The number of bytes in the initial entry in the PLT. */
9 kx bfd_size_type plt_header_size;
9 kx
9 kx /* The bytes of the initial PLT entry. */
9 kx const bfd_byte *plt0_entry;
9 kx
9 kx /* The number of bytes in the subsequent PLT entries. */
9 kx bfd_size_type plt_entry_size;
9 kx
9 kx /* The bytes of the subsequent PLT entry. */
9 kx const bfd_byte *plt_entry;
9 kx
9 kx /* For convenience in allocate_dynrelocs. */
9 kx bfd *obfd;
9 kx
9 kx /* The amount of space used by the reserved portion of the sgotplt
9 kx section, plus whatever space is used by the jump slots. */
9 kx bfd_vma sgotplt_jump_table_size;
9 kx
9 kx /* The stub hash table. */
9 kx struct bfd_hash_table stub_hash_table;
9 kx
9 kx /* Linker stub bfd. */
9 kx bfd *stub_bfd;
9 kx
9 kx /* Linker call-backs. */
9 kx asection *(*add_stub_section) (const char *, asection *);
9 kx void (*layout_sections_again) (void);
9 kx
9 kx /* Array to keep track of which stub sections have been created, and
9 kx information on stub grouping. */
9 kx struct map_stub
9 kx {
9 kx /* This is the section to which stubs in the group will be
9 kx attached. */
9 kx asection *link_sec;
9 kx /* The stub section. */
9 kx asection *stub_sec;
9 kx } *stub_group;
9 kx
9 kx /* Assorted information used by elfNN_aarch64_size_stubs. */
9 kx unsigned int bfd_count;
9 kx unsigned int top_index;
9 kx asection **input_list;
9 kx
9 kx /* JUMP_SLOT relocs for variant PCS symbols may be present. */
9 kx int variant_pcs;
9 kx
9 kx /* The number of bytes in the PLT entry for the TLS descriptor. */
9 kx bfd_size_type tlsdesc_plt_entry_size;
9 kx
9 kx /* Used by local STT_GNU_IFUNC symbols. */
9 kx htab_t loc_hash_table;
9 kx void * loc_hash_memory;
9 kx };
9 kx
9 kx /* Create an entry in an AArch64 ELF linker hash table. */
9 kx
9 kx static struct bfd_hash_entry *
9 kx elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
9 kx struct bfd_hash_table *table,
9 kx const char *string)
9 kx {
9 kx struct elf_aarch64_link_hash_entry *ret =
9 kx (struct elf_aarch64_link_hash_entry *) entry;
9 kx
9 kx /* Allocate the structure if it has not already been allocated by a
9 kx subclass. */
9 kx if (ret == NULL)
9 kx ret = bfd_hash_allocate (table,
9 kx sizeof (struct elf_aarch64_link_hash_entry));
9 kx if (ret == NULL)
9 kx return (struct bfd_hash_entry *) ret;
9 kx
9 kx /* Call the allocation method of the superclass. */
9 kx ret = ((struct elf_aarch64_link_hash_entry *)
9 kx _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
9 kx table, string));
9 kx if (ret != NULL)
9 kx {
9 kx ret->got_type = GOT_UNKNOWN;
9 kx ret->def_protected = 0;
9 kx ret->plt_got_offset = (bfd_vma) - 1;
9 kx ret->stub_cache = NULL;
9 kx ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9 kx }
9 kx
9 kx return (struct bfd_hash_entry *) ret;
9 kx }
9 kx
9 kx /* Initialize an entry in the stub hash table. */
9 kx
9 kx static struct bfd_hash_entry *
9 kx stub_hash_newfunc (struct bfd_hash_entry *entry,
9 kx struct bfd_hash_table *table, const char *string)
9 kx {
9 kx /* Allocate the structure if it has not already been allocated by a
9 kx subclass. */
9 kx if (entry == NULL)
9 kx {
9 kx entry = bfd_hash_allocate (table,
9 kx sizeof (struct
9 kx elf_aarch64_stub_hash_entry));
9 kx if (entry == NULL)
9 kx return entry;
9 kx }
9 kx
9 kx /* Call the allocation method of the superclass. */
9 kx entry = bfd_hash_newfunc (entry, table, string);
9 kx if (entry != NULL)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *eh;
9 kx
9 kx /* Initialize the local fields. */
9 kx eh = (struct elf_aarch64_stub_hash_entry *) entry;
9 kx eh->adrp_offset = 0;
9 kx eh->stub_sec = NULL;
9 kx eh->stub_offset = 0;
9 kx eh->target_value = 0;
9 kx eh->target_section = NULL;
9 kx eh->stub_type = aarch64_stub_none;
9 kx eh->h = NULL;
9 kx eh->id_sec = NULL;
9 kx }
9 kx
9 kx return entry;
9 kx }
9 kx
9 kx /* Compute a hash of a local hash entry. We use elf_link_hash_entry
9 kx for local symbols so that we can handle local STT_GNU_IFUNC symbols
9 kx as global symbols. We reuse indx and dynstr_index for the local
9 kx symbol hash since they aren't used by global symbols in this backend. */
9 kx
9 kx static hashval_t
9 kx elfNN_aarch64_local_htab_hash (const void *ptr)
9 kx {
9 kx struct elf_link_hash_entry *h
9 kx = (struct elf_link_hash_entry *) ptr;
9 kx return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
9 kx }
9 kx
9 kx /* Compare local hash entries. */
9 kx
9 kx static int
9 kx elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
9 kx {
9 kx struct elf_link_hash_entry *h1
9 kx = (struct elf_link_hash_entry *) ptr1;
9 kx struct elf_link_hash_entry *h2
9 kx = (struct elf_link_hash_entry *) ptr2;
9 kx
9 kx return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
9 kx }
9 kx
9 kx /* Find and/or create a hash entry for a local symbol. */
9 kx
9 kx static struct elf_link_hash_entry *
9 kx elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
9 kx bfd *abfd, const Elf_Internal_Rela *rel,
9 kx bool create)
9 kx {
9 kx struct elf_aarch64_link_hash_entry e, *ret;
9 kx asection *sec = abfd->sections;
9 kx hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
9 kx ELFNN_R_SYM (rel->r_info));
9 kx void **slot;
9 kx
9 kx e.root.indx = sec->id;
9 kx e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
9 kx slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
9 kx create ? INSERT : NO_INSERT);
9 kx
9 kx if (!slot)
9 kx return NULL;
9 kx
9 kx if (*slot)
9 kx {
9 kx ret = (struct elf_aarch64_link_hash_entry *) *slot;
9 kx return &ret->root;
9 kx }
9 kx
9 kx ret = (struct elf_aarch64_link_hash_entry *)
9 kx objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
9 kx sizeof (struct elf_aarch64_link_hash_entry));
9 kx if (ret)
9 kx {
9 kx memset (ret, 0, sizeof (*ret));
9 kx ret->root.indx = sec->id;
9 kx ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
9 kx ret->root.dynindx = -1;
9 kx *slot = ret;
9 kx }
9 kx return &ret->root;
9 kx }
9 kx
9 kx /* Copy the extra info we tack onto an elf_link_hash_entry. */
9 kx
9 kx static void
9 kx elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
9 kx struct elf_link_hash_entry *dir,
9 kx struct elf_link_hash_entry *ind)
9 kx {
9 kx struct elf_aarch64_link_hash_entry *edir, *eind;
9 kx
9 kx edir = (struct elf_aarch64_link_hash_entry *) dir;
9 kx eind = (struct elf_aarch64_link_hash_entry *) ind;
9 kx
9 kx if (ind->root.type == bfd_link_hash_indirect)
9 kx {
9 kx /* Copy over PLT info. */
9 kx if (dir->got.refcount <= 0)
9 kx {
9 kx edir->got_type = eind->got_type;
9 kx eind->got_type = GOT_UNKNOWN;
9 kx }
9 kx }
9 kx
9 kx _bfd_elf_link_hash_copy_indirect (info, dir, ind);
9 kx }
9 kx
9 kx /* Merge non-visibility st_other attributes. */
9 kx
9 kx static void
9 kx elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h,
9 kx unsigned int st_other,
9 kx bool definition,
9 kx bool dynamic ATTRIBUTE_UNUSED)
9 kx {
9 kx if (definition)
9 kx {
9 kx struct elf_aarch64_link_hash_entry *eh
9 kx = (struct elf_aarch64_link_hash_entry *)h;
9 kx eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED;
9 kx }
9 kx
9 kx unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1);
9 kx unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1);
9 kx
9 kx if (isym_sto == h_sto)
9 kx return;
9 kx
9 kx if (isym_sto & ~STO_AARCH64_VARIANT_PCS)
9 kx /* Not fatal, this callback cannot fail. */
9 kx _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"),
9 kx h->root.root.string, isym_sto);
9 kx
9 kx /* Note: Ideally we would warn about any attribute mismatch, but
9 kx this API does not allow that without substantial changes. */
9 kx if (isym_sto & STO_AARCH64_VARIANT_PCS)
9 kx h->other |= STO_AARCH64_VARIANT_PCS;
9 kx }
9 kx
9 kx /* Destroy an AArch64 elf linker hash table. */
9 kx
9 kx static void
9 kx elfNN_aarch64_link_hash_table_free (bfd *obfd)
9 kx {
9 kx struct elf_aarch64_link_hash_table *ret
9 kx = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
9 kx
9 kx if (ret->loc_hash_table)
9 kx htab_delete (ret->loc_hash_table);
9 kx if (ret->loc_hash_memory)
9 kx objalloc_free ((struct objalloc *) ret->loc_hash_memory);
9 kx
9 kx bfd_hash_table_free (&ret->stub_hash_table);
9 kx _bfd_elf_link_hash_table_free (obfd);
9 kx }
9 kx
9 kx /* Create an AArch64 elf linker hash table. */
9 kx
9 kx static struct bfd_link_hash_table *
9 kx elfNN_aarch64_link_hash_table_create (bfd *abfd)
9 kx {
9 kx struct elf_aarch64_link_hash_table *ret;
9 kx size_t amt = sizeof (struct elf_aarch64_link_hash_table);
9 kx
9 kx ret = bfd_zmalloc (amt);
9 kx if (ret == NULL)
9 kx return NULL;
9 kx
9 kx if (!_bfd_elf_link_hash_table_init
9 kx (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
9 kx sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
9 kx {
9 kx free (ret);
9 kx return NULL;
9 kx }
9 kx
9 kx ret->plt_header_size = PLT_ENTRY_SIZE;
9 kx ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
9 kx ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
9 kx ret->plt_entry = elfNN_aarch64_small_plt_entry;
9 kx ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9 kx ret->obfd = abfd;
9 kx ret->root.tlsdesc_got = (bfd_vma) - 1;
9 kx
9 kx if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
9 kx sizeof (struct elf_aarch64_stub_hash_entry)))
9 kx {
9 kx _bfd_elf_link_hash_table_free (abfd);
9 kx return NULL;
9 kx }
9 kx
9 kx ret->loc_hash_table = htab_try_create (1024,
9 kx elfNN_aarch64_local_htab_hash,
9 kx elfNN_aarch64_local_htab_eq,
9 kx NULL);
9 kx ret->loc_hash_memory = objalloc_create ();
9 kx if (!ret->loc_hash_table || !ret->loc_hash_memory)
9 kx {
9 kx elfNN_aarch64_link_hash_table_free (abfd);
9 kx return NULL;
9 kx }
9 kx ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
9 kx
9 kx return &ret->root.root;
9 kx }
9 kx
9 kx /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */
9 kx
9 kx static bool
9 kx aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
9 kx bfd_vma offset, bfd_vma value)
9 kx {
9 kx reloc_howto_type *howto;
9 kx bfd_vma place;
9 kx
9 kx howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
9 kx place = (input_section->output_section->vma + input_section->output_offset
9 kx + offset);
9 kx
9 kx r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
9 kx value, 0, false);
9 kx return _bfd_aarch64_elf_put_addend (input_bfd,
9 kx input_section->contents + offset, r_type,
9 kx howto, value) == bfd_reloc_ok;
9 kx }
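9 kx /* Usage sketch (illustrative): patching the ADRP of an adrp_branch stub
9 kx resolves the page offset of SYM_VALUE relative to the stub's own
9 kx address:
9 kx
9 kx aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
9 kx stub_offset, sym_value);
9 kx
9 kx as done in aarch64_build_one_stub below. */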
9 kx
9 kx static enum elf_aarch64_stub_type
9 kx aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
9 kx {
9 kx if (aarch64_valid_for_adrp_p (value, place))
9 kx return aarch64_stub_adrp_branch;
9 kx return aarch64_stub_long_branch;
9 kx }
9 kx
9 kx /* Determine the type of stub needed, if any, for a call. */
9 kx
9 kx static enum elf_aarch64_stub_type
9 kx aarch64_type_of_stub (asection *input_sec,
9 kx const Elf_Internal_Rela *rel,
9 kx asection *sym_sec,
9 kx unsigned char st_type,
9 kx bfd_vma destination)
9 kx {
9 kx bfd_vma location;
9 kx bfd_signed_vma branch_offset;
9 kx unsigned int r_type;
9 kx enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
9 kx
9 kx if (st_type != STT_FUNC
9 kx && (sym_sec == input_sec))
9 kx return stub_type;
9 kx
9 kx /* Determine where the call point is. */
9 kx location = (input_sec->output_offset
9 kx + input_sec->output_section->vma + rel->r_offset);
9 kx
9 kx branch_offset = (bfd_signed_vma) (destination - location);
9 kx
9 kx r_type = ELFNN_R_TYPE (rel->r_info);
9 kx
9 kx /* We don't want to redirect any old unconditional jump in this way,
9 kx only one which is being used for a sibcall, where it is
9 kx acceptable for the IP0 and IP1 registers to be clobbered. */
9 kx if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
9 kx && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
9 kx || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
9 kx {
9 kx stub_type = aarch64_stub_long_branch;
9 kx }
9 kx
9 kx return stub_type;
9 kx }
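9 kx /* For example (hypothetical addresses): a BL at location 0x400000
9 kx targeting 0x9000000 gives branch_offset = 0x8c00000, which exceeds
9 kx AARCH64_MAX_FWD_BRANCH_OFFSET (0x7fffffc), so the call is routed
9 kx through an aarch64_stub_long_branch stub. */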
9 kx
9 kx /* Build a name for an entry in the stub hash table. */
9 kx
9 kx static char *
9 kx elfNN_aarch64_stub_name (const asection *input_section,
9 kx const asection *sym_sec,
9 kx const struct elf_aarch64_link_hash_entry *hash,
9 kx const Elf_Internal_Rela *rel)
9 kx {
9 kx char *stub_name;
9 kx bfd_size_type len;
9 kx
9 kx if (hash)
9 kx {
9 kx len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
9 kx stub_name = bfd_malloc (len);
9 kx if (stub_name != NULL)
9 kx snprintf (stub_name, len, "%08x_%s+%" PRIx64,
9 kx (unsigned int) input_section->id,
9 kx hash->root.root.root.string,
9 kx (uint64_t) rel->r_addend);
9 kx }
9 kx else
9 kx {
9 kx len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
9 kx stub_name = bfd_malloc (len);
9 kx if (stub_name != NULL)
9 kx snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64,
9 kx (unsigned int) input_section->id,
9 kx (unsigned int) sym_sec->id,
9 kx (unsigned int) ELFNN_R_SYM (rel->r_info),
9 kx (uint64_t) rel->r_addend);
9 kx }
9 kx
9 kx return stub_name;
9 kx }
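9 kx /* Illustrative outputs of the two formats above (hypothetical values):
9 kx against a global "printf" with addend 0 the name might be
9 kx "0000002a_printf+0"; against local symbol 7 of the section with id
9 kx 0x1c it might be "0000002a_1c:7+0", where 0x2a is the id of the
9 kx (group's first) input section. */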
9 kx
9 kx /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section.
9 kx Function symbols that are only ever reached through the executable's
9 kx PLT slots, and whose addresses the executable never takes, are not
9 kx added to the hash table. */
9 kx
9 kx static bool
9 kx elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
9 kx {
9 kx if (h->plt.offset != (bfd_vma) -1
9 kx && !h->def_regular
9 kx && !h->pointer_equality_needed)
9 kx return false;
9 kx
9 kx return _bfd_elf_hash_symbol (h);
9 kx }
9 kx
9 kx
9 kx /* Look up an entry in the stub hash. Stub entries are cached because
9 kx creating the stub name takes a bit of time. */
9 kx
9 kx static struct elf_aarch64_stub_hash_entry *
9 kx elfNN_aarch64_get_stub_entry (const asection *input_section,
9 kx const asection *sym_sec,
9 kx struct elf_link_hash_entry *hash,
9 kx const Elf_Internal_Rela *rel,
9 kx struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx struct elf_aarch64_link_hash_entry *h =
9 kx (struct elf_aarch64_link_hash_entry *) hash;
9 kx const asection *id_sec;
9 kx
9 kx if ((input_section->flags & SEC_CODE) == 0)
9 kx return NULL;
9 kx
9 kx /* If this input section is part of a group of sections sharing one
9 kx stub section, then use the id of the first section in the group.
9 kx Stub names need to include a section id, as there may well be
9 kx more than one stub used to reach say, printf, and we need to
9 kx distinguish between them. */
9 kx id_sec = htab->stub_group[input_section->id].link_sec;
9 kx
9 kx if (h != NULL && h->stub_cache != NULL
9 kx && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
9 kx {
9 kx stub_entry = h->stub_cache;
9 kx }
9 kx else
9 kx {
9 kx char *stub_name;
9 kx
9 kx stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
9 kx if (stub_name == NULL)
9 kx return NULL;
9 kx
9 kx stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
9 kx stub_name, false, false);
9 kx if (h != NULL)
9 kx h->stub_cache = stub_entry;
9 kx
9 kx free (stub_name);
9 kx }
9 kx
9 kx return stub_entry;
9 kx }
9 kx
9 kx
9 kx /* Create a stub section. */
9 kx
9 kx static asection *
9 kx _bfd_aarch64_create_stub_section (asection *section,
9 kx struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx size_t namelen;
9 kx bfd_size_type len;
9 kx char *s_name;
9 kx
9 kx namelen = strlen (section->name);
9 kx len = namelen + sizeof (STUB_SUFFIX);
9 kx s_name = bfd_alloc (htab->stub_bfd, len);
9 kx if (s_name == NULL)
9 kx return NULL;
9 kx
9 kx memcpy (s_name, section->name, namelen);
9 kx memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
9 kx return (*htab->add_stub_section) (s_name, section);
9 kx }
9 kx
9 kx
9 kx /* Find or create the stub section used to collect stubs attached to
9 kx the specified link section. */
9 kx
9 kx static asection *
9 kx _bfd_aarch64_get_stub_for_link_section (asection *link_section,
9 kx struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx if (htab->stub_group[link_section->id].stub_sec == NULL)
9 kx htab->stub_group[link_section->id].stub_sec
9 kx = _bfd_aarch64_create_stub_section (link_section, htab);
9 kx return htab->stub_group[link_section->id].stub_sec;
9 kx }
9 kx
9 kx
9 kx /* Find or create a stub section in the stub group for an input
9 kx section. */
9 kx
9 kx static asection *
9 kx _bfd_aarch64_create_or_find_stub_sec (asection *section,
9 kx struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx asection *link_sec = htab->stub_group[section->id].link_sec;
9 kx return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
9 kx }
9 kx
9 kx
9 kx /* Add a new stub entry in the stub group associated with an input
9 kx section to the stub hash. Not all fields of the new stub entry are
9 kx initialised. */
9 kx
9 kx static struct elf_aarch64_stub_hash_entry *
9 kx _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
9 kx asection *section,
9 kx struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx asection *link_sec;
9 kx asection *stub_sec;
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx
9 kx link_sec = htab->stub_group[section->id].link_sec;
9 kx stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
9 kx
9 kx /* Enter this entry into the linker stub hash table. */
9 kx stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
9 kx true, false);
9 kx if (stub_entry == NULL)
9 kx {
9 kx /* xgettext:c-format */
9 kx _bfd_error_handler (_("%pB: cannot create stub entry %s"),
9 kx section->owner, stub_name);
9 kx return NULL;
9 kx }
9 kx
9 kx stub_entry->stub_sec = stub_sec;
9 kx stub_entry->stub_offset = 0;
9 kx stub_entry->id_sec = link_sec;
9 kx
9 kx return stub_entry;
9 kx }
9 kx
9 kx /* Add a new stub entry in the final stub section to the stub hash.
9 kx Not all fields of the new stub entry are initialised. */
9 kx
9 kx static struct elf_aarch64_stub_hash_entry *
9 kx _bfd_aarch64_add_stub_entry_after (const char *stub_name,
9 kx asection *link_section,
9 kx struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx asection *stub_sec;
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx
9 kx stub_sec = NULL;
9 kx /* Only create the actual stub if we will end up needing it. */
9 kx if (htab->fix_erratum_843419 & ERRAT_ADRP)
9 kx stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
9 kx stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
9 kx true, false);
9 kx if (stub_entry == NULL)
9 kx {
9 kx _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
9 kx return NULL;
9 kx }
9 kx
9 kx stub_entry->stub_sec = stub_sec;
9 kx stub_entry->stub_offset = 0;
9 kx stub_entry->id_sec = link_section;
9 kx
9 kx return stub_entry;
9 kx }
9 kx
9 kx
9 kx static bool
9 kx aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
9 kx void *in_arg)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx asection *stub_sec;
9 kx bfd *stub_bfd;
9 kx bfd_byte *loc;
9 kx bfd_vma sym_value;
9 kx bfd_vma veneered_insn_loc;
9 kx bfd_vma veneer_entry_loc;
9 kx bfd_signed_vma branch_offset = 0;
9 kx unsigned int template_size;
9 kx const uint32_t *template;
9 kx unsigned int i;
9 kx struct bfd_link_info *info;
9 kx
9 kx /* Massage our args to the form they really have. */
9 kx stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
9 kx
9 kx info = (struct bfd_link_info *) in_arg;
9 kx
9 kx /* Fail if the target section could not be assigned to an output
9 kx section. The user should fix their linker script. */
9 kx if (stub_entry->target_section->output_section == NULL
9 kx && info->non_contiguous_regions)
9 kx info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
9 kx "Retry without "
9 kx "--enable-non-contiguous-regions.\n"),
9 kx stub_entry->target_section);
9 kx
9 kx stub_sec = stub_entry->stub_sec;
9 kx
9 kx /* Make a note of the offset within the stubs for this entry. */
9 kx stub_entry->stub_offset = stub_sec->size;
9 kx loc = stub_sec->contents + stub_entry->stub_offset;
9 kx
9 kx stub_bfd = stub_sec->owner;
9 kx
9 kx /* This is the address of the stub destination. */
9 kx sym_value = (stub_entry->target_value
9 kx + stub_entry->target_section->output_offset
9 kx + stub_entry->target_section->output_section->vma);
9 kx
9 kx if (stub_entry->stub_type == aarch64_stub_long_branch)
9 kx {
9 kx bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
9 kx + stub_sec->output_offset);
9 kx
9 kx /* See if we can relax the stub. */
9 kx if (aarch64_valid_for_adrp_p (sym_value, place))
9 kx stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
9 kx }
9 kx
9 kx switch (stub_entry->stub_type)
9 kx {
9 kx case aarch64_stub_adrp_branch:
9 kx template = aarch64_adrp_branch_stub;
9 kx template_size = sizeof (aarch64_adrp_branch_stub);
9 kx break;
9 kx case aarch64_stub_long_branch:
9 kx template = aarch64_long_branch_stub;
9 kx template_size = sizeof (aarch64_long_branch_stub);
9 kx break;
9 kx case aarch64_stub_erratum_835769_veneer:
9 kx template = aarch64_erratum_835769_stub;
9 kx template_size = sizeof (aarch64_erratum_835769_stub);
9 kx break;
9 kx case aarch64_stub_erratum_843419_veneer:
9 kx template = aarch64_erratum_843419_stub;
9 kx template_size = sizeof (aarch64_erratum_843419_stub);
9 kx break;
9 kx default:
9 kx abort ();
9 kx }
9 kx
9 kx for (i = 0; i < (template_size / sizeof template[0]); i++)
9 kx {
9 kx bfd_putl32 (template[i], loc);
9 kx loc += 4;
9 kx }
9 kx
9 kx template_size = (template_size + 7) & ~7;
9 kx stub_sec->size += template_size;
9 kx
9 kx switch (stub_entry->stub_type)
9 kx {
9 kx case aarch64_stub_adrp_branch:
9 kx if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
9 kx stub_entry->stub_offset, sym_value))
9 kx /* The stub would not have been relaxed if the offset was out
9 kx of range. */
9 kx BFD_FAIL ();
9 kx
9 kx if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
9 kx stub_entry->stub_offset + 4, sym_value))
9 kx BFD_FAIL ();
9 kx break;
9 kx
9 kx case aarch64_stub_long_branch:
9 kx       /* The literal at offset 16 is resolved against the ADR at offset
9 kx          4 of the stub, i.e. the stored value is relative to the address
9 kx          12 bytes before the literal itself; hence sym_value + 12.  */
9 kx if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
9 kx stub_entry->stub_offset + 16, sym_value + 12))
9 kx BFD_FAIL ();
9 kx break;
9 kx
9 kx case aarch64_stub_erratum_835769_veneer:
9 kx veneered_insn_loc = stub_entry->target_section->output_section->vma
9 kx + stub_entry->target_section->output_offset
9 kx + stub_entry->target_value;
9 kx veneer_entry_loc = stub_entry->stub_sec->output_section->vma
9 kx + stub_entry->stub_sec->output_offset
9 kx + stub_entry->stub_offset;
9 kx branch_offset = veneered_insn_loc - veneer_entry_loc;
9 kx branch_offset >>= 2;
9 kx branch_offset &= 0x3ffffff;
9 kx bfd_putl32 (stub_entry->veneered_insn,
9 kx stub_sec->contents + stub_entry->stub_offset);
9 kx bfd_putl32 (template[1] | branch_offset,
9 kx stub_sec->contents + stub_entry->stub_offset + 4);
9 kx break;
9 kx
9 kx case aarch64_stub_erratum_843419_veneer:
9 kx if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
9 kx stub_entry->stub_offset + 4, sym_value + 4))
9 kx BFD_FAIL ();
9 kx break;
9 kx
9 kx default:
9 kx abort ();
9 kx }
9 kx
9 kx return true;
9 kx }
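9 kx
9 kx /* For illustration, the relocations applied above assume stub
9 kx    layouts roughly as follows (a sketch; the actual templates are
9 kx    the aarch64_*_stub arrays defined earlier in this file):
9 kx
9 kx      adrp_branch:                    long_branch:
9 kx        adrp ip0, dest                  ldr  ip0, 1f
9 kx        add  ip0, ip0, :lo12:dest       adr  ip1, #0
9 kx        br   ip0                        add  ip0, ip0, ip1
9 kx                                        br   ip0
9 kx                                      1: .xword PREL(dest) + 12
9 kx
9 kx    The long branch literal is consumed by the ADR/ADD pair, which is
9 kx    why it is resolved 12 bytes back from the literal itself.  */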
9 kx
9 kx /* As above, but don't actually build the stub. Just bump offset so
9 kx we know stub section sizes. */
9 kx
9 kx static bool
9 kx aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx int size;
9 kx
9 kx /* Massage our args to the form they really have. */
9 kx stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
9 kx htab = (struct elf_aarch64_link_hash_table *) in_arg;
9 kx
9 kx switch (stub_entry->stub_type)
9 kx {
9 kx case aarch64_stub_adrp_branch:
9 kx size = sizeof (aarch64_adrp_branch_stub);
9 kx break;
9 kx case aarch64_stub_long_branch:
9 kx size = sizeof (aarch64_long_branch_stub);
9 kx break;
9 kx case aarch64_stub_erratum_835769_veneer:
9 kx size = sizeof (aarch64_erratum_835769_stub);
9 kx break;
9 kx case aarch64_stub_erratum_843419_veneer:
9 kx {
9 kx if (htab->fix_erratum_843419 == ERRAT_ADR)
9 kx return true;
9 kx size = sizeof (aarch64_erratum_843419_stub);
9 kx }
9 kx break;
9 kx default:
9 kx abort ();
9 kx }
9 kx
9 kx size = (size + 7) & ~7;
9 kx stub_entry->stub_sec->size += size;
9 kx return true;
9 kx }
9 kx
9 kx /* External entry points for sizing and building linker stubs. */
9 kx
9 kx /* Set up various things so that we can make a list of input sections
9 kx for each output section included in the link. Returns -1 on error,
9 kx 0 when no stubs will be needed, and 1 on success. */
9 kx
9 kx int
9 kx elfNN_aarch64_setup_section_lists (bfd *output_bfd,
9 kx struct bfd_link_info *info)
9 kx {
9 kx bfd *input_bfd;
9 kx unsigned int bfd_count;
9 kx unsigned int top_id, top_index;
9 kx asection *section;
9 kx asection **input_list, **list;
9 kx size_t amt;
9 kx struct elf_aarch64_link_hash_table *htab =
9 kx elf_aarch64_hash_table (info);
9 kx
9 kx if (!is_elf_hash_table (&htab->root.root))
9 kx return 0;
9 kx
9 kx /* Count the number of input BFDs and find the top input section id. */
9 kx for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
9 kx input_bfd != NULL; input_bfd = input_bfd->link.next)
9 kx {
9 kx bfd_count += 1;
9 kx for (section = input_bfd->sections;
9 kx section != NULL; section = section->next)
9 kx {
9 kx if (top_id < section->id)
9 kx top_id = section->id;
9 kx }
9 kx }
9 kx htab->bfd_count = bfd_count;
9 kx
9 kx amt = sizeof (struct map_stub) * (top_id + 1);
9 kx htab->stub_group = bfd_zmalloc (amt);
9 kx if (htab->stub_group == NULL)
9 kx return -1;
9 kx
9 kx /* We can't use output_bfd->section_count here to find the top output
9 kx section index as some sections may have been removed, and
9 kx _bfd_strip_section_from_output doesn't renumber the indices. */
9 kx for (section = output_bfd->sections, top_index = 0;
9 kx section != NULL; section = section->next)
9 kx {
9 kx if (top_index < section->index)
9 kx top_index = section->index;
9 kx }
9 kx
9 kx htab->top_index = top_index;
9 kx amt = sizeof (asection *) * (top_index + 1);
9 kx input_list = bfd_malloc (amt);
9 kx htab->input_list = input_list;
9 kx if (input_list == NULL)
9 kx return -1;
9 kx
9 kx /* For sections we aren't interested in, mark their entries with a
9 kx value we can check later. */
9 kx list = input_list + top_index;
9 kx do
9 kx *list = bfd_abs_section_ptr;
9 kx while (list-- != input_list);
9 kx
9 kx for (section = output_bfd->sections;
9 kx section != NULL; section = section->next)
9 kx {
9 kx if ((section->flags & SEC_CODE) != 0)
9 kx input_list[section->index] = NULL;
9 kx }
9 kx
9 kx return 1;
9 kx }
9 kx
9 kx /* Used by elfNN_aarch64_next_input_section and group_sections. */
9 kx #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
9 kx
9 kx /* The linker repeatedly calls this function for each input section,
9 kx in the order that input sections are linked into output sections.
9 kx Build lists of input sections to determine groupings between which
9 kx we may insert linker stubs. */
9 kx
9 kx void
9 kx elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab =
9 kx elf_aarch64_hash_table (info);
9 kx
9 kx if (isec->output_section->index <= htab->top_index)
9 kx {
9 kx asection **list = htab->input_list + isec->output_section->index;
9 kx
9 kx if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
9 kx {
9 kx /* Steal the link_sec pointer for our list. */
9 kx /* This happens to make the list in reverse order,
9 kx which is what we want. */
9 kx PREV_SEC (isec) = *list;
9 kx *list = isec;
9 kx }
9 kx }
9 kx }
9 kx
9 kx /* See whether we can group stub sections together. Grouping stub
9 kx sections may result in fewer stubs. More importantly, we need to
9 kx put all .init* and .fini* stubs at the beginning of the .init or
9 kx .fini output sections respectively, because glibc splits the
9 kx _init and _fini functions into multiple parts. Putting a stub in
9 kx the middle of a function is not a good idea. */
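9 kx
9 kx /* As a worked example (illustrative numbers): with a stub_group_size
9 kx    of 0x1000 and input sections A (offset 0, size 0x800), B (0x800,
9 kx    size 0x600) and C (0xe00, size 0x600), A and B end within 0x1000
9 kx    of A's start and share a stub section, while C ends beyond that
9 kx    limit and starts a new group.  */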
9 kx
9 kx static void
9 kx group_sections (struct elf_aarch64_link_hash_table *htab,
9 kx bfd_size_type stub_group_size,
9 kx bool stubs_always_after_branch)
9 kx {
9 kx asection **list = htab->input_list;
9 kx
9 kx do
9 kx {
9 kx asection *tail = *list;
9 kx asection *head;
9 kx
9 kx if (tail == bfd_abs_section_ptr)
9 kx continue;
9 kx
9 kx /* Reverse the list: we must avoid placing stubs at the
9 kx beginning of the section because the beginning of the text
9 kx section may be required for an interrupt vector in bare metal
9 kx code. */
9 kx #define NEXT_SEC PREV_SEC
9 kx head = NULL;
9 kx while (tail != NULL)
9 kx {
9 kx /* Pop from tail. */
9 kx asection *item = tail;
9 kx tail = PREV_SEC (item);
9 kx
9 kx /* Push on head. */
9 kx NEXT_SEC (item) = head;
9 kx head = item;
9 kx }
9 kx
9 kx while (head != NULL)
9 kx {
9 kx asection *curr;
9 kx asection *next;
9 kx bfd_vma stub_group_start = head->output_offset;
9 kx bfd_vma end_of_next;
9 kx
9 kx curr = head;
9 kx while (NEXT_SEC (curr) != NULL)
9 kx {
9 kx next = NEXT_SEC (curr);
9 kx end_of_next = next->output_offset + next->size;
9 kx if (end_of_next - stub_group_start >= stub_group_size)
9 kx /* End of NEXT is too far from start, so stop. */
9 kx break;
9 kx /* Add NEXT to the group. */
9 kx curr = next;
9 kx }
9 kx
9 kx /* OK, the size from the start to the start of CURR is less
9 kx than stub_group_size and thus can be handled by one stub
9 kx section. (Or the head section is itself larger than
9 kx stub_group_size, in which case we may be toast.)
9 kx We should really be keeping track of the total size of
9 kx stubs added here, as stubs contribute to the final output
9 kx section size. */
9 kx do
9 kx {
9 kx next = NEXT_SEC (head);
9 kx /* Set up this stub group. */
9 kx htab->stub_group[head->id].link_sec = curr;
9 kx }
9 kx while (head != curr && (head = next) != NULL);
9 kx
9 kx /* But wait, there's more! Input sections up to stub_group_size
9 kx bytes after the stub section can be handled by it too. */
9 kx if (!stubs_always_after_branch)
9 kx {
9 kx stub_group_start = curr->output_offset + curr->size;
9 kx
9 kx while (next != NULL)
9 kx {
9 kx end_of_next = next->output_offset + next->size;
9 kx if (end_of_next - stub_group_start >= stub_group_size)
9 kx /* End of NEXT is too far from stubs, so stop. */
9 kx break;
9 kx /* Add NEXT to the stub group. */
9 kx head = next;
9 kx next = NEXT_SEC (head);
9 kx htab->stub_group[head->id].link_sec = curr;
9 kx }
9 kx }
9 kx head = next;
9 kx }
9 kx }
9 kx while (list++ != htab->input_list + htab->top_index);
9 kx
9 kx free (htab->input_list);
9 kx }
9 kx
9 kx #undef PREV_SEC
9 kx #undef NEXT_SEC
9 kx
9 kx #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
9 kx
9 kx #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
9 kx #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
9 kx #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
9 kx #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
9 kx #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
9 kx #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
9 kx
9 kx #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
9 kx #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
9 kx #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
9 kx #define AARCH64_ZR 0x1f
9 kx
9 kx /* All ld/st ops.  See C4-182 of the ARM ARM.  The encoding space for
9 kx    LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.  */
9 kx
9 kx #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
9 kx #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
9 kx #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
9 kx #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
9 kx #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
9 kx #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
9 kx #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
9 kx #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
9 kx #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
9 kx #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
9 kx #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
9 kx #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
9 kx #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
9 kx #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
9 kx #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
9 kx #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
9 kx #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
9 kx #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
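9 kx
9 kx /* For example, 0xf9400260 (ldr x0, [x19]) matches AARCH64_LDST and
9 kx    AARCH64_LDST_UIMM above; AARCH64_LD reports a load since bit 22
9 kx    is set, and AARCH64_RT / AARCH64_RN extract Rt = 0 and Rn = 19.  */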
9 kx
9 kx /* Classify INSN if it is indeed a load/store instruction.
9 kx
9 kx    Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
9 kx
9 kx For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
9 kx is set equal to RT.
9 kx
9 kx For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
9 kx
9 kx static bool
9 kx aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
9 kx bool *pair, bool *load)
9 kx {
9 kx uint32_t opcode;
9 kx unsigned int r;
9 kx uint32_t opc = 0;
9 kx uint32_t v = 0;
9 kx uint32_t opc_v = 0;
9 kx
9 kx /* Bail out quickly if INSN doesn't fall into the load-store
9 kx encoding space. */
9 kx if (!AARCH64_LDST (insn))
9 kx return false;
9 kx
9 kx *pair = false;
9 kx *load = false;
9 kx if (AARCH64_LDST_EX (insn))
9 kx {
9 kx *rt = AARCH64_RT (insn);
9 kx *rt2 = *rt;
9 kx if (AARCH64_BIT (insn, 21) == 1)
9 kx {
9 kx *pair = true;
9 kx *rt2 = AARCH64_RT2 (insn);
9 kx }
9 kx *load = AARCH64_LD (insn);
9 kx return true;
9 kx }
9 kx else if (AARCH64_LDST_NAP (insn)
9 kx || AARCH64_LDSTP_PI (insn)
9 kx || AARCH64_LDSTP_O (insn)
9 kx || AARCH64_LDSTP_PRE (insn))
9 kx {
9 kx *pair = true;
9 kx *rt = AARCH64_RT (insn);
9 kx *rt2 = AARCH64_RT2 (insn);
9 kx *load = AARCH64_LD (insn);
9 kx return true;
9 kx }
9 kx else if (AARCH64_LDST_PCREL (insn)
9 kx || AARCH64_LDST_UI (insn)
9 kx || AARCH64_LDST_PIIMM (insn)
9 kx || AARCH64_LDST_U (insn)
9 kx || AARCH64_LDST_PREIMM (insn)
9 kx || AARCH64_LDST_RO (insn)
9 kx || AARCH64_LDST_UIMM (insn))
9 kx {
9 kx       *rt = AARCH64_RT (insn);
9 kx       *rt2 = *rt;
9 kx       if (AARCH64_LDST_PCREL (insn))
9 kx         /* PC-relative (literal) forms are loads; the opc and v bits
9 kx            tested below occupy different fields in these encodings,
9 kx            so they must not be consulted here.  */
9 kx         *load = true;
9 kx       else
9 kx         {
9 kx           opc = AARCH64_BITS (insn, 22, 2);
9 kx           v = AARCH64_BIT (insn, 26);
9 kx           opc_v = opc | (v << 2);
9 kx           *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
9 kx                    || opc_v == 5 || opc_v == 7);
9 kx         }
9 kx       return true;
9 kx }
9 kx else if (AARCH64_LDST_SIMD_M (insn)
9 kx || AARCH64_LDST_SIMD_M_PI (insn))
9 kx {
9 kx *rt = AARCH64_RT (insn);
9 kx *load = AARCH64_BIT (insn, 22);
9 kx opcode = (insn >> 12) & 0xf;
9 kx switch (opcode)
9 kx {
9 kx case 0:
9 kx case 2:
9 kx *rt2 = *rt + 3;
9 kx break;
9 kx
9 kx case 4:
9 kx case 6:
9 kx *rt2 = *rt + 2;
9 kx break;
9 kx
9 kx case 7:
9 kx *rt2 = *rt;
9 kx break;
9 kx
9 kx case 8:
9 kx case 10:
9 kx *rt2 = *rt + 1;
9 kx break;
9 kx
9 kx default:
9 kx return false;
9 kx }
9 kx return true;
9 kx }
9 kx else if (AARCH64_LDST_SIMD_S (insn)
9 kx || AARCH64_LDST_SIMD_S_PI (insn))
9 kx {
9 kx *rt = AARCH64_RT (insn);
9 kx r = (insn >> 21) & 1;
9 kx *load = AARCH64_BIT (insn, 22);
9 kx opcode = (insn >> 13) & 0x7;
9 kx switch (opcode)
9 kx {
9 kx case 0:
9 kx case 2:
9 kx case 4:
9 kx *rt2 = *rt + r;
9 kx break;
9 kx
9 kx case 1:
9 kx case 3:
9 kx case 5:
9 kx *rt2 = *rt + (r == 0 ? 2 : 3);
9 kx break;
9 kx
9 kx case 6:
9 kx *rt2 = *rt + r;
9 kx break;
9 kx
9 kx case 7:
9 kx *rt2 = *rt + (r == 0 ? 2 : 3);
9 kx break;
9 kx
9 kx default:
9 kx return false;
9 kx }
9 kx return true;
9 kx }
9 kx
9 kx return false;
9 kx }
9 kx
9 kx /* Return TRUE if INSN is multiply-accumulate. */
9 kx
9 kx static bool
9 kx aarch64_mlxl_p (uint32_t insn)
9 kx {
9 kx uint32_t op31 = AARCH64_OP31 (insn);
9 kx
9 kx if (AARCH64_MAC (insn)
9 kx && (op31 == 0 || op31 == 1 || op31 == 5)
9 kx       /* Exclude MUL instructions, which are encoded as a
9 kx 	 multiply-accumulate with RA = XZR.  */
9 kx && AARCH64_RA (insn) != AARCH64_ZR)
9 kx return true;
9 kx
9 kx return false;
9 kx }
9 kx
9 kx /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
9 kx    a 64-bit multiply-accumulate instruction can generate an incorrect
9 kx    result.  The affected sequences are complex and hard to pin down
9 kx    statically, since they may span branches, but every case ends with a
9 kx    memory (load, store, or prefetch) instruction followed immediately by
9 kx    the multiply-accumulate operation.  We employ a linker patching
9 kx    technique: the potentially affected multiply-accumulate instruction is
9 kx    moved into a patch region and the original instruction is replaced
9 kx    with a branch to the patch.  This function checks whether INSN_1 is
9 kx    the memory operation and INSN_2 the multiply-accumulate operation.
9 kx    Return TRUE if an erratum sequence is found, FALSE if INSN_1 and
9 kx    INSN_2 are safe.  */
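9 kx
9 kx /* An illustrative sequence (not taken from the erratum note):
9 kx
9 kx      ldr  x2, [x1]          <- INSN_1, integer load, Rt = x2
9 kx      madd x0, x3, x4, x5    <- INSN_2, 64-bit multiply-accumulate
9 kx
9 kx    INSN_2 reads none of INSN_1's destination registers, so there is
9 kx    no RAW dependency and the pair is flagged as a potential erratum
9 kx    sequence.  Had INSN_2 used x2 as a source, the dependency would
9 kx    make the sequence safe.  */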
9 kx
9 kx static bool
9 kx aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
9 kx {
9 kx uint32_t rt;
9 kx uint32_t rt2;
9 kx uint32_t rn;
9 kx uint32_t rm;
9 kx uint32_t ra;
9 kx bool pair;
9 kx bool load;
9 kx
9 kx if (aarch64_mlxl_p (insn_2)
9 kx && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
9 kx {
9 kx /* Any SIMD memory op is independent of the subsequent MLA
9 kx by definition of the erratum. */
9 kx if (AARCH64_BIT (insn_1, 26))
9 kx return true;
9 kx
9 kx /* If not SIMD, check for integer memory ops and MLA relationship. */
9 kx rn = AARCH64_RN (insn_2);
9 kx ra = AARCH64_RA (insn_2);
9 kx rm = AARCH64_RM (insn_2);
9 kx
9 kx       /* If this is a load and there's a true (RAW) dependency, we are
9 kx          safe and this is not an erratum sequence.  */
9 kx       if (load
9 kx           && (rt == rn || rt == rm || rt == ra
9 kx               || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
9 kx         return false;
9 kx
9 kx /* We conservatively put out stubs for all other cases (including
9 kx writebacks). */
9 kx return true;
9 kx }
9 kx
9 kx return false;
9 kx }
9 kx
9 kx /* Used to order a list of mapping symbols by address. */
9 kx
9 kx static int
9 kx elf_aarch64_compare_mapping (const void *a, const void *b)
9 kx {
9 kx const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
9 kx const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
9 kx
9 kx if (amap->vma > bmap->vma)
9 kx return 1;
9 kx else if (amap->vma < bmap->vma)
9 kx return -1;
9 kx else if (amap->type > bmap->type)
9 kx /* Ensure results do not depend on the host qsort for objects with
9 kx multiple mapping symbols at the same address by sorting on type
9 kx after vma. */
9 kx return 1;
9 kx else if (amap->type < bmap->type)
9 kx return -1;
9 kx else
9 kx return 0;
9 kx }
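9 kx
9 kx /* Mapping symbols follow the AArch64 ELF convention: "$x" marks the
9 kx    start of a code span and "$d" the start of a data span such as a
9 kx    literal pool, so a section map might read $x@0x0, $d@0x40, $x@0x48.
9 kx    Sorting by vma (then type) lets the erratum scanners below walk
9 kx    code spans and skip data.  */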
9 kx
9 kx
9 kx static char *
9 kx _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
9 kx {
9 kx char *stub_name = (char *) bfd_malloc
9 kx (strlen ("__erratum_835769_veneer_") + 16);
9 kx if (stub_name != NULL)
9 kx sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
9 kx return stub_name;
9 kx }
9 kx
9 kx /* Scan for Cortex-A53 erratum 835769 sequences.
9 kx
9 kx    Return FALSE on abnormal termination, TRUE otherwise.  */
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
9 kx struct bfd_link_info *info,
9 kx unsigned int *num_fixes_p)
9 kx {
9 kx asection *section;
9 kx struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
9 kx unsigned int num_fixes = *num_fixes_p;
9 kx
9 kx if (htab == NULL)
9 kx return true;
9 kx
9 kx for (section = input_bfd->sections;
9 kx section != NULL;
9 kx section = section->next)
9 kx {
9 kx bfd_byte *contents = NULL;
9 kx struct _aarch64_elf_section_data *sec_data;
9 kx unsigned int span;
9 kx
9 kx if (elf_section_type (section) != SHT_PROGBITS
9 kx || (elf_section_flags (section) & SHF_EXECINSTR) == 0
9 kx || (section->flags & SEC_EXCLUDE) != 0
9 kx || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
9 kx || (section->output_section == bfd_abs_section_ptr))
9 kx continue;
9 kx
9 kx if (elf_section_data (section)->this_hdr.contents != NULL)
9 kx contents = elf_section_data (section)->this_hdr.contents;
9 kx else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
9 kx return false;
9 kx
9 kx sec_data = elf_aarch64_section_data (section);
9 kx
9 kx if (sec_data->mapcount)
9 kx qsort (sec_data->map, sec_data->mapcount,
9 kx sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
9 kx
9 kx for (span = 0; span < sec_data->mapcount; span++)
9 kx {
9 kx unsigned int span_start = sec_data->map[span].vma;
9 kx unsigned int span_end = ((span == sec_data->mapcount - 1)
9 kx ? sec_data->map[0].vma + section->size
9 kx : sec_data->map[span + 1].vma);
9 kx unsigned int i;
9 kx char span_type = sec_data->map[span].type;
9 kx
9 kx if (span_type == 'd')
9 kx continue;
9 kx
9 kx for (i = span_start; i + 4 < span_end; i += 4)
9 kx {
9 kx uint32_t insn_1 = bfd_getl32 (contents + i);
9 kx uint32_t insn_2 = bfd_getl32 (contents + i + 4);
9 kx
9 kx if (aarch64_erratum_sequence (insn_1, insn_2))
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
9 kx if (! stub_name)
9 kx return false;
9 kx
9 kx stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
9 kx section,
9 kx htab);
9 kx if (! stub_entry)
9 kx return false;
9 kx
9 kx stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
9 kx stub_entry->target_section = section;
9 kx stub_entry->target_value = i + 4;
9 kx stub_entry->veneered_insn = insn_2;
9 kx stub_entry->output_name = stub_name;
9 kx num_fixes++;
9 kx }
9 kx }
9 kx }
9 kx if (elf_section_data (section)->this_hdr.contents == NULL)
9 kx free (contents);
9 kx }
9 kx
9 kx *num_fixes_p = num_fixes;
9 kx
9 kx return true;
9 kx }
9 kx
9 kx
9 kx /* Test if instruction INSN is ADRP. */
9 kx
9 kx static bool
9 kx _bfd_aarch64_adrp_p (uint32_t insn)
9 kx {
9 kx return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP);
9 kx }
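9 kx
9 kx /* ADRP has a fixed encoding of 1 in bit 31 and 10000 in bits 28-24
9 kx    (bits 30-29 hold immlo), so e.g. 0x90000010 is "adrp x16, <page>".
9 kx    AARCH64_ADRP_OP_MASK and AARCH64_ADRP_OP, defined elsewhere in
9 kx    this file, are assumed to encode exactly this test.  */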
9 kx
9 kx
9 kx /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
9 kx uint32_t insn_3)
9 kx {
9 kx uint32_t rt;
9 kx uint32_t rt2;
9 kx bool pair;
9 kx bool load;
9 kx
9 kx return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
9 kx 	  && (!pair || !load)
9 kx && AARCH64_LDST_UIMM (insn_3)
9 kx && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
9 kx }
9 kx
9 kx
9 kx /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
9 kx
9 kx Return TRUE if section CONTENTS at offset I contains one of the
9 kx erratum 843419 sequences, otherwise return FALSE. If a sequence is
9 kx    seen, set P_VENEER_I to the offset of the final LOAD/STORE
9 kx    instruction in the sequence.  */
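9 kx
9 kx /* For illustration, the first sequence form looks like this (an
9 kx    example, not quoted from the erratum note):
9 kx
9 kx      0xffc:  adrp x0, sym               <- in the last 8 bytes of a page
9 kx      0x1000: stp  x1, x2, [x3]          <- a non-load-pair memory op
9 kx      0x1004: ldr  x4, [x0, #:lo12:sym]  <- loads via the ADRP's Rd
9 kx
9 kx    The second form is the same except one unrelated instruction sits
9 kx    between the memory op and the final load/store.  */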
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
9 kx bfd_vma i, bfd_vma span_end,
9 kx bfd_vma *p_veneer_i)
9 kx {
9 kx uint32_t insn_1 = bfd_getl32 (contents + i);
9 kx
9 kx if (!_bfd_aarch64_adrp_p (insn_1))
9 kx return false;
9 kx
9 kx if (span_end < i + 12)
9 kx return false;
9 kx
9 kx uint32_t insn_2 = bfd_getl32 (contents + i + 4);
9 kx uint32_t insn_3 = bfd_getl32 (contents + i + 8);
9 kx
9 kx if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
9 kx return false;
9 kx
9 kx if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
9 kx {
9 kx *p_veneer_i = i + 8;
9 kx return true;
9 kx }
9 kx
9 kx if (span_end < i + 16)
9 kx return false;
9 kx
9 kx uint32_t insn_4 = bfd_getl32 (contents + i + 12);
9 kx
9 kx if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
9 kx {
9 kx *p_veneer_i = i + 12;
9 kx return true;
9 kx }
9 kx
9 kx return false;
9 kx }
9 kx
9 kx
9 kx /* Resize all stub sections. */
9 kx
9 kx static void
9 kx _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
9 kx {
9 kx asection *section;
9 kx
9 kx /* OK, we've added some stubs. Find out the new size of the
9 kx stub sections. */
9 kx for (section = htab->stub_bfd->sections;
9 kx section != NULL; section = section->next)
9 kx {
9 kx /* Ignore non-stub sections. */
9 kx if (!strstr (section->name, STUB_SUFFIX))
9 kx continue;
9 kx section->size = 0;
9 kx }
9 kx
9 kx bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
9 kx
9 kx for (section = htab->stub_bfd->sections;
9 kx section != NULL; section = section->next)
9 kx {
9 kx if (!strstr (section->name, STUB_SUFFIX))
9 kx continue;
9 kx
9 kx /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
9 kx as long branch stubs contain a 64-bit address. */
9 kx if (section->size)
9 kx section->size += 8;
9 kx
9 kx /* Ensure all stub sections have a size which is a multiple of
9 kx 4096. This is important in order to ensure that the insertion
9 kx of stub sections does not in itself move existing code around
9 kx in such a way that new errata sequences are created. We only do this
9 kx when the ADRP workaround is enabled. If only the ADR workaround is
9 kx enabled then the stubs workaround won't ever be used. */
9 kx if (htab->fix_erratum_843419 & ERRAT_ADRP)
9 kx if (section->size)
9 kx section->size = BFD_ALIGN (section->size, 0x1000);
9 kx }
9 kx }
9 kx
9 kx /* Construct an erratum 843419 workaround stub name. */
9 kx
9 kx static char *
9 kx _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
9 kx bfd_vma offset)
9 kx {
9 kx const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
9 kx char *stub_name = bfd_malloc (len);
9 kx
9 kx if (stub_name != NULL)
9 kx snprintf (stub_name, len, "e843419@%04x_%08x_%" PRIx64,
9 kx input_section->owner->id,
9 kx input_section->id,
9 kx (uint64_t) offset);
9 kx return stub_name;
9 kx }
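9 kx
9 kx /* For instance, a fixup at offset 0x1c8 of section id 0x2a in the
9 kx    bfd with id 3 would be named "e843419@0003_0000002a_1c8" (an
9 kx    illustrative name, not taken from a real link).  */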
9 kx
9 kx /* Build a stub_entry structure describing an 843419 fixup.
9 kx
9 kx    The stub_entry constructed is populated with the bit pattern INSN
9 kx    of the instruction located at LDST_OFFSET within input SECTION,
9 kx    with ADRP_OFFSET recording the position of the offending ADRP.
9 kx
9 kx    Returns TRUE on success.  */
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
9 kx bfd_vma adrp_offset,
9 kx bfd_vma ldst_offset,
9 kx asection *section,
9 kx struct bfd_link_info *info)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
9 kx char *stub_name;
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx
9 kx stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
9 kx if (stub_name == NULL)
9 kx return false;
9 kx stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
9 kx false, false);
9 kx if (stub_entry)
9 kx {
9 kx free (stub_name);
9 kx return true;
9 kx }
9 kx
9 kx /* We always place an 843419 workaround veneer in the stub section
9 kx attached to the input section in which an erratum sequence has
9 kx been found. This ensures that later in the link process (in
9 kx elfNN_aarch64_write_section) when we copy the veneered
9 kx instruction from the input section into the stub section the
9 kx copied instruction will have had any relocations applied to it.
9 kx If we placed workaround veneers in any other stub section then we
9 kx could not assume that all relocations have been processed on the
9 kx corresponding input section at the point we output the stub
9 kx section. */
9 kx
9 kx stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
9 kx if (stub_entry == NULL)
9 kx {
9 kx free (stub_name);
9 kx return false;
9 kx }
9 kx
9 kx stub_entry->adrp_offset = adrp_offset;
9 kx stub_entry->target_value = ldst_offset;
9 kx stub_entry->target_section = section;
9 kx stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
9 kx stub_entry->veneered_insn = insn;
9 kx stub_entry->output_name = stub_name;
9 kx
9 kx return true;
9 kx }
9 kx
9 kx
9 kx /* Scan an input section looking for the signature of erratum 843419.
9 kx
9 kx Scans input SECTION in INPUT_BFD looking for erratum 843419
9 kx signatures, for each signature found a stub_entry is created
9 kx describing the location of the erratum for subsequent fixup.
9 kx
9 kx Return TRUE on successful scan, FALSE on failure to scan.
9 kx */
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
9 kx struct bfd_link_info *info)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
9 kx
9 kx if (htab == NULL)
9 kx return true;
9 kx
9 kx if (elf_section_type (section) != SHT_PROGBITS
9 kx || (elf_section_flags (section) & SHF_EXECINSTR) == 0
9 kx || (section->flags & SEC_EXCLUDE) != 0
9 kx || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
9 kx || (section->output_section == bfd_abs_section_ptr))
9 kx return true;
9 kx
9 kx do
9 kx {
9 kx bfd_byte *contents = NULL;
9 kx struct _aarch64_elf_section_data *sec_data;
9 kx unsigned int span;
9 kx
9 kx if (elf_section_data (section)->this_hdr.contents != NULL)
9 kx contents = elf_section_data (section)->this_hdr.contents;
9 kx else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
9 kx return false;
9 kx
9 kx sec_data = elf_aarch64_section_data (section);
9 kx
9 kx if (sec_data->mapcount)
9 kx qsort (sec_data->map, sec_data->mapcount,
9 kx sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
9 kx
9 kx for (span = 0; span < sec_data->mapcount; span++)
9 kx {
9 kx unsigned int span_start = sec_data->map[span].vma;
9 kx unsigned int span_end = ((span == sec_data->mapcount - 1)
9 kx ? sec_data->map[0].vma + section->size
9 kx : sec_data->map[span + 1].vma);
9 kx unsigned int i;
9 kx char span_type = sec_data->map[span].type;
9 kx
9 kx if (span_type == 'd')
9 kx continue;
9 kx
9 kx for (i = span_start; i + 8 < span_end; i += 4)
9 kx {
9 kx bfd_vma vma = (section->output_section->vma
9 kx + section->output_offset
9 kx + i);
9 kx bfd_vma veneer_i;
9 kx
9 kx if (_bfd_aarch64_erratum_843419_p
9 kx (contents, vma, i, span_end, &veneer_i))
9 kx {
9 kx uint32_t insn = bfd_getl32 (contents + veneer_i);
9 kx
9 kx if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
9 kx section, info))
9 kx return false;
9 kx }
9 kx }
9 kx }
9 kx
9 kx if (elf_section_data (section)->this_hdr.contents == NULL)
9 kx free (contents);
9 kx }
9 kx while (0);
9 kx
9 kx return true;
9 kx }
9 kx
9 kx
9 kx /* Determine and set the size of the stub section for a final link.
9 kx
9 kx The basic idea here is to examine all the relocations looking for
9 kx PC-relative calls to a target that is unreachable with a "bl"
9 kx instruction. */
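9 kx
9 kx /* A B/BL instruction carries a 26-bit word offset, reaching +/-128MB
9 kx    from the branch; a "bl foo" whose destination lands, say, 200MB
9 kx    away after layout cannot be encoded directly and must be
9 kx    redirected through a stub placed within range.  */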
9 kx
9 kx bool
9 kx elfNN_aarch64_size_stubs (bfd *output_bfd,
9 kx bfd *stub_bfd,
9 kx struct bfd_link_info *info,
9 kx bfd_signed_vma group_size,
9 kx asection * (*add_stub_section) (const char *,
9 kx asection *),
9 kx void (*layout_sections_again) (void))
9 kx {
9 kx bfd_size_type stub_group_size;
9 kx bool stubs_always_before_branch;
9 kx bool stub_changed = false;
9 kx struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
9 kx unsigned int num_erratum_835769_fixes = 0;
9 kx
9 kx /* Propagate mach to stub bfd, because it may not have been
9 kx finalized when we created stub_bfd. */
9 kx bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
9 kx bfd_get_mach (output_bfd));
9 kx
9 kx /* Stash our params away. */
9 kx htab->stub_bfd = stub_bfd;
9 kx htab->add_stub_section = add_stub_section;
9 kx htab->layout_sections_again = layout_sections_again;
9 kx stubs_always_before_branch = group_size < 0;
9 kx if (group_size < 0)
9 kx stub_group_size = -group_size;
9 kx else
9 kx stub_group_size = group_size;
9 kx
9 kx if (stub_group_size == 1)
9 kx {
9 kx /* Default values. */
9 kx /* AArch64 branch range is +-128MB. The value used is 1MB less. */
9 kx stub_group_size = 127 * 1024 * 1024;
9 kx }
9 kx
9 kx group_sections (htab, stub_group_size, stubs_always_before_branch);
9 kx
9 kx (*htab->layout_sections_again) ();
9 kx
9 kx if (htab->fix_erratum_835769)
9 kx {
9 kx bfd *input_bfd;
9 kx
9 kx for (input_bfd = info->input_bfds;
9 kx input_bfd != NULL; input_bfd = input_bfd->link.next)
9 kx {
9 kx if (!is_aarch64_elf (input_bfd)
9 kx || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
9 kx continue;
9 kx
9 kx if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
9 kx &num_erratum_835769_fixes))
9 kx return false;
9 kx }
9 kx
9 kx _bfd_aarch64_resize_stubs (htab);
9 kx (*htab->layout_sections_again) ();
9 kx }
9 kx
9 kx if (htab->fix_erratum_843419 != ERRAT_NONE)
9 kx {
9 kx bfd *input_bfd;
9 kx
9 kx for (input_bfd = info->input_bfds;
9 kx input_bfd != NULL;
9 kx input_bfd = input_bfd->link.next)
9 kx {
9 kx asection *section;
9 kx
9 kx if (!is_aarch64_elf (input_bfd)
9 kx || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
9 kx continue;
9 kx
9 kx for (section = input_bfd->sections;
9 kx section != NULL;
9 kx section = section->next)
9 kx if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
9 kx return false;
9 kx }
9 kx
9 kx _bfd_aarch64_resize_stubs (htab);
9 kx (*htab->layout_sections_again) ();
9 kx }
9 kx
9 kx while (1)
9 kx {
9 kx bfd *input_bfd;
9 kx
9 kx for (input_bfd = info->input_bfds;
9 kx input_bfd != NULL; input_bfd = input_bfd->link.next)
9 kx {
9 kx Elf_Internal_Shdr *symtab_hdr;
9 kx asection *section;
9 kx Elf_Internal_Sym *local_syms = NULL;
9 kx
9 kx if (!is_aarch64_elf (input_bfd)
9 kx || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
9 kx continue;
9 kx
9 kx /* We'll need the symbol table in a second. */
9 kx symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
9 kx if (symtab_hdr->sh_info == 0)
9 kx continue;
9 kx
9 kx /* Walk over each section attached to the input bfd. */
9 kx for (section = input_bfd->sections;
9 kx section != NULL; section = section->next)
9 kx {
9 kx Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
9 kx
9 kx /* If there aren't any relocs, then there's nothing more
9 kx to do. */
9 kx if ((section->flags & SEC_RELOC) == 0
9 kx || section->reloc_count == 0
9 kx || (section->flags & SEC_CODE) == 0)
9 kx continue;
9 kx
9 kx /* If this section is a link-once section that will be
9 kx discarded, then don't create any stubs. */
9 kx if (section->output_section == NULL
9 kx || section->output_section->owner != output_bfd)
9 kx continue;
9 kx
9 kx /* Get the relocs. */
9 kx internal_relocs
9 kx = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
9 kx NULL, info->keep_memory);
9 kx if (internal_relocs == NULL)
9 kx goto error_ret_free_local;
9 kx
9 kx /* Now examine each relocation. */
9 kx irela = internal_relocs;
9 kx irelaend = irela + section->reloc_count;
9 kx for (; irela < irelaend; irela++)
9 kx {
9 kx unsigned int r_type, r_indx;
9 kx enum elf_aarch64_stub_type stub_type;
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx asection *sym_sec;
9 kx bfd_vma sym_value;
9 kx bfd_vma destination;
9 kx struct elf_aarch64_link_hash_entry *hash;
9 kx const char *sym_name;
9 kx char *stub_name;
9 kx const asection *id_sec;
9 kx unsigned char st_type;
9 kx bfd_size_type len;
9 kx
9 kx r_type = ELFNN_R_TYPE (irela->r_info);
9 kx r_indx = ELFNN_R_SYM (irela->r_info);
9 kx
9 kx if (r_type >= (unsigned int) R_AARCH64_end)
9 kx {
9 kx bfd_set_error (bfd_error_bad_value);
9 kx error_ret_free_internal:
9 kx if (elf_section_data (section)->relocs == NULL)
9 kx free (internal_relocs);
9 kx goto error_ret_free_local;
9 kx }
9 kx
9 kx /* Only look for stubs on unconditional branch and
9 kx branch and link instructions. */
9 kx if (r_type != (unsigned int) AARCH64_R (CALL26)
9 kx && r_type != (unsigned int) AARCH64_R (JUMP26))
9 kx continue;
9 kx
9 kx /* Now determine the call target, its name, value,
9 kx section. */
9 kx sym_sec = NULL;
9 kx sym_value = 0;
9 kx destination = 0;
9 kx hash = NULL;
9 kx sym_name = NULL;
9 kx if (r_indx < symtab_hdr->sh_info)
9 kx {
9 kx /* It's a local symbol. */
9 kx Elf_Internal_Sym *sym;
9 kx Elf_Internal_Shdr *hdr;
9 kx
9 kx if (local_syms == NULL)
9 kx {
9 kx local_syms
9 kx = (Elf_Internal_Sym *) symtab_hdr->contents;
9 kx if (local_syms == NULL)
9 kx local_syms
9 kx = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
9 kx symtab_hdr->sh_info, 0,
9 kx NULL, NULL, NULL);
9 kx if (local_syms == NULL)
9 kx goto error_ret_free_internal;
9 kx }
9 kx
9 kx sym = local_syms + r_indx;
9 kx hdr = elf_elfsections (input_bfd)[sym->st_shndx];
9 kx sym_sec = hdr->bfd_section;
9 kx if (!sym_sec)
9 kx /* This is an undefined symbol. It can never
9 kx be resolved. */
9 kx continue;
9 kx
9 kx if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
9 kx sym_value = sym->st_value;
9 kx destination = (sym_value + irela->r_addend
9 kx + sym_sec->output_offset
9 kx + sym_sec->output_section->vma);
9 kx st_type = ELF_ST_TYPE (sym->st_info);
9 kx sym_name
9 kx = bfd_elf_string_from_elf_section (input_bfd,
9 kx symtab_hdr->sh_link,
9 kx sym->st_name);
9 kx }
9 kx else
9 kx {
9 kx int e_indx;
9 kx
9 kx e_indx = r_indx - symtab_hdr->sh_info;
9 kx hash = ((struct elf_aarch64_link_hash_entry *)
9 kx elf_sym_hashes (input_bfd)[e_indx]);
9 kx
9 kx while (hash->root.root.type == bfd_link_hash_indirect
9 kx || hash->root.root.type == bfd_link_hash_warning)
9 kx hash = ((struct elf_aarch64_link_hash_entry *)
9 kx hash->root.root.u.i.link);
9 kx
9 kx if (hash->root.root.type == bfd_link_hash_defined
9 kx || hash->root.root.type == bfd_link_hash_defweak)
9 kx {
9 kx struct elf_aarch64_link_hash_table *globals =
9 kx elf_aarch64_hash_table (info);
9 kx sym_sec = hash->root.root.u.def.section;
9 kx sym_value = hash->root.root.u.def.value;
9 kx /* For a destination in a shared library,
9 kx use the PLT stub as target address to
9 kx decide whether a branch stub is
9 kx needed. */
9 kx if (globals->root.splt != NULL && hash != NULL
9 kx && hash->root.plt.offset != (bfd_vma) - 1)
9 kx {
9 kx sym_sec = globals->root.splt;
9 kx sym_value = hash->root.plt.offset;
9 kx if (sym_sec->output_section != NULL)
9 kx 		      destination = (sym_value
9 kx 				     + sym_sec->output_offset
9 kx 				     + sym_sec->output_section->vma);
9 kx }
9 kx else if (sym_sec->output_section != NULL)
9 kx destination = (sym_value + irela->r_addend
9 kx + sym_sec->output_offset
9 kx + sym_sec->output_section->vma);
9 kx }
9 kx else if (hash->root.root.type == bfd_link_hash_undefined
9 kx || (hash->root.root.type
9 kx == bfd_link_hash_undefweak))
9 kx {
9 kx 		      /* For a shared library, use the PLT stub as
9 kx 			 target address to decide whether a long
9 kx 			 branch stub is needed.
9 kx 			 Absolute code cannot be handled.  */
9 kx struct elf_aarch64_link_hash_table *globals =
9 kx elf_aarch64_hash_table (info);
9 kx
9 kx if (globals->root.splt != NULL && hash != NULL
9 kx && hash->root.plt.offset != (bfd_vma) - 1)
9 kx {
9 kx sym_sec = globals->root.splt;
9 kx sym_value = hash->root.plt.offset;
9 kx if (sym_sec->output_section != NULL)
9 kx 		      destination = (sym_value
9 kx 				     + sym_sec->output_offset
9 kx 				     + sym_sec->output_section->vma);
9 kx }
9 kx else
9 kx continue;
9 kx }
9 kx else
9 kx {
9 kx bfd_set_error (bfd_error_bad_value);
9 kx goto error_ret_free_internal;
9 kx }
9 kx st_type = ELF_ST_TYPE (hash->root.type);
9 kx sym_name = hash->root.root.root.string;
9 kx }
9 kx
9 kx /* Determine what (if any) linker stub is needed. */
9 kx stub_type = aarch64_type_of_stub (section, irela, sym_sec,
9 kx st_type, destination);
9 kx if (stub_type == aarch64_stub_none)
9 kx continue;
9 kx
9 kx /* Support for grouping stub sections. */
9 kx id_sec = htab->stub_group[section->id].link_sec;
9 kx
9 kx /* Get the name of this stub. */
9 kx stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
9 kx irela);
9 kx if (!stub_name)
9 kx goto error_ret_free_internal;
9 kx
9 kx stub_entry =
9 kx aarch64_stub_hash_lookup (&htab->stub_hash_table,
9 kx stub_name, false, false);
9 kx if (stub_entry != NULL)
9 kx {
9 kx /* The proper stub has already been created. */
9 kx free (stub_name);
9 kx /* Always update this stub's target since it may have
9 kx changed after layout. */
9 kx stub_entry->target_value = sym_value + irela->r_addend;
9 kx continue;
9 kx }
9 kx
9 kx stub_entry = _bfd_aarch64_add_stub_entry_in_group
9 kx (stub_name, section, htab);
9 kx if (stub_entry == NULL)
9 kx {
9 kx free (stub_name);
9 kx goto error_ret_free_internal;
9 kx }
9 kx
9 kx stub_entry->target_value = sym_value + irela->r_addend;
9 kx stub_entry->target_section = sym_sec;
9 kx stub_entry->stub_type = stub_type;
9 kx stub_entry->h = hash;
9 kx stub_entry->st_type = st_type;
9 kx
9 kx if (sym_name == NULL)
9 kx sym_name = "unnamed";
9 kx len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
9 kx stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
9 kx if (stub_entry->output_name == NULL)
9 kx {
9 kx free (stub_name);
9 kx goto error_ret_free_internal;
9 kx }
9 kx
9 kx snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
9 kx sym_name);
9 kx
9 kx stub_changed = true;
9 kx }
9 kx
9 kx /* We're done with the internal relocs, free them. */
9 kx if (elf_section_data (section)->relocs == NULL)
9 kx free (internal_relocs);
9 kx }
9 kx }
9 kx
9 kx if (!stub_changed)
9 kx break;
9 kx
9 kx _bfd_aarch64_resize_stubs (htab);
9 kx
9 kx /* Ask the linker to do its stuff. */
9 kx (*htab->layout_sections_again) ();
9 kx stub_changed = false;
9 kx }
9 kx
9 kx return true;
9 kx
9 kx error_ret_free_local:
9 kx return false;
9 kx }
9 kx
9 kx /* Build all the stubs associated with the current output file. The
9 kx stubs are kept in a hash table attached to the main linker hash
9 kx table. We also set up the .plt entries for statically linked PIC
9 kx functions here. This function is called via aarch64_elf_finish in the
9 kx linker. */
9 kx
9 kx bool
9 kx elfNN_aarch64_build_stubs (struct bfd_link_info *info)
9 kx {
9 kx asection *stub_sec;
9 kx struct bfd_hash_table *table;
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx for (stub_sec = htab->stub_bfd->sections;
9 kx stub_sec != NULL; stub_sec = stub_sec->next)
9 kx {
9 kx bfd_size_type size;
9 kx
9 kx /* Ignore non-stub sections. */
9 kx if (!strstr (stub_sec->name, STUB_SUFFIX))
9 kx continue;
9 kx
9 kx /* Allocate memory to hold the linker stubs. */
9 kx size = stub_sec->size;
9 kx stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
9 kx if (stub_sec->contents == NULL && size != 0)
9 kx return false;
9 kx stub_sec->size = 0;
9 kx
9 kx /* Add a branch around the stub section, and a nop, to keep it 8 byte
9 kx aligned, as long branch stubs contain a 64-bit address. */
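9 kx       /* 0x14000000 encodes an unconditional B; its imm26 field counts
9 kx          words, hence the size >> 2 below.  */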
9 kx bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
9 kx bfd_putl32 (INSN_NOP, stub_sec->contents + 4);
9 kx stub_sec->size += 8;
9 kx }
9 kx
9 kx /* Build the stubs as directed by the stub hash table. */
9 kx table = &htab->stub_hash_table;
9 kx bfd_hash_traverse (table, aarch64_build_one_stub, info);
9 kx
9 kx return true;
9 kx }
9 kx
9 kx
9 kx /* Add an entry to the code/data map for section SEC. */
9 kx
9 kx static void
9 kx elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
9 kx {
9 kx struct _aarch64_elf_section_data *sec_data =
9 kx elf_aarch64_section_data (sec);
9 kx unsigned int newidx;
9 kx
9 kx if (sec_data->map == NULL)
9 kx {
9 kx sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
9 kx sec_data->mapcount = 0;
9 kx sec_data->mapsize = 1;
9 kx }
9 kx
9 kx newidx = sec_data->mapcount++;
9 kx
9 kx if (sec_data->mapcount > sec_data->mapsize)
9 kx {
9 kx sec_data->mapsize *= 2;
9 kx sec_data->map = bfd_realloc_or_free
9 kx (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
9 kx }
9 kx
9 kx if (sec_data->map)
9 kx {
9 kx sec_data->map[newidx].vma = vma;
9 kx sec_data->map[newidx].type = type;
9 kx }
9 kx }
9 kx
9 kx
9 kx /* Initialise maps of insn/data for input BFDs. */
9 kx void
9 kx bfd_elfNN_aarch64_init_maps (bfd *abfd)
9 kx {
9 kx Elf_Internal_Sym *isymbuf;
9 kx Elf_Internal_Shdr *hdr;
9 kx unsigned int i, localsyms;
9 kx
9 kx /* Make sure that we are dealing with an AArch64 elf binary. */
9 kx if (!is_aarch64_elf (abfd))
9 kx return;
9 kx
9 kx if ((abfd->flags & DYNAMIC) != 0)
9 kx return;
9 kx
9 kx hdr = &elf_symtab_hdr (abfd);
9 kx localsyms = hdr->sh_info;
9 kx
9 kx /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
9 kx should contain the number of local symbols, which should come before any
9 kx global symbols. Mapping symbols are always local. */
9 kx isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
9 kx
9 kx /* No internal symbols read? Skip this BFD. */
9 kx if (isymbuf == NULL)
9 kx return;
9 kx
9 kx for (i = 0; i < localsyms; i++)
9 kx {
9 kx Elf_Internal_Sym *isym = &isymbuf[i];
9 kx asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
9 kx const char *name;
9 kx
9 kx if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
9 kx {
9 kx name = bfd_elf_string_from_elf_section (abfd,
9 kx hdr->sh_link,
9 kx isym->st_name);
9 kx
9 kx if (bfd_is_aarch64_special_symbol_name
9 kx (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
9 kx elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
9 kx }
9 kx }
9 kx }
9 kx
9 kx static void
9 kx setup_plt_values (struct bfd_link_info *link_info,
9 kx aarch64_plt_type plt_type)
9 kx {
9 kx struct elf_aarch64_link_hash_table *globals;
9 kx globals = elf_aarch64_hash_table (link_info);
9 kx
9 kx if (plt_type == PLT_BTI_PAC)
9 kx {
9 kx globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
9 kx
9 kx       /* PLTn entries with BTI are needed only in ET_EXEC.  */
9 kx if (bfd_link_pde (link_info))
9 kx {
9 kx globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
9 kx globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
9 kx }
9 kx else
9 kx {
9 kx globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
9 kx globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
9 kx }
9 kx }
9 kx else if (plt_type == PLT_BTI)
9 kx {
9 kx globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
9 kx
9 kx       /* PLTn entries with BTI are needed only in ET_EXEC.  */
9 kx if (bfd_link_pde (link_info))
9 kx {
9 kx globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
9 kx globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
9 kx }
9 kx }
9 kx else if (plt_type == PLT_PAC)
9 kx {
9 kx globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
9 kx globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
9 kx }
9 kx }
9 kx
9 kx /* Set option values needed during linking. */
9 kx void
9 kx bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
9 kx struct bfd_link_info *link_info,
9 kx int no_enum_warn,
9 kx int no_wchar_warn, int pic_veneer,
9 kx int fix_erratum_835769,
9 kx erratum_84319_opts fix_erratum_843419,
9 kx int no_apply_dynamic_relocs,
9 kx aarch64_bti_pac_info bp_info)
9 kx {
9 kx struct elf_aarch64_link_hash_table *globals;
9 kx
9 kx globals = elf_aarch64_hash_table (link_info);
9 kx globals->pic_veneer = pic_veneer;
9 kx globals->fix_erratum_835769 = fix_erratum_835769;
9 kx   /* If the default options are used, then ERRAT_ADR will be set by
9 kx      default, which enables the ADRP->ADR fix for erratum 843419.  */
9 kx globals->fix_erratum_843419 = fix_erratum_843419;
9 kx globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
9 kx
9 kx BFD_ASSERT (is_aarch64_elf (output_bfd));
9 kx elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
9 kx elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
9 kx
9 kx switch (bp_info.bti_type)
9 kx {
9 kx case BTI_WARN:
9 kx elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
9 kx elf_aarch64_tdata (output_bfd)->gnu_and_prop
9 kx |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
9 kx break;
9 kx
9 kx default:
9 kx break;
9 kx }
9 kx elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
9 kx setup_plt_values (link_info, bp_info.plt_type);
9 kx }
9 kx
9 kx static bfd_vma
9 kx aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
9 kx struct elf_aarch64_link_hash_table
9 kx *globals, struct bfd_link_info *info,
9 kx bfd_vma value, bfd *output_bfd,
9 kx bool *unresolved_reloc_p)
9 kx {
9 kx bfd_vma off = (bfd_vma) - 1;
9 kx asection *basegot = globals->root.sgot;
9 kx bool dyn = globals->root.dynamic_sections_created;
9 kx
9 kx if (h != NULL)
9 kx {
9 kx BFD_ASSERT (basegot != NULL);
9 kx off = h->got.offset;
9 kx BFD_ASSERT (off != (bfd_vma) - 1);
9 kx if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
9 kx || (bfd_link_pic (info)
9 kx && SYMBOL_REFERENCES_LOCAL (info, h))
9 kx || (ELF_ST_VISIBILITY (h->other)
9 kx && h->root.type == bfd_link_hash_undefweak))
9 kx {
9 kx /* This is actually a static link, or it is a -Bsymbolic link
9 kx and the symbol is defined locally. We must initialize this
9 kx entry in the global offset table. Since the offset must
9 kx always be a multiple of 8 (4 in the case of ILP32), we use
9 kx the least significant bit to record whether we have
9 kx initialized it already.
9 kx When doing a dynamic link, we create a .rel(a).got relocation
9 kx entry to initialize the value. This is done in the
9 kx finish_dynamic_symbol routine. */
9 kx if ((off & 1) != 0)
9 kx off &= ~1;
9 kx else
9 kx {
9 kx bfd_put_NN (output_bfd, value, basegot->contents + off);
9 kx h->got.offset |= 1;
9 kx }
9 kx }
9 kx else
9 kx *unresolved_reloc_p = false;
9 kx
9 kx off = off + basegot->output_section->vma + basegot->output_offset;
9 kx }
9 kx
9 kx return off;
9 kx }
9 kx
9 kx /* Change R_TYPE to a more efficient access model where possible,
9 kx return the new reloc type. */
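9 kx
9 kx /* For example, when linking an executable and the symbol binds
9 kx    locally, an initial-exec access such as
9 kx
9 kx      adrp x0, :gottprel:foo              (TLSIE_ADR_GOTTPREL_PAGE21)
9 kx      ldr  x0, [x0, #:gottprel_lo12:foo]  (TLSIE_LDNN_GOTTPREL_LO12_NC)
9 kx
9 kx    relaxes to local-exec: the cases below return
9 kx    TLSLE_MOVW_TPREL_G1 and TLSLE_MOVW_TPREL_G0_NC respectively, so
9 kx    the instructions can later be rewritten as a MOVZ/MOVK of the
9 kx    TP-relative offset.  (Illustrative; the instruction patching
9 kx    itself is done by the relocation-applying code, not here.)  */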
9 kx
9 kx static bfd_reloc_code_real_type
9 kx aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
9 kx struct elf_link_hash_entry *h,
9 kx struct bfd_link_info *info)
9 kx {
9 kx bool local_exec = bfd_link_executable (info)
9 kx && SYMBOL_REFERENCES_LOCAL (info, h);
9 kx
9 kx switch (r_type)
9 kx {
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
9 kx : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
9 kx : r_type);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
9 kx : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDR:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
9 kx : BFD_RELOC_AARCH64_NONE);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
9 kx : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
9 kx : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
9 kx : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9 kx return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
9 kx return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9 kx return r_type;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9 kx return (local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
9 kx : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_CALL:
9 kx /* Instructions with these relocations will become NOPs. */
9 kx return BFD_RELOC_AARCH64_NONE;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9 kx return local_exec ? BFD_RELOC_AARCH64_NONE : r_type;
9 kx
9 kx #if ARCH_SIZE == 64
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9 kx return local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
9 kx : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9 kx return local_exec
9 kx ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
9 kx : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
9 kx #endif
9 kx
9 kx default:
9 kx break;
9 kx }
9 kx
9 kx return r_type;
9 kx }
9 kx
9 kx static unsigned int
9 kx aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
9 kx {
9 kx switch (r_type)
9 kx {
9 kx case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9 kx case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9 kx case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9 kx return GOT_NORMAL;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9 kx return GOT_TLS_GD;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_CALL:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDR:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9 kx return GOT_TLSDESC_GD;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9 kx return GOT_TLS_IE;
9 kx
9 kx default:
9 kx break;
9 kx }
9 kx return GOT_UNKNOWN;
9 kx }
9 kx
9 kx static bool
9 kx aarch64_can_relax_tls (bfd *input_bfd,
9 kx struct bfd_link_info *info,
9 kx bfd_reloc_code_real_type r_type,
9 kx struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx unsigned int symbol_got_type;
9 kx unsigned int reloc_got_type;
9 kx
9 kx if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
9 kx return false;
9 kx
9 kx symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
9 kx reloc_got_type = aarch64_reloc_got_type (r_type);
9 kx
9 kx if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
9 kx return true;
9 kx
9 kx if (!bfd_link_executable (info))
9 kx return false;
9 kx
9 kx if (h && h->root.type == bfd_link_hash_undefweak)
9 kx return false;
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Given the relocation code R_TYPE, return the relaxed bfd reloc
9 kx enumerator. */
9 kx
9 kx static bfd_reloc_code_real_type
9 kx aarch64_tls_transition (bfd *input_bfd,
9 kx struct bfd_link_info *info,
9 kx unsigned int r_type,
9 kx struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_reloc_code_real_type bfd_r_type
9 kx = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
9 kx
9 kx if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
9 kx return bfd_r_type;
9 kx
9 kx return aarch64_tls_transition_without_check (bfd_r_type, h, info);
9 kx }
9 kx
9 kx /* Return the base VMA address which should be subtracted from real addresses
9 kx when resolving R_AARCH64_TLS_DTPREL relocation. */
9 kx
9 kx static bfd_vma
9 kx dtpoff_base (struct bfd_link_info *info)
9 kx {
9 kx /* If tls_sec is NULL, we should have signalled an error already. */
9 kx BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
9 kx return elf_hash_table (info)->tls_sec->vma;
9 kx }
9 kx
9 kx /* Return the base VMA address which should be subtracted from real addresses
9 kx when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
9 kx
9 kx static bfd_vma
9 kx tpoff_base (struct bfd_link_info *info)
9 kx {
9 kx struct elf_link_hash_table *htab = elf_hash_table (info);
9 kx
9 kx /* If tls_sec is NULL, we should have signalled an error already. */
9 kx BFD_ASSERT (htab->tls_sec != NULL);
9 kx
9 kx bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
9 kx htab->tls_sec->alignment_power);
9 kx return htab->tls_sec->vma - base;
9 kx }
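9 kx
9 kx /* For example, with the usual 16-byte TCB and a TLS section aligned
9 kx    to 16 bytes, tpoff_base is tls_sec->vma - 16, so a variable at the
9 kx    very start of the TLS section gets TP-relative offset 16.
9 kx    (Illustrative numbers; TCB_SIZE and the alignment come from the
9 kx    target.)  */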
9 kx
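9 kx /* The GOT offsets tracked below reuse their least significant bit to
9 kx    record whether the entry has been processed, mirroring the trick
9 kx    used for h->got.offset above: the *_mark functions set the bit and
9 kx    the plain accessors strip it.  */
9 kx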
9 kx static bfd_vma *
9 kx symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx   /* Return a pointer to the GOT offset for the symbol referred to
9 kx      by H, or by R_SYMNDX for a local symbol.  */
9 kx if (h != NULL)
9 kx return &h->got.offset;
9 kx else
9 kx {
9 kx /* local symbol */
9 kx struct elf_aarch64_local_symbol *l;
9 kx
9 kx l = elf_aarch64_locals (input_bfd);
9 kx return &l[r_symndx].got_offset;
9 kx }
9 kx }
9 kx
9 kx static void
9 kx symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_vma *p;
9 kx p = symbol_got_offset_ref (input_bfd, h, r_symndx);
9 kx *p |= 1;
9 kx }
9 kx
9 kx static int
9 kx symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_vma value;
9 kx value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
9 kx return value & 1;
9 kx }
9 kx
9 kx static bfd_vma
9 kx symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_vma value;
9 kx value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
9 kx value &= ~1;
9 kx return value;
9 kx }
9 kx
9 kx static bfd_vma *
9 kx symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx   /* Return a pointer to the TLSDESC GOT jump table offset for the
9 kx      symbol referred to by H, or by R_SYMNDX for a local symbol.  */
9 kx if (h != NULL)
9 kx {
9 kx struct elf_aarch64_link_hash_entry *eh;
9 kx eh = (struct elf_aarch64_link_hash_entry *) h;
9 kx return &eh->tlsdesc_got_jump_table_offset;
9 kx }
9 kx else
9 kx {
9 kx /* local symbol */
9 kx struct elf_aarch64_local_symbol *l;
9 kx
9 kx l = elf_aarch64_locals (input_bfd);
9 kx return &l[r_symndx].tlsdesc_got_jump_table_offset;
9 kx }
9 kx }
9 kx
9 kx static void
9 kx symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_vma *p;
9 kx p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
9 kx *p |= 1;
9 kx }
9 kx
9 kx static int
9 kx symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
9 kx struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_vma value;
9 kx value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
9 kx return value & 1;
9 kx }
9 kx
9 kx static bfd_vma
9 kx symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
9 kx unsigned long r_symndx)
9 kx {
9 kx bfd_vma value;
9 kx value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
9 kx value &= ~1;
9 kx return value;
9 kx }
9 kx
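9 kx /* The accessors above all share one trick: an "already processed" flag
9 kx    is packed into bit 0 of the recorded GOT offset, which is safe
9 kx    because real GOT offsets are multiples of GOT_ENTRY_SIZE.  As a
9 kx    minimal standalone model of the scheme (the slot value is
9 kx    hypothetical, for illustration only):
9 kx
9 kx      bfd_vma slot = 0x28;                  initial offset, mark clear
9 kx      int marked = slot & 1;                -> 0     (..._mark_p)
9 kx      slot |= 1;                            -> 0x29  (..._mark)
9 kx      bfd_vma off = slot & ~(bfd_vma) 1;    -> 0x28  (..._offset)
9 kx
9 kx    The same idiom reappears below where h->got.offset itself is marked
9 kx    with "h->got.offset |= 1".  */
9 kx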
9 kx /* Data for make_branch_to_erratum_835769_stub(). */
9 kx
9 kx struct erratum_835769_branch_to_stub_data
9 kx {
9 kx struct bfd_link_info *info;
9 kx asection *output_section;
9 kx bfd_byte *contents;
9 kx };
9 kx
9 kx /* Helper to insert branches to erratum 835769 stubs in the right
9 kx places for a particular section. */
9 kx
9 kx static bool
9 kx make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
9 kx void *in_arg)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx struct erratum_835769_branch_to_stub_data *data;
9 kx bfd_byte *contents;
9 kx unsigned long branch_insn = 0;
9 kx bfd_vma veneered_insn_loc, veneer_entry_loc;
9 kx bfd_signed_vma branch_offset;
9 kx unsigned int target;
9 kx bfd *abfd;
9 kx
9 kx stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
9 kx data = (struct erratum_835769_branch_to_stub_data *) in_arg;
9 kx
9 kx if (stub_entry->target_section != data->output_section
9 kx || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
9 kx return true;
9 kx
9 kx contents = data->contents;
9 kx veneered_insn_loc = stub_entry->target_section->output_section->vma
9 kx + stub_entry->target_section->output_offset
9 kx + stub_entry->target_value;
9 kx veneer_entry_loc = stub_entry->stub_sec->output_section->vma
9 kx + stub_entry->stub_sec->output_offset
9 kx + stub_entry->stub_offset;
9 kx branch_offset = veneer_entry_loc - veneered_insn_loc;
9 kx
9 kx abfd = stub_entry->target_section->owner;
9 kx if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
9 kx _bfd_error_handler
9 kx (_("%pB: error: erratum 835769 stub out "
9 kx "of range (input file too large)"), abfd);
9 kx
9 kx target = stub_entry->target_value;
9 kx branch_insn = 0x14000000;
9 kx branch_offset >>= 2;
9 kx branch_offset &= 0x3ffffff;
9 kx branch_insn |= branch_offset;
9 kx bfd_putl32 (branch_insn, &contents[target]);
9 kx
9 kx return true;
9 kx }
9 kx
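9 kx /* A note on the branch encoding used above, as a sketch: 0x14000000 is
9 kx    the AArch64 unconditional B opcode, whose imm26 field in bits [25:0]
9 kx    holds a word-scaled, sign-extended offset.  For a hypothetical
9 kx    veneer placed 0x1000 bytes after the veneered instruction:
9 kx
9 kx      branch_offset = 0x1000;
9 kx      branch_insn = 0x14000000 | ((branch_offset >> 2) & 0x3ffffff);
9 kx        -> 0x14000400, i.e. "b .+0x1000"
9 kx
9 kx    Negative offsets rely on the masked two's-complement bits, and the
9 kx    aarch64_valid_branch_p check ensures the +/-128MiB reach of imm26
9 kx    is not exceeded.  */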
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
9 kx void *in_arg)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry
9 kx = (struct elf_aarch64_stub_hash_entry *) gen_entry;
9 kx struct erratum_835769_branch_to_stub_data *data
9 kx = (struct erratum_835769_branch_to_stub_data *) in_arg;
9 kx struct bfd_link_info *info;
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx bfd_byte *contents;
9 kx asection *section;
9 kx bfd *abfd;
9 kx bfd_vma place;
9 kx uint32_t insn;
9 kx
9 kx info = data->info;
9 kx contents = data->contents;
9 kx section = data->output_section;
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx if (stub_entry->target_section != section
9 kx || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
9 kx return true;
9 kx
9 kx BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
9 kx || (htab->fix_erratum_843419 & ERRAT_ADR));
9 kx
9 kx   /* Only update the stub section if we have one.  We should always have
9 kx      one if we're allowed to use the ADRP form of the erratum workaround;
9 kx      otherwise no stub is required.  */
9 kx if (stub_entry->stub_sec)
9 kx {
9 kx insn = bfd_getl32 (contents + stub_entry->target_value);
9 kx bfd_putl32 (insn,
9 kx stub_entry->stub_sec->contents + stub_entry->stub_offset);
9 kx }
9 kx
9 kx place = (section->output_section->vma + section->output_offset
9 kx + stub_entry->adrp_offset);
9 kx insn = bfd_getl32 (contents + stub_entry->adrp_offset);
9 kx
9 kx if (!_bfd_aarch64_adrp_p (insn))
9 kx abort ();
9 kx
9 kx bfd_signed_vma imm =
9 kx (_bfd_aarch64_sign_extend
9 kx ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
9 kx - (place & 0xfff));
9 kx
9 kx if ((htab->fix_erratum_843419 & ERRAT_ADR)
9 kx && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
9 kx {
9 kx insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
9 kx | AARCH64_RT (insn));
9 kx bfd_putl32 (insn, contents + stub_entry->adrp_offset);
9 kx       /* Stub is not needed; mark it unused so it isn't emitted.  */
9 kx stub_entry->stub_type = aarch64_stub_none;
9 kx }
9 kx else if (htab->fix_erratum_843419 & ERRAT_ADRP)
9 kx {
9 kx bfd_vma veneered_insn_loc;
9 kx bfd_vma veneer_entry_loc;
9 kx bfd_signed_vma branch_offset;
9 kx uint32_t branch_insn;
9 kx
9 kx veneered_insn_loc = stub_entry->target_section->output_section->vma
9 kx + stub_entry->target_section->output_offset
9 kx + stub_entry->target_value;
9 kx veneer_entry_loc = stub_entry->stub_sec->output_section->vma
9 kx + stub_entry->stub_sec->output_offset
9 kx + stub_entry->stub_offset;
9 kx branch_offset = veneer_entry_loc - veneered_insn_loc;
9 kx
9 kx abfd = stub_entry->target_section->owner;
9 kx if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
9 kx _bfd_error_handler
9 kx (_("%pB: error: erratum 843419 stub out "
9 kx "of range (input file too large)"), abfd);
9 kx
9 kx branch_insn = 0x14000000;
9 kx branch_offset >>= 2;
9 kx branch_offset &= 0x3ffffff;
9 kx branch_insn |= branch_offset;
9 kx bfd_putl32 (branch_insn, contents + stub_entry->target_value);
9 kx }
9 kx else
9 kx {
9 kx abfd = stub_entry->target_section->owner;
9 kx _bfd_error_handler
9 kx (_("%pB: error: erratum 843419 immediate 0x%" PRIx64
9 kx " out of range for ADR (input file too large) and "
9 kx "--fix-cortex-a53-843419=adr used. Run the linker with "
9 kx "--fix-cortex-a53-843419=full instead"),
9 kx abfd, (uint64_t) (bfd_vma) imm);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx       /* This function is called inside a hashtable traversal and the error
9 kx 	 handlers called above turn into non-fatal errors.  That would mean
9 kx 	 ld exits with code 0 while still producing a broken object file.
9 kx 	 To prevent this, issue a hard abort.  */
9 kx BFD_FAIL ();
9 kx }
9 kx return true;
9 kx }
9 kx
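9 kx /* To make the ADR rewrite above concrete, a sketch with assumed
9 kx    numbers: an ADRP computes a 4KiB-page-aligned address, so its
9 kx    effective byte displacement from the instruction at PLACE is
9 kx
9 kx      imm = sign_extend (adrp_imm << 12, 33) - (place & 0xfff)
9 kx
9 kx    which re-expresses the page offset as a plain offset from the
9 kx    instruction's own address.  E.g. with place = 0x400010 and
9 kx    adrp_imm = 1, imm = 0x1000 - 0x10 = 0xff0.  ADR encodes only a
9 kx    signed 21-bit byte offset (+/-1MiB), hence the AARCH64_MIN_ADRP_IMM
9 kx    ..AARCH64_MAX_ADRP_IMM range check before preferring the in-place
9 kx    ADR rewrite over a full branch-to-stub veneer.  */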
9 kx
9 kx static bool
9 kx elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
9 kx struct bfd_link_info *link_info,
9 kx asection *sec,
9 kx bfd_byte *contents)
9 kx
9 kx {
9 kx struct elf_aarch64_link_hash_table *globals =
9 kx elf_aarch64_hash_table (link_info);
9 kx
9 kx if (globals == NULL)
9 kx return false;
9 kx
9 kx /* Fix code to point to erratum 835769 stubs. */
9 kx if (globals->fix_erratum_835769)
9 kx {
9 kx struct erratum_835769_branch_to_stub_data data;
9 kx
9 kx data.info = link_info;
9 kx data.output_section = sec;
9 kx data.contents = contents;
9 kx bfd_hash_traverse (&globals->stub_hash_table,
9 kx make_branch_to_erratum_835769_stub, &data);
9 kx }
9 kx
9 kx if (globals->fix_erratum_843419)
9 kx {
9 kx struct erratum_835769_branch_to_stub_data data;
9 kx
9 kx data.info = link_info;
9 kx data.output_section = sec;
9 kx data.contents = contents;
9 kx bfd_hash_traverse (&globals->stub_hash_table,
9 kx _bfd_aarch64_erratum_843419_branch_to_stub, &data);
9 kx }
9 kx
9 kx return false;
9 kx }
9 kx
9 kx /* Return TRUE if RELOC is a relocation against the base of the GOT table.  */
9 kx
9 kx static bool
9 kx aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
9 kx {
9 kx return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
9 kx || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
9 kx || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
9 kx || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
9 kx || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
9 kx }
9 kx
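9 kx /* Callers below use this predicate to decide whether a relocation is
9 kx    computed relative to the GOT base rather than to the place being
9 kx    relocated: when it holds, they pass the GOT's VMA as the addend,
9 kx    along the lines of
9 kx
9 kx      if (aarch64_relocation_aginst_gp_p (bfd_r_type))
9 kx        addend = (globals->root.sgot->output_section->vma
9 kx 		 + globals->root.sgot->output_offset);
9 kx
9 kx    so that _bfd_aarch64_elf_resolve_relocation can form the
9 kx    GOT-relative (or GOT-page-relative) offset these relocations
9 kx    encode.  */
9 kx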
9 kx /* Perform a relocation as part of a final link.  The input relocation type
9 kx    should already be TLS-relaxed.  */
9 kx
9 kx static bfd_reloc_status_type
9 kx elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
9 kx bfd *input_bfd,
9 kx bfd *output_bfd,
9 kx asection *input_section,
9 kx bfd_byte *contents,
9 kx Elf_Internal_Rela *rel,
9 kx bfd_vma value,
9 kx struct bfd_link_info *info,
9 kx asection *sym_sec,
9 kx struct elf_link_hash_entry *h,
9 kx bool *unresolved_reloc_p,
9 kx bool save_addend,
9 kx bfd_vma *saved_addend,
9 kx Elf_Internal_Sym *sym)
9 kx {
9 kx Elf_Internal_Shdr *symtab_hdr;
9 kx unsigned int r_type = howto->type;
9 kx bfd_reloc_code_real_type bfd_r_type
9 kx = elfNN_aarch64_bfd_reloc_from_howto (howto);
9 kx unsigned long r_symndx;
9 kx bfd_byte *hit_data = contents + rel->r_offset;
9 kx bfd_vma place, off, got_entry_addr = 0;
9 kx bfd_signed_vma signed_addend;
9 kx struct elf_aarch64_link_hash_table *globals;
9 kx bool weak_undef_p;
9 kx bool relative_reloc;
9 kx asection *base_got;
9 kx bfd_vma orig_value = value;
9 kx bool resolved_to_zero;
9 kx bool abs_symbol_p;
9 kx
9 kx globals = elf_aarch64_hash_table (info);
9 kx
9 kx symtab_hdr = &elf_symtab_hdr (input_bfd);
9 kx
9 kx BFD_ASSERT (is_aarch64_elf (input_bfd));
9 kx
9 kx r_symndx = ELFNN_R_SYM (rel->r_info);
9 kx
9 kx place = input_section->output_section->vma
9 kx + input_section->output_offset + rel->r_offset;
9 kx
9 kx /* Get addend, accumulating the addend for consecutive relocs
9 kx which refer to the same offset. */
9 kx signed_addend = saved_addend ? *saved_addend : 0;
9 kx signed_addend += rel->r_addend;
9 kx
9 kx weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
9 kx : bfd_is_und_section (sym_sec));
9 kx abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
9 kx
9 kx
9 kx   /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
9 kx      it here if it is defined in a non-shared object.  */
9 kx   if (h != NULL
9 kx       && h->type == STT_GNU_IFUNC
9 kx       && h->def_regular)
9 kx {
9 kx asection *plt;
9 kx const char *name;
9 kx bfd_vma addend = 0;
9 kx
9 kx if ((input_section->flags & SEC_ALLOC) == 0)
9 kx {
9 kx /* If this is a SHT_NOTE section without SHF_ALLOC, treat
9 kx STT_GNU_IFUNC symbol as STT_FUNC. */
9 kx if (elf_section_type (input_section) == SHT_NOTE)
9 kx goto skip_ifunc;
9 kx
9 kx /* Dynamic relocs are not propagated for SEC_DEBUGGING
9 kx sections because such sections are not SEC_ALLOC and
9 kx thus ld.so will not process them. */
9 kx if ((input_section->flags & SEC_DEBUGGING) != 0)
9 kx return bfd_reloc_ok;
9 kx
9 kx if (h->root.root.string)
9 kx name = h->root.root.string;
9 kx else
9 kx name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL);
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB(%pA+%#" PRIx64 "): "
9 kx "unresolvable %s relocation against symbol `%s'"),
9 kx input_bfd, input_section, (uint64_t) rel->r_offset,
9 kx howto->name, name);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx else if (h->plt.offset == (bfd_vma) -1)
9 kx goto bad_ifunc_reloc;
9 kx
9 kx /* STT_GNU_IFUNC symbol must go through PLT. */
9 kx plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
9 kx value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
9 kx
9 kx switch (bfd_r_type)
9 kx {
9 kx default:
9 kx bad_ifunc_reloc:
9 kx if (h->root.root.string)
9 kx name = h->root.root.string;
9 kx else
9 kx name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
9 kx NULL);
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: relocation %s against STT_GNU_IFUNC "
9 kx "symbol `%s' isn't handled by %s"), input_bfd,
9 kx howto->name, name, __FUNCTION__);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx
9 kx case BFD_RELOC_AARCH64_NN:
9 kx if (rel->r_addend != 0)
9 kx {
9 kx if (h->root.root.string)
9 kx name = h->root.root.string;
9 kx else
9 kx name = bfd_elf_sym_name (input_bfd, symtab_hdr,
9 kx sym, NULL);
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: relocation %s against STT_GNU_IFUNC "
9 kx "symbol `%s' has non-zero addend: %" PRId64),
9 kx input_bfd, howto->name, name, (int64_t) rel->r_addend);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx
9 kx /* Generate dynamic relocation only when there is a
9 kx non-GOT reference in a shared object. */
9 kx if (bfd_link_pic (info) && h->non_got_ref)
9 kx {
9 kx Elf_Internal_Rela outrel;
9 kx asection *sreloc;
9 kx
9 kx /* Need a dynamic relocation to get the real function
9 kx address. */
9 kx outrel.r_offset = _bfd_elf_section_offset (output_bfd,
9 kx info,
9 kx input_section,
9 kx rel->r_offset);
9 kx if (outrel.r_offset == (bfd_vma) -1
9 kx || outrel.r_offset == (bfd_vma) -2)
9 kx abort ();
9 kx
9 kx outrel.r_offset += (input_section->output_section->vma
9 kx + input_section->output_offset);
9 kx
9 kx if (h->dynindx == -1
9 kx || h->forced_local
9 kx || bfd_link_executable (info))
9 kx {
9 kx /* This symbol is resolved locally. */
9 kx outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9 kx outrel.r_addend = (h->root.u.def.value
9 kx + h->root.u.def.section->output_section->vma
9 kx + h->root.u.def.section->output_offset);
9 kx }
9 kx else
9 kx {
9 kx outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
9 kx outrel.r_addend = 0;
9 kx }
9 kx
9 kx sreloc = globals->root.irelifunc;
9 kx elf_append_rela (output_bfd, sreloc, &outrel);
9 kx
9 kx 	      /* If this reloc is against an external symbol, we
9 kx 		 do not want to fiddle with the addend.  Otherwise,
9 kx 		 we need to include the symbol value so that it
9 kx 		 becomes an addend for the dynamic reloc.  For an
9 kx 		 internal symbol, the addend has already been updated.  */
9 kx return bfd_reloc_ok;
9 kx }
9 kx /* FALLTHROUGH */
9 kx case BFD_RELOC_AARCH64_CALL26:
9 kx case BFD_RELOC_AARCH64_JUMP26:
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx signed_addend,
9 kx weak_undef_p);
9 kx return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
9 kx howto, value);
9 kx case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9 kx case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9 kx case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9 kx case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9 kx base_got = globals->root.sgot;
9 kx off = h->got.offset;
9 kx
9 kx if (base_got == NULL)
9 kx abort ();
9 kx
9 kx if (off == (bfd_vma) -1)
9 kx {
9 kx bfd_vma plt_index;
9 kx
9 kx /* We can't use h->got.offset here to save state, or
9 kx even just remember the offset, as finish_dynamic_symbol
9 kx would use that as offset into .got. */
9 kx
9 kx if (globals->root.splt != NULL)
9 kx {
9 kx plt_index = ((h->plt.offset - globals->plt_header_size) /
9 kx globals->plt_entry_size);
9 kx off = (plt_index + 3) * GOT_ENTRY_SIZE;
9 kx base_got = globals->root.sgotplt;
9 kx }
9 kx else
9 kx {
9 kx plt_index = h->plt.offset / globals->plt_entry_size;
9 kx off = plt_index * GOT_ENTRY_SIZE;
9 kx base_got = globals->root.igotplt;
9 kx }
9 kx
9 kx if (h->dynindx == -1
9 kx || h->forced_local
9 kx || info->symbolic)
9 kx {
9 kx /* This references the local definition. We must
9 kx initialize this entry in the global offset table.
9 kx Since the offset must always be a multiple of 8,
9 kx we use the least significant bit to record
9 kx whether we have initialized it already.
9 kx
9 kx When doing a dynamic link, we create a .rela.got
9 kx relocation entry to initialize the value. This
9 kx is done in the finish_dynamic_symbol routine. */
9 kx if ((off & 1) != 0)
9 kx off &= ~1;
9 kx else
9 kx {
9 kx bfd_put_NN (output_bfd, value,
9 kx base_got->contents + off);
9 kx /* Note that this is harmless as -1 | 1 still is -1. */
9 kx h->got.offset |= 1;
9 kx }
9 kx }
9 kx value = (base_got->output_section->vma
9 kx + base_got->output_offset + off);
9 kx }
9 kx else
9 kx value = aarch64_calculate_got_entry_vma (h, globals, info,
9 kx value, output_bfd,
9 kx unresolved_reloc_p);
9 kx
9 kx if (aarch64_relocation_aginst_gp_p (bfd_r_type))
9 kx addend = (globals->root.sgot->output_section->vma
9 kx + globals->root.sgot->output_offset);
9 kx
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx addend, weak_undef_p);
9 kx 	  return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
9 kx 					      howto, value);
9 kx case BFD_RELOC_AARCH64_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9 kx break;
9 kx }
9 kx }
9 kx
9 kx skip_ifunc:
9 kx resolved_to_zero = (h != NULL
9 kx && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
9 kx
9 kx switch (bfd_r_type)
9 kx {
9 kx case BFD_RELOC_AARCH64_NONE:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD:
9 kx case BFD_RELOC_AARCH64_TLSDESC_CALL:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDR:
9 kx *unresolved_reloc_p = false;
9 kx return bfd_reloc_ok;
9 kx
9 kx case BFD_RELOC_AARCH64_NN:
9 kx
9 kx /* When generating a shared object or relocatable executable, these
9 kx relocations are copied into the output file to be resolved at
9 kx run time. */
9 kx if (((bfd_link_pic (info)
9 kx || globals->root.is_relocatable_executable)
9 kx && (input_section->flags & SEC_ALLOC)
9 kx && (h == NULL
9 kx || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9 kx && !resolved_to_zero)
9 kx || h->root.type != bfd_link_hash_undefweak))
9 kx 	  /* Or, when creating an executable, we may need to keep relocations
9 kx 	     for symbols satisfied by a dynamic library if we manage to avoid
9 kx 	     copy relocs for the symbol.  */
9 kx || (ELIMINATE_COPY_RELOCS
9 kx && !bfd_link_pic (info)
9 kx && h != NULL
9 kx && (input_section->flags & SEC_ALLOC)
9 kx && h->dynindx != -1
9 kx && !h->non_got_ref
9 kx && ((h->def_dynamic
9 kx && !h->def_regular)
9 kx || h->root.type == bfd_link_hash_undefweak
9 kx || h->root.type == bfd_link_hash_undefined)))
9 kx {
9 kx Elf_Internal_Rela outrel;
9 kx bfd_byte *loc;
9 kx bool skip, relocate;
9 kx asection *sreloc;
9 kx
9 kx *unresolved_reloc_p = false;
9 kx
9 kx skip = false;
9 kx relocate = false;
9 kx
9 kx outrel.r_addend = signed_addend;
9 kx outrel.r_offset =
9 kx _bfd_elf_section_offset (output_bfd, info, input_section,
9 kx rel->r_offset);
9 kx if (outrel.r_offset == (bfd_vma) - 1)
9 kx skip = true;
9 kx else if (outrel.r_offset == (bfd_vma) - 2)
9 kx {
9 kx skip = true;
9 kx relocate = true;
9 kx }
9 kx else if (abs_symbol_p)
9 kx {
9 kx /* Local absolute symbol. */
9 kx skip = (h->forced_local || (h->dynindx == -1));
9 kx relocate = skip;
9 kx }
9 kx
9 kx outrel.r_offset += (input_section->output_section->vma
9 kx + input_section->output_offset);
9 kx
9 kx if (skip)
9 kx memset (&outrel, 0, sizeof outrel);
9 kx else if (h != NULL
9 kx && h->dynindx != -1
9 kx && (!bfd_link_pic (info)
9 kx || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h))
9 kx || !h->def_regular))
9 kx outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
9 kx else
9 kx {
9 kx int symbol;
9 kx
9 kx /* On SVR4-ish systems, the dynamic loader cannot
9 kx relocate the text and data segments independently,
9 kx so the symbol does not matter. */
9 kx symbol = 0;
9 kx relocate = !globals->no_apply_dynamic_relocs;
9 kx outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
9 kx outrel.r_addend += value;
9 kx }
9 kx
9 kx sreloc = elf_section_data (input_section)->sreloc;
9 kx if (sreloc == NULL || sreloc->contents == NULL)
9 kx return bfd_reloc_notsupported;
9 kx
9 kx loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
9 kx
9 kx if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
9 kx {
9 kx 	  /* Sanity check that we have previously allocated
9 kx 	     sufficient space in the relocation section for the
9 kx 	     number of relocations we actually want to emit.  */
9 kx abort ();
9 kx }
9 kx
9 kx /* If this reloc is against an external symbol, we do not want to
9 kx fiddle with the addend. Otherwise, we need to include the symbol
9 kx value so that it becomes an addend for the dynamic reloc. */
9 kx if (!relocate)
9 kx return bfd_reloc_ok;
9 kx
9 kx return _bfd_final_link_relocate (howto, input_bfd, input_section,
9 kx contents, rel->r_offset, value,
9 kx signed_addend);
9 kx }
9 kx else
9 kx value += signed_addend;
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_CALL26:
9 kx case BFD_RELOC_AARCH64_JUMP26:
9 kx {
9 kx asection *splt = globals->root.splt;
9 kx bool via_plt_p =
9 kx splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
9 kx
9 kx /* A call to an undefined weak symbol is converted to a jump to
9 kx the next instruction unless a PLT entry will be created.
9 kx The jump to the next instruction is optimized as a NOP.
9 kx Do the same for local undefined symbols. */
9 kx if (weak_undef_p && ! via_plt_p)
9 kx {
9 kx bfd_putl32 (INSN_NOP, hit_data);
9 kx return bfd_reloc_ok;
9 kx }
9 kx
9 kx /* If the call goes through a PLT entry, make sure to
9 kx check distance to the right destination address. */
9 kx if (via_plt_p)
9 kx value = (splt->output_section->vma
9 kx + splt->output_offset + h->plt.offset);
9 kx
9 kx /* Check if a stub has to be inserted because the destination
9 kx is too far away. */
9 kx struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
9 kx
9 kx 	/* If the branch destination is directed to a PLT stub, "value" is
9 kx 	   already the final destination; otherwise we must add signed_addend,
9 kx 	   which may be non-zero, e.g. for a call to a local function symbol
9 kx 	   that has been turned into "sec_sym + sec_off", with sec_off kept
9 kx 	   in signed_addend.  */
9 kx if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
9 kx place))
9 kx /* The target is out of reach, so redirect the branch to
9 kx the local stub for this function. */
9 kx stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
9 kx rel, globals);
9 kx if (stub_entry != NULL)
9 kx {
9 kx value = (stub_entry->stub_offset
9 kx + stub_entry->stub_sec->output_offset
9 kx + stub_entry->stub_sec->output_section->vma);
9 kx
9 kx /* We have redirected the destination to stub entry address,
9 kx so ignore any addend record in the original rela entry. */
9 kx signed_addend = 0;
9 kx }
9 kx }
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx signed_addend, weak_undef_p);
9 kx *unresolved_reloc_p = false;
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_16_PCREL:
9 kx case BFD_RELOC_AARCH64_32_PCREL:
9 kx case BFD_RELOC_AARCH64_64_PCREL:
9 kx case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9 kx case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9 kx case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
9 kx case BFD_RELOC_AARCH64_LD_LO19_PCREL:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9 kx if (bfd_link_pic (info)
9 kx && (input_section->flags & SEC_ALLOC) != 0
9 kx && (input_section->flags & SEC_READONLY) != 0
9 kx && !_bfd_elf_symbol_refs_local_p (h, info, 1))
9 kx {
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: relocation %s against symbol `%s' which may bind "
9 kx "externally can not be used when making a shared object; "
9 kx "recompile with -fPIC"),
9 kx input_bfd, elfNN_aarch64_howto_table[howto_index].name,
9 kx h->root.root.string);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx signed_addend,
9 kx weak_undef_p);
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_BRANCH19:
9 kx case BFD_RELOC_AARCH64_TSTBR14:
9 kx if (h && h->root.type == bfd_link_hash_undefined)
9 kx {
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: conditional branch to undefined symbol `%s' "
9 kx "not allowed"), input_bfd, h->root.root.string);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx /* Fall through. */
9 kx
9 kx case BFD_RELOC_AARCH64_16:
9 kx #if ARCH_SIZE == 64
9 kx case BFD_RELOC_AARCH64_32:
9 kx #endif
9 kx case BFD_RELOC_AARCH64_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_LDST128_LO12:
9 kx case BFD_RELOC_AARCH64_LDST16_LO12:
9 kx case BFD_RELOC_AARCH64_LDST32_LO12:
9 kx case BFD_RELOC_AARCH64_LDST64_LO12:
9 kx case BFD_RELOC_AARCH64_LDST8_LO12:
9 kx case BFD_RELOC_AARCH64_MOVW_G0:
9 kx case BFD_RELOC_AARCH64_MOVW_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_G0_S:
9 kx case BFD_RELOC_AARCH64_MOVW_G1:
9 kx case BFD_RELOC_AARCH64_MOVW_G1_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_G1_S:
9 kx case BFD_RELOC_AARCH64_MOVW_G2:
9 kx case BFD_RELOC_AARCH64_MOVW_G2_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_G2_S:
9 kx case BFD_RELOC_AARCH64_MOVW_G3:
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx signed_addend, weak_undef_p);
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9 kx case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9 kx case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9 kx if (globals->root.sgot == NULL)
9 kx BFD_ASSERT (h != NULL);
9 kx
9 kx relative_reloc = false;
9 kx if (h != NULL)
9 kx {
9 kx bfd_vma addend = 0;
9 kx
9 kx /* If a symbol is not dynamic and is not undefined weak, bind it
9 kx locally and generate a RELATIVE relocation under PIC mode.
9 kx
9 kx NOTE: one symbol may be referenced by several relocations, we
9 kx should only generate one RELATIVE relocation for that symbol.
9 kx Therefore, check GOT offset mark first. */
9 kx if (h->dynindx == -1
9 kx && !h->forced_local
9 kx && h->root.type != bfd_link_hash_undefweak
9 kx && bfd_link_pic (info)
9 kx && !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
9 kx relative_reloc = true;
9 kx
9 kx value = aarch64_calculate_got_entry_vma (h, globals, info, value,
9 kx output_bfd,
9 kx unresolved_reloc_p);
9 kx /* Record the GOT entry address which will be used when generating
9 kx RELATIVE relocation. */
9 kx if (relative_reloc)
9 kx got_entry_addr = value;
9 kx
9 kx if (aarch64_relocation_aginst_gp_p (bfd_r_type))
9 kx addend = (globals->root.sgot->output_section->vma
9 kx + globals->root.sgot->output_offset);
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx addend, weak_undef_p);
9 kx }
9 kx else
9 kx {
9 kx bfd_vma addend = 0;
9 kx struct elf_aarch64_local_symbol *locals
9 kx = elf_aarch64_locals (input_bfd);
9 kx
9 kx if (locals == NULL)
9 kx {
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx 		(_("%pB: local symbol descriptor table is NULL when applying "
9 kx 		   "relocation %s against local symbol"),
9 kx input_bfd, elfNN_aarch64_howto_table[howto_index].name);
9 kx abort ();
9 kx }
9 kx
9 kx off = symbol_got_offset (input_bfd, h, r_symndx);
9 kx base_got = globals->root.sgot;
9 kx got_entry_addr = (base_got->output_section->vma
9 kx + base_got->output_offset + off);
9 kx
9 kx if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
9 kx {
9 kx 		  bfd_put_NN (output_bfd, value, base_got->contents + off);
9 kx
9 kx 		  /* For a local symbol the absolute relocation has already
9 kx 		     been applied at static link time.  For a shared library,
9 kx 		     however, the GOT entry must be adjusted by the object's
9 kx 		     runtime base address, so generate an R_AARCH64_RELATIVE
9 kx 		     reloc for the dynamic linker.  */
9 kx if (bfd_link_pic (info))
9 kx relative_reloc = true;
9 kx
9 kx symbol_got_offset_mark (input_bfd, h, r_symndx);
9 kx }
9 kx
9 kx 	  /* Update the relocation value to the GOT entry address, since the
9 kx 	     direct data access has been transformed into an indirect access
9 kx 	     through the GOT.  */
9 kx value = got_entry_addr;
9 kx
9 kx if (aarch64_relocation_aginst_gp_p (bfd_r_type))
9 kx addend = base_got->output_section->vma + base_got->output_offset;
9 kx
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx addend, weak_undef_p);
9 kx }
9 kx
9 kx if (relative_reloc)
9 kx {
9 kx asection *s;
9 kx Elf_Internal_Rela outrel;
9 kx
9 kx s = globals->root.srelgot;
9 kx if (s == NULL)
9 kx abort ();
9 kx
9 kx outrel.r_offset = got_entry_addr;
9 kx outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9 kx outrel.r_addend = orig_value;
9 kx elf_append_rela (output_bfd, s, &outrel);
9 kx }
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9 kx if (globals->root.sgot == NULL)
9 kx return bfd_reloc_notsupported;
9 kx
9 kx value = (symbol_got_offset (input_bfd, h, r_symndx)
9 kx + globals->root.sgot->output_section->vma
9 kx + globals->root.sgot->output_offset);
9 kx
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx 0, weak_undef_p);
9 kx *unresolved_reloc_p = false;
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9 kx if (globals->root.sgot == NULL)
9 kx return bfd_reloc_notsupported;
9 kx
9 kx value = symbol_got_offset (input_bfd, h, r_symndx);
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx 0, weak_undef_p);
9 kx *unresolved_reloc_p = false;
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9 kx case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9 kx case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9 kx {
9 kx if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
9 kx {
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: TLS relocation %s against undefined symbol `%s'"),
9 kx input_bfd, elfNN_aarch64_howto_table[howto_index].name,
9 kx h->root.root.string);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx
9 kx bfd_vma def_value
9 kx = weak_undef_p ? 0 : signed_addend - dtpoff_base (info);
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx def_value, weak_undef_p);
9 kx break;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9 kx case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9 kx case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9 kx case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9 kx case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9 kx case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9 kx {
9 kx if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
9 kx {
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: TLS relocation %s against undefined symbol `%s'"),
9 kx input_bfd, elfNN_aarch64_howto_table[howto_index].name,
9 kx h->root.root.string);
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx
9 kx bfd_vma def_value
9 kx = weak_undef_p ? 0 : signed_addend - tpoff_base (info);
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx def_value, weak_undef_p);
9 kx *unresolved_reloc_p = false;
9 kx break;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9 kx if (globals->root.sgot == NULL)
9 kx return bfd_reloc_notsupported;
9 kx value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
9 kx + globals->root.sgotplt->output_section->vma
9 kx + globals->root.sgotplt->output_offset
9 kx + globals->sgotplt_jump_table_size);
9 kx
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx 0, weak_undef_p);
9 kx *unresolved_reloc_p = false;
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9 kx if (globals->root.sgot == NULL)
9 kx return bfd_reloc_notsupported;
9 kx
9 kx value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
9 kx + globals->root.sgotplt->output_section->vma
9 kx + globals->root.sgotplt->output_offset
9 kx + globals->sgotplt_jump_table_size);
9 kx
9 kx value -= (globals->root.sgot->output_section->vma
9 kx + globals->root.sgot->output_offset);
9 kx
9 kx value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
9 kx place, value,
9 kx 0, weak_undef_p);
9 kx *unresolved_reloc_p = false;
9 kx break;
9 kx
9 kx default:
9 kx return bfd_reloc_notsupported;
9 kx }
9 kx
9 kx if (saved_addend)
9 kx *saved_addend = value;
9 kx
9 kx /* Only apply the final relocation in a sequence. */
9 kx if (save_addend)
9 kx return bfd_reloc_continue;
9 kx
9 kx return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
9 kx howto, value);
9 kx }
9 kx
9 kx /* LP64 and ILP32 operate on x- and w-registers respectively.
9 kx    The following definitions take the difference between the
9 kx    corresponding machine encodings into account.  R means an x-register
9 kx    if the target arch is LP64, and a w-register if the target is ILP32.  */
9 kx
9 kx #if ARCH_SIZE == 64
9 kx # define add_R0_R0 (0x91000000)
9 kx # define add_R0_R0_R1 (0x8b000020)
9 kx # define add_R0_R1 (0x91400020)
9 kx # define ldr_R0 (0x58000000)
9 kx # define ldr_R0_mask(i) (i & 0xffffffe0)
9 kx # define ldr_R0_x0 (0xf9400000)
9 kx # define ldr_hw_R0 (0xf2a00000)
9 kx # define movk_R0 (0xf2800000)
9 kx # define movz_R0 (0xd2a00000)
9 kx # define movz_hw_R0 (0xd2c00000)
9 kx #else /* ARCH_SIZE == 32 */
9 kx # define add_R0_R0 (0x11000000)
9 kx # define add_R0_R0_R1 (0x0b000020)
9 kx # define add_R0_R1 (0x11400020)
9 kx # define ldr_R0 (0x18000000)
9 kx # define ldr_R0_mask(i) (i & 0xbfffffe0)
9 kx # define ldr_R0_x0 (0xb9400000)
9 kx # define ldr_hw_R0 (0x72a00000)
9 kx # define movk_R0 (0x72800000)
9 kx # define movz_R0 (0x52a00000)
9 kx # define movz_hw_R0 (0x52c00000)
9 kx #endif
9 kx
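9 kx /* These values are instruction templates rather than complete
9 kx    instructions: the register fields are either preset to x0/x1 (w0/w1
9 kx    for ILP32) or merged in from the instruction being rewritten.  For
9 kx    instance, the IE->LE relaxation below keeps the original destination
9 kx    register by copying Rd, which occupies bits [4:0] in both the old
9 kx    and the new encoding:
9 kx
9 kx      insn = bfd_getl32 (contents + rel->r_offset);
9 kx      bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);  */
9 kx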
9 kx /* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub,
9 kx it is used to identify the stub information to reset. */
9 kx
9 kx struct erratum_843419_branch_to_stub_clear_data
9 kx {
9 kx bfd_vma adrp_offset;
9 kx asection *output_section;
9 kx };
9 kx
9 kx /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and
9 kx section inside IN_ARG matches. The clearing is done by setting the
9 kx stub_type to none. */
9 kx
9 kx static bool
9 kx _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
9 kx void *in_arg)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry
9 kx = (struct elf_aarch64_stub_hash_entry *) gen_entry;
9 kx struct erratum_843419_branch_to_stub_clear_data *data
9 kx = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;
9 kx
9 kx if (stub_entry->target_section != data->output_section
9 kx || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
9 kx || stub_entry->adrp_offset != data->adrp_offset)
9 kx return true;
9 kx
9 kx /* Change the stub type instead of removing the entry, removing from the hash
9 kx table would be slower and we have already reserved the memory for the entry
9 kx so there wouldn't be much gain. Changing the stub also keeps around a
9 kx record of what was there before. */
9 kx stub_entry->stub_type = aarch64_stub_none;
9 kx
9 kx /* We're done and there could have been only one matching stub at that
9 kx particular offset, so abort further traversal. */
9 kx return false;
9 kx }
9 kx
9 kx /* TLS relaxation may rewrite an adrp sequence that matches the erratum
9 kx    843419 pattern.  In that case the erratum no longer applies and we need
9 kx    to remove the entry from pending stub generation.  This clears the
9 kx    matching adrp insn at ADRP_OFFSET in INPUT_SECTION from the stub table
9 kx    held in GLOBALS.  */
9 kx
9 kx static void
9 kx clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
9 kx bfd_vma adrp_offset, asection *input_section)
9 kx {
9 kx if (globals->fix_erratum_843419 & ERRAT_ADRP)
9 kx {
9 kx struct erratum_843419_branch_to_stub_clear_data data;
9 kx data.adrp_offset = adrp_offset;
9 kx data.output_section = input_section;
9 kx
9 kx bfd_hash_traverse (&globals->stub_hash_table,
9 kx _bfd_aarch64_erratum_843419_clear_stub, &data);
9 kx }
9 kx }
9 kx
9 kx /* Handle TLS relaxations.  Relaxing is possible for symbols that use
9 kx    R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12, ADD_LO12} during a static
9 kx link.
9 kx
9 kx Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
9 kx is to then call final_link_relocate. Return other values in the
9 kx case of error. */
9 kx
9 kx static bfd_reloc_status_type
9 kx elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
9 kx bfd *input_bfd, asection *input_section,
9 kx bfd_byte *contents, Elf_Internal_Rela *rel,
9 kx struct elf_link_hash_entry *h,
9 kx struct bfd_link_info *info)
9 kx {
9 kx bool local_exec = bfd_link_executable (info)
9 kx && SYMBOL_REFERENCES_LOCAL (info, h);
9 kx unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
9 kx unsigned long insn;
9 kx
9 kx BFD_ASSERT (globals && input_bfd && contents && rel);
9 kx
9 kx switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
9 kx {
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9 kx if (local_exec)
9 kx {
9 kx /* GD->LE relaxation:
9 kx adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
9 kx or
9 kx adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
9 kx
9 kx Where R is x for LP64, and w for ILP32. */
9 kx bfd_putl32 (movz_R0, contents + rel->r_offset);
9 kx /* We have relaxed the adrp into a mov, we may have to clear any
9 kx pending erratum fixes. */
9 kx clear_erratum_843419_entry (globals, rel->r_offset, input_section);
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* GD->IE relaxation:
9 kx adrp x0, :tlsgd:var => adrp x0, :gottprel:var
9 kx or
9 kx adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
9 kx */
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9 kx BFD_ASSERT (0);
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9 kx if (local_exec)
9 kx {
9 kx /* Tiny TLSDESC->LE relaxation:
9 kx ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
9 kx adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
9 kx .tlsdesccall var
9 kx blr x1 => nop
9 kx
9 kx Where R is x for LP64, and w for ILP32. */
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
9 kx
9 kx rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
9 kx AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
9 kx rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx
9 kx bfd_putl32 (movz_R0, contents + rel->r_offset);
9 kx bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
9 kx bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* Tiny TLSDESC->IE relaxation:
9 kx ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
9 kx adr x0, :tlsdesc:var => nop
9 kx .tlsdesccall var
9 kx blr x1 => nop
9 kx */
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
9 kx
9 kx rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx
9 kx bfd_putl32 (ldr_R0, contents + rel->r_offset);
9 kx bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
9 kx bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9 kx if (local_exec)
9 kx {
9 kx /* Tiny GD->LE relaxation:
9 kx adr x0, :tlsgd:var => mrs x1, tpidr_el0
9 kx bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
9 kx nop => add R0, R0, #:tprel_lo12_nc:x
9 kx
9 kx 	     Where R is x for LP64, and w for ILP32.  */
9 kx
9 kx /* First kill the tls_get_addr reloc on the bl instruction. */
9 kx BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
9 kx
9 kx bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
9 kx bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
9 kx bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
9 kx
9 kx rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
9 kx AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
9 kx rel[1].r_offset = rel->r_offset + 8;
9 kx
9 kx /* Move the current relocation to the second instruction in
9 kx the sequence. */
9 kx rel->r_offset += 4;
9 kx rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
9 kx AARCH64_R (TLSLE_ADD_TPREL_HI12));
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* Tiny GD->IE relaxation:
9 kx adr x0, :tlsgd:var => ldr R0, :gottprel:var
9 kx bl __tls_get_addr => mrs x1, tpidr_el0
9 kx nop => add R0, R0, R1
9 kx
9 kx 	     Where R is x for LP64, and w for ILP32.  */
9 kx
9 kx /* First kill the tls_get_addr reloc on the bl instruction. */
9 kx BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
9 kx rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx
9 kx bfd_putl32 (ldr_R0, contents + rel->r_offset);
9 kx bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
9 kx bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx #if ARCH_SIZE == 64
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
9 kx BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
9 kx
9 kx if (local_exec)
9 kx {
9 kx /* Large GD->LE relaxation:
9 kx movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
9 kx movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
9 kx add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
9 kx bl __tls_get_addr => mrs x1, tpidr_el0
9 kx nop => add x0, x0, x1
9 kx */
9 kx rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
9 kx AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
9 kx rel[2].r_offset = rel->r_offset + 8;
9 kx
9 kx bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
9 kx bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
9 kx bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
9 kx bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
9 kx bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
9 kx }
9 kx else
9 kx {
9 kx /* Large GD->IE relaxation:
9 kx movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
9 kx movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
9 kx add x0, gp, x0 => ldr x0, [gp, x0]
9 kx bl __tls_get_addr => mrs x1, tpidr_el0
9 kx nop => add x0, x0, x1
9 kx */
9 kx rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
9 kx bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
9 kx bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
9 kx bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
9 kx }
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9 kx return bfd_reloc_continue;
9 kx #endif
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
9 kx if (local_exec)
9 kx {
9 kx /* GD->LE relaxation:
9 kx 	     ldr xd, [x0, #:tlsdesc_lo12:var] => movk R0, :tprel_g0_nc:var
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx bfd_putl32 (movk_R0, contents + rel->r_offset);
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* GD->IE relaxation:
9 kx ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx insn = bfd_getl32 (contents + rel->r_offset);
9 kx bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9 kx if (local_exec)
9 kx {
9 kx /* GD->LE relaxation
9 kx add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
9 kx bl __tls_get_addr => mrs x1, tpidr_el0
9 kx nop => add R0, R1, R0
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx
9 kx /* First kill the tls_get_addr reloc on the bl instruction. */
9 kx BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
9 kx rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx
9 kx bfd_putl32 (movk_R0, contents + rel->r_offset);
9 kx bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
9 kx bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* GD->IE relaxation
9 kx ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
9 kx BL __tls_get_addr => mrs x1, tpidr_el0
9 kx R_AARCH64_CALL26
9 kx NOP => add R0, R1, R0
9 kx
9 kx Where R is x for lp64 mode, and w for ilp32 mode. */
9 kx
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
9 kx
9 kx /* Remove the relocation on the BL instruction. */
9 kx rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx
9 kx /* We choose to fixup the BL and NOP instructions using the
9 kx offset from the second relocation to allow flexibility in
9 kx scheduling instructions between the ADD and BL. */
9 kx bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
9 kx bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
9 kx bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_CALL:
9 kx /* GD->IE/LE relaxation:
9 kx add x0, x0, #:tlsdesc_lo12:var => nop
9 kx blr xd => nop
9 kx */
9 kx bfd_putl32 (INSN_NOP, contents + rel->r_offset);
9 kx return bfd_reloc_ok;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDR:
9 kx if (local_exec)
9 kx {
9 kx /* GD->LE relaxation:
9 kx ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx bfd_putl32 (movk_R0, contents + rel->r_offset);
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* GD->IE relaxation:
9 kx ldr xd, [gp, xn] => ldr R0, [gp, xn]
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx insn = bfd_getl32 (contents + rel->r_offset);
9 kx bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
9 kx return bfd_reloc_ok;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9 kx /* GD->LE relaxation:
9 kx movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
9 kx GD->IE relaxation:
9 kx movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx if (local_exec)
9 kx bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9 kx if (local_exec)
9 kx {
9 kx /* GD->LE relaxation:
9 kx movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
9 kx return bfd_reloc_continue;
9 kx }
9 kx else
9 kx {
9 kx /* GD->IE relaxation:
9 kx movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx insn = bfd_getl32 (contents + rel->r_offset);
9 kx bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9 kx /* IE->LE relaxation:
9 kx adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx if (local_exec)
9 kx {
9 kx insn = bfd_getl32 (contents + rel->r_offset);
9 kx bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
9 kx /* We have relaxed the adrp into a mov, we may have to clear any
9 kx pending erratum fixes. */
9 kx clear_erratum_843419_entry (globals, rel->r_offset, input_section);
9 kx }
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
9 kx /* IE->LE relaxation:
9 kx ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
9 kx
9 kx Where R is x for lp64 mode, and w for ILP32 mode. */
9 kx if (local_exec)
9 kx {
9 kx insn = bfd_getl32 (contents + rel->r_offset);
9 kx bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
9 kx }
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9 kx /* LD->LE relaxation (tiny):
9 kx adr x0, :tlsldm:x => mrs x0, tpidr_el0
9 kx bl __tls_get_addr => add R0, R0, TCB_SIZE
9 kx
9 kx Where R is x for lp64 mode, and w for ilp32 mode. */
9 kx if (local_exec)
9 kx {
9 kx BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
9 kx 	  /* No need for the CALL26 relocation against tls_get_addr.  */
9 kx rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
9 kx bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
9 kx contents + rel->r_offset + 4);
9 kx return bfd_reloc_ok;
9 kx }
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9 kx /* LD->LE relaxation (small):
9 kx adrp x0, :tlsldm:x => mrs x0, tpidr_el0
9 kx */
9 kx if (local_exec)
9 kx {
9 kx bfd_putl32 (0xd53bd040, contents + rel->r_offset);
9 kx return bfd_reloc_ok;
9 kx }
9 kx return bfd_reloc_continue;
9 kx
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9 kx /* LD->LE relaxation (small):
9 kx add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
9 kx bl __tls_get_addr => nop
9 kx
9 kx Where R is x for lp64 mode, and w for ilp32 mode. */
9 kx if (local_exec)
9 kx {
9 kx BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
9 kx BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
9 kx 	  /* No need for the CALL26 relocation against tls_get_addr.  */
9 kx rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
9 kx bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
9 kx contents + rel->r_offset + 0);
9 kx bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
9 kx return bfd_reloc_ok;
9 kx }
9 kx return bfd_reloc_continue;
9 kx
9 kx default:
9 kx return bfd_reloc_continue;
9 kx }
9 kx
9 kx return bfd_reloc_ok;
9 kx }
9 kx
9 kx /* Relocate an AArch64 ELF section. */
9 kx
9 kx static int
9 kx elfNN_aarch64_relocate_section (bfd *output_bfd,
9 kx struct bfd_link_info *info,
9 kx bfd *input_bfd,
9 kx asection *input_section,
9 kx bfd_byte *contents,
9 kx Elf_Internal_Rela *relocs,
9 kx Elf_Internal_Sym *local_syms,
9 kx asection **local_sections)
9 kx {
9 kx Elf_Internal_Shdr *symtab_hdr;
9 kx struct elf_link_hash_entry **sym_hashes;
9 kx Elf_Internal_Rela *rel;
9 kx Elf_Internal_Rela *relend;
9 kx const char *name;
9 kx struct elf_aarch64_link_hash_table *globals;
9 kx bool save_addend = false;
9 kx bfd_vma addend = 0;
9 kx
9 kx globals = elf_aarch64_hash_table (info);
9 kx
9 kx symtab_hdr = &elf_symtab_hdr (input_bfd);
9 kx sym_hashes = elf_sym_hashes (input_bfd);
9 kx
9 kx rel = relocs;
9 kx relend = relocs + input_section->reloc_count;
9 kx for (; rel < relend; rel++)
9 kx {
9 kx unsigned int r_type;
9 kx bfd_reloc_code_real_type bfd_r_type;
9 kx bfd_reloc_code_real_type relaxed_bfd_r_type;
9 kx reloc_howto_type *howto;
9 kx unsigned long r_symndx;
9 kx Elf_Internal_Sym *sym;
9 kx asection *sec;
9 kx struct elf_link_hash_entry *h;
9 kx bfd_vma relocation;
9 kx bfd_reloc_status_type r;
9 kx arelent bfd_reloc;
9 kx char sym_type;
9 kx bool unresolved_reloc = false;
9 kx char *error_message = NULL;
9 kx
9 kx r_symndx = ELFNN_R_SYM (rel->r_info);
9 kx r_type = ELFNN_R_TYPE (rel->r_info);
9 kx
9 kx bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
9 kx howto = bfd_reloc.howto;
9 kx
9 kx if (howto == NULL)
9 kx return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
9 kx
9 kx bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
9 kx
9 kx h = NULL;
9 kx sym = NULL;
9 kx sec = NULL;
9 kx
9 kx if (r_symndx < symtab_hdr->sh_info)
9 kx {
9 kx sym = local_syms + r_symndx;
9 kx sym_type = ELFNN_ST_TYPE (sym->st_info);
9 kx sec = local_sections[r_symndx];
9 kx
9 kx /* An object file might have a reference to a local
9 kx undefined symbol. This is a daft object file, but we
9 kx should at least do something about it. */
9 kx if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
9 kx && bfd_is_und_section (sec)
9 kx && ELF_ST_BIND (sym->st_info) != STB_WEAK)
9 kx (*info->callbacks->undefined_symbol)
9 kx (info, bfd_elf_string_from_elf_section
9 kx (input_bfd, symtab_hdr->sh_link, sym->st_name),
9 kx input_bfd, input_section, rel->r_offset, true);
9 kx
9 kx relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
9 kx
9 kx /* Relocate against local STT_GNU_IFUNC symbol. */
9 kx if (!bfd_link_relocatable (info)
9 kx && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
9 kx {
9 kx h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
9 kx rel, false);
9 kx if (h == NULL)
9 kx abort ();
9 kx
9 kx /* Set STT_GNU_IFUNC symbol value. */
9 kx h->root.u.def.value = sym->st_value;
9 kx h->root.u.def.section = sec;
9 kx }
9 kx }
9 kx else
9 kx {
9 kx bool warned, ignored;
9 kx
9 kx RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
9 kx r_symndx, symtab_hdr, sym_hashes,
9 kx h, sec, relocation,
9 kx unresolved_reloc, warned, ignored);
9 kx
9 kx sym_type = h->type;
9 kx }
9 kx
9 kx if (sec != NULL && discarded_section (sec))
9 kx RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
9 kx rel, 1, relend, howto, 0, contents);
9 kx
9 kx if (bfd_link_relocatable (info))
9 kx continue;
9 kx
9 kx if (h != NULL)
9 kx name = h->root.root.string;
9 kx else
9 kx {
9 kx name = (bfd_elf_string_from_elf_section
9 kx (input_bfd, symtab_hdr->sh_link, sym->st_name));
9 kx if (name == NULL || *name == '\0')
9 kx name = bfd_section_name (sec);
9 kx }
9 kx
9 kx if (r_symndx != 0
9 kx && r_type != R_AARCH64_NONE
9 kx && r_type != R_AARCH64_NULL
9 kx && (h == NULL
9 kx || h->root.type == bfd_link_hash_defined
9 kx || h->root.type == bfd_link_hash_defweak)
9 kx && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
9 kx {
9 kx _bfd_error_handler
9 kx ((sym_type == STT_TLS
9 kx /* xgettext:c-format */
9 kx ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
9 kx /* xgettext:c-format */
9 kx : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
9 kx input_bfd,
9 kx input_section, (uint64_t) rel->r_offset, howto->name, name);
9 kx }
9 kx
9 kx       /* We relax only if we can see that there can be a valid transition
9 kx 	 from one reloc type to another.
9 kx We call elfNN_aarch64_final_link_relocate unless we're completely
9 kx done, i.e., the relaxation produced the final output we want. */
9 kx
9 kx relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
9 kx h, r_symndx);
9 kx if (relaxed_bfd_r_type != bfd_r_type)
9 kx {
9 kx bfd_r_type = relaxed_bfd_r_type;
9 kx howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
9 kx BFD_ASSERT (howto != NULL);
9 kx r_type = howto->type;
9 kx r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
9 kx contents, rel, h, info);
9 kx unresolved_reloc = false;
9 kx }
9 kx else
9 kx r = bfd_reloc_continue;
9 kx
9 kx /* There may be multiple consecutive relocations for the
9 kx same offset. In that case we are supposed to treat the
9 kx output of each relocation as the addend for the next. */
9 kx if (rel + 1 < relend
9 kx && rel->r_offset == rel[1].r_offset
9 kx && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
9 kx && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
9 kx save_addend = true;
9 kx else
9 kx save_addend = false;
9 kx
9 kx if (r == bfd_reloc_continue)
9 kx r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
9 kx input_section, contents, rel,
9 kx relocation, info, sec,
9 kx h, &unresolved_reloc,
9 kx save_addend, &addend, sym);
9 kx
9 kx switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
9 kx {
9 kx case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9 kx if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
9 kx {
9 kx bool need_relocs = false;
9 kx bfd_byte *loc;
9 kx int indx;
9 kx bfd_vma off;
9 kx
9 kx off = symbol_got_offset (input_bfd, h, r_symndx);
9 kx indx = h && h->dynindx != -1 ? h->dynindx : 0;
9 kx
9 kx need_relocs =
9 kx (!bfd_link_executable (info) || indx != 0) &&
9 kx (h == NULL
9 kx || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9 kx || h->root.type != bfd_link_hash_undefweak);
9 kx
9 kx BFD_ASSERT (globals->root.srelgot != NULL);
9 kx
9 kx if (need_relocs)
9 kx {
9 kx Elf_Internal_Rela rela;
9 kx rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
9 kx rela.r_addend = 0;
9 kx rela.r_offset = globals->root.sgot->output_section->vma +
9 kx globals->root.sgot->output_offset + off;
9 kx
9 kx loc = globals->root.srelgot->contents;
9 kx loc += globals->root.srelgot->reloc_count++
9 kx * RELOC_SIZE (globals);
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx
9 kx bfd_reloc_code_real_type real_type =
9 kx elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
9 kx
9 kx if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
9 kx || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
9 kx || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
9 kx {
9 kx /* For local dynamic, don't generate DTPREL in any case.
9 kx Initialize the DTPREL slot to zero, so that we get the
9 kx module base address when invoking the runtime TLS resolver. */
9 kx bfd_put_NN (output_bfd, 0,
9 kx globals->root.sgot->contents + off
9 kx + GOT_ENTRY_SIZE);
9 kx }
9 kx else if (indx == 0)
9 kx {
9 kx bfd_put_NN (output_bfd,
9 kx relocation - dtpoff_base (info),
9 kx globals->root.sgot->contents + off
9 kx + GOT_ENTRY_SIZE);
9 kx }
9 kx else
9 kx {
9 kx /* This TLS symbol is global. We emit a
9 kx relocation to fixup the tls offset at load
9 kx time. */
9 kx rela.r_info =
9 kx ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
9 kx rela.r_addend = 0;
9 kx rela.r_offset =
9 kx (globals->root.sgot->output_section->vma
9 kx + globals->root.sgot->output_offset + off
9 kx + GOT_ENTRY_SIZE);
9 kx
9 kx loc = globals->root.srelgot->contents;
9 kx loc += globals->root.srelgot->reloc_count++
9 kx * RELOC_SIZE (globals);
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx bfd_put_NN (output_bfd, (bfd_vma) 0,
9 kx globals->root.sgot->contents + off
9 kx + GOT_ENTRY_SIZE);
9 kx }
9 kx }
9 kx else
9 kx {
9 kx bfd_put_NN (output_bfd, (bfd_vma) 1,
9 kx globals->root.sgot->contents + off);
9 kx bfd_put_NN (output_bfd,
9 kx relocation - dtpoff_base (info),
9 kx globals->root.sgot->contents + off
9 kx + GOT_ENTRY_SIZE);
9 kx }
9 kx
9 kx symbol_got_offset_mark (input_bfd, h, r_symndx);
9 kx }
9 kx break;
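9 kx
9 kx /* A sketch of the double GOT entry handled above for the general
9 kx and local dynamic forms:
9 kx
9 kx .got + off : module id <- R_AARCH64_TLS_DTPMOD when a
9 kx dynamic reloc is needed, else the constant 1.
9 kx .got + off + GOT_ENTRY_SIZE : dtp offset <- R_AARCH64_TLS_DTPREL for
9 kx global symbols, the known offset for local
9 kx ones, and zero for the local dynamic forms. */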
9 kx
9 kx case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9 kx if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
9 kx {
9 kx bool need_relocs = false;
9 kx bfd_byte *loc;
9 kx int indx;
9 kx bfd_vma off;
9 kx
9 kx off = symbol_got_offset (input_bfd, h, r_symndx);
9 kx
9 kx indx = h && h->dynindx != -1 ? h->dynindx : 0;
9 kx
9 kx need_relocs =
9 kx (!bfd_link_executable (info) || indx != 0) &&
9 kx (h == NULL
9 kx || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9 kx || h->root.type != bfd_link_hash_undefweak);
9 kx
9 kx BFD_ASSERT (globals->root.srelgot != NULL);
9 kx
9 kx if (need_relocs)
9 kx {
9 kx Elf_Internal_Rela rela;
9 kx
9 kx if (indx == 0)
9 kx rela.r_addend = relocation - dtpoff_base (info);
9 kx else
9 kx rela.r_addend = 0;
9 kx
9 kx rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
9 kx rela.r_offset = globals->root.sgot->output_section->vma +
9 kx globals->root.sgot->output_offset + off;
9 kx
9 kx loc = globals->root.srelgot->contents;
9 kx loc += globals->root.srelgot->reloc_count++
9 kx * RELOC_SIZE (globals);
9 kx
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx
9 kx bfd_put_NN (output_bfd, rela.r_addend,
9 kx globals->root.sgot->contents + off);
9 kx }
9 kx else
9 kx bfd_put_NN (output_bfd, relocation - tpoff_base (info),
9 kx globals->root.sgot->contents + off);
9 kx
9 kx symbol_got_offset_mark (input_bfd, h, r_symndx);
9 kx }
9 kx break;
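9 kx
9 kx /* A sketch of the initial-exec GOT entry handled above: a single
9 kx slot holding the thread-pointer offset of the symbol,
9 kx
9 kx .got + off : tp offset <- R_AARCH64_TLS_TPREL when a dynamic
9 kx reloc is needed, otherwise
9 kx relocation - tpoff_base (info) is
9 kx written directly.
9 kx
9 kx One typical consuming sequence (cf. the TLS notes at the top of
9 kx this file) is:
9 kx
9 kx adrp x0, :gottprel:foo
9 kx ldr x0, [x0, #:gottprel_lo12:foo] */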
9 kx
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9 kx if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
9 kx {
9 kx bool need_relocs = false;
9 kx int indx = h && h->dynindx != -1 ? h->dynindx : 0;
9 kx bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
9 kx
9 kx need_relocs = (h == NULL
9 kx || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9 kx || h->root.type != bfd_link_hash_undefweak);
9 kx
9 kx BFD_ASSERT (globals->root.srelgot != NULL);
9 kx BFD_ASSERT (globals->root.sgot != NULL);
9 kx
9 kx if (need_relocs)
9 kx {
9 kx bfd_byte *loc;
9 kx Elf_Internal_Rela rela;
9 kx rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
9 kx
9 kx rela.r_addend = 0;
9 kx rela.r_offset = (globals->root.sgotplt->output_section->vma
9 kx + globals->root.sgotplt->output_offset
9 kx + off + globals->sgotplt_jump_table_size);
9 kx
9 kx if (indx == 0)
9 kx rela.r_addend = relocation - dtpoff_base (info);
9 kx
9 kx /* Allocate the next available slot in the PLT reloc
9 kx section to hold our R_AARCH64_TLSDESC, the next
9 kx available slot is determined from reloc_count,
9 kx which we step. But note, reloc_count was
9 kx artificially moved down while allocating slots for
9 kx real PLT relocs such that all of the PLT relocs
9 kx will fit above the initial reloc_count and the
9 kx extra stuff will fit below. */
9 kx loc = globals->root.srelplt->contents;
9 kx loc += globals->root.srelplt->reloc_count++
9 kx * RELOC_SIZE (globals);
9 kx
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx
9 kx bfd_put_NN (output_bfd, (bfd_vma) 0,
9 kx globals->root.sgotplt->contents + off +
9 kx globals->sgotplt_jump_table_size);
9 kx bfd_put_NN (output_bfd, (bfd_vma) 0,
9 kx globals->root.sgotplt->contents + off +
9 kx globals->sgotplt_jump_table_size +
9 kx GOT_ENTRY_SIZE);
9 kx }
9 kx
9 kx symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
9 kx }
9 kx break;
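9 kx
9 kx /* A sketch of the .rela.plt layout that the reloc_count trick
9 kx above relies on (counts illustrative):
9 kx
9 kx slots 0 .. N-1 : R_AARCH64_JUMP_SLOT relocs, placed by PLT index
9 kx slots N .. : R_AARCH64_TLSDESC relocs, placed at the slot
9 kx given by the stepped reloc_count.
9 kx
9 kx The descriptor itself is a pair of GOT.PLT words, zeroed here;
9 kx the dynamic linker later fills in the resolver entry point and
9 kx its argument. */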
9 kx default:
9 kx break;
9 kx }
9 kx
9 kx /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9 kx because such sections are not SEC_ALLOC and thus ld.so will
9 kx not process them. */
9 kx if (unresolved_reloc
9 kx && !((input_section->flags & SEC_DEBUGGING) != 0
9 kx && h->def_dynamic)
9 kx && _bfd_elf_section_offset (output_bfd, info, input_section,
9 kx rel->r_offset) != (bfd_vma) - 1)
9 kx {
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB(%pA+%#" PRIx64 "): "
9 kx "unresolvable %s relocation against symbol `%s'"),
9 kx input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
9 kx h->root.root.string);
9 kx return false;
9 kx }
9 kx
9 kx if (r != bfd_reloc_ok && r != bfd_reloc_continue)
9 kx {
9 kx bfd_reloc_code_real_type real_r_type
9 kx = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
9 kx
9 kx switch (r)
9 kx {
9 kx case bfd_reloc_overflow:
9 kx (*info->callbacks->reloc_overflow)
9 kx (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
9 kx input_bfd, input_section, rel->r_offset);
9 kx if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
9 kx || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
9 kx {
9 kx (*info->callbacks->warning)
9 kx (info,
9 kx _("too many GOT entries for -fpic, "
9 kx "please recompile with -fPIC"),
9 kx name, input_bfd, input_section, rel->r_offset);
9 kx return false;
9 kx }
9 kx /* Overflow can occur when a variable is referenced with a type
9 kx that has a larger alignment than the type with which it was
9 kx declared, e.g.:
9 kx file1.c: extern int foo; int a (void) { return foo; }
9 kx file2.c: char bar, foo, baz;
9 kx If the variable is placed into a data section at an offset
9 kx that is incompatible with the larger alignment requirement
9 kx overflow will occur. (Strictly speaking this is not overflow
9 kx but rather an alignment problem, but the bfd_reloc_ error
9 kx enum does not have a value to cover that situation).
9 kx
9 kx Try to catch this situation here and provide a more helpful
9 kx error message to the user. */
9 kx if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
9 kx /* FIXME: Are we testing all of the appropriate reloc
9 kx types here ? */
9 kx && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
9 kx || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
9 kx || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
9 kx || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
9 kx || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
9 kx {
9 kx info->callbacks->warning
9 kx (info, _("one possible cause of this error is that the \
9 kx symbol is being referenced in the indicated code as if it had a larger \
9 kx alignment than was declared where it was defined"),
9 kx name, input_bfd, input_section, rel->r_offset);
9 kx }
9 kx break;
9 kx
9 kx case bfd_reloc_undefined:
9 kx (*info->callbacks->undefined_symbol)
9 kx (info, name, input_bfd, input_section, rel->r_offset, true);
9 kx break;
9 kx
9 kx case bfd_reloc_outofrange:
9 kx error_message = _("out of range");
9 kx goto common_error;
9 kx
9 kx case bfd_reloc_notsupported:
9 kx error_message = _("unsupported relocation");
9 kx goto common_error;
9 kx
9 kx case bfd_reloc_dangerous:
9 kx /* error_message should already be set. */
9 kx goto common_error;
9 kx
9 kx default:
9 kx error_message = _("unknown error");
9 kx /* Fall through. */
9 kx
9 kx common_error:
9 kx BFD_ASSERT (error_message != NULL);
9 kx (*info->callbacks->reloc_dangerous)
9 kx (info, error_message, input_bfd, input_section, rel->r_offset);
9 kx break;
9 kx }
9 kx }
9 kx
9 kx if (!save_addend)
9 kx addend = 0;
9 kx }
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Set the right machine number. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_object_p (bfd *abfd)
9 kx {
9 kx #if ARCH_SIZE == 32
9 kx bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
9 kx #else
9 kx bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
9 kx #endif
9 kx return true;
9 kx }
9 kx
9 kx /* Function to keep AArch64 specific flags in the ELF header. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
9 kx {
9 kx if (!elf_flags_init (abfd) || elf_elfheader (abfd)->e_flags == flags)
9 kx {
9 kx elf_elfheader (abfd)->e_flags = flags;
9 kx elf_flags_init (abfd) = true;
9 kx }
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Merge backend specific data from an object file to the output
9 kx object file when linking. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
9 kx {
9 kx bfd *obfd = info->output_bfd;
9 kx flagword out_flags;
9 kx flagword in_flags;
9 kx bool flags_compatible = true;
9 kx asection *sec;
9 kx
9 kx /* Check if we have the same endianness. */
9 kx if (!_bfd_generic_verify_endian_match (ibfd, info))
9 kx return false;
9 kx
9 kx if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
9 kx return true;
9 kx
9 kx /* The input BFD must have had its flags initialised. */
9 kx /* The following seems bogus to me -- The flags are initialized in
9 kx the assembler but I don't think an elf_flags_init field is
9 kx written into the object. */
9 kx /* BFD_ASSERT (elf_flags_init (ibfd)); */
9 kx
9 kx in_flags = elf_elfheader (ibfd)->e_flags;
9 kx out_flags = elf_elfheader (obfd)->e_flags;
9 kx
9 kx if (!elf_flags_init (obfd))
9 kx {
9 kx /* If the input is the default architecture and had the default
9 kx flags then do not bother setting the flags for the output
9 kx architecture, instead allow future merges to do this. If no
9 kx future merges ever set these flags then they will retain their
9 kx uninitialised values which, surprise surprise, correspond
9 kx to the default values. */
9 kx if (bfd_get_arch_info (ibfd)->the_default
9 kx && elf_elfheader (ibfd)->e_flags == 0)
9 kx return true;
9 kx
9 kx elf_flags_init (obfd) = true;
9 kx elf_elfheader (obfd)->e_flags = in_flags;
9 kx
9 kx if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
9 kx && bfd_get_arch_info (obfd)->the_default)
9 kx return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
9 kx bfd_get_mach (ibfd));
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Identical flags must be compatible. */
9 kx if (in_flags == out_flags)
9 kx return true;
9 kx
9 kx /* Check to see if the input BFD actually contains any sections. If
9 kx not, its flags may not have been initialised either, but it
9 kx cannot actually cause any incompatibility. Do not short-circuit
9 kx dynamic objects; their section list may be emptied by
9 kx elf_link_add_object_symbols.
9 kx
9 kx Also check to see if there are no code sections in the input.
9 kx In this case there is no need to check for code specific flags.
9 kx XXX - do we need to worry about floating-point format compatibility
9 kx in data sections ? */
9 kx if (!(ibfd->flags & DYNAMIC))
9 kx {
9 kx bool null_input_bfd = true;
9 kx bool only_data_sections = true;
9 kx
9 kx for (sec = ibfd->sections; sec != NULL; sec = sec->next)
9 kx {
9 kx null_input_bfd = false;
9 kx
9 kx if ((bfd_section_flags (sec)
9 kx & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
9 kx == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
9 kx {
9 kx only_data_sections = false;
9 kx break;
9 kx }
9 kx }
9 kx
9 kx if (null_input_bfd || only_data_sections)
9 kx return true;
9 kx }
9 kx
9 kx return flags_compatible;
9 kx }
9 kx
9 kx /* Display the flags field. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
9 kx {
9 kx FILE *file = (FILE *) ptr;
9 kx unsigned long flags;
9 kx
9 kx BFD_ASSERT (abfd != NULL && ptr != NULL);
9 kx
9 kx /* Print normal ELF private data. */
9 kx _bfd_elf_print_private_bfd_data (abfd, ptr);
9 kx
9 kx flags = elf_elfheader (abfd)->e_flags;
9 kx /* Ignore init flag - it may not be set, despite the flags field
9 kx containing valid data. */
9 kx
9 kx /* xgettext:c-format */
9 kx fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
9 kx
9 kx if (flags)
9 kx fprintf (file, _(" <Unrecognised flag bits set>"));
9 kx
9 kx fputc ('\n', file);
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Return true if we need copy relocation against EH. */
9 kx
9 kx static bool
9 kx need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
9 kx {
9 kx struct elf_dyn_relocs *p;
9 kx asection *s;
9 kx
9 kx for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
9 kx {
9 kx /* If there is any pc-relative reference, we need to keep copy relocation
9 kx to avoid propagating the relocation into runtime that current glibc
9 kx does not support. */
9 kx if (p->pc_count)
9 kx return true;
9 kx
9 kx s = p->sec->output_section;
9 kx /* Need copy relocation if it's against read-only section. */
9 kx if (s != NULL && (s->flags & SEC_READONLY) != 0)
9 kx return true;
9 kx }
9 kx
9 kx return false;
9 kx }
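9 kx
9 kx /* For example (a sketch): a non-PIC executable built from
9 kx
9 kx extern int bar; int get (void) { return bar; }
9 kx
9 kx where bar is defined in a shared library reaches bar through
9 kx relocations in .text, which is read-only, so the test above
9 kx returns true; elfNN_aarch64_adjust_dynamic_symbol below then
9 kx reserves space for bar in .dynbss (or .data.rel.ro) and emits an
9 kx R_AARCH64_COPY reloc telling the dynamic linker to copy the
9 kx initial value out of the shared object at load time. */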
9 kx
9 kx /* Adjust a symbol defined by a dynamic object and referenced by a
9 kx regular object. The current definition is in some section of the
9 kx dynamic object, but we're not including those sections. We have to
9 kx change the definition to something the rest of the link can
9 kx understand. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
9 kx struct elf_link_hash_entry *h)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx asection *s, *srel;
9 kx
9 kx /* If this is a function, put it in the procedure linkage table. We
9 kx will fill in the contents of the procedure linkage table later,
9 kx when we know the address of the .got section. */
9 kx if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
9 kx {
9 kx if (h->plt.refcount <= 0
9 kx || (h->type != STT_GNU_IFUNC
9 kx && (SYMBOL_CALLS_LOCAL (info, h)
9 kx || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
9 kx && h->root.type == bfd_link_hash_undefweak))))
9 kx {
9 kx /* This case can occur if we saw a CALL26 reloc in
9 kx an input file, but the symbol wasn't referred to
9 kx by a dynamic object or all references were
9 kx garbage collected. In that case we can end up
9 kx resolving the calls locally, so no PLT entry is
9 kx needed. */
9 kx h->plt.offset = (bfd_vma) - 1;
9 kx h->needs_plt = 0;
9 kx }
9 kx
9 kx return true;
9 kx }
9 kx else
9 kx /* Not a function after all: discard any PLT bookkeeping that
9 kx earlier phases may have set up speculatively. */
9 kx h->plt.offset = (bfd_vma) - 1;
9 kx
9 kx /* If this is a weak symbol, and there is a real definition, the
9 kx processor independent code will have arranged for us to see the
9 kx real definition first, and we can just use the same value. */
9 kx if (h->is_weakalias)
9 kx {
9 kx struct elf_link_hash_entry *def = weakdef (h);
9 kx BFD_ASSERT (def->root.type == bfd_link_hash_defined);
9 kx h->root.u.def.section = def->root.u.def.section;
9 kx h->root.u.def.value = def->root.u.def.value;
9 kx if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
9 kx h->non_got_ref = def->non_got_ref;
9 kx return true;
9 kx }
9 kx
9 kx /* If we are creating a shared library, we must presume that the
9 kx only references to the symbol are via the global offset table.
9 kx For such cases we need not do anything here; the relocations will
9 kx be handled correctly by relocate_section. */
9 kx if (bfd_link_pic (info))
9 kx return true;
9 kx
9 kx /* If there are no references to this symbol that do not use the
9 kx GOT, we don't need to generate a copy reloc. */
9 kx if (!h->non_got_ref)
9 kx return true;
9 kx
9 kx /* If -z nocopyreloc was given, we won't generate them either. */
9 kx if (info->nocopyreloc)
9 kx {
9 kx h->non_got_ref = 0;
9 kx return true;
9 kx }
9 kx
9 kx if (ELIMINATE_COPY_RELOCS)
9 kx {
9 kx struct elf_aarch64_link_hash_entry *eh;
9 kx /* If we don't find any dynamic relocs in read-only sections, then
9 kx we'll be keeping the dynamic relocs and avoiding the copy reloc. */
9 kx eh = (struct elf_aarch64_link_hash_entry *) h;
9 kx if (!need_copy_relocation_p (eh))
9 kx {
9 kx h->non_got_ref = 0;
9 kx return true;
9 kx }
9 kx }
9 kx
9 kx /* We must allocate the symbol in our .dynbss section, which will
9 kx become part of the .bss section of the executable. There will be
9 kx an entry for this symbol in the .dynsym section. The dynamic
9 kx object will contain position independent code, so all references
9 kx from the dynamic object to this symbol will go through the global
9 kx offset table. The dynamic linker will use the .dynsym entry to
9 kx determine the address it must put in the global offset table, so
9 kx both the dynamic object and the regular object will refer to the
9 kx same memory location for the variable. */
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
9 kx to copy the initial value out of the dynamic object and into the
9 kx runtime process image. */
9 kx if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
9 kx {
9 kx s = htab->root.sdynrelro;
9 kx srel = htab->root.sreldynrelro;
9 kx }
9 kx else
9 kx {
9 kx s = htab->root.sdynbss;
9 kx srel = htab->root.srelbss;
9 kx }
9 kx if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
9 kx {
9 kx srel->size += RELOC_SIZE (htab);
9 kx h->needs_copy = 1;
9 kx }
9 kx
9 kx return _bfd_elf_adjust_dynamic_copy (info, h, s);
9 kx }
9 kx
9 kx static bool
9 kx elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
9 kx {
9 kx struct elf_aarch64_local_symbol *locals;
9 kx locals = elf_aarch64_locals (abfd);
9 kx if (locals == NULL)
9 kx {
9 kx locals = (struct elf_aarch64_local_symbol *)
9 kx bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
9 kx if (locals == NULL)
9 kx return false;
9 kx elf_aarch64_locals (abfd) = locals;
9 kx }
9 kx return true;
9 kx }
9 kx
9 kx /* Create the .got section to hold the global offset table. */
9 kx
9 kx static bool
9 kx aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
9 kx {
9 kx const struct elf_backend_data *bed = get_elf_backend_data (abfd);
9 kx flagword flags;
9 kx asection *s;
9 kx struct elf_link_hash_entry *h;
9 kx struct elf_link_hash_table *htab = elf_hash_table (info);
9 kx
9 kx /* This function may be called more than once. */
9 kx if (htab->sgot != NULL)
9 kx return true;
9 kx
9 kx flags = bed->dynamic_sec_flags;
9 kx
9 kx s = bfd_make_section_anyway_with_flags (abfd,
9 kx (bed->rela_plts_and_copies_p
9 kx ? ".rela.got" : ".rel.got"),
9 kx (bed->dynamic_sec_flags
9 kx | SEC_READONLY));
9 kx if (s == NULL
9 kx || !bfd_set_section_alignment (s, bed->s->log_file_align))
9 kx return false;
9 kx htab->srelgot = s;
9 kx
9 kx s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
9 kx if (s == NULL
9 kx || !bfd_set_section_alignment (s, bed->s->log_file_align))
9 kx return false;
9 kx htab->sgot = s;
9 kx htab->sgot->size += GOT_ENTRY_SIZE;
9 kx
9 kx if (bed->want_got_sym)
9 kx {
9 kx /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
9 kx (or .got.plt) section. We don't do this in the linker script
9 kx because we don't want to define the symbol if we are not creating
9 kx a global offset table. */
9 kx h = _bfd_elf_define_linkage_sym (abfd, info, s,
9 kx "_GLOBAL_OFFSET_TABLE_");
9 kx if (h == NULL)
9 kx return false;
9 kx elf_hash_table (info)->hgot = h;
9 kx }
9 kx
9 kx if (bed->want_got_plt)
9 kx {
9 kx s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
9 kx if (s == NULL
9 kx || !bfd_set_section_alignment (s, bed->s->log_file_align))
9 kx return false;
9 kx htab->sgotplt = s;
9 kx }
9 kx
9 kx /* The first bit of the global offset table is the header. */
9 kx s->size += bed->got_header_size;
9 kx
9 kx return true;
9 kx }
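9 kx
9 kx /* A sketch of the layout the function above reserves (sizes for
9 kx LP64; ILP32 halves GOT_ENTRY_SIZE):
9 kx
9 kx .rela.got : created empty and read-only; grows later as
9 kx dynamic GOT relocations are allocated.
9 kx .got : one reserved entry of GOT_ENTRY_SIZE, with
9 kx _GLOBAL_OFFSET_TABLE_ defined at its start.
9 kx .got.plt : bed->got_header_size bytes of reserved header,
9 kx conventionally used for the address of _DYNAMIC
9 kx plus two slots filled in by the dynamic linker
9 kx for lazy PLT resolution. */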
9 kx
9 kx /* Look through the relocs for a section during the first phase. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
9 kx asection *sec, const Elf_Internal_Rela *relocs)
9 kx {
9 kx Elf_Internal_Shdr *symtab_hdr;
9 kx struct elf_link_hash_entry **sym_hashes;
9 kx const Elf_Internal_Rela *rel;
9 kx const Elf_Internal_Rela *rel_end;
9 kx asection *sreloc;
9 kx
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx
9 kx if (bfd_link_relocatable (info))
9 kx return true;
9 kx
9 kx BFD_ASSERT (is_aarch64_elf (abfd));
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx sreloc = NULL;
9 kx
9 kx symtab_hdr = &elf_symtab_hdr (abfd);
9 kx sym_hashes = elf_sym_hashes (abfd);
9 kx
9 kx rel_end = relocs + sec->reloc_count;
9 kx for (rel = relocs; rel < rel_end; rel++)
9 kx {
9 kx struct elf_link_hash_entry *h;
9 kx unsigned int r_symndx;
9 kx unsigned int r_type;
9 kx bfd_reloc_code_real_type bfd_r_type;
9 kx Elf_Internal_Sym *isym;
9 kx
9 kx r_symndx = ELFNN_R_SYM (rel->r_info);
9 kx r_type = ELFNN_R_TYPE (rel->r_info);
9 kx
9 kx if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
9 kx {
9 kx /* xgettext:c-format */
9 kx _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
9 kx return false;
9 kx }
9 kx
9 kx if (r_symndx < symtab_hdr->sh_info)
9 kx {
9 kx /* A local symbol. */
9 kx isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
9 kx abfd, r_symndx);
9 kx if (isym == NULL)
9 kx return false;
9 kx
9 kx /* Check relocation against local STT_GNU_IFUNC symbol. */
9 kx if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
9 kx {
9 kx h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
9 kx true);
9 kx if (h == NULL)
9 kx return false;
9 kx
9 kx /* Fake a STT_GNU_IFUNC symbol. */
9 kx h->type = STT_GNU_IFUNC;
9 kx h->def_regular = 1;
9 kx h->ref_regular = 1;
9 kx h->forced_local = 1;
9 kx h->root.type = bfd_link_hash_defined;
9 kx }
9 kx else
9 kx h = NULL;
9 kx }
9 kx else
9 kx {
9 kx h = sym_hashes[r_symndx - symtab_hdr->sh_info];
9 kx while (h->root.type == bfd_link_hash_indirect
9 kx || h->root.type == bfd_link_hash_warning)
9 kx h = (struct elf_link_hash_entry *) h->root.u.i.link;
9 kx }
9 kx
9 kx /* Could be done earlier, if h were already available. */
9 kx bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
9 kx
9 kx if (h != NULL)
9 kx {
9 kx /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
9 kx This shows up in particular in an R_AARCH64_PREL64 in large model
9 kx when calculating the pc-relative address to .got section which is
9 kx used to initialize the gp register. */
9 kx if (h->root.root.string
9 kx && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
9 kx {
9 kx if (htab->root.dynobj == NULL)
9 kx htab->root.dynobj = abfd;
9 kx
9 kx if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
9 kx return false;
9 kx
9 kx BFD_ASSERT (h == htab->root.hgot);
9 kx }
9 kx
9 kx /* Create the ifunc sections for static executables. If we
9 kx never see an indirect function symbol nor are we building
9 kx a static executable, those sections will be empty and
9 kx won't appear in the output. */
9 kx switch (bfd_r_type)
9 kx {
9 kx default:
9 kx break;
9 kx
9 kx case BFD_RELOC_AARCH64_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9 kx case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9 kx case BFD_RELOC_AARCH64_CALL26:
9 kx case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_JUMP26:
9 kx case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9 kx case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9 kx case BFD_RELOC_AARCH64_NN:
9 kx if (htab->root.dynobj == NULL)
9 kx htab->root.dynobj = abfd;
9 kx if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
9 kx return false;
9 kx break;
9 kx }
9 kx
9 kx /* It is referenced by a non-shared object. */
9 kx h->ref_regular = 1;
9 kx }
9 kx
9 kx switch (bfd_r_type)
9 kx {
9 kx case BFD_RELOC_AARCH64_16:
9 kx #if ARCH_SIZE == 64
9 kx case BFD_RELOC_AARCH64_32:
9 kx #endif
9 kx if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0)
9 kx {
9 kx if (h != NULL
9 kx /* This is an absolute symbol. It represents a value instead
9 kx of an address. */
9 kx && (bfd_is_abs_symbol (&h->root)
9 kx /* This is an undefined symbol. */
9 kx || h->root.type == bfd_link_hash_undefined))
9 kx break;
9 kx
9 kx /* For local symbols and global symbols defined in a non-ABS
9 kx section, it is assumed that the value is an address. */
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: relocation %s against `%s' can not be used when making "
9 kx "a shared object"),
9 kx abfd, elfNN_aarch64_howto_table[howto_index].name,
9 kx (h) ? h->root.root.string : "a local symbol");
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return false;
9 kx }
9 kx else
9 kx break;
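9 kx
9 kx /* A sketch of the case rejected above: assembling
9 kx
9 kx .hword foo
9 kx
9 kx produces an R_AARCH64_ABS16 against foo; in a shared object the
9 kx final address is only known at load time, and there is no 16-bit
9 kx dynamic relocation (load addresses need not fit in 16 bits), so
9 kx the link must fail here rather than at run time. */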
9 kx
9 kx case BFD_RELOC_AARCH64_MOVW_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_G1_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_G2_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_G3:
9 kx if (bfd_link_pic (info))
9 kx {
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx _bfd_error_handler
9 kx /* xgettext:c-format */
9 kx (_("%pB: relocation %s against `%s' can not be used when making "
9 kx "a shared object; recompile with -fPIC"),
9 kx abfd, elfNN_aarch64_howto_table[howto_index].name,
9 kx (h) ? h->root.root.string : "a local symbol");
9 kx bfd_set_error (bfd_error_bad_value);
9 kx return false;
9 kx }
9 kx /* Fall through. */
9 kx
9 kx case BFD_RELOC_AARCH64_16_PCREL:
9 kx case BFD_RELOC_AARCH64_32_PCREL:
9 kx case BFD_RELOC_AARCH64_64_PCREL:
9 kx case BFD_RELOC_AARCH64_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9 kx case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9 kx case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
9 kx case BFD_RELOC_AARCH64_LDST128_LO12:
9 kx case BFD_RELOC_AARCH64_LDST16_LO12:
9 kx case BFD_RELOC_AARCH64_LDST32_LO12:
9 kx case BFD_RELOC_AARCH64_LDST64_LO12:
9 kx case BFD_RELOC_AARCH64_LDST8_LO12:
9 kx case BFD_RELOC_AARCH64_LD_LO19_PCREL:
9 kx if (h == NULL || bfd_link_pic (info))
9 kx break;
9 kx /* Fall through. */
9 kx
9 kx case BFD_RELOC_AARCH64_NN:
9 kx
9 kx /* We don't need to handle relocs into sections not going into
9 kx the "real" output. */
9 kx if ((sec->flags & SEC_ALLOC) == 0)
9 kx break;
9 kx
9 kx if (h != NULL)
9 kx {
9 kx if (!bfd_link_pic (info))
9 kx h->non_got_ref = 1;
9 kx
9 kx h->plt.refcount += 1;
9 kx h->pointer_equality_needed = 1;
9 kx }
9 kx
9 kx /* No need to do anything if we're not creating a shared
9 kx object. */
9 kx if (!(bfd_link_pic (info)
9 kx /* If on the other hand, we are creating an executable, we
9 kx may need to keep relocations for symbols satisfied by a
9 kx dynamic library if we manage to avoid copy relocs for the
9 kx symbol.
9 kx
9 kx NOTE: Currently, there is no support for copy-reloc
9 kx elimination on pc-relative relocation types, because glibc
9 kx has no dynamic relocation support for them. We still
9 kx record the dynamic symbol reference for them. This is
9 kx because one symbol may be referenced by both absolute
9 kx relocation (for example, BFD_RELOC_AARCH64_NN) and
9 kx pc-relative relocation. We need full symbol reference
9 kx information to make correct decision later in
9 kx elfNN_aarch64_adjust_dynamic_symbol. */
9 kx || (ELIMINATE_COPY_RELOCS
9 kx && !bfd_link_pic (info)
9 kx && h != NULL
9 kx && (h->root.type == bfd_link_hash_defweak
9 kx || !h->def_regular))))
9 kx break;
9 kx
9 kx {
9 kx struct elf_dyn_relocs *p;
9 kx struct elf_dyn_relocs **head;
9 kx int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
9 kx
9 kx /* We must copy these reloc types into the output file.
9 kx Create a reloc section in dynobj and make room for
9 kx this reloc. */
9 kx if (sreloc == NULL)
9 kx {
9 kx if (htab->root.dynobj == NULL)
9 kx htab->root.dynobj = abfd;
9 kx
9 kx sreloc = _bfd_elf_make_dynamic_reloc_section
9 kx (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
9 kx
9 kx if (sreloc == NULL)
9 kx return false;
9 kx }
9 kx
9 kx /* If this is a global symbol, we count the number of
9 kx relocations we need for this symbol. */
9 kx if (h != NULL)
9 kx {
9 kx head = &h->dyn_relocs;
9 kx }
9 kx else
9 kx {
9 kx /* Track dynamic relocs needed for local syms too.
9 kx We really need local syms available to do this
9 kx easily. Oh well. */
9 kx
9 kx asection *s;
9 kx void **vpp;
9 kx
9 kx isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
9 kx abfd, r_symndx);
9 kx if (isym == NULL)
9 kx return false;
9 kx
9 kx s = bfd_section_from_elf_index (abfd, isym->st_shndx);
9 kx if (s == NULL)
9 kx s = sec;
9 kx
9 kx /* Beware of type punned pointers vs strict aliasing
9 kx rules. */
9 kx vpp = &(elf_section_data (s)->local_dynrel);
9 kx head = (struct elf_dyn_relocs **) vpp;
9 kx }
9 kx
9 kx p = *head;
9 kx if (p == NULL || p->sec != sec)
9 kx {
9 kx size_t amt = sizeof *p;
9 kx p = ((struct elf_dyn_relocs *)
9 kx bfd_zalloc (htab->root.dynobj, amt));
9 kx if (p == NULL)
9 kx return false;
9 kx p->next = *head;
9 kx *head = p;
9 kx p->sec = sec;
9 kx }
9 kx
9 kx p->count += 1;
9 kx
9 kx if (elfNN_aarch64_howto_table[howto_index].pc_relative)
9 kx p->pc_count += 1;
9 kx }
9 kx break;
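9 kx
9 kx /* A sketch of the bookkeeping above: if a non-PIC executable has,
9 kx in one input section SEC, two R_AARCH64_ABS64 relocs and one
9 kx R_AARCH64_PREL64 reloc against a symbol defined only in a shared
9 kx library, a single elf_dyn_relocs entry ends up on h->dyn_relocs
9 kx with sec == SEC, count == 3 and pc_count == 1;
9 kx need_copy_relocation_p and the sizing code consult these totals
9 kx later. */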
9 kx
9 kx /* RR: We probably want to keep a consistency check that
9 kx there are no dangling GOT_PAGE relocs. */
9 kx case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9 kx case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9 kx case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9 kx case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9 kx case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9 kx case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9 kx case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9 kx case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9 kx {
9 kx unsigned got_type;
9 kx unsigned old_got_type;
9 kx
9 kx got_type = aarch64_reloc_got_type (bfd_r_type);
9 kx
9 kx if (h)
9 kx {
9 kx h->got.refcount += 1;
9 kx old_got_type = elf_aarch64_hash_entry (h)->got_type;
9 kx }
9 kx else
9 kx {
9 kx struct elf_aarch64_local_symbol *locals;
9 kx
9 kx if (!elfNN_aarch64_allocate_local_symbols
9 kx (abfd, symtab_hdr->sh_info))
9 kx return false;
9 kx
9 kx locals = elf_aarch64_locals (abfd);
9 kx BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
9 kx locals[r_symndx].got_refcount += 1;
9 kx old_got_type = locals[r_symndx].got_type;
9 kx }
9 kx
9 kx /* If a variable is accessed with both general dynamic TLS
9 kx methods, two slots may be created. */
9 kx if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
9 kx got_type |= old_got_type;
9 kx
9 kx /* We will already have issued an error message if there
9 kx is a TLS/non-TLS mismatch, based on the symbol type.
9 kx So just combine any TLS types needed. */
9 kx if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
9 kx && got_type != GOT_NORMAL)
9 kx got_type |= old_got_type;
9 kx
9 kx /* If the symbol is accessed by both IE and GD methods, we
9 kx are able to relax. Turn off the GD flag, without
9 kx messing up with any other kind of TLS types that may be
9 kx involved. */
9 kx if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
9 kx got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
9 kx
9 kx if (old_got_type != got_type)
9 kx {
9 kx if (h != NULL)
9 kx elf_aarch64_hash_entry (h)->got_type = got_type;
9 kx else
9 kx {
9 kx struct elf_aarch64_local_symbol *locals;
9 kx locals = elf_aarch64_locals (abfd);
9 kx BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
9 kx locals[r_symndx].got_type = got_type;
9 kx }
9 kx }
9 kx
9 kx if (htab->root.dynobj == NULL)
9 kx htab->root.dynobj = abfd;
9 kx if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
9 kx return false;
9 kx break;
9 kx }
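9 kx
9 kx /* A worked example of the merging above: a symbol first seen with
9 kx an IE access (old_got_type == GOT_TLS_IE) and later with a TLS
9 kx descriptor access (got_type == GOT_TLSDESC_GD) combines to
9 kx GOT_TLS_IE | GOT_TLSDESC_GD, and the final test then clears the
9 kx GD bits; only the single IE GOT slot is allocated and the
9 kx descriptor sequences can later be relaxed to the IE form. */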
9 kx
9 kx case BFD_RELOC_AARCH64_CALL26:
9 kx case BFD_RELOC_AARCH64_JUMP26:
9 kx /* If this is a local symbol then we resolve it
9 kx directly without creating a PLT entry. */
9 kx if (h == NULL)
9 kx continue;
9 kx
9 kx h->needs_plt = 1;
9 kx if (h->plt.refcount <= 0)
9 kx h->plt.refcount = 1;
9 kx else
9 kx h->plt.refcount += 1;
9 kx break;
9 kx
9 kx default:
9 kx break;
9 kx }
9 kx }
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Treat mapping symbols as special target symbols. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
9 kx asymbol *sym)
9 kx {
9 kx return bfd_is_aarch64_special_symbol_name (sym->name,
9 kx BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
9 kx }
9 kx
9 kx /* If the ELF symbol SYM might be a function in SEC, return the
9 kx function size and set *CODE_OFF to the function's entry point,
9 kx otherwise return zero. */
9 kx
9 kx static bfd_size_type
9 kx elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
9 kx bfd_vma *code_off)
9 kx {
9 kx bfd_size_type size;
9 kx elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
9 kx
9 kx if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
9 kx | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
9 kx || sym->section != sec)
9 kx return 0;
9 kx
9 kx size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
9 kx
9 kx if (!(sym->flags & BSF_SYNTHETIC))
9 kx switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
9 kx {
9 kx case STT_NOTYPE:
9 kx /* Ignore symbols created by the annobin plugin for gcc and clang.
9 kx These symbols are hidden, local, notype and have a size of 0. */
9 kx if (size == 0
9 kx && sym->flags & BSF_LOCAL
9 kx && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
9 kx return 0;
9 kx /* Fall through. */
9 kx case STT_FUNC:
9 kx /* FIXME: Allow STT_GNU_IFUNC as well ? */
9 kx break;
9 kx default:
9 kx return 0;
9 kx }
9 kx
9 kx if ((sym->flags & BSF_LOCAL)
9 kx && bfd_is_aarch64_special_symbol_name (sym->name,
9 kx BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
9 kx return 0;
9 kx
9 kx *code_off = sym->value;
9 kx
9 kx /* Do not return 0 for the function's size. */
9 kx return size ? size : 1;
9 kx }
9 kx
9 kx static bool
9 kx elfNN_aarch64_find_inliner_info (bfd *abfd,
9 kx const char **filename_ptr,
9 kx const char **functionname_ptr,
9 kx unsigned int *line_ptr)
9 kx {
9 kx bool found;
9 kx found = _bfd_dwarf2_find_inliner_info
9 kx (abfd, filename_ptr,
9 kx functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
9 kx return found;
9 kx }
9 kx
9 kx
9 kx static bool
9 kx elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
9 kx {
9 kx Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
9 kx
9 kx if (!_bfd_elf_init_file_header (abfd, link_info))
9 kx return false;
9 kx
9 kx i_ehdrp = elf_elfheader (abfd);
9 kx i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
9 kx return true;
9 kx }
9 kx
9 kx static enum elf_reloc_type_class
9 kx elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info,
9 kx const asection *rel_sec ATTRIBUTE_UNUSED,
9 kx const Elf_Internal_Rela *rela)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
9 kx
9 kx if (htab->root.dynsym != NULL
9 kx && htab->root.dynsym->contents != NULL)
9 kx {
9 kx /* Check relocation against STT_GNU_IFUNC symbol if there are
9 kx dynamic symbols. */
9 kx bfd *abfd = info->output_bfd;
9 kx const struct elf_backend_data *bed = get_elf_backend_data (abfd);
9 kx unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
9 kx if (r_symndx != STN_UNDEF)
9 kx {
9 kx Elf_Internal_Sym sym;
9 kx if (!bed->s->swap_symbol_in (abfd,
9 kx (htab->root.dynsym->contents
9 kx + r_symndx * bed->s->sizeof_sym),
9 kx 0, &sym))
9 kx {
9 kx /* xgettext:c-format */
9 kx _bfd_error_handler (_("%pB symbol number %lu references"
9 kx " nonexistent SHT_SYMTAB_SHNDX section"),
9 kx abfd, r_symndx);
9 kx /* Ideally an error class should be returned here. */
9 kx }
9 kx else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
9 kx return reloc_class_ifunc;
9 kx }
9 kx }
9 kx
9 kx switch ((int) ELFNN_R_TYPE (rela->r_info))
9 kx {
9 kx case AARCH64_R (IRELATIVE):
9 kx return reloc_class_ifunc;
9 kx case AARCH64_R (RELATIVE):
9 kx return reloc_class_relative;
9 kx case AARCH64_R (JUMP_SLOT):
9 kx return reloc_class_plt;
9 kx case AARCH64_R (COPY):
9 kx return reloc_class_copy;
9 kx default:
9 kx return reloc_class_normal;
9 kx }
9 kx }
9 kx
9 kx /* Handle an AArch64 specific section when reading an object file. This is
9 kx called when bfd_section_from_shdr finds a section with an unknown
9 kx type. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_section_from_shdr (bfd *abfd,
9 kx Elf_Internal_Shdr *hdr,
9 kx const char *name, int shindex)
9 kx {
9 kx /* There ought to be a place to keep ELF backend specific flags, but
9 kx at the moment there isn't one. We just keep track of the
9 kx sections by their name, instead. Fortunately, the ABI gives
9 kx names for all the AArch64 specific sections, so we will probably get
9 kx away with this. */
9 kx switch (hdr->sh_type)
9 kx {
9 kx case SHT_AARCH64_ATTRIBUTES:
9 kx break;
9 kx
9 kx default:
9 kx return false;
9 kx }
9 kx
9 kx if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
9 kx return false;
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Process any AArch64-specific program segment types. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_section_from_phdr (bfd *abfd ATTRIBUTE_UNUSED,
9 kx Elf_Internal_Phdr *hdr,
9 kx int hdr_index ATTRIBUTE_UNUSED,
9 kx const char *name ATTRIBUTE_UNUSED)
9 kx {
9 kx /* Right now we only handle the PT_AARCH64_MEMTAG_MTE segment type. */
9 kx if (hdr == NULL || hdr->p_type != PT_AARCH64_MEMTAG_MTE)
9 kx return false;
9 kx
9 kx if (hdr->p_filesz > 0)
9 kx {
9 kx /* Sections created from memory tag p_type's are always named
9 kx "memtag". This makes it easier for tools (for example, GDB)
9 kx to find them. */
9 kx asection *newsect = bfd_make_section_anyway (abfd, "memtag");
9 kx
9 kx if (newsect == NULL)
9 kx return false;
9 kx
9 kx unsigned int opb = bfd_octets_per_byte (abfd, NULL);
9 kx
9 kx /* p_vaddr holds the original start address of the tagged memory
9 kx range. */
9 kx newsect->vma = hdr->p_vaddr / opb;
9 kx
9 kx /* p_filesz holds the storage size of the packed tags. */
9 kx newsect->size = hdr->p_filesz;
9 kx newsect->filepos = hdr->p_offset;
9 kx
9 kx /* p_memsz holds the size of the memory range that contains tags. The
9 kx section's rawsize field is reused for this purpose. */
9 kx newsect->rawsize = hdr->p_memsz;
9 kx
9 kx /* Make sure the section's flags has SEC_HAS_CONTENTS set, otherwise
9 kx BFD will return all zeroes when attempting to get contents from this
9 kx section. */
9 kx newsect->flags |= SEC_HAS_CONTENTS;
9 kx }
9 kx
9 kx return true;
9 kx }
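9 kx
9 kx /* A worked example (values illustrative): a core file segment with
9 kx
9 kx p_type PT_AARCH64_MEMTAG_MTE
9 kx p_vaddr 0xfffff7a00000 start of the tagged range
9 kx p_memsz 0x10000 bytes of tagged memory
9 kx p_filesz 0x800 bytes of packed tags (4 bits per
9 kx 16-byte granule)
9 kx
9 kx yields a "memtag" section with vma == p_vaddr, size == 0x800 and
9 kx rawsize == 0x10000, giving readers such as GDB both the tag
9 kx payload and the memory range it describes. */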
9 kx
9 kx /* Implements the bfd_elf_modify_headers hook for aarch64. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_modify_headers (bfd *abfd,
9 kx struct bfd_link_info *info)
9 kx {
9 kx struct elf_segment_map *m;
9 kx unsigned int segment_count = 0;
9 kx Elf_Internal_Phdr *p;
9 kx
9 kx for (m = elf_seg_map (abfd); m != NULL; m = m->next, segment_count++)
9 kx {
9 kx /* We are only interested in the memory tag segment that will be dumped
9 kx to a core file. If we have no memory tags or this isn't a core file we
9 kx are dealing with, just skip this segment. */
9 kx if (m->p_type != PT_AARCH64_MEMTAG_MTE
9 kx || bfd_get_format (abfd) != bfd_core)
9 kx continue;
9 kx
9 kx /* For memory tag segments in core files, the size of the file contents
9 kx is smaller than the size of the memory range. Adjust the memory size
9 kx accordingly. The real memory size is held in the section's rawsize
9 kx field. */
9 kx if (m->count > 0)
9 kx {
9 kx p = elf_tdata (abfd)->phdr;
9 kx p += m->idx;
9 kx p->p_memsz = m->sections[0]->rawsize;
9 kx p->p_flags = 0;
9 kx p->p_paddr = 0;
9 kx p->p_align = 0;
9 kx }
9 kx }
9 kx
9 kx /* Give the generic code a chance to handle the headers. */
9 kx return _bfd_elf_modify_headers (abfd, info);
9 kx }
9 kx
9 kx /* A structure used to record a list of sections, independently
9 kx of the next and prev fields in the asection structure. */
9 kx typedef struct section_list
9 kx {
9 kx asection *sec;
9 kx struct section_list *next;
9 kx struct section_list *prev;
9 kx }
9 kx section_list;
9 kx
9 kx /* Unfortunately we need to keep a list of sections for which
9 kx an _aarch64_elf_section_data structure has been allocated. This
9 kx is because it is possible for functions like elfNN_aarch64_write_section
9 kx to be called on a section which has had an ELF section data structure
9 kx allocated for it (and so the used_by_bfd field is valid) but
9 kx for which the AArch64 extended version of this structure - the
9 kx _aarch64_elf_section_data structure - has not been allocated. */
9 kx static section_list *sections_with_aarch64_elf_section_data = NULL;
9 kx
9 kx static void
9 kx record_section_with_aarch64_elf_section_data (asection *sec)
9 kx {
9 kx struct section_list *entry;
9 kx
9 kx entry = bfd_malloc (sizeof (*entry));
9 kx if (entry == NULL)
9 kx return;
9 kx entry->sec = sec;
9 kx entry->next = sections_with_aarch64_elf_section_data;
9 kx entry->prev = NULL;
9 kx if (entry->next != NULL)
9 kx entry->next->prev = entry;
9 kx sections_with_aarch64_elf_section_data = entry;
9 kx }
9 kx
9 kx static struct section_list *
9 kx find_aarch64_elf_section_entry (asection *sec)
9 kx {
9 kx struct section_list *entry;
9 kx static struct section_list *last_entry = NULL;
9 kx
9 kx /* This is a short cut for the typical case where the sections are added
9 kx to the sections_with_aarch64_elf_section_data list in forward order and
9 kx then looked up here in backwards order. This makes a real difference
9 kx to the ld-srec/sec64k.exp linker test. */
9 kx entry = sections_with_aarch64_elf_section_data;
9 kx if (last_entry != NULL)
9 kx {
9 kx if (last_entry->sec == sec)
9 kx entry = last_entry;
9 kx else if (last_entry->next != NULL && last_entry->next->sec == sec)
9 kx entry = last_entry->next;
9 kx }
9 kx
9 kx for (; entry; entry = entry->next)
9 kx if (entry->sec == sec)
9 kx break;
9 kx
9 kx if (entry)
9 kx /* Record the entry prior to this one - it is the entry we are
9 kx most likely to want to locate next time. Also this way if we
9 kx have been called from
9 kx unrecord_section_with_aarch64_elf_section_data () we will not
9 kx be caching a pointer that is about to be freed. */
9 kx last_entry = entry->prev;
9 kx
9 kx return entry;
9 kx }
9 kx
9 kx static void
9 kx unrecord_section_with_aarch64_elf_section_data (asection *sec)
9 kx {
9 kx struct section_list *entry;
9 kx
9 kx entry = find_aarch64_elf_section_entry (sec);
9 kx
9 kx if (entry)
9 kx {
9 kx if (entry->prev != NULL)
9 kx entry->prev->next = entry->next;
9 kx if (entry->next != NULL)
9 kx entry->next->prev = entry->prev;
9 kx if (entry == sections_with_aarch64_elf_section_data)
9 kx sections_with_aarch64_elf_section_data = entry->next;
9 kx free (entry);
9 kx }
9 kx }
9 kx
9 kx
9 kx typedef struct
9 kx {
9 kx void *finfo;
9 kx struct bfd_link_info *info;
9 kx asection *sec;
9 kx int sec_shndx;
9 kx int (*func) (void *, const char *, Elf_Internal_Sym *,
9 kx asection *, struct elf_link_hash_entry *);
9 kx } output_arch_syminfo;
9 kx
9 kx enum map_symbol_type
9 kx {
9 kx AARCH64_MAP_INSN,
9 kx AARCH64_MAP_DATA
9 kx };
9 kx
9 kx
9 kx /* Output a single mapping symbol. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
9 kx enum map_symbol_type type, bfd_vma offset)
9 kx {
9 kx static const char *names[2] = { "$x", "$d" };
9 kx Elf_Internal_Sym sym;
9 kx
9 kx sym.st_value = (osi->sec->output_section->vma
9 kx + osi->sec->output_offset + offset);
9 kx sym.st_size = 0;
9 kx sym.st_other = 0;
9 kx sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
9 kx sym.st_shndx = osi->sec_shndx;
9 kx return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
9 kx }
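9 kx
9 kx /* For example (a sketch), the long branch stub mapped below gets
9 kx
9 kx $x at stub + 0 the branch instruction sequence
9 kx $d at stub + 16 an 8-byte address literal used by the branch
9 kx
9 kx matching aarch64_map_one_stub, so that disassemblers switch
9 kx correctly between instruction and data views inside linker
9 kx generated stubs. */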
9 kx
9 kx /* Output a single local symbol for a generated stub. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
9 kx bfd_vma offset, bfd_vma size)
9 kx {
9 kx Elf_Internal_Sym sym;
9 kx
9 kx sym.st_value = (osi->sec->output_section->vma
9 kx + osi->sec->output_offset + offset);
9 kx sym.st_size = size;
9 kx sym.st_other = 0;
9 kx sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
9 kx sym.st_shndx = osi->sec_shndx;
9 kx return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
9 kx }
9 kx
9 kx static bool
9 kx aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
9 kx {
9 kx struct elf_aarch64_stub_hash_entry *stub_entry;
9 kx asection *stub_sec;
9 kx bfd_vma addr;
9 kx char *stub_name;
9 kx output_arch_syminfo *osi;
9 kx
9 kx /* Massage our args to the form they really have. */
9 kx stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
9 kx osi = (output_arch_syminfo *) in_arg;
9 kx
9 kx stub_sec = stub_entry->stub_sec;
9 kx
9 kx /* Ensure this stub is attached to the current section being
9 kx processed. */
9 kx if (stub_sec != osi->sec)
9 kx return true;
9 kx
9 kx addr = (bfd_vma) stub_entry->stub_offset;
9 kx
9 kx stub_name = stub_entry->output_name;
9 kx
9 kx switch (stub_entry->stub_type)
9 kx {
9 kx case aarch64_stub_adrp_branch:
9 kx if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
9 kx sizeof (aarch64_adrp_branch_stub)))
9 kx return false;
9 kx if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
9 kx return false;
9 kx break;
9 kx case aarch64_stub_long_branch:
9 kx if (!elfNN_aarch64_output_stub_sym
9 kx (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
9 kx return false;
9 kx if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
9 kx return false;
9 kx if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
9 kx return false;
9 kx break;
9 kx case aarch64_stub_erratum_835769_veneer:
9 kx if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
9 kx sizeof (aarch64_erratum_835769_stub)))
9 kx return false;
9 kx if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
9 kx return false;
9 kx break;
9 kx case aarch64_stub_erratum_843419_veneer:
9 kx if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
9 kx sizeof (aarch64_erratum_843419_stub)))
9 kx return false;
9 kx if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
9 kx return false;
9 kx break;
9 kx case aarch64_stub_none:
9 kx break;
9 kx
9 kx default:
9 kx abort ();
9 kx }
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Output mapping symbols for linker generated sections. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
9 kx struct bfd_link_info *info,
9 kx void *finfo,
9 kx int (*func) (void *, const char *,
9 kx Elf_Internal_Sym *,
9 kx asection *,
9 kx struct elf_link_hash_entry
9 kx *))
9 kx {
9 kx output_arch_syminfo osi;
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx
9 kx if (info->strip == strip_all
9 kx && !info->emitrelocations
9 kx && !bfd_link_relocatable (info))
9 kx return true;
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx osi.finfo = finfo;
9 kx osi.info = info;
9 kx osi.func = func;
9 kx
9 kx /* Long calls stubs. */
9 kx if (htab->stub_bfd && htab->stub_bfd->sections)
9 kx {
9 kx asection *stub_sec;
9 kx
9 kx for (stub_sec = htab->stub_bfd->sections;
9 kx stub_sec != NULL; stub_sec = stub_sec->next)
9 kx {
9 kx /* Ignore non-stub sections. */
9 kx if (!strstr (stub_sec->name, STUB_SUFFIX))
9 kx continue;
9 kx
9 kx osi.sec = stub_sec;
9 kx
9 kx osi.sec_shndx = _bfd_elf_section_from_bfd_section
9 kx (output_bfd, osi.sec->output_section);
9 kx
9 kx /* The first instruction in a stub is always a branch. */
9 kx if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
9 kx return false;
9 kx
9 kx bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
9 kx &osi);
9 kx }
9 kx }
9 kx
9 kx /* Finally, output mapping symbols for the PLT. */
9 kx if (!htab->root.splt || htab->root.splt->size == 0)
9 kx return true;
9 kx
9 kx osi.sec_shndx = _bfd_elf_section_from_bfd_section
9 kx (output_bfd, htab->root.splt->output_section);
9 kx osi.sec = htab->root.splt;
9 kx
9 kx elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Allocate target specific section data. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
9 kx {
9 kx if (!sec->used_by_bfd)
9 kx {
9 kx _aarch64_elf_section_data *sdata;
9 kx size_t amt = sizeof (*sdata);
9 kx
9 kx sdata = bfd_zalloc (abfd, amt);
9 kx if (sdata == NULL)
9 kx return false;
9 kx sec->used_by_bfd = sdata;
9 kx }
9 kx
9 kx record_section_with_aarch64_elf_section_data (sec);
9 kx
9 kx return _bfd_elf_new_section_hook (abfd, sec);
9 kx }
9 kx
9 kx
9 kx static void
9 kx unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
9 kx asection *sec,
9 kx void *ignore ATTRIBUTE_UNUSED)
9 kx {
9 kx unrecord_section_with_aarch64_elf_section_data (sec);
9 kx }
9 kx
9 kx static bool
9 kx elfNN_aarch64_close_and_cleanup (bfd *abfd)
9 kx {
9 kx if (abfd->sections)
9 kx bfd_map_over_sections (abfd,
9 kx unrecord_section_via_map_over_sections, NULL);
9 kx
9 kx return _bfd_elf_close_and_cleanup (abfd);
9 kx }
9 kx
9 kx static bool
9 kx elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
9 kx {
9 kx if (abfd->sections)
9 kx bfd_map_over_sections (abfd,
9 kx unrecord_section_via_map_over_sections, NULL);
9 kx
9 kx return _bfd_free_cached_info (abfd);
9 kx }
9 kx
9 kx /* Create dynamic sections. This is different from the ARM backend in that
9 kx the got, plt, gotplt and their relocation sections are all created in the
9 kx standard part of the bfd elf backend. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
9 kx struct bfd_link_info *info)
9 kx {
9 kx /* We need to create .got section. */
9 kx if (!aarch64_elf_create_got_section (dynobj, info))
9 kx return false;
9 kx
9 kx return _bfd_elf_create_dynamic_sections (dynobj, info);
9 kx }
9 kx
9 kx
9 kx /* Allocate space in .plt, .got and associated reloc sections for
9 kx dynamic relocs. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
9 kx {
9 kx struct bfd_link_info *info;
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx struct elf_aarch64_link_hash_entry *eh;
9 kx struct elf_dyn_relocs *p;
9 kx
9 kx /* An example of a bfd_link_hash_indirect symbol is a versioned
9 kx symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
9 kx -> __gxx_personality_v0(bfd_link_hash_defined)
9 kx
9 kx There is no need to process bfd_link_hash_indirect symbols here
9 kx because we will also be presented with the concrete instance of
9 kx the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
9 kx called to copy all relevant data from the generic to the concrete
9 kx symbol instance. */
9 kx if (h->root.type == bfd_link_hash_indirect)
9 kx return true;
9 kx
9 kx if (h->root.type == bfd_link_hash_warning)
9 kx h = (struct elf_link_hash_entry *) h->root.u.i.link;
9 kx
9 kx info = (struct bfd_link_info *) inf;
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
9 kx here if it is defined and referenced in a non-shared object. */
9 kx if (h->type == STT_GNU_IFUNC
9 kx && h->def_regular)
9 kx return true;
9 kx else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
9 kx {
9 kx /* Make sure this symbol is output as a dynamic symbol.
9 kx Undefined weak syms won't yet be marked as dynamic. */
9 kx if (h->dynindx == -1 && !h->forced_local
9 kx && h->root.type == bfd_link_hash_undefweak)
9 kx {
9 kx if (!bfd_elf_link_record_dynamic_symbol (info, h))
9 kx return false;
9 kx }
9 kx
9 kx if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
9 kx {
9 kx asection *s = htab->root.splt;
9 kx
9 kx /* If this is the first .plt entry, make room for the special
9 kx first entry. */
9 kx if (s->size == 0)
9 kx s->size += htab->plt_header_size;
9 kx
9 kx h->plt.offset = s->size;
9 kx
9 kx /* If this symbol is not defined in a regular file, and we are
9 kx not generating a shared library, then set the symbol to this
9 kx location in the .plt. This is required to make function
9 kx pointers compare as equal between the normal executable and
9 kx the shared library. */
9 kx if (!bfd_link_pic (info) && !h->def_regular)
9 kx {
9 kx h->root.u.def.section = s;
9 kx h->root.u.def.value = h->plt.offset;
9 kx }
9 kx
9 kx /* Make room for this entry. For now we only create the
9 kx small model PLT entries. We later need to find a way
9 kx of relaxing into these from the large model PLT entries. */
9 kx s->size += htab->plt_entry_size;
9 kx
9 kx /* We also need to make an entry in the .got.plt section, which
9 kx will be placed in the .got section by the linker script. */
9 kx htab->root.sgotplt->size += GOT_ENTRY_SIZE;
9 kx
9 kx /* We also need to make an entry in the .rela.plt section. */
9 kx htab->root.srelplt->size += RELOC_SIZE (htab);
9 kx
9 kx /* We need to ensure that all GOT entries that serve the PLT
9 kx are consecutive with the special GOT slots [0] [1] and
9 kx [2]. Any additional relocations, such as
9 kx R_AARCH64_TLSDESC, must be placed after the PLT related
9 kx entries. We abuse reloc_count such that during sizing we
9 kx adjust it to indicate the number of PLT related reserved
9 kx entries. In subsequent phases, when filling in the contents
9 kx of the reloc entries, PLT related entries are placed by
9 kx computing their PLT index (0 .. reloc_count), while other,
9 kx non-PLT, relocs are placed at the slot indicated by
9 kx reloc_count, and reloc_count is updated. */
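9 kx
9 kx /* Illustration of the layout just described (an editorial sketch,
9 kx not authoritative): with two PLT entries and one TLSDESC user,
9 kx .got.plt would hold
9 kx
9 kx slot [0] [1] [2]  reserved for the dynamic linker
9 kx slot [3] [4]      PLT jump slots, indexed 0 .. reloc_count - 1
9 kx slot [5] [6]      a TLSDESC double entry, placed after the
9 kx                   PLT related slots. */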
9 kx
9 kx htab->root.srelplt->reloc_count++;
9 kx
9 kx /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
9 kx variant PCS symbols are present. */
9 kx if (h->other & STO_AARCH64_VARIANT_PCS)
9 kx htab->variant_pcs = 1;
9 kx
9 kx }
9 kx else
9 kx {
9 kx h->plt.offset = (bfd_vma) - 1;
9 kx h->needs_plt = 0;
9 kx }
9 kx }
9 kx else
9 kx {
9 kx h->plt.offset = (bfd_vma) - 1;
9 kx h->needs_plt = 0;
9 kx }
9 kx
9 kx eh = (struct elf_aarch64_link_hash_entry *) h;
9 kx eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9 kx
9 kx if (h->got.refcount > 0)
9 kx {
9 kx bool dyn;
9 kx unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
9 kx
9 kx h->got.offset = (bfd_vma) - 1;
9 kx
9 kx dyn = htab->root.dynamic_sections_created;
9 kx
9 kx /* Make sure this symbol is output as a dynamic symbol.
9 kx Undefined weak syms won't yet be marked as dynamic. */
9 kx if (dyn && h->dynindx == -1 && !h->forced_local
9 kx && h->root.type == bfd_link_hash_undefweak)
9 kx {
9 kx if (!bfd_elf_link_record_dynamic_symbol (info, h))
9 kx return false;
9 kx }
9 kx
9 kx if (got_type == GOT_UNKNOWN)
9 kx {
9 kx }
9 kx else if (got_type == GOT_NORMAL)
9 kx {
9 kx h->got.offset = htab->root.sgot->size;
9 kx htab->root.sgot->size += GOT_ENTRY_SIZE;
9 kx if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9 kx || h->root.type != bfd_link_hash_undefweak)
9 kx && (bfd_link_pic (info)
9 kx || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
9 kx /* Undefined weak symbol in static PIE resolves to 0 without
9 kx any dynamic relocations. */
9 kx && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9 kx {
9 kx htab->root.srelgot->size += RELOC_SIZE (htab);
9 kx }
9 kx }
9 kx else
9 kx {
9 kx int indx;
9 kx if (got_type & GOT_TLSDESC_GD)
9 kx {
9 kx eh->tlsdesc_got_jump_table_offset =
9 kx (htab->root.sgotplt->size
9 kx - aarch64_compute_jump_table_size (htab));
9 kx htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
9 kx h->got.offset = (bfd_vma) - 2;
9 kx }
9 kx
9 kx if (got_type & GOT_TLS_GD)
9 kx {
9 kx h->got.offset = htab->root.sgot->size;
9 kx htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
9 kx }
9 kx
9 kx if (got_type & GOT_TLS_IE)
9 kx {
9 kx h->got.offset = htab->root.sgot->size;
9 kx htab->root.sgot->size += GOT_ENTRY_SIZE;
9 kx }
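9 kx
9 kx /* Editorial note on the assignments above: h->got.offset now encodes
9 kx three states -- (bfd_vma) -1 means no .got slot was allocated,
9 kx (bfd_vma) -2 means the symbol only has a TLSDESC double entry in
9 kx .got.plt (reached via eh->tlsdesc_got_jump_table_offset), and any
9 kx other value is a real offset into .got. */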
9 kx
9 kx indx = h && h->dynindx != -1 ? h->dynindx : 0;
9 kx if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9 kx || h->root.type != bfd_link_hash_undefweak)
9 kx && (!bfd_link_executable (info)
9 kx || indx != 0
9 kx || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
9 kx {
9 kx if (got_type & GOT_TLSDESC_GD)
9 kx {
9 kx htab->root.srelplt->size += RELOC_SIZE (htab);
9 kx /* Note reloc_count not incremented here! We have
9 kx already adjusted reloc_count for this relocation
9 kx type. */
9 kx
9 kx /* TLSDESC PLT is now needed, but not yet determined. */
9 kx htab->root.tlsdesc_plt = (bfd_vma) - 1;
9 kx }
9 kx
9 kx if (got_type & GOT_TLS_GD)
9 kx htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9 kx
9 kx if (got_type & GOT_TLS_IE)
9 kx htab->root.srelgot->size += RELOC_SIZE (htab);
9 kx }
9 kx }
9 kx }
9 kx else
9 kx {
9 kx h->got.offset = (bfd_vma) - 1;
9 kx }
9 kx
9 kx if (h->dyn_relocs == NULL)
9 kx return true;
9 kx
9 kx for (p = h->dyn_relocs; p != NULL; p = p->next)
9 kx if (eh->def_protected)
9 kx {
9 kx /* Disallow copy relocations against protected symbol. */
9 kx asection *s = p->sec->output_section;
9 kx if (s != NULL && (s->flags & SEC_READONLY) != 0)
9 kx {
9 kx info->callbacks->einfo
9 kx /* xgettext:c-format */
9 kx (_ ("%F%P: %pB: copy relocation against non-copyable "
9 kx "protected symbol `%s'\n"),
9 kx p->sec->owner, h->root.root.string);
9 kx return false;
9 kx }
9 kx }
9 kx
9 kx /* In the shared -Bsymbolic case, discard space allocated for
9 kx dynamic pc-relative relocs against symbols which turn out to be
9 kx defined in regular objects. For the normal shared case, discard
9 kx space for pc-relative relocs that have become local due to symbol
9 kx visibility changes. */
9 kx
9 kx if (bfd_link_pic (info))
9 kx {
9 kx /* Relocs that use pc_count are those that appear on a call
9 kx insn, or certain REL relocs that can be generated via assembly.
9 kx We want calls to protected symbols to resolve directly to the
9 kx function rather than going via the plt. If people want
9 kx function pointer comparisons to work as expected then they
9 kx should avoid writing weird assembly. */
9 kx if (SYMBOL_CALLS_LOCAL (info, h))
9 kx {
9 kx struct elf_dyn_relocs **pp;
9 kx
9 kx for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
9 kx {
9 kx p->count -= p->pc_count;
9 kx p->pc_count = 0;
9 kx if (p->count == 0)
9 kx *pp = p->next;
9 kx else
9 kx pp = &p->next;
9 kx }
9 kx }
9 kx
9 kx /* Also discard relocs on undefined weak syms with non-default
9 kx visibility. */
9 kx if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
9 kx {
9 kx if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
9 kx || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9 kx h->dyn_relocs = NULL;
9 kx
9 kx /* Make sure undefined weak symbols are output as a dynamic
9 kx symbol in PIEs. */
9 kx else if (h->dynindx == -1
9 kx && !h->forced_local
9 kx && h->root.type == bfd_link_hash_undefweak
9 kx && !bfd_elf_link_record_dynamic_symbol (info, h))
9 kx return false;
9 kx }
9 kx
9 kx }
9 kx else if (ELIMINATE_COPY_RELOCS)
9 kx {
9 kx /* For the non-shared case, discard space for relocs against
9 kx symbols which turn out to need copy relocs or are not
9 kx dynamic. */
9 kx
9 kx if (!h->non_got_ref
9 kx && ((h->def_dynamic
9 kx && !h->def_regular)
9 kx || (htab->root.dynamic_sections_created
9 kx && (h->root.type == bfd_link_hash_undefweak
9 kx || h->root.type == bfd_link_hash_undefined))))
9 kx {
9 kx /* Make sure this symbol is output as a dynamic symbol.
9 kx Undefined weak syms won't yet be marked as dynamic. */
9 kx if (h->dynindx == -1
9 kx && !h->forced_local
9 kx && h->root.type == bfd_link_hash_undefweak
9 kx && !bfd_elf_link_record_dynamic_symbol (info, h))
9 kx return false;
9 kx
9 kx /* If that succeeded, we know we'll be keeping all the
9 kx relocs. */
9 kx if (h->dynindx != -1)
9 kx goto keep;
9 kx }
9 kx
9 kx h->dyn_relocs = NULL;
9 kx
9 kx keep:;
9 kx }
9 kx
9 kx /* Finally, allocate space. */
9 kx for (p = h->dyn_relocs; p != NULL; p = p->next)
9 kx {
9 kx asection *sreloc;
9 kx
9 kx sreloc = elf_section_data (p->sec)->sreloc;
9 kx
9 kx BFD_ASSERT (sreloc != NULL);
9 kx
9 kx sreloc->size += p->count * RELOC_SIZE (htab);
9 kx }
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Allocate space in .plt, .got and associated reloc sections for
9 kx ifunc dynamic relocs. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
9 kx void *inf)
9 kx {
9 kx struct bfd_link_info *info;
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx
9 kx /* An example of a bfd_link_hash_indirect symbol is a versioned
9 kx symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
9 kx -> __gxx_personality_v0(bfd_link_hash_defined)
9 kx
9 kx There is no need to process bfd_link_hash_indirect symbols here
9 kx because we will also be presented with the concrete instance of
9 kx the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
9 kx called to copy all relevant data from the generic to the concrete
9 kx symbol instance. */
9 kx if (h->root.type == bfd_link_hash_indirect)
9 kx return true;
9 kx
9 kx if (h->root.type == bfd_link_hash_warning)
9 kx h = (struct elf_link_hash_entry *) h->root.u.i.link;
9 kx
9 kx info = (struct bfd_link_info *) inf;
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
9 kx it here if it is defined and referenced in a non-shared object. */
9 kx if (h->type == STT_GNU_IFUNC
9 kx && h->def_regular)
9 kx return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
9 kx &h->dyn_relocs,
9 kx htab->plt_entry_size,
9 kx htab->plt_header_size,
9 kx GOT_ENTRY_SIZE,
9 kx false);
9 kx return true;
9 kx }
9 kx
9 kx /* Allocate space in .plt, .got and associated reloc sections for
9 kx local ifunc dynamic relocs. */
9 kx
9 kx static int
9 kx elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
9 kx {
9 kx struct elf_link_hash_entry *h
9 kx = (struct elf_link_hash_entry *) *slot;
9 kx
9 kx if (h->type != STT_GNU_IFUNC
9 kx || !h->def_regular
9 kx || !h->ref_regular
9 kx || !h->forced_local
9 kx || h->root.type != bfd_link_hash_defined)
9 kx abort ();
9 kx
9 kx return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
9 kx }
9 kx
9 kx /* This is the most important function of all. Innocuously named
9 kx though! */
9 kx
9 kx static bool
9 kx elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
9 kx struct bfd_link_info *info)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx bfd *dynobj;
9 kx asection *s;
9 kx bool relocs;
9 kx bfd *ibfd;
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx dynobj = htab->root.dynobj;
9 kx
9 kx BFD_ASSERT (dynobj != NULL);
9 kx
9 kx if (htab->root.dynamic_sections_created)
9 kx {
9 kx if (bfd_link_executable (info) && !info->nointerp)
9 kx {
9 kx s = bfd_get_linker_section (dynobj, ".interp");
9 kx if (s == NULL)
9 kx abort ();
9 kx s->size = sizeof ELF_DYNAMIC_INTERPRETER;
9 kx s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
9 kx }
9 kx }
9 kx
9 kx /* Set up .got offsets for local syms, and space for local dynamic
9 kx relocs. */
9 kx for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9 kx {
9 kx struct elf_aarch64_local_symbol *locals = NULL;
9 kx Elf_Internal_Shdr *symtab_hdr;
9 kx asection *srel;
9 kx unsigned int i;
9 kx
9 kx if (!is_aarch64_elf (ibfd))
9 kx continue;
9 kx
9 kx for (s = ibfd->sections; s != NULL; s = s->next)
9 kx {
9 kx struct elf_dyn_relocs *p;
9 kx
9 kx for (p = (struct elf_dyn_relocs *)
9 kx (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
9 kx {
9 kx if (!bfd_is_abs_section (p->sec)
9 kx && bfd_is_abs_section (p->sec->output_section))
9 kx {
9 kx /* Input section has been discarded, either because
9 kx it is a copy of a linkonce section or due to
9 kx linker script /DISCARD/, so we'll be discarding
9 kx the relocs too. */
9 kx }
9 kx else if (p->count != 0)
9 kx {
9 kx srel = elf_section_data (p->sec)->sreloc;
9 kx srel->size += p->count * RELOC_SIZE (htab);
9 kx if ((p->sec->output_section->flags & SEC_READONLY) != 0)
9 kx info->flags |= DF_TEXTREL;
9 kx }
9 kx }
9 kx }
9 kx
9 kx locals = elf_aarch64_locals (ibfd);
9 kx if (!locals)
9 kx continue;
9 kx
9 kx symtab_hdr = &elf_symtab_hdr (ibfd);
9 kx srel = htab->root.srelgot;
9 kx for (i = 0; i < symtab_hdr->sh_info; i++)
9 kx {
9 kx locals[i].got_offset = (bfd_vma) - 1;
9 kx locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9 kx if (locals[i].got_refcount > 0)
9 kx {
9 kx unsigned got_type = locals[i].got_type;
9 kx if (got_type & GOT_TLSDESC_GD)
9 kx {
9 kx locals[i].tlsdesc_got_jump_table_offset =
9 kx (htab->root.sgotplt->size
9 kx - aarch64_compute_jump_table_size (htab));
9 kx htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
9 kx locals[i].got_offset = (bfd_vma) - 2;
9 kx }
9 kx
9 kx if (got_type & GOT_TLS_GD)
9 kx {
9 kx locals[i].got_offset = htab->root.sgot->size;
9 kx htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
9 kx }
9 kx
9 kx if (got_type & GOT_TLS_IE
9 kx || got_type & GOT_NORMAL)
9 kx {
9 kx locals[i].got_offset = htab->root.sgot->size;
9 kx htab->root.sgot->size += GOT_ENTRY_SIZE;
9 kx }
9 kx
9 kx if (got_type == GOT_UNKNOWN)
9 kx {
9 kx }
9 kx
9 kx if (bfd_link_pic (info))
9 kx {
9 kx if (got_type & GOT_TLSDESC_GD)
9 kx {
9 kx htab->root.srelplt->size += RELOC_SIZE (htab);
9 kx /* Note reloc_count not incremented here! */
9 kx htab->root.tlsdesc_plt = (bfd_vma) - 1;
9 kx }
9 kx
9 kx if (got_type & GOT_TLS_GD)
9 kx htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9 kx
9 kx if (got_type & GOT_TLS_IE
9 kx || got_type & GOT_NORMAL)
9 kx htab->root.srelgot->size += RELOC_SIZE (htab);
9 kx }
9 kx }
9 kx else
9 kx {
9 kx locals[i].got_refcount = (bfd_vma) - 1;
9 kx }
9 kx }
9 kx }
9 kx
9 kx
9 kx /* Allocate global sym .plt and .got entries, and space for global
9 kx sym dynamic relocs. */
9 kx elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
9 kx info);
9 kx
9 kx /* Allocate global ifunc sym .plt and .got entries, and space for global
9 kx ifunc sym dynamic relocs. */
9 kx elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
9 kx info);
9 kx
9 kx /* Allocate .plt and .got entries, and space for local ifunc symbols. */
9 kx htab_traverse (htab->loc_hash_table,
9 kx elfNN_aarch64_allocate_local_ifunc_dynrelocs,
9 kx info);
9 kx
9 kx /* For every jump slot reserved in the sgotplt, reloc_count is
9 kx incremented. However, when we reserve space for TLS descriptors
9 kx it is not incremented, so in order to compute the space occupied
9 kx by the PLT jump slots it suffices to multiply reloc_count by the
9 kx jump slot size; the TLS descriptor entries follow after. */
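9 kx
9 kx /* A minimal sketch of that arithmetic (editorial illustration; the
9 kx real helper, aarch64_compute_jump_table_size, lives elsewhere in
9 kx this file -- this standalone rendering assumes it is simply a
9 kx multiply):
9 kx
9 kx static size_t
9 kx jump_table_size (size_t reloc_count, size_t got_entry_size)
9 kx {
9 kx return reloc_count * got_entry_size;  // one GOT.PLT slot per jump slot reloc
9 kx }
9 kx */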
9 kx
9 kx if (htab->root.srelplt)
9 kx htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
9 kx
9 kx if (htab->root.tlsdesc_plt)
9 kx {
9 kx if (htab->root.splt->size == 0)
9 kx htab->root.splt->size += htab->plt_header_size;
9 kx
9 kx /* If we're not using lazy TLS relocations, don't generate the
9 kx GOT and PLT entries required. */
9 kx if ((info->flags & DF_BIND_NOW))
9 kx htab->root.tlsdesc_plt = 0;
9 kx else
9 kx {
9 kx htab->root.tlsdesc_plt = htab->root.splt->size;
9 kx htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9 kx
9 kx htab->root.tlsdesc_got = htab->root.sgot->size;
9 kx htab->root.sgot->size += GOT_ENTRY_SIZE;
9 kx }
9 kx }
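9 kx
9 kx /* Usage note (editorial): linking with "-z now" sets DF_BIND_NOW, so
9 kx the branch above drops the lazy TLSDESC resolver stub and its GOT
9 kx slot entirely and every TLS descriptor is resolved eagerly at load
9 kx time. */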
9 kx
9 kx /* Initialize the mapping symbol information, used later to distinguish
9 kx between code and data while scanning for errata. */
9 kx if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9 kx for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9 kx {
9 kx if (!is_aarch64_elf (ibfd))
9 kx continue;
9 kx bfd_elfNN_aarch64_init_maps (ibfd);
9 kx }
9 kx
9 kx /* We now have determined the sizes of the various dynamic sections.
9 kx Allocate memory for them. */
9 kx relocs = false;
9 kx for (s = dynobj->sections; s != NULL; s = s->next)
9 kx {
9 kx if ((s->flags & SEC_LINKER_CREATED) == 0)
9 kx continue;
9 kx
9 kx if (s == htab->root.splt
9 kx || s == htab->root.sgot
9 kx || s == htab->root.sgotplt
9 kx || s == htab->root.iplt
9 kx || s == htab->root.igotplt
9 kx || s == htab->root.sdynbss
9 kx || s == htab->root.sdynrelro)
9 kx {
9 kx /* Strip this section if we don't need it; see the
9 kx comment below. */
9 kx }
9 kx else if (startswith (bfd_section_name (s), ".rela"))
9 kx {
9 kx if (s->size != 0 && s != htab->root.srelplt)
9 kx relocs = true;
9 kx
9 kx /* We use the reloc_count field as a counter if we need
9 kx to copy relocs into the output file. */
9 kx if (s != htab->root.srelplt)
9 kx s->reloc_count = 0;
9 kx }
9 kx else
9 kx {
9 kx /* It's not one of our sections, so don't allocate space. */
9 kx continue;
9 kx }
9 kx
9 kx if (s->size == 0)
9 kx {
9 kx /* If we don't need this section, strip it from the
9 kx output file. This is mostly to handle .rela.bss and
9 kx .rela.plt. We must create both sections in
9 kx create_dynamic_sections, because they must be created
9 kx before the linker maps input sections to output
9 kx sections. The linker does that before
9 kx adjust_dynamic_symbol is called, and it is that
9 kx function which decides whether anything needs to go
9 kx into these sections. */
9 kx s->flags |= SEC_EXCLUDE;
9 kx continue;
9 kx }
9 kx
9 kx if ((s->flags & SEC_HAS_CONTENTS) == 0)
9 kx continue;
9 kx
9 kx /* Allocate memory for the section contents. We use bfd_zalloc
9 kx here in case unused entries are not reclaimed before the
9 kx section's contents are written out. This should not happen,
9 kx but this way if it does, we get a R_AARCH64_NONE reloc instead
9 kx of garbage. */
9 kx s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9 kx if (s->contents == NULL)
9 kx return false;
9 kx }
9 kx
9 kx if (htab->root.dynamic_sections_created)
9 kx {
9 kx /* Add some entries to the .dynamic section. We fill in the
9 kx values later, in elfNN_aarch64_finish_dynamic_sections, but we
9 kx must add the entries now so that we get the correct size for
9 kx the .dynamic section. The DT_DEBUG entry is filled in by the
9 kx dynamic linker and used by the debugger. */
9 kx #define add_dynamic_entry(TAG, VAL) \
9 kx _bfd_elf_add_dynamic_entry (info, TAG, VAL)
9 kx
9 kx if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
9 kx return false;
9 kx
9 kx if (htab->root.splt->size != 0)
9 kx {
9 kx if (htab->variant_pcs
9 kx && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
9 kx return false;
9 kx
9 kx if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
9 kx && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
9 kx || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
9 kx return false;
9 kx
9 kx else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
9 kx && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
9 kx return false;
9 kx
9 kx else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
9 kx && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
9 kx return false;
9 kx }
9 kx }
9 kx #undef add_dynamic_entry
9 kx
9 kx return true;
9 kx }
9 kx
9 kx static inline void
9 kx elf_aarch64_update_plt_entry (bfd *output_bfd,
9 kx bfd_reloc_code_real_type r_type,
9 kx bfd_byte *plt_entry, bfd_vma value)
9 kx {
9 kx reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
9 kx
9 kx /* FIXME: We should check the return value from this function call. */
9 kx (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
9 kx }
9 kx
9 kx static void
9 kx elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
9 kx struct elf_aarch64_link_hash_table
9 kx *htab, bfd *output_bfd,
9 kx struct bfd_link_info *info)
9 kx {
9 kx bfd_byte *plt_entry;
9 kx bfd_vma plt_index;
9 kx bfd_vma got_offset;
9 kx bfd_vma gotplt_entry_address;
9 kx bfd_vma plt_entry_address;
9 kx Elf_Internal_Rela rela;
9 kx bfd_byte *loc;
9 kx asection *plt, *gotplt, *relplt;
9 kx
9 kx /* When building a static executable, use .iplt, .igot.plt and
9 kx .rela.iplt sections for STT_GNU_IFUNC symbols. */
9 kx if (htab->root.splt != NULL)
9 kx {
9 kx plt = htab->root.splt;
9 kx gotplt = htab->root.sgotplt;
9 kx relplt = htab->root.srelplt;
9 kx }
9 kx else
9 kx {
9 kx plt = htab->root.iplt;
9 kx gotplt = htab->root.igotplt;
9 kx relplt = htab->root.irelplt;
9 kx }
9 kx
9 kx /* Get the index in the procedure linkage table which
9 kx corresponds to this symbol. This is the index of this symbol
9 kx in all the symbols for which we are making plt entries. The
9 kx first entry in the procedure linkage table is reserved.
9 kx
9 kx Get the offset into the .got table of the entry that
9 kx corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
9 kx bytes. The first three are reserved for the dynamic linker.
9 kx
9 kx For static executables, we don't reserve anything. */
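9 kx
9 kx /* Worked example (editorial; assumes the ELF64 small-model sizes used
9 kx elsewhere in this file, i.e. an 8-byte GOT entry): the symbol whose
9 kx PLT entry starts immediately after the PLT header has plt_index 0,
9 kx so got_offset == (0 + 3) * 8 == 24, the first slot after the three
9 kx reserved ones. */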
9 kx
9 kx if (plt == htab->root.splt)
9 kx {
9 kx plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
9 kx got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
9 kx }
9 kx else
9 kx {
9 kx plt_index = h->plt.offset / htab->plt_entry_size;
9 kx got_offset = plt_index * GOT_ENTRY_SIZE;
9 kx }
9 kx
9 kx plt_entry = plt->contents + h->plt.offset;
9 kx plt_entry_address = plt->output_section->vma
9 kx + plt->output_offset + h->plt.offset;
9 kx gotplt_entry_address = gotplt->output_section->vma +
9 kx gotplt->output_offset + got_offset;
9 kx
9 kx /* Copy in the boiler-plate for the PLTn entry. */
9 kx memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
9 kx
9 kx /* The first instruction in a BTI enabled PLT stub is a BTI
9 kx instruction, so skip it. */
9 kx if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9 kx && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9 kx plt_entry = plt_entry + 4;
9 kx
9 kx /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9 kx ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9 kx elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9 kx plt_entry,
9 kx PG (gotplt_entry_address) -
9 kx PG (plt_entry_address));
9 kx
9 kx /* Fill in the lo12 bits for the load from the pltgot. */
9 kx elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9 kx plt_entry + 4,
9 kx PG_OFFSET (gotplt_entry_address));
9 kx
9 kx /* Fill in the lo12 bits for the add from the pltgot entry. */
9 kx elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9 kx plt_entry + 8,
9 kx PG_OFFSET (gotplt_entry_address));
9 kx
9 kx /* All the GOTPLT entries are essentially initialized to PLT0. */
9 kx bfd_put_NN (output_bfd,
9 kx plt->output_section->vma + plt->output_offset,
9 kx gotplt->contents + got_offset);
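9 kx
9 kx /* Editorial note on the store above: pointing every .got.plt slot at
9 kx PLT0 is what implements lazy binding -- the first call through PLTn
9 kx falls into PLT0, which enters the dynamic linker's resolver, and
9 kx the resolver patches the slot with the real function address so
9 kx subsequent calls go direct. */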
9 kx
9 kx rela.r_offset = gotplt_entry_address;
9 kx
9 kx if (h->dynindx == -1
9 kx || ((bfd_link_executable (info)
9 kx || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9 kx && h->def_regular
9 kx && h->type == STT_GNU_IFUNC))
9 kx {
9 kx /* If an STT_GNU_IFUNC symbol is locally defined, generate
9 kx R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9 kx rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9 kx rela.r_addend = (h->root.u.def.value
9 kx + h->root.u.def.section->output_section->vma
9 kx + h->root.u.def.section->output_offset);
9 kx }
9 kx else
9 kx {
9 kx /* Fill in the entry in the .rela.plt section. */
9 kx rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9 kx rela.r_addend = 0;
9 kx }
9 kx
9 kx /* Compute the relocation entry to use based on the PLT index and do
9 kx not adjust reloc_count. The reloc_count has already been adjusted
9 kx to account for this entry. */
9 kx loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx }
9 kx
9 kx /* Size sections even though they're not dynamic. We use this hook to
9 kx set up _TLS_MODULE_BASE_, if needed. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_always_size_sections (bfd *output_bfd,
9 kx struct bfd_link_info *info)
9 kx {
9 kx asection *tls_sec;
9 kx
9 kx if (bfd_link_relocatable (info))
9 kx return true;
9 kx
9 kx tls_sec = elf_hash_table (info)->tls_sec;
9 kx
9 kx if (tls_sec)
9 kx {
9 kx struct elf_link_hash_entry *tlsbase;
9 kx
9 kx tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9 kx "_TLS_MODULE_BASE_", true, true, false);
9 kx
9 kx if (tlsbase)
9 kx {
9 kx struct bfd_link_hash_entry *h = NULL;
9 kx const struct elf_backend_data *bed =
9 kx get_elf_backend_data (output_bfd);
9 kx
9 kx if (!(_bfd_generic_link_add_one_symbol
9 kx (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9 kx tls_sec, 0, NULL, false, bed->collect, &h)))
9 kx return false;
9 kx
9 kx tlsbase->type = STT_TLS;
9 kx tlsbase = (struct elf_link_hash_entry *) h;
9 kx tlsbase->def_regular = 1;
9 kx tlsbase->other = STV_HIDDEN;
9 kx (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
9 kx }
9 kx }
9 kx
9 kx return true;
9 kx }
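9 kx
9 kx /* Editorial note: the symbol created above is a hidden, module-local
9 kx STT_TLS symbol at offset 0 of the TLS output section, so a TLS
9 kx variable's address can be expressed as _TLS_MODULE_BASE_ plus a
9 kx link-time constant. */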
9 kx
9 kx /* Finish up dynamic symbol handling. We set the contents of various
9 kx dynamic sections here. */
9 kx
9 kx static bool
9 kx elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
9 kx struct bfd_link_info *info,
9 kx struct elf_link_hash_entry *h,
9 kx Elf_Internal_Sym *sym)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx htab = elf_aarch64_hash_table (info);
9 kx
9 kx if (h->plt.offset != (bfd_vma) - 1)
9 kx {
9 kx asection *plt, *gotplt, *relplt;
9 kx
9 kx /* This symbol has an entry in the procedure linkage table. Set
9 kx it up. */
9 kx
9 kx /* When building a static executable, use .iplt, .igot.plt and
9 kx .rela.iplt sections for STT_GNU_IFUNC symbols. */
9 kx if (htab->root.splt != NULL)
9 kx {
9 kx plt = htab->root.splt;
9 kx gotplt = htab->root.sgotplt;
9 kx relplt = htab->root.srelplt;
9 kx }
9 kx else
9 kx {
9 kx plt = htab->root.iplt;
9 kx gotplt = htab->root.igotplt;
9 kx relplt = htab->root.irelplt;
9 kx }
9 kx
9 kx if ((h->dynindx == -1
9 kx && !((h->forced_local || bfd_link_executable (info))
9 kx && h->def_regular
9 kx && h->type == STT_GNU_IFUNC))
9 kx || plt == NULL
9 kx || gotplt == NULL
9 kx || relplt == NULL)
9 kx return false;
9 kx
9 kx elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
9 kx if (!h->def_regular)
9 kx {
9 kx /* Mark the symbol as undefined, rather than as defined in
9 kx the .plt section. */
9 kx sym->st_shndx = SHN_UNDEF;
9 kx /* If the symbol is weak we need to clear the value.
9 kx Otherwise, the PLT entry would provide a definition for
9 kx the symbol even if the symbol wasn't defined anywhere,
9 kx and so the symbol would never be NULL. Leave the value if
9 kx there were any relocations where pointer equality matters
9 kx (this is a clue for the dynamic linker, to make function
9 kx pointer comparisons work between an application and shared
9 kx library). */
9 kx if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
9 kx sym->st_value = 0;
9 kx }
9 kx }
9 kx
9 kx if (h->got.offset != (bfd_vma) - 1
9 kx && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
9 kx /* Undefined weak symbol in static PIE resolves to 0 without
9 kx any dynamic relocations. */
9 kx && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9 kx {
9 kx Elf_Internal_Rela rela;
9 kx bfd_byte *loc;
9 kx
9 kx /* This symbol has an entry in the global offset table. Set it
9 kx up. */
9 kx if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
9 kx abort ();
9 kx
9 kx rela.r_offset = (htab->root.sgot->output_section->vma
9 kx + htab->root.sgot->output_offset
9 kx + (h->got.offset & ~(bfd_vma) 1));
9 kx
9 kx if (h->def_regular
9 kx && h->type == STT_GNU_IFUNC)
9 kx {
9 kx if (bfd_link_pic (info))
9 kx {
9 kx /* Generate R_AARCH64_GLOB_DAT. */
9 kx goto do_glob_dat;
9 kx }
9 kx else
9 kx {
9 kx asection *plt;
9 kx
9 kx if (!h->pointer_equality_needed)
9 kx abort ();
9 kx
9 kx /* For non-shared object, we can't use .got.plt, which
9 kx contains the real function address if we need pointer
9 kx equality. We load the GOT entry with the PLT entry. */
9 kx plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
9 kx bfd_put_NN (output_bfd, (plt->output_section->vma
9 kx + plt->output_offset
9 kx + h->plt.offset),
9 kx htab->root.sgot->contents
9 kx + (h->got.offset & ~(bfd_vma) 1));
9 kx return true;
9 kx }
9 kx }
9 kx else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
9 kx {
9 kx if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
9 kx return false;
9 kx
9 kx BFD_ASSERT ((h->got.offset & 1) != 0);
9 kx rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9 kx rela.r_addend = (h->root.u.def.value
9 kx + h->root.u.def.section->output_section->vma
9 kx + h->root.u.def.section->output_offset);
9 kx }
9 kx else
9 kx {
9 kx do_glob_dat:
9 kx BFD_ASSERT ((h->got.offset & 1) == 0);
9 kx bfd_put_NN (output_bfd, (bfd_vma) 0,
9 kx htab->root.sgot->contents + h->got.offset);
9 kx rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
9 kx rela.r_addend = 0;
9 kx }
9 kx
9 kx loc = htab->root.srelgot->contents;
9 kx loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx }
9 kx
9 kx if (h->needs_copy)
9 kx {
9 kx Elf_Internal_Rela rela;
9 kx asection *s;
9 kx bfd_byte *loc;
9 kx
9 kx /* This symbol needs a copy reloc. Set it up. */
9 kx if (h->dynindx == -1
9 kx || (h->root.type != bfd_link_hash_defined
9 kx && h->root.type != bfd_link_hash_defweak)
9 kx || htab->root.srelbss == NULL)
9 kx abort ();
9 kx
9 kx rela.r_offset = (h->root.u.def.value
9 kx + h->root.u.def.section->output_section->vma
9 kx + h->root.u.def.section->output_offset);
9 kx rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
9 kx rela.r_addend = 0;
9 kx if (h->root.u.def.section == htab->root.sdynrelro)
9 kx s = htab->root.sreldynrelro;
9 kx else
9 kx s = htab->root.srelbss;
9 kx loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
9 kx bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9 kx }
9 kx
9 kx /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9 kx be NULL for local symbols. */
9 kx if (sym != NULL
9 kx && (h == elf_hash_table (info)->hdynamic
9 kx || h == elf_hash_table (info)->hgot))
9 kx sym->st_shndx = SHN_ABS;
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Finish up local dynamic symbol handling. We set the contents of
9 kx various dynamic sections here. */
9 kx
9 kx static int
9 kx elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9 kx {
9 kx struct elf_link_hash_entry *h
9 kx = (struct elf_link_hash_entry *) *slot;
9 kx struct bfd_link_info *info
9 kx = (struct bfd_link_info *) inf;
9 kx
9 kx return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9 kx info, h, NULL);
9 kx }
9 kx
9 kx static void
9 kx elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9 kx struct elf_aarch64_link_hash_table
9 kx *htab)
9 kx {
9 kx /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
9 kx small and large PLTs and at the minute just generates
9 kx the small PLT. */
9 kx
9 kx /* PLT0 of the small PLT looks like this in ELF64 -
9 kx stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9 kx adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9 kx ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9 kx // symbol resolver
9 kx add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9 kx // GOTPLT entry for this.
9 kx br x17
9 kx PLT0 will be slightly different in ELF32 due to different got entry
9 kx size. */
9 kx bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9 kx bfd_vma plt_base;
9 kx
9 kx
9 kx memcpy (htab->root.splt->contents, htab->plt0_entry,
9 kx htab->plt_header_size);
9 kx
9 kx /* PR 26312: Explicitly set the sh_entsize to 0 so that
9 kx consumers do not think that the section contains fixed
9 kx sized objects. */
9 kx elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;
9 kx
9 kx plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9 kx + htab->root.sgotplt->output_offset
9 kx + GOT_ENTRY_SIZE * 2);
9 kx
9 kx plt_base = htab->root.splt->output_section->vma +
9 kx htab->root.splt->output_offset;
9 kx
9 kx /* The first instruction in a BTI enabled PLT stub is a BTI
9 kx instruction, so skip it. */
9 kx bfd_byte *plt0_entry = htab->root.splt->contents;
9 kx if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
9 kx plt0_entry = plt0_entry + 4;
9 kx
9 kx /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9 kx ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9 kx elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9 kx plt0_entry + 4,
9 kx PG (plt_got_2nd_ent) - PG (plt_base + 4));
9 kx
9 kx elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9 kx plt0_entry + 8,
9 kx PG_OFFSET (plt_got_2nd_ent));
9 kx
9 kx elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9 kx plt0_entry + 12,
9 kx PG_OFFSET (plt_got_2nd_ent));
9 kx }
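9 kx
9 kx /* Worked example of the page arithmetic above (editorial; assumes the
9 kx PG/PG_OFFSET macros used by this file mask to a 4KiB page, i.e.
9 kx PG (x) == (x & ~0xfff) and PG_OFFSET (x) == (x & 0xfff)): if the
9 kx adrp sits at 0x400234 and GOT[2] lives at 0x411018, the ADRP
9 kx immediate encodes (0x411000 - 0x400000) >> 12 == 0x11 pages and the
9 kx LDR/ADD low parts both encode 0x18. */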
9 kx
9 kx static bool
9 kx elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9 kx struct bfd_link_info *info)
9 kx {
9 kx struct elf_aarch64_link_hash_table *htab;
9 kx bfd *dynobj;
9 kx asection *sdyn;
9 kx
9 kx htab = elf_aarch64_hash_table (info);
9 kx dynobj = htab->root.dynobj;
9 kx sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9 kx
9 kx if (htab->root.dynamic_sections_created)
9 kx {
9 kx ElfNN_External_Dyn *dyncon, *dynconend;
9 kx
9 kx if (sdyn == NULL || htab->root.sgot == NULL)
9 kx abort ();
9 kx
9 kx dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9 kx dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9 kx for (; dyncon < dynconend; dyncon++)
9 kx {
9 kx Elf_Internal_Dyn dyn;
9 kx asection *s;
9 kx
9 kx bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9 kx
9 kx switch (dyn.d_tag)
9 kx {
9 kx default:
9 kx continue;
9 kx
9 kx case DT_PLTGOT:
9 kx s = htab->root.sgotplt;
9 kx dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9 kx break;
9 kx
9 kx case DT_JMPREL:
9 kx s = htab->root.srelplt;
9 kx dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9 kx break;
9 kx
9 kx case DT_PLTRELSZ:
9 kx s = htab->root.srelplt;
9 kx dyn.d_un.d_val = s->size;
9 kx break;
9 kx
9 kx case DT_TLSDESC_PLT:
9 kx s = htab->root.splt;
9 kx dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9 kx + htab->root.tlsdesc_plt;
9 kx break;
9 kx
9 kx case DT_TLSDESC_GOT:
9 kx s = htab->root.sgot;
9 kx BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9 kx dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9 kx + htab->root.tlsdesc_got;
9 kx break;
9 kx }
9 kx
9 kx bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9 kx }
9 kx
9 kx }
9 kx
9 kx /* Fill in the special first entry in the procedure linkage table. */
9 kx if (htab->root.splt && htab->root.splt->size > 0)
9 kx {
9 kx elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9 kx
9 kx if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
9 kx {
9 kx BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9 kx bfd_put_NN (output_bfd, (bfd_vma) 0,
9 kx htab->root.sgot->contents + htab->root.tlsdesc_got);
9 kx
9 kx const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
9 kx htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9 kx
9 kx aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
9 kx if (type == PLT_BTI || type == PLT_BTI_PAC)
9 kx {
9 kx entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
9 kx }
9 kx
9 kx memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
9 kx entry, htab->tlsdesc_plt_entry_size);
9 kx
9 kx {
9 kx bfd_vma adrp1_addr =
9 kx htab->root.splt->output_section->vma
9 kx + htab->root.splt->output_offset
9 kx + htab->root.tlsdesc_plt + 4;
9 kx
9 kx bfd_vma adrp2_addr = adrp1_addr + 4;
9 kx
9 kx bfd_vma got_addr =
9 kx htab->root.sgot->output_section->vma
9 kx + htab->root.sgot->output_offset;
9 kx
9 kx bfd_vma pltgot_addr =
9 kx htab->root.sgotplt->output_section->vma
9 kx + htab->root.sgotplt->output_offset;
9 kx
9 kx bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;
9 kx
9 kx bfd_byte *plt_entry =
9 kx htab->root.splt->contents + htab->root.tlsdesc_plt;
9 kx
9 kx /* The first instruction in a BTI enabled PLT stub is a BTI
9 kx instruction, so skip it. */
9 kx if (type & PLT_BTI)
9 kx {
9 kx plt_entry = plt_entry + 4;
9 kx adrp1_addr = adrp1_addr + 4;
9 kx adrp2_addr = adrp2_addr + 4;
9 kx }
9 kx
9 kx /* adrp x2, DT_TLSDESC_GOT */
9 kx elf_aarch64_update_plt_entry (output_bfd,
9 kx BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9 kx plt_entry + 4,
9 kx (PG (dt_tlsdesc_got)
9 kx - PG (adrp1_addr)));
9 kx
9 kx /* adrp x3, 0 */
9 kx elf_aarch64_update_plt_entry (output_bfd,
9 kx BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9 kx plt_entry + 8,
9 kx (PG (pltgot_addr)
9 kx - PG (adrp2_addr)));
9 kx
9 kx /* ldr x2, [x2, #0] */
9 kx elf_aarch64_update_plt_entry (output_bfd,
9 kx BFD_RELOC_AARCH64_LDSTNN_LO12,
9 kx plt_entry + 12,
9 kx PG_OFFSET (dt_tlsdesc_got));
9 kx
9 kx /* add x3, x3, 0 */
9 kx elf_aarch64_update_plt_entry (output_bfd,
9 kx BFD_RELOC_AARCH64_ADD_LO12,
9 kx plt_entry + 16,
9 kx PG_OFFSET (pltgot_addr));
9 kx }
9 kx }
9 kx }
9 kx
9 kx if (htab->root.sgotplt)
9 kx {
9 kx if (bfd_is_abs_section (htab->root.sgotplt->output_section))
9 kx {
9 kx _bfd_error_handler
9 kx (_("discarded output section: `%pA'"), htab->root.sgotplt);
9 kx return false;
9 kx }
9 kx
9 kx /* Fill in the first three entries in the global offset table. */
9 kx if (htab->root.sgotplt->size > 0)
9 kx {
9 kx bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
9 kx
9 kx /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
9 kx bfd_put_NN (output_bfd,
9 kx (bfd_vma) 0,
9 kx htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
9 kx bfd_put_NN (output_bfd,
9 kx (bfd_vma) 0,
9 kx htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
9 kx }
9 kx
9 kx if (htab->root.sgot)
9 kx {
9 kx if (htab->root.sgot->size > 0)
9 kx {
9 kx bfd_vma addr =
9 kx sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
9 kx bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
9 kx }
9 kx }
9 kx
9 kx elf_section_data (htab->root.sgotplt->output_section)->
9 kx this_hdr.sh_entsize = GOT_ENTRY_SIZE;
9 kx }
9 kx
9 kx if (htab->root.sgot && htab->root.sgot->size > 0)
9 kx elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
9 kx = GOT_ENTRY_SIZE;
9 kx
9 kx /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
9 kx htab_traverse (htab->loc_hash_table,
9 kx elfNN_aarch64_finish_local_dynamic_symbol,
9 kx info);
9 kx
9 kx return true;
9 kx }
9 kx
9 kx /* Check if BTI enabled PLTs are needed. Returns the type needed. */
9 kx static aarch64_plt_type
9 kx get_plt_type (bfd *abfd)
9 kx {
9 kx aarch64_plt_type ret = PLT_NORMAL;
9 kx bfd_byte *contents, *extdyn, *extdynend;
9 kx asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
9 kx if (!sec
9 kx || sec->size < sizeof (ElfNN_External_Dyn)
9 kx || !bfd_malloc_and_get_section (abfd, sec, &contents))
9 kx return ret;
9 kx extdyn = contents;
9 kx extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn);
9 kx for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn))
9 kx {
9 kx Elf_Internal_Dyn dyn;
9 kx bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);
9 kx
9 kx /* Let's check the processor specific dynamic array tags. */
9 kx bfd_vma tag = dyn.d_tag;
9 kx if (tag < DT_LOPROC || tag > DT_HIPROC)
9 kx continue;
9 kx
9 kx switch (tag)
9 kx {
9 kx case DT_AARCH64_BTI_PLT:
9 kx ret |= PLT_BTI;
9 kx break;
9 kx
9 kx case DT_AARCH64_PAC_PLT:
9 kx ret |= PLT_PAC;
9 kx break;
9 kx
9 kx default: break;
9 kx }
9 kx }
9 kx free (contents);
9 kx return ret;
9 kx }
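9 kx
9 kx /* Editorial note (tag values quoted from elf/aarch64.h -- verify
9 kx there): DT_AARCH64_BTI_PLT is DT_LOPROC + 1 and DT_AARCH64_PAC_PLT
9 kx is DT_LOPROC + 3, which is why the scan above only inspects tags in
9 kx the processor-specific DT_LOPROC .. DT_HIPROC range. */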
9 kx
9 kx static long
9 kx elfNN_aarch64_get_synthetic_symtab (bfd *abfd,
9 kx long symcount,
9 kx asymbol **syms,
9 kx long dynsymcount,
9 kx asymbol **dynsyms,
9 kx asymbol **ret)
9 kx {
9 kx elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd);
9 kx return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
9 kx dynsymcount, dynsyms, ret);
9 kx }
9 kx
9 kx /* Return address for Ith PLT stub in section PLT, for relocation REL
9 kx or (bfd_vma) -1 if it should not be included. */
9 kx
9 kx static bfd_vma
9 kx elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
9 kx const arelent *rel ATTRIBUTE_UNUSED)
9 kx {
9 kx size_t plt0_size = PLT_ENTRY_SIZE;
9 kx size_t pltn_size = PLT_SMALL_ENTRY_SIZE;
9 kx
9 kx if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC)
9 kx {
9 kx if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
9 kx pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
9 kx else
9 kx pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
9 kx }
9 kx else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI)
9 kx {
9 kx if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
9 kx pltn_size = PLT_BTI_SMALL_ENTRY_SIZE;
9 kx }
9 kx else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC)
9 kx {
9 kx pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
9 kx }
9 kx
9 kx return plt->vma + plt0_size + i * pltn_size;
9 kx }
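9 kx
9 kx /* Worked example (editorial; assumes the usual small-model sizes of a
9 kx 32-byte PLT0 and 16-byte PLTn entries on ELF64): for a plain PLT at
9 kx vma 0x10000, stub i == 0 is reported at 0x10020 and stub i == 1 at
9 kx 0x10030; the BTI/PAC cases above merely select a larger
9 kx pltn_size. */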
9 kx
9 kx /* Returns TRUE if NAME is an AArch64 mapping symbol.
9 kx The ARM ELF standard defines $x (for A64 code) and $d (for data).
9 kx It also allows a period-initiated suffix to be added to the symbol,
9 kx i.e. "$[adtx]\.[:sym_char]+". */
9 kx
9 kx static bool
9 kx is_aarch64_mapping_symbol (const char * name)
9 kx {
9 kx return name != NULL /* Paranoia. */
9 kx && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
9 kx the mapping symbols could have acquired a prefix.
9 kx We do not support this here, since such symbols no
9 kx longer conform to the ARM ELF ABI. */
9 kx && (name[1] == 'd' || name[1] == 'x')
9 kx && (name[2] == 0 || name[2] == '.');
9 kx /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
9 kx any characters that follow the period are legal characters for the body
9 kx of a symbol's name. For now we just assume that this is the case. */
9 kx }
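9 kx
9 kx /* Examples, following the checks above (editorial): "$x", "$d" and
9 kx "$x.foo" are accepted; "$xfoo" is rejected because the third
9 kx character must be NUL or '.'; "$a" and "$t" are rejected because
9 kx they are A32/T32 mapping symbols rather than AArch64 ones. */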
9 kx
9 kx /* Make sure that mapping symbols in object files are not removed via the
9 kx "strip --strip-unneeded" tool. These symbols might be needed in order
9 kx to correctly generate linked files. Once an object file has been
9 kx linked, it should be safe to remove them. */
9 kx
9 kx static void
9 kx elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
9 kx {
9 kx if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
9 kx && sym->section != bfd_abs_section_ptr
9 kx && is_aarch64_mapping_symbol (sym->name))
9 kx sym->flags |= BSF_KEEP;
9 kx }
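9 kx
9 kx /* Usage note (editorial): this is what keeps, for example,
9 kx "strip --strip-unneeded foo.o" from deleting $x/$d in a relocatable
9 kx object. BSF_KEEP is only set when neither EXEC_P nor DYNAMIC is
9 kx set on the bfd, so fully linked files can still have their mapping
9 kx symbols stripped. */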
9 kx
9 kx /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
9 kx wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
9 kx for the effect of GNU properties of the output_bfd. */
9 kx static bfd *
9 kx elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
9 kx {
9 kx uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9 kx bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
9 kx elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
9 kx elf_aarch64_tdata (info->output_bfd)->plt_type
9 kx |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
9 kx setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
9 kx return pbfd;
9 kx }
9 kx
9 kx /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
9 kx wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
9 kx for the effect of GNU properties of the output_bfd. */
9 kx static bool
9 kx elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
9 kx bfd *abfd, bfd *bbfd,
9 kx elf_property *aprop,
9 kx elf_property *bprop)
9 kx {
9 kx uint32_t prop
9 kx = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9 kx
9 kx /* If the output has been marked with BTI via a command line argument,
9 kx emit a warning if necessary. Properties are merged per type, hence
9 kx only check for warnings when merging
9 kx GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
9 kx if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
9 kx || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
9 kx && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
9 kx && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
9 kx {
9 kx if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
9 kx || !aprop)
9 kx {
9 kx _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
9 kx "all inputs do not have BTI in NOTE section."),
9 kx abfd);
9 kx }
9 kx if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
9 kx || !bprop)
9 kx {
9 kx _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
9 kx "all inputs do not have BTI in NOTE section."),
9 kx bbfd);
9 kx }
9 kx }
9 kx
9 kx return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
9 kx bprop, prop);
9 kx }
9 kx
9 kx /* We use this so we can override certain functions
9 kx (though currently we don't). */
9 kx
9 kx const struct elf_size_info elfNN_aarch64_size_info =
9 kx {
9 kx sizeof (ElfNN_External_Ehdr),
9 kx sizeof (ElfNN_External_Phdr),
9 kx sizeof (ElfNN_External_Shdr),
9 kx sizeof (ElfNN_External_Rel),
9 kx sizeof (ElfNN_External_Rela),
9 kx sizeof (ElfNN_External_Sym),
9 kx sizeof (ElfNN_External_Dyn),
9 kx sizeof (Elf_External_Note),
9 kx 4, /* Hash table entry size. */
9 kx 1, /* Internal relocs per external relocs. */
9 kx ARCH_SIZE, /* Arch size. */
9 kx LOG_FILE_ALIGN, /* Log_file_align. */
9 kx ELFCLASSNN, EV_CURRENT,
9 kx bfd_elfNN_write_out_phdrs,
9 kx bfd_elfNN_write_shdrs_and_ehdr,
9 kx bfd_elfNN_checksum_contents,
9 kx bfd_elfNN_write_relocs,
9 kx bfd_elfNN_swap_symbol_in,
9 kx bfd_elfNN_swap_symbol_out,
9 kx bfd_elfNN_slurp_reloc_table,
9 kx bfd_elfNN_slurp_symbol_table,
9 kx bfd_elfNN_swap_dyn_in,
9 kx bfd_elfNN_swap_dyn_out,
9 kx bfd_elfNN_swap_reloc_in,
9 kx bfd_elfNN_swap_reloc_out,
9 kx bfd_elfNN_swap_reloca_in,
9 kx bfd_elfNN_swap_reloca_out
9 kx };
9 kx
9 kx #define ELF_ARCH bfd_arch_aarch64
9 kx #define ELF_MACHINE_CODE EM_AARCH64
9 kx #define ELF_MAXPAGESIZE 0x10000
9 kx #define ELF_COMMONPAGESIZE 0x1000
9 kx
9 kx #define bfd_elfNN_close_and_cleanup \
9 kx elfNN_aarch64_close_and_cleanup
9 kx
9 kx #define bfd_elfNN_bfd_free_cached_info \
9 kx elfNN_aarch64_bfd_free_cached_info
9 kx
9 kx #define bfd_elfNN_bfd_is_target_special_symbol \
9 kx elfNN_aarch64_is_target_special_symbol
9 kx
9 kx #define bfd_elfNN_bfd_link_hash_table_create \
9 kx elfNN_aarch64_link_hash_table_create
9 kx
9 kx #define bfd_elfNN_bfd_merge_private_bfd_data \
9 kx elfNN_aarch64_merge_private_bfd_data
9 kx
9 kx #define bfd_elfNN_bfd_print_private_bfd_data \
9 kx elfNN_aarch64_print_private_bfd_data
9 kx
9 kx #define bfd_elfNN_bfd_reloc_type_lookup \
9 kx elfNN_aarch64_reloc_type_lookup
9 kx
9 kx #define bfd_elfNN_bfd_reloc_name_lookup \
9 kx elfNN_aarch64_reloc_name_lookup
9 kx
9 kx #define bfd_elfNN_bfd_set_private_flags \
9 kx elfNN_aarch64_set_private_flags
9 kx
9 kx #define bfd_elfNN_find_inliner_info \
9 kx elfNN_aarch64_find_inliner_info
9 kx
9 kx #define bfd_elfNN_get_synthetic_symtab \
9 kx elfNN_aarch64_get_synthetic_symtab
9 kx
9 kx #define bfd_elfNN_mkobject \
9 kx elfNN_aarch64_mkobject
9 kx
9 kx #define bfd_elfNN_new_section_hook \
9 kx elfNN_aarch64_new_section_hook
9 kx
9 kx #define elf_backend_adjust_dynamic_symbol \
9 kx elfNN_aarch64_adjust_dynamic_symbol
9 kx
9 kx #define elf_backend_always_size_sections \
9 kx elfNN_aarch64_always_size_sections
9 kx
9 kx #define elf_backend_check_relocs \
9 kx elfNN_aarch64_check_relocs
9 kx
9 kx #define elf_backend_copy_indirect_symbol \
9 kx elfNN_aarch64_copy_indirect_symbol
9 kx
9 kx #define elf_backend_merge_symbol_attribute \
9 kx elfNN_aarch64_merge_symbol_attribute
9 kx
9 kx /* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
9 kx to them in our hash. */
9 kx #define elf_backend_create_dynamic_sections \
9 kx elfNN_aarch64_create_dynamic_sections
9 kx
9 kx #define elf_backend_init_index_section \
9 kx _bfd_elf_init_2_index_sections
9 kx
9 kx #define elf_backend_finish_dynamic_sections \
9 kx elfNN_aarch64_finish_dynamic_sections
9 kx
9 kx #define elf_backend_finish_dynamic_symbol \
9 kx elfNN_aarch64_finish_dynamic_symbol
9 kx
9 kx #define elf_backend_object_p \
9 kx elfNN_aarch64_object_p
9 kx
9 kx #define elf_backend_output_arch_local_syms \
9 kx elfNN_aarch64_output_arch_local_syms
9 kx
9 kx #define elf_backend_maybe_function_sym \
9 kx elfNN_aarch64_maybe_function_sym
9 kx
9 kx #define elf_backend_plt_sym_val \
9 kx elfNN_aarch64_plt_sym_val
9 kx
9 kx #define elf_backend_init_file_header \
9 kx elfNN_aarch64_init_file_header
9 kx
9 kx #define elf_backend_relocate_section \
9 kx elfNN_aarch64_relocate_section
9 kx
9 kx #define elf_backend_reloc_type_class \
9 kx elfNN_aarch64_reloc_type_class
9 kx
9 kx #define elf_backend_section_from_shdr \
9 kx elfNN_aarch64_section_from_shdr
9 kx
9 kx #define elf_backend_section_from_phdr \
9 kx elfNN_aarch64_section_from_phdr
9 kx
9 kx #define elf_backend_modify_headers \
9 kx elfNN_aarch64_modify_headers
9 kx
9 kx #define elf_backend_size_dynamic_sections \
9 kx elfNN_aarch64_size_dynamic_sections
9 kx
9 kx #define elf_backend_size_info \
9 kx elfNN_aarch64_size_info
9 kx
9 kx #define elf_backend_write_section \
9 kx elfNN_aarch64_write_section
9 kx
9 kx #define elf_backend_symbol_processing \
9 kx elfNN_aarch64_backend_symbol_processing
9 kx
9 kx #define elf_backend_setup_gnu_properties \
9 kx elfNN_aarch64_link_setup_gnu_properties
9 kx
9 kx #define elf_backend_merge_gnu_properties \
9 kx elfNN_aarch64_merge_gnu_properties
9 kx
9 kx #define elf_backend_can_refcount 1
9 kx #define elf_backend_can_gc_sections 1
9 kx #define elf_backend_plt_readonly 1
9 kx #define elf_backend_want_got_plt 1
9 kx #define elf_backend_want_plt_sym 0
9 kx #define elf_backend_want_dynrelro 1
9 kx #define elf_backend_may_use_rel_p 0
9 kx #define elf_backend_may_use_rela_p 1
9 kx #define elf_backend_default_use_rela_p 1
9 kx #define elf_backend_rela_normal 1
9 kx #define elf_backend_dtrel_excludes_plt 1
9 kx #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
9 kx #define elf_backend_default_execstack 0
9 kx #define elf_backend_extern_protected_data 0
9 kx #define elf_backend_hash_symbol elf_aarch64_hash_symbol
9 kx
9 kx #undef elf_backend_obj_attrs_section
9 kx #define elf_backend_obj_attrs_section ".ARM.attributes"
9 kx
9 kx #include "elfNN-target.h"
9 kx
9 kx /* CloudABI support. */
9 kx
9 kx #undef TARGET_LITTLE_SYM
9 kx #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
9 kx #undef TARGET_LITTLE_NAME
9 kx #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
9 kx #undef TARGET_BIG_SYM
9 kx #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
9 kx #undef TARGET_BIG_NAME
9 kx #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
9 kx
9 kx #undef ELF_OSABI
9 kx #define ELF_OSABI ELFOSABI_CLOUDABI
9 kx
9 kx #undef elfNN_bed
9 kx #define elfNN_bed elfNN_aarch64_cloudabi_bed
9 kx
9 kx #include "elfNN-target.h"