mirror of
https://github.com/espressif/binutils-gdb.git
synced 2025-06-26 13:56:22 +08:00
Refactor Cortex-A8 erratum workaround in preparation
2016-05-10  Thomas Preud'homme  <thomas.preudhomme@arm.com>

bfd/
	* elf32-arm.c (enum elf32_arm_stub_type): Delete
	arm_stub_a8_veneer_lwm enumerator.
	(arm_stub_a8_veneer_lwm): New unsigned constant to replace
	aforementioned enumerator.
	(struct elf32_arm_stub_hash_entry): Delete target_addend
	field and add source_value.
	(struct a8_erratum_fix): Delete addend field and add target_offset.
	(stub_hash_newfunc): Initialize source_value field and remove
	initialization for target_addend.
	(arm_build_one_stub): Stop special-casing Thumb relocations: promote
	the else branch to being always executed, moving the
	arm_stub_a8_veneer_b_cond specific code into it.  Remove
	stub_entry->target_addend from the points_to computation.
	(cortex_a8_erratum_scan): Store in the a8_erratum_fix structure the
	offset to the target symbol from the start of the section rather
	than the offset from the stub address.
	(elf32_arm_size_stubs): Set stub_entry's source_value and target_value
	fields from struct a8_erratum_fix's offset and target_offset
	respectively.
	(make_branch_to_a8_stub): Rename the target variable to loc.  Compute
	veneered_insn_loc and loc using stub_entry's source_value.
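In outline, the refactoring swaps a stub-relative addend for section-relative offsets on both ends of the veneered branch. A minimal sketch of the resulting data flow, using illustrative stand-ins rather than the real BFD structures (only the field names are taken from the diff below, everything else is assumed):

#include <stdint.h>

/* Stand-ins for a8_erratum_fix and elf32_arm_stub_hash_entry.  */
struct fix
{
  uint32_t offset;         /* erratum branch, from start of section */
  uint32_t target_offset;  /* branch target, from start of section */
};

struct stub
{
  uint32_t source_value;   /* copied from fix.offset */
  uint32_t target_value;   /* copied from fix.target_offset */
};

struct placement           /* where the shared section landed */
{
  uint32_t output_section_vma;
  uint32_t output_offset;
};

/* As in elf32_arm_size_stubs after this patch: both ends are recorded
   as section-relative offsets.  */
static void
record_fix (struct stub *s, const struct fix *f)
{
  s->source_value = f->offset;
  s->target_value = f->target_offset;
}

/* As in make_branch_to_a8_stub: one section placement suffices because
   Cortex-A8 erratum stubs are only created when source and target share
   the section.  */
static uint32_t
veneered_insn_loc (const struct placement *sec, const struct stub *s)
{
  return sec->output_section_vma + sec->output_offset + s->source_value;
}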
bfd/ChangeLog
@@ -1,3 +1,27 @@
+2016-05-10  Thomas Preud'homme  <thomas.preudhomme@arm.com>
+
+	* elf32-arm.c (enum elf32_arm_stub_type): Delete
+	arm_stub_a8_veneer_lwm enumerator.
+	(arm_stub_a8_veneer_lwm): New unsigned constant to replace
+	aforementioned enumerator.
+	(struct elf32_arm_stub_hash_entry): Delete target_addend
+	field and add source_value.
+	(struct a8_erratum_fix): Delete addend field and add target_offset.
+	(stub_hash_newfunc): Initialize source_value field and remove
+	initialization for target_addend.
+	(arm_build_one_stub): Stop special-casing Thumb relocations: promote
+	the else branch to being always executed, moving the
+	arm_stub_a8_veneer_b_cond specific code into it.  Remove
+	stub_entry->target_addend from the points_to computation.
+	(cortex_a8_erratum_scan): Store in the a8_erratum_fix structure the
+	offset to the target symbol from the start of the section rather
+	than the offset from the stub address.
+	(elf32_arm_size_stubs): Set stub_entry's source_value and target_value
+	fields from struct a8_erratum_fix's offset and target_offset
+	respectively.
+	(make_branch_to_a8_stub): Rename the target variable to loc.  Compute
+	veneered_insn_loc and loc using stub_entry's source_value.
+
 2016-05-10  Thomas Preud'homme  <thomas.preudhomme@arm.com>
 
 	(elf32_arm_merge_eabi_attributes): Add merging logic for
bfd/elf32-arm.c
@@ -2632,11 +2632,12 @@ enum elf32_arm_stub_type
 {
   arm_stub_none,
   DEF_STUBS
-  /* Note the first a8_veneer type.  */
-  arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
 };
 #undef DEF_STUB
 
+/* Note the first a8_veneer type.  */
+const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
+
 typedef struct
 {
   const insn_sequence* template_sequence;
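Moving the marker out of the enum keeps every enumerator distinct while the "low water mark" range test still works unchanged. A minimal sketch of the pattern, with hypothetical stub kinds standing in for the real DEF_STUBS expansion:

/* With the marker outside the enum, no two enumerators share a value
   (useful for exhaustive switches over stub types), yet ">= lwm" still
   classifies the a8 veneers.  */
enum stub_kind
{
  stub_none,
  stub_long_branch,
  stub_a8_veneer_b_cond,
  stub_a8_veneer_bl,
  stub_a8_veneer_blx
};

static const unsigned stub_a8_veneer_lwm = stub_a8_veneer_b_cond;

static int
is_a8_veneer (enum stub_kind kind)
{
  return (unsigned) kind >= stub_a8_veneer_lwm;
}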
@@ -2666,8 +2667,12 @@ struct elf32_arm_stub_hash_entry
   bfd_vma target_value;
   asection *target_section;
 
-  /* Offset to apply to relocation referencing target_value.  */
-  bfd_vma target_addend;
+  /* Same as above but for the source of the branch to the stub.  Used for
+     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
+     such, source section does not need to be recorded since Cortex-A8 erratum
+     workaround stubs are only generated when both source and target are in the
+     same section.  */
+  bfd_vma source_value;
 
   /* The instruction which caused this stub to be generated (only valid for
      Cortex-A8 erratum workaround stubs at present).  */
@@ -2836,7 +2841,7 @@ struct a8_erratum_fix
   bfd *input_bfd;
   asection *section;
   bfd_vma offset;
-  bfd_vma addend;
+  bfd_vma target_offset;
   unsigned long orig_insn;
   char *stub_name;
   enum elf32_arm_stub_type stub_type;
@@ -3413,9 +3418,9 @@ stub_hash_newfunc (struct bfd_hash_entry *entry,
       eh = (struct elf32_arm_stub_hash_entry *) entry;
       eh->stub_sec = NULL;
       eh->stub_offset = 0;
+      eh->source_value = 0;
       eh->target_value = 0;
       eh->target_section = NULL;
-      eh->target_addend = 0;
       eh->orig_insn = 0;
       eh->stub_type = arm_stub_none;
       eh->stub_size = 0;
@@ -4450,65 +4455,36 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
   BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
 
   for (i = 0; i < nrelocs; i++)
-    if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
-        || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
-        || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
-        || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
-      {
-        Elf_Internal_Rela rel;
-        bfd_boolean unresolved_reloc;
-        char *error_message;
-        enum arm_st_branch_type branch_type
-          = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
-             ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
-        bfd_vma points_to = sym_value + stub_entry->target_addend;
+    {
+      Elf_Internal_Rela rel;
+      bfd_boolean unresolved_reloc;
+      char *error_message;
+      bfd_vma points_to =
+        sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
 
-        rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
-        rel.r_info = ELF32_R_INFO (0,
-                                   template_sequence[stub_reloc_idx[i]].r_type);
-        rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
+      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
+      rel.r_info = ELF32_R_INFO (0,
+                                 template_sequence[stub_reloc_idx[i]].r_type);
+      rel.r_addend = 0;
 
-        if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
-          /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
-             template should refer back to the instruction after the original
-             branch.  */
-          points_to = sym_value;
+      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
+        /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
+           template should refer back to the instruction after the original
+           branch.  We use target_section as Cortex-A8 erratum workaround stubs
+           are only generated when both source and target are in the same
+           section.  */
+        points_to = stub_entry->target_section->output_section->vma
+                    + stub_entry->target_section->output_offset
+                    + stub_entry->source_value;
 
-        /* There may be unintended consequences if this is not true.  */
-        BFD_ASSERT (stub_entry->h == NULL);
-
-        /* Note: _bfd_final_link_relocate doesn't handle these relocations
-           properly.  We should probably use this function unconditionally,
-           rather than only for certain relocations listed in the enclosing
-           conditional, for the sake of consistency.  */
-        elf32_arm_final_link_relocate (elf32_arm_howto_from_type
-            (template_sequence[stub_reloc_idx[i]].r_type),
-          stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
-          points_to, info, stub_entry->target_section, "", STT_FUNC,
-          branch_type, (struct elf_link_hash_entry *) stub_entry->h,
-          &unresolved_reloc, &error_message);
-      }
-    else
-      {
-        Elf_Internal_Rela rel;
-        bfd_boolean unresolved_reloc;
-        char *error_message;
-        bfd_vma points_to = sym_value + stub_entry->target_addend
-          + template_sequence[stub_reloc_idx[i]].reloc_addend;
-
-        rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
-        rel.r_info = ELF32_R_INFO (0,
-                                   template_sequence[stub_reloc_idx[i]].r_type);
-        rel.r_addend = 0;
-
-        elf32_arm_final_link_relocate (elf32_arm_howto_from_type
-            (template_sequence[stub_reloc_idx[i]].r_type),
-          stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
-          points_to, info, stub_entry->target_section, "", STT_FUNC,
-          stub_entry->branch_type,
-          (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
-          &error_message);
-      }
+      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
+          (template_sequence[stub_reloc_idx[i]].r_type),
+        stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
+        points_to, info, stub_entry->target_section, "", STT_FUNC,
+        stub_entry->branch_type,
+        (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
+        &error_message);
+    }
 
   return TRUE;
 #undef MAXRELOCS
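The unified loop folds the template's reloc_addend into points_to and clears rel.r_addend, where the removed Thumb branch instead passed the addend through the Rela entry. The two are equivalent because the relocation engine adds r_addend to the resolved symbol value. A compilable sketch of that identity; relocated_value is a hypothetical stand-in for the core of the relocation step, not a BFD function:

#include <stdint.h>

static uint32_t
relocated_value (uint32_t sym_value, uint32_t r_addend)
{
  return sym_value + r_addend;   /* symbol value plus Rela addend */
}

int
main (void)
{
  uint32_t sym_value = 0x8000, reloc_addend = 12;

  /* Old Thumb special case: addend carried in rel.r_addend.  */
  uint32_t old_way = relocated_value (sym_value, reloc_addend);

  /* Unified path: addend folded into points_to, rel.r_addend = 0.  */
  uint32_t new_way = relocated_value (sym_value + reloc_addend, 0);

  return old_way == new_way ? 0 : 1;   /* always 0: same result */
}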
@@ -5101,7 +5077,8 @@ cortex_a8_erratum_scan (bfd *input_bfd,
                   a8_fixes[num_a8_fixes].input_bfd = input_bfd;
                   a8_fixes[num_a8_fixes].section = section;
                   a8_fixes[num_a8_fixes].offset = i;
-                  a8_fixes[num_a8_fixes].addend = offset;
+                  a8_fixes[num_a8_fixes].target_offset =
+                    target - base_vma;
                   a8_fixes[num_a8_fixes].orig_insn = insn;
                   a8_fixes[num_a8_fixes].stub_name = stub_name;
                   a8_fixes[num_a8_fixes].stub_type = stub_type;
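Recording target - base_vma makes the stored value relative to the section start, where the old addend was relative to the stub address; presumably this keeps the value stable while stub placement is still being decided. A small sketch under that reading, with illustrative names only:

#include <stdint.h>

struct fix_sketch
{
  uint32_t offset;         /* where the erratum branch sits in the section */
  uint32_t target_offset;  /* where its target sits in the same section */
};

static struct fix_sketch
make_fix (uint32_t branch_offset_in_section,
          uint32_t target_vma, uint32_t base_vma)
{
  struct fix_sketch f;
  f.offset = branch_offset_in_section;      /* already section-relative */
  f.target_offset = target_vma - base_vma;  /* as in the hunk above */
  return f;
}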
@@ -5680,9 +5657,9 @@ elf32_arm_size_stubs (bfd *output_bfd,
       stub_entry->stub_offset = 0;
       stub_entry->id_sec = link_sec;
       stub_entry->stub_type = a8_fixes[i].stub_type;
+      stub_entry->source_value = a8_fixes[i].offset;
       stub_entry->target_section = a8_fixes[i].section;
-      stub_entry->target_value = a8_fixes[i].offset;
-      stub_entry->target_addend = a8_fixes[i].addend;
+      stub_entry->target_value = a8_fixes[i].target_offset;
       stub_entry->orig_insn = a8_fixes[i].orig_insn;
       stub_entry->branch_type = a8_fixes[i].branch_type;
 
@@ -16277,7 +16254,7 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
   bfd_vma veneered_insn_loc, veneer_entry_loc;
   bfd_signed_vma branch_offset;
   bfd *abfd;
-  unsigned int target;
+  unsigned int loc;
 
   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
   data = (struct a8_branch_to_stub_data *) in_arg;
@@ -16288,9 +16265,11 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
 
   contents = data->contents;
 
+  /* We use target_section as Cortex-A8 erratum workaround stubs are only
+     generated when both source and target are in the same section.  */
   veneered_insn_loc = stub_entry->target_section->output_section->vma
                       + stub_entry->target_section->output_offset
-                      + stub_entry->target_value;
+                      + stub_entry->source_value;
 
   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
                      + stub_entry->stub_sec->output_offset
@@ -16302,7 +16281,7 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
   branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
 
   abfd = stub_entry->target_section->owner;
-  target = stub_entry->target_value;
+  loc = stub_entry->source_value;
 
   /* We attempt to avoid this condition by setting stubs_always_after_branch
      in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
@@ -16363,8 +16342,8 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
       return FALSE;
     }
 
-  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
-  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
+  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
+  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
 
   return TRUE;
 }
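The patched 32-bit Thumb-2 branch is stored as two 16-bit halfwords, most significant halfword first, each written in the data endianness by bfd_put_16. A little-endian sketch of that final step; put16_le is a stand-in helper, not a BFD function:

#include <stdint.h>

static void
put16_le (uint8_t *p, uint16_t v)
{
  p[0] = v & 0xff;   /* low byte first: little-endian data assumed */
  p[1] = v >> 8;
}

/* Write the branch at the source offset recorded in the stub entry,
   mirroring the two bfd_put_16 calls in the hunk above.  */
static void
patch_branch (uint8_t *contents, uint32_t loc, uint32_t branch_insn)
{
  put16_le (&contents[loc],     (branch_insn >> 16) & 0xffff);
  put16_le (&contents[loc + 2], branch_insn & 0xffff);
}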