Add lval_funcs::is_optimized_out

This adds an is_optimized_out function pointer to lval_funcs, and
changes value_optimized_out to call it.  This new function lets gdb
determine if a value is optimized out without necessarily fetching the
value.  This is needed for a subsequent patch, where an attempt to
access a lazy value would fail due to the value size limit -- however,
the access was only needed to determine the optimized-out state.
Author: Tom Tromey
Date: 2021-09-10 12:40:54 -06:00
parent 25b0a5714c
commit a519e8ffe2
5 changed files with 66 additions and 15 deletions
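In outline, the patch gives a computed lvalue's function table an optional query that value_optimized_out can consult before resorting to value_fetch_lazy. The standalone sketch below only models that flow and is not gdb code: lval_funcs_sketch, sketch_value_optimized_out and the two bool members of value are invented stand-ins, and the real implementation in the hunks that follow also special-cases lval_memory and wraps the fetch in a try/catch.

#include <cstdio>

/* Simplified stand-ins for gdb's real types; only the parts relevant
   to the new hook are modeled.  */
struct value;

struct lval_funcs_sketch
{
  /* Optional: report whether a lazy value is optimized out without
     materializing its contents.  May be nullptr.  */
  bool (*is_optimized_out) (struct value *v);
};

struct value
{
  bool lazy = true;
  bool optimized_out_after_fetch = false;  /* what a full fetch would find */
  const lval_funcs_sketch *funcs = nullptr;
};

/* Models the new control flow in value_optimized_out: for a lazy
   value whose funcs table provides the hook, prefer the hook over
   fetching the (possibly very large) contents.  */
static bool
sketch_value_optimized_out (struct value *v)
{
  if (v->lazy && v->funcs != nullptr && v->funcs->is_optimized_out != nullptr)
    return v->funcs->is_optimized_out (v);
  /* Fall back to "fetching" the value.  */
  v->lazy = false;
  return v->optimized_out_after_fetch;
}

/* A hook that knows the answer without reading any contents, e.g.
   because every piece of the value is optimized out.  */
static bool
always_optimized_out (struct value *)
{
  return true;
}

int
main ()
{
  lval_funcs_sketch funcs = { always_optimized_out };
  value v;
  v.funcs = &funcs;
  /* Prints 1 without ever "fetching" v.  */
  std::printf ("%d\n", sketch_value_optimized_out (&v) ? 1 : 0);
  return 0;
}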


@@ -149,10 +149,13 @@ allocate_piece_closure (dwarf2_per_cu_data *per_cu,
 /* Read or write a pieced value V. If FROM != NULL, operate in "write
    mode": copy FROM into the pieces comprising V. If FROM == NULL,
    operate in "read mode": fetch the contents of the (lazy) value V by
-   composing it from its pieces. */
+   composing it from its pieces. If CHECK_OPTIMIZED is true, then no
+   reading or writing is done; instead the return value of this
+   function is true if any piece is optimized out. When
+   CHECK_OPTIMIZED is true, FROM must be nullptr. */
-static void
-rw_pieced_value (value *v, value *from)
+static bool
+rw_pieced_value (value *v, value *from, bool check_optimized)
 {
   int i;
   LONGEST offset = 0, max_offset;
@@ -163,6 +166,7 @@ rw_pieced_value (value *v, value *from)
   gdb::byte_vector buffer;
   bool bits_big_endian = type_byte_order (value_type (v)) == BFD_ENDIAN_BIG;
+  gdb_assert (!check_optimized || from == nullptr);
   if (from != nullptr)
     {
       from_contents = value_contents (from);
@@ -174,7 +178,10 @@ rw_pieced_value (value *v, value *from)
         internal_error (__FILE__, __LINE__,
                         _("Should not be able to create a lazy value with "
                           "an enclosing type"));
-      v_contents = value_contents_raw (v);
+      if (check_optimized)
+        v_contents = nullptr;
+      else
+        v_contents = value_contents_raw (v);
       from_contents = nullptr;
     }
@@ -240,17 +247,22 @@ rw_pieced_value (value *v, value *from)
                                         buffer, &optim, &unavail))
               {
                 if (optim)
-                  mark_value_bits_optimized_out (v, offset,
-                                                 this_size_bits);
-                if (unavail)
+                  {
+                    if (check_optimized)
+                      return true;
+                    mark_value_bits_optimized_out (v, offset,
+                                                   this_size_bits);
+                  }
+                if (unavail && !check_optimized)
                   mark_value_bits_unavailable (v, offset,
                                                this_size_bits);
                 break;
               }
-            copy_bitwise (v_contents, offset,
-                          buffer.data (), bits_to_skip % 8,
-                          this_size_bits, bits_big_endian);
+            if (!check_optimized)
+              copy_bitwise (v_contents, offset,
+                            buffer.data (), bits_to_skip % 8,
+                            this_size_bits, bits_big_endian);
           }
         else
           {
@@ -286,6 +298,9 @@ rw_pieced_value (value *v, value *from)
         case DWARF_VALUE_MEMORY:
           {
+            if (check_optimized)
+              break;
             bits_to_skip += p->offset;
             CORE_ADDR start_addr = p->v.mem.addr + bits_to_skip / 8;
@@ -355,6 +370,9 @@ rw_pieced_value (value *v, value *from)
         case DWARF_VALUE_STACK:
           {
+            if (check_optimized)
+              break;
             if (from != nullptr)
               {
                 mark_value_bits_optimized_out (v, offset, this_size_bits);
@@ -384,6 +402,9 @@ rw_pieced_value (value *v, value *from)
         case DWARF_VALUE_LITERAL:
          {
+            if (check_optimized)
+              break;
             if (from != nullptr)
               {
                 mark_value_bits_optimized_out (v, offset, this_size_bits);
@@ -418,6 +439,8 @@ rw_pieced_value (value *v, value *from)
           break;
         case DWARF_VALUE_OPTIMIZED_OUT:
+          if (check_optimized)
+            return true;
           mark_value_bits_optimized_out (v, offset, this_size_bits);
           break;
@@ -428,18 +451,26 @@ rw_pieced_value (value *v, value *from)
       offset += this_size_bits;
       bits_to_skip = 0;
     }
+  return false;
 }
 static void
 read_pieced_value (value *v)
 {
-  rw_pieced_value (v, nullptr);
+  rw_pieced_value (v, nullptr, false);
 }
 static void
 write_pieced_value (value *to, value *from)
 {
-  rw_pieced_value (to, from);
+  rw_pieced_value (to, from, false);
 }
+static bool
+is_optimized_out_pieced_value (value *v)
+{
+  return rw_pieced_value (v, nullptr, true);
+}
 /* An implementation of an lval_funcs method to see whether a value is
@@ -617,6 +648,7 @@ free_pieced_value_closure (value *v)
 static const struct lval_funcs pieced_value_funcs = {
   read_pieced_value,
   write_pieced_value,
+  is_optimized_out_pieced_value,
   indirect_pieced_value,
   coerce_pieced_ref,
   check_pieced_synthetic_pointer,
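The check_optimized argument effectively turns rw_pieced_value into a query: it walks the same piece list, but returns true at the first optimized-out piece and never copies any contents, which is what lets is_optimized_out_pieced_value run on a still-lazy value. The stripped-down, self-contained illustration below shows only that read-versus-check pattern; the piece type and kinds are hypothetical stand-ins, not the DWARF machinery in the hunks above.

#include <vector>

/* Hypothetical stand-ins for the DWARF piece machinery.  */
enum class piece_kind { memory, reg, optimized_out };

struct piece
{
  piece_kind kind;
};

/* One walker serves both reading and the "is it optimized out?"
   query.  In check mode it neither reads nor writes; it only
   reports.  OUT may be nullptr in that case.  */
static bool
walk_pieces (const std::vector<piece> &pieces,
             std::vector<unsigned char> *out, bool check_optimized)
{
  for (const piece &p : pieces)
    {
      if (p.kind == piece_kind::optimized_out)
        {
          if (check_optimized)
            return true;        /* The caller only wanted the answer.  */
          /* Otherwise the corresponding bits of the destination
             would be marked optimized out.  */
          continue;
        }
      if (check_optimized)
        continue;               /* No reading in check mode.  */
      out->push_back (0);       /* Stand-in for copying piece contents.  */
    }
  return false;
}

/* The check-only entry point, analogous to is_optimized_out_pieced_value.  */
static bool
pieces_optimized_out (const std::vector<piece> &pieces)
{
  return walk_pieces (pieces, nullptr, true);
}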


@@ -1282,6 +1282,7 @@ static const struct lval_funcs entry_data_value_funcs =
 {
   NULL, /* read */
   NULL, /* write */
+  nullptr,
   NULL, /* indirect */
   entry_data_value_coerce_ref,
   NULL, /* check_synthetic_pointer */


@@ -254,6 +254,7 @@ static const struct lval_funcs opencl_value_funcs =
 {
   lval_func_read,
   lval_func_write,
+  nullptr,
   NULL, /* indirect */
   NULL, /* coerce_ref */
   lval_func_check_synthetic_pointer,


@@ -1407,10 +1407,21 @@ value_contents_writeable (struct value *value)
 int
 value_optimized_out (struct value *value)
 {
-  /* We can only know if a value is optimized out once we have tried to
-     fetch it. */
-  if (value->optimized_out.empty () && value->lazy)
+  if (value->lazy)
     {
+      /* See if we can compute the result without fetching the
+         value. */
+      if (VALUE_LVAL (value) == lval_memory)
+        return false;
+      else if (VALUE_LVAL (value) == lval_computed)
+        {
+          const struct lval_funcs *funcs = value->location.computed.funcs;
+          if (funcs->is_optimized_out != nullptr)
+            return funcs->is_optimized_out (value);
+        }
+      /* Fall back to fetching. */
       try
         {
           value_fetch_lazy (value);


@@ -264,6 +264,12 @@ struct lval_funcs
      TOVAL is not considered as an lvalue. */
   void (*write) (struct value *toval, struct value *fromval);
+  /* Return true if any part of V is optimized out, false otherwise.
+     This will only be called for lazy values -- if the value has been
+     fetched, then the value's optimized-out bits are consulted
+     instead. */
+  bool (*is_optimized_out) (struct value *v);
   /* If non-NULL, this is used to implement pointer indirection for
      this value. This method may return NULL, in which case value_ind
      will fall back to ordinary indirection. */
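For providers of computed lvalues the contract documented above is deliberately loose: a table with no cheap answer can leave the new slot as nullptr, as the entry-value and OpenCL tables in this patch do, and value_optimized_out then falls back to fetching. A small sketch of the two options, using a simplified table rather than the real lval_funcs declaration:

struct value;

/* Simplified stand-in for the funcs table; only the slots discussed
   here are modeled.  */
struct funcs_table
{
  void (*read) (struct value *v);
  void (*write) (struct value *to, struct value *from);
  bool (*is_optimized_out) (struct value *v);  /* new, optional slot */
};

static void stub_read (struct value *) {}
static void stub_write (struct value *, struct value *) {}

/* Option 1: no cheap answer -- leave the slot nullptr and let
   value_optimized_out fall back to fetching the value.  */
static const funcs_table plain_funcs = { stub_read, stub_write, nullptr };

/* Option 2: answer without fetching.  Per the comment above, the hook
   is only ever called while the value is still lazy.  */
static bool
cheap_check (struct value *)
{
  return false;
}

static const funcs_table clever_funcs = { stub_read, stub_write, cheap_check };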