2011-06-06 Pedro Alves <pedro@codesourcery.com>

gdb/
	* infcall.c (run_inferior_call): Don't mask async.  Instead force
	a synchronous wait, if the target can async.

	* target.h (struct target_ops): Delete to_async_mask.
	(target_async_mask): Delete.
	* target.c (update_current_target): Delete references to to_async_mask.
	* linux-nat.c (linux_nat_async_mask_value): Delete.
	(linux_nat_is_async_p, linux_nat_can_async_p): Remove references
	to linux_nat_async_mask_value.
	(linux_nat_async_mask): Delete.
	(linux_nat_async, linux_nat_close): Remove references to
	linux_nat_async_mask_value.
	* record.c (record_async_mask_value): Delete.
	(record_async): Remove references to record_async_mask_value.
	(record_async_mask): Delete.
	(record_can_async_p, record_is_async_p): Remove references to
	record_async_mask_value.
	(init_record_ops, init_record_core_ops): Remove references to
	record_async_mask.
	* remote.c (remote_async_mask_value): Delete.
	(init_remote_ops): Remove reference to remote_async_mask.
	(remote_can_async_p, remote_is_async_p): Remove references to
	remote_async_mask_value.
	(remote_async): Remove references to remote_async_mask_value.
	(remote_async_mask): Delete.

	* infrun.c (fetch_inferior_event): Don't claim registers changed
	if the current thread is already not executing.
commit 3dd5b83d5b (parent 3c0013bf3b)
Author: Pedro Alves
Date:   2011-06-06 12:47:07 +00:00

8 changed files with 61 additions and 138 deletions
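Before the per-file diffs, the gist of the change: instead of temporarily masking a target's async mode around an inferior function call (the deleted target_async_mask machinery), run_inferior_call now simply waits synchronously after resuming, when the target is async-capable. Below is a minimal, compilable C sketch of the new control flow; proceed, wait_for_inferior, normal_stop and target_can_async_p are real GDB names, but their bodies here are illustrative stand-ins, not the real implementations.

#include <stdio.h>

/* Stand-in for the user's "set target-async" setting.  */
static int target_async_permitted = 1;

static int
target_can_async_p (void)
{
  /* After this commit, no mask value is consulted here.  */
  return target_async_permitted;
}

static void
proceed (void)
{
  printf ("proceed: resume the inferior for the call\n");
}

static void
wait_for_inferior (void)
{
  printf ("wait_for_inferior: block until the call returns\n");
}

static void
normal_stop (void)
{
  printf ("normal_stop: finish handling the stop\n");
}

/* New-style run_inferior_call: the call is synchronous even when the
   target could run asynchronously.  */
static void
run_inferior_call (void)
{
  proceed ();
  if (target_can_async_p ())
    {
      wait_for_inferior ();
      normal_stop ();
    }
}

int
main (void)
{
  run_inferior_call ();
  return 0;
}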

--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog

@@ -1,3 +1,34 @@
+2011-06-06  Pedro Alves  <pedro@codesourcery.com>
+
+	* infcall.c (run_inferior_call): Don't mask async.  Instead force
+	a synchronous wait, if the target can async.
+
+	* target.h (struct target_ops): Delete to_async_mask.
+	(target_async_mask): Delete.
+	* target.c (update_current_target): Delete references to to_async_mask.
+	* linux-nat.c (linux_nat_async_mask_value): Delete.
+	(linux_nat_is_async_p, linux_nat_can_async_p): Remove references
+	to linux_nat_async_mask_value.
+	(linux_nat_async_mask): Delete.
+	(linux_nat_async, linux_nat_close): Remove references to
+	linux_nat_async_mask_value.
+	* record.c (record_async_mask_value): Delete.
+	(record_async): Remove references to record_async_mask_value.
+	(record_async_mask): Delete.
+	(record_can_async_p, record_is_async_p): Remove references to
+	record_async_mask_value.
+	(init_record_ops, init_record_core_ops): Remove references to
+	record_async_mask.
+	* remote.c (remote_async_mask_value): Delete.
+	(init_remote_ops): Remove reference to remote_async_mask.
+	(remote_can_async_p, remote_is_async_p): Remove references to
+	remote_async_mask_value.
+	(remote_async): Remove references to remote_async_mask_value.
+	(remote_async_mask): Delete.
+
+	* infrun.c (fetch_inferior_event): Don't claim registers changed
+	if the current thread is already not executing.
+
 2011-06-03  Joel Brobecker  <brobecker@adacore.com> (obvious fix)
 
 	From Stephen Kitt  <steve@sk2.org>

--- a/gdb/infcall.c
+++ b/gdb/infcall.c

@@ -387,10 +387,8 @@ static struct gdb_exception
 run_inferior_call (struct thread_info *call_thread, CORE_ADDR real_pc)
 {
   volatile struct gdb_exception e;
-  int saved_async = 0;
   int saved_in_infcall = call_thread->control.in_infcall;
   ptid_t call_thread_ptid = call_thread->ptid;
-  char *saved_target_shortname = xstrdup (target_shortname);
 
   call_thread->control.in_infcall = 1;
@@ -401,22 +399,24 @@ run_inferior_call (struct thread_info *call_thread, CORE_ADDR real_pc)
   /* We want stop_registers, please...  */
   call_thread->control.proceed_to_finish = 1;
 
-  if (target_can_async_p ())
-    saved_async = target_async_mask (0);
-
   TRY_CATCH (e, RETURN_MASK_ALL)
-    proceed (real_pc, TARGET_SIGNAL_0, 0);
+    {
+      proceed (real_pc, TARGET_SIGNAL_0, 0);
+
+      /* Inferior function calls are always synchronous, even if the
+	 target supports asynchronous execution.  Do here what
+	 `proceed' itself does in sync mode.  */
+      if (target_can_async_p () && is_running (inferior_ptid))
+	{
+	  wait_for_inferior ();
+	  normal_stop ();
+	}
+    }
 
   /* At this point the current thread may have changed.  Refresh
      CALL_THREAD as it could be invalid if its thread has exited.  */
   call_thread = find_thread_ptid (call_thread_ptid);
 
-  /* Don't restore the async mask if the target has changed,
-     saved_async is for the original target.  */
-  if (saved_async
-      && strcmp (saved_target_shortname, target_shortname) == 0)
-    target_async_mask (saved_async);
-
   enable_watchpoints_after_interactive_call_stop ();
 
   /* Call breakpoint_auto_delete on the current contents of the bpstat
@@ -433,8 +433,6 @@ run_inferior_call (struct thread_info *call_thread, CORE_ADDR real_pc)
   if (call_thread != NULL)
     call_thread->control.in_infcall = saved_in_infcall;
 
-  xfree (saved_target_shortname);
-
   return e;
 }

--- a/gdb/infrun.c
+++ b/gdb/infrun.c

@@ -2739,7 +2739,16 @@ fetch_inferior_event (void *client_data)
      status mechanism.  */
 
   overlay_cache_invalid = 1;
-  registers_changed ();
+
+  /* But don't do it if the current thread is already stopped (hence
+     this is either a delayed event that will result in
+     TARGET_WAITKIND_IGNORE, or it's an event for another thread (and
+     we always clear the register and frame caches when the user
+     switches threads anyway).  If we didn't do this, a spurious
+     delayed event in all-stop mode would make the user lose the
+     selected frame.  */
+  if (non_stop || is_executing (inferior_ptid))
+    registers_changed ();
 
   make_cleanup_restore_integer (&execution_direction);
   execution_direction = target_execution_direction ();
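A toy model of the guard added above may help: flushing the register cache on a spurious delayed event in all-stop mode is what used to cost the user their selected frame. The state variables below are illustrative stand-ins, not GDB's real thread bookkeeping.

#include <stdio.h>

static int non_stop = 0;                  /* all-stop mode */
static int current_thread_executing = 0;  /* already stopped */
static int register_cache_valid = 1;      /* selected frame depends on it */

static void
registers_changed (void)
{
  register_cache_valid = 0;
}

/* Mirrors the new guard in fetch_inferior_event: only flush the
   caches when the event can be a real stop for a running thread.  */
static void
fetch_inferior_event_sketch (void)
{
  if (non_stop || current_thread_executing)
    registers_changed ();
}

int
main (void)
{
  fetch_inferior_event_sketch ();
  printf ("register cache valid after spurious event: %d\n",
          register_cache_valid);
  return 0;
}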

--- a/gdb/linux-nat.c
+++ b/gdb/linux-nat.c

@@ -250,15 +250,11 @@ static int linux_supports_tracesysgood_flag = -1;
 static int linux_supports_tracevforkdone_flag = -1;
 
-/* Async mode support.  */
-
-/* Zero if the async mode, although enabled, is masked, which means
-   linux_nat_wait should behave as if async mode was off.  */
-static int linux_nat_async_mask_value = 1;
-
 /* Stores the current used ptrace() options.  */
 static int current_ptrace_options = 0;
 
+/* Async mode support.  */
+
 /* The read/write ends of the pipe registered as waitable file in the
    event loop.  */
 static int linux_nat_event_pipe[2] = { -1, -1 };
@@ -306,7 +302,6 @@ static void linux_nat_async (void (*callback)
 			     (enum inferior_event_type event_type,
 			      void *context),
 			     void *context);
-static int linux_nat_async_mask (int mask);
 
 static int kill_lwp (int lwpid, int signo);
 static int stop_callback (struct lwp_info *lp, void *data);
@@ -5359,11 +5354,7 @@ linux_nat_is_async_p (void)
   /* NOTE: palves 2008-03-21: We're only async when the user requests
      it explicitly with the "set target-async" command.
      Someday, linux will always be async.  */
-  if (!target_async_permitted)
-    return 0;
-
-  /* See target.h/target_async_mask.  */
-  return linux_nat_async_mask_value;
+  return target_async_permitted;
 }
 
 /* target_can_async_p implementation.  */
@@ -5374,11 +5365,7 @@ linux_nat_can_async_p (void)
   /* NOTE: palves 2008-03-21: We're only async when the user requests
      it explicitly with the "set target-async" command.
      Someday, linux will always be async.  */
-  if (!target_async_permitted)
-    return 0;
-
-  /* See target.h/target_async_mask.  */
-  return linux_nat_async_mask_value;
+  return target_async_permitted;
 }
 
 static int
@@ -5398,37 +5385,6 @@ linux_nat_supports_multi_process (void)
   return linux_multi_process;
 }
 
-/* target_async_mask implementation.  */
-
-static int
-linux_nat_async_mask (int new_mask)
-{
-  int curr_mask = linux_nat_async_mask_value;
-
-  if (curr_mask != new_mask)
-    {
-      if (new_mask == 0)
-	{
-	  linux_nat_async (NULL, 0);
-	  linux_nat_async_mask_value = new_mask;
-	}
-      else
-	{
-	  linux_nat_async_mask_value = new_mask;
-
-	  /* If we're going out of async-mask in all-stop, then the
-	     inferior is stopped.  The next resume will call
-	     target_async.  In non-stop, the target event source
-	     should be always registered in the event loop.  Do so
-	     now.  */
-	  if (non_stop)
-	    linux_nat_async (inferior_event_handler, 0);
-	}
-    }
-
-  return curr_mask;
-}
-
 static int async_terminal_is_ours = 1;
 
 /* target_terminal_inferior implementation.  */
@@ -5555,10 +5511,6 @@ static void
 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
 				   void *context), void *context)
 {
-  if (linux_nat_async_mask_value == 0 || !target_async_permitted)
-    internal_error (__FILE__, __LINE__,
-		    "Calling target_async when async is masked");
-
   if (callback != NULL)
     {
       async_client_callback = callback;
@@ -5651,9 +5603,6 @@ linux_nat_close (int quitting)
   if (target_is_async_p ())
     target_async (NULL, 0);
 
-  /* Reset the async_masking.  */
-  linux_nat_async_mask_value = 1;
-
   if (linux_ops->to_close)
     linux_ops->to_close (quitting);
 }
@@ -5800,7 +5749,6 @@ linux_nat_add_target (struct target_ops *t)
   t->to_is_async_p = linux_nat_is_async_p;
   t->to_supports_non_stop = linux_nat_supports_non_stop;
   t->to_async = linux_nat_async;
-  t->to_async_mask = linux_nat_async_mask;
   t->to_terminal_inferior = linux_nat_terminal_inferior;
   t->to_terminal_ours = linux_nat_terminal_ours;
   t->to_close = linux_nat_close;

--- a/gdb/record.c
+++ b/gdb/record.c

@@ -1817,16 +1817,10 @@ record_goto_bookmark (gdb_byte *bookmark, int from_tty)
   return;
 }
 
-static int record_async_mask_value = 1;
-
 static void
 record_async (void (*callback) (enum inferior_event_type event_type,
 				void *context), void *context)
 {
-  if (record_async_mask_value == 0)
-    internal_error (__FILE__, __LINE__,
-		    _("Calling record_async when async is masked"));
-
   /* If we're on top of a line target (e.g., linux-nat, remote), then
      set it to async mode as well.  Will be NULL if we're sitting on
      top of the core target, for "record restore".  */
@@ -1834,33 +1828,18 @@ record_async (void (*callback) (enum inferior_event_type event_type,
     record_beneath_to_async (callback, context);
 }
 
-static int
-record_async_mask (int new_mask)
-{
-  int curr_mask = record_async_mask_value;
-
-  record_async_mask_value = new_mask;
-  return curr_mask;
-}
-
 static int
 record_can_async_p (void)
 {
   /* We only enable async when the user specifically asks for it.  */
-  if (!target_async_permitted)
-    return 0;
-
-  return record_async_mask_value;
+  return target_async_permitted;
 }
 
 static int
 record_is_async_p (void)
 {
   /* We only enable async when the user specifically asks for it.  */
-  if (!target_async_permitted)
-    return 0;
-
-  return record_async_mask_value;
+  return target_async_permitted;
 }
 
 static enum exec_direction_kind
@@ -1899,7 +1878,6 @@ init_record_ops (void)
   record_ops.to_async = record_async;
   record_ops.to_can_async_p = record_can_async_p;
   record_ops.to_is_async_p = record_is_async_p;
-  record_ops.to_async_mask = record_async_mask;
   record_ops.to_execution_direction = record_execution_direction;
   record_ops.to_magic = OPS_MAGIC;
 }
@@ -2125,7 +2103,6 @@ init_record_core_ops (void)
   record_core_ops.to_async = record_async;
   record_core_ops.to_can_async_p = record_can_async_p;
   record_core_ops.to_is_async_p = record_is_async_p;
-  record_core_ops.to_async_mask = record_async_mask;
   record_core_ops.to_execution_direction = record_execution_direction;
   record_core_ops.to_magic = OPS_MAGIC;
 }

--- a/gdb/remote.c
+++ b/gdb/remote.c

@@ -134,8 +134,6 @@ static int remote_is_async_p (void);
 static void remote_async (void (*callback) (enum inferior_event_type event_type,
 					    void *context), void *context);
 
-static int remote_async_mask (int new_mask);
-
 static void remote_detach (struct target_ops *ops, char *args, int from_tty);
 
 static void remote_interrupt (int signo);
@@ -721,8 +719,6 @@ static struct target_ops remote_ops;
 
 static struct target_ops extended_remote_ops;
 
-static int remote_async_mask_value = 1;
-
 /* FIXME: cagney/1999-09-23: Even though getpkt was called with
    ``forever'' still use the normal timeout mechanism.  This is
    currently used by the ASYNC code to guarentee that target reads
@@ -10359,7 +10355,6 @@ Specify the serial device it is connected to\n\
   remote_ops.to_can_async_p = remote_can_async_p;
   remote_ops.to_is_async_p = remote_is_async_p;
   remote_ops.to_async = remote_async;
-  remote_ops.to_async_mask = remote_async_mask;
   remote_ops.to_terminal_inferior = remote_terminal_inferior;
   remote_ops.to_terminal_ours = remote_terminal_ours;
   remote_ops.to_supports_non_stop = remote_supports_non_stop;
@@ -10426,7 +10421,7 @@ remote_can_async_p (void)
     return 0;
 
   /* We're async whenever the serial device is.  */
-  return remote_async_mask_value && serial_can_async_p (remote_desc);
+  return serial_can_async_p (remote_desc);
 }
 
 static int
@@ -10437,7 +10432,7 @@ remote_is_async_p (void)
     return 0;
 
   /* We're async whenever the serial device is.  */
-  return remote_async_mask_value && serial_is_async_p (remote_desc);
+  return serial_is_async_p (remote_desc);
 }
 
 /* Pass the SERIAL event on and up to the client.  One day this code
@@ -10473,10 +10468,6 @@ static void
 remote_async (void (*callback) (enum inferior_event_type event_type,
 				void *context), void *context)
 {
-  if (remote_async_mask_value == 0)
-    internal_error (__FILE__, __LINE__,
-		    _("Calling remote_async when async is masked"));
-
   if (callback != NULL)
     {
       serial_async (remote_desc, remote_async_serial_handler, NULL);
@@ -10487,15 +10478,6 @@ remote_async (void (*callback) (enum inferior_event_type event_type,
     serial_async (remote_desc, NULL, NULL);
 }
 
-static int
-remote_async_mask (int new_mask)
-{
-  int curr_mask = remote_async_mask_value;
-
-  remote_async_mask_value = new_mask;
-  return curr_mask;
-}
-
 static void
 set_remote_cmd (char *args, int from_tty)
 {
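The same simplification repeats across linux-nat.c, record.c, and remote.c: every *_can_async_p / *_is_async_p predicate stops consulting a per-target mask value. A compilable before/after sketch of that pattern (the names here are illustrative, not the real symbols):

#include <stdio.h>

static int target_async_permitted = 1;
static int async_mask_value = 1;   /* the state this commit deletes */

/* Before: async only when permitted AND not masked.  */
static int
can_async_p_old (void)
{
  if (!target_async_permitted)
    return 0;
  return async_mask_value;
}

/* After: the user setting alone decides.  */
static int
can_async_p_new (void)
{
  return target_async_permitted;
}

int
main (void)
{
  printf ("old: %d  new: %d\n", can_async_p_old (), can_async_p_new ());
  return 0;
}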

--- a/gdb/target.c
+++ b/gdb/target.c

@@ -659,7 +659,6 @@ update_current_target (void)
       INHERIT (to_can_async_p, t);
       INHERIT (to_is_async_p, t);
       INHERIT (to_async, t);
-      INHERIT (to_async_mask, t);
       INHERIT (to_find_memory_regions, t);
       INHERIT (to_make_corefile_notes, t);
       INHERIT (to_get_bookmark, t);
@@ -829,9 +828,6 @@ update_current_target (void)
   de_fault (to_async,
 	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
 	    tcomplain);
-  de_fault (to_async_mask,
-	    (int (*) (int))
-	    return_one);
   de_fault (to_thread_architecture,
 	    default_thread_architecture);
   current_target.to_read_description = NULL;

--- a/gdb/target.h
+++ b/gdb/target.h

@@ -524,7 +524,6 @@ struct target_ops
     int (*to_can_async_p) (void);
     int (*to_is_async_p) (void);
     void (*to_async) (void (*) (enum inferior_event_type, void *), void *);
-    int (*to_async_mask) (int);
     int (*to_supports_non_stop) (void);
     /* find_memory_regions support method for gcore */
     int (*to_find_memory_regions) (find_memory_region_ftype func, void *data);
@@ -1255,23 +1254,6 @@ int target_supports_non_stop (void);
 #define target_async(CALLBACK,CONTEXT) \
      (current_target.to_async ((CALLBACK), (CONTEXT)))
 
-/* This is to be used ONLY within call_function_by_hand().  It provides
-   a workaround, to have inferior function calls done in sychronous
-   mode, even though the target is asynchronous.  After
-   target_async_mask(0) is called, calls to target_can_async_p() will
-   return FALSE , so that target_resume() will not try to start the
-   target asynchronously.  After the inferior stops, we IMMEDIATELY
-   restore the previous nature of the target, by calling
-   target_async_mask(1).  After that, target_can_async_p() will return
-   TRUE.  ANY OTHER USE OF THIS FEATURE IS DEPRECATED.
-
-   FIXME ezannoni 1999-12-13: we won't need this once we move
-   the turning async on and off to the single execution commands,
-   from where it is done currently, in remote_resume().  */
-
-#define target_async_mask(MASK)	\
-  (current_target.to_async_mask (MASK))
-
 #define target_execution_direction() \
   (current_target.to_execution_direction ())
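After this commit, the async-related slice of struct target_ops looks roughly as follows. This is a trimmed sketch: the four function pointers match the header above, while the enum stand-in and the omission of every other member are assumptions made so the fragment compiles on its own.

/* Stand-in for GDB's real enum; only here so the sketch compiles.  */
enum inferior_event_type { INF_REG_EVENT = 1 };

struct target_ops
{
  int (*to_can_async_p) (void);
  int (*to_is_async_p) (void);
  void (*to_async) (void (*) (enum inferior_event_type, void *), void *);
  int (*to_supports_non_stop) (void);
  /* to_async_mask is gone; callers that need synchronous behavior,
     such as run_inferior_call, now wait explicitly instead of
     masking the target.  */
};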