clean up some target delegation cases
This patch cleans up some minor inconsistencies in target delegation.  Its primary purpose is to avoid confusion in the code.  A few spots were checking the "beneath" target; however, this can only be NULL for the dummy target, so such tests are not needed.  Some other spots were iterating over the beneath targets, looking for a method implementation.  This is not needed for methods handled by make-target-delegates, as there is always an implementation.

2014-07-18  Tom Tromey  <tromey@redhat.com>

	PR gdb/17130:
	* spu-multiarch.c (spu_region_ok_for_hw_watchpoint)
	(spu_fetch_registers, spu_store_registers, spu_xfer_partial)
	(spu_search_memory, spu_mourn_inferior): Simplify delegation.
	* linux-thread-db.c (thread_db_pid_to_str): Always delegate.
	* windows-nat.c (windows_xfer_partial): Always delegate.
	* record-btrace.c (record_btrace_xfer_partial): Simplify
	delegation.
	(record_btrace_fetch_registers, record_btrace_store_registers)
	(record_btrace_prepare_to_store, record_btrace_resume)
	(record_btrace_wait, record_btrace_find_new_threads)
	(record_btrace_thread_alive): Likewise.
	* procfs.c (procfs_xfer_partial): Always delegate.
	* corelow.c (core_xfer_partial): Always delegate.
	* sol-thread.c (sol_find_new_threads): Simplify delegation.
parent 83814951ff
commit e75fdfcad1
8 changed files with 50 additions and 97 deletions
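Background for the change, as a minimal self-contained sketch (toy code, not from this commit or from GDB itself): every target stack bottoms out in the dummy target, which supplies a default for every method that make-target-delegates handles.  So ops->beneath can only be NULL at the dummy target itself, and a delegated method slot is never NULL anywhere a delegator can reach, which is why the NULL tests and search loops removed below could never fire.  The names dummy_target and delegate_thread_alive below are hypothetical, standing in for the generated delegation code.

#include <stdio.h>

struct target_ops
{
  struct target_ops *beneath;	/* Next target on the stack.  */
  int (*to_thread_alive) (struct target_ops *);
};

/* The dummy target sits at the bottom of every stack and provides a
   default implementation, so no reachable slot is ever NULL.  */

static int
dummy_thread_alive (struct target_ops *self)
{
  return 0;
}

static struct target_ops dummy_target = { NULL, dummy_thread_alive };

/* What a generated delegator can now look like: delegate
   unconditionally, with no loop and no NULL checks.  */

static int
delegate_thread_alive (struct target_ops *ops)
{
  ops = ops->beneath;			/* Never NULL above the dummy target.  */
  return ops->to_thread_alive (ops);	/* Never NULL: the dummy fills it.  */
}

int
main (void)
{
  /* A target that does not implement the method itself; the
     delegator installed in its slot forwards to the dummy.  */
  struct target_ops record_btrace = { &dummy_target, delegate_thread_alive };

  printf ("%d\n", record_btrace.to_thread_alive (&record_btrace));
  return 0;
}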
gdb/ChangeLog
@@ -1,3 +1,21 @@
+2014-07-18  Tom Tromey  <tromey@redhat.com>
+
+	PR gdb/17130:
+	* spu-multiarch.c (spu_region_ok_for_hw_watchpoint)
+	(spu_fetch_registers, spu_store_registers, spu_xfer_partial)
+	(spu_search_memory, spu_mourn_inferior): Simplify delegation.
+	* linux-thread-db.c (thread_db_pid_to_str): Always delegate.
+	* windows-nat.c (windows_xfer_partial): Always delegate.
+	* record-btrace.c (record_btrace_xfer_partial): Simplify
+	delegation.
+	(record_btrace_fetch_registers, record_btrace_store_registers)
+	(record_btrace_prepare_to_store, record_btrace_resume)
+	(record_btrace_wait, record_btrace_find_new_threads)
+	(record_btrace_thread_alive): Likewise.
+	* procfs.c (procfs_xfer_partial): Always delegate.
+	* corelow.c (core_xfer_partial): Always delegate.
+	* sol-thread.c (sol_find_new_threads): Simplify delegation.
+
 2014-07-18  Tom Tromey  <tromey@redhat.com>
 
 	* exec.c (exec_make_note_section): Move earlier.
gdb/corelow.c
@@ -871,12 +871,10 @@ core_xfer_partial (struct target_ops *ops, enum target_object object,
       return TARGET_XFER_E_IO;
 
     default:
-      if (ops->beneath != NULL)
-	return ops->beneath->to_xfer_partial (ops->beneath, object,
-					      annex, readbuf,
-					      writebuf, offset, len,
-					      xfered_len);
-      return TARGET_XFER_E_IO;
+      return ops->beneath->to_xfer_partial (ops->beneath, object,
+					    annex, readbuf,
+					    writebuf, offset, len,
+					    xfered_len);
     }
 }
 
gdb/linux-thread-db.c
@@ -1767,10 +1767,7 @@ thread_db_pid_to_str (struct target_ops *ops, ptid_t ptid)
     }
 
   beneath = find_target_beneath (ops);
-  if (beneath->to_pid_to_str (beneath, ptid))
-    return beneath->to_pid_to_str (beneath, ptid);
-
-  return normal_pid_to_str (ptid);
+  return beneath->to_pid_to_str (beneath, ptid);
 }
 
 /* Return a string describing the state of the thread specified by
gdb/procfs.c
@@ -3993,11 +3993,9 @@ procfs_xfer_partial (struct target_ops *ops, enum target_object object,
 #endif
 
     default:
-      if (ops->beneath != NULL)
-	return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
-					      readbuf, writebuf, offset, len,
-					      xfered_len);
-      return TARGET_XFER_E_IO;
+      return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
+					    readbuf, writebuf, offset, len,
+					    xfered_len);
     }
 }
 
gdb/record-btrace.c
@@ -896,13 +896,9 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
     }
 
   /* Forward the request.  */
-  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-    if (ops->to_xfer_partial != NULL)
-      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
-				   offset, len, xfered_len);
-
-  *xfered_len = len;
-  return TARGET_XFER_UNAVAILABLE;
+  ops = ops->beneath;
+  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
+			       offset, len, xfered_len);
 }
 
 /* The to_insert_breakpoint method of target record-btrace.  */
@@ -996,14 +992,9 @@ record_btrace_fetch_registers (struct target_ops *ops,
     }
   else
     {
-      struct target_ops *t;
+      struct target_ops *t = ops->beneath;
 
-      for (t = ops->beneath; t != NULL; t = t->beneath)
-	if (t->to_fetch_registers != NULL)
-	  {
-	    t->to_fetch_registers (t, regcache, regno);
-	    break;
-	  }
+      t->to_fetch_registers (t, regcache, regno);
     }
 }
 
@@ -1020,14 +1011,8 @@ record_btrace_store_registers (struct target_ops *ops,
 
   gdb_assert (may_write_registers != 0);
 
-  for (t = ops->beneath; t != NULL; t = t->beneath)
-    if (t->to_store_registers != NULL)
-      {
-	t->to_store_registers (t, regcache, regno);
-	return;
-      }
-
-  noprocess ();
+  t = ops->beneath;
+  t->to_store_registers (t, regcache, regno);
 }
 
 /* The to_prepare_to_store method of target record-btrace.  */
@@ -1041,12 +1026,8 @@ record_btrace_prepare_to_store (struct target_ops *ops,
   if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
     return;
 
-  for (t = ops->beneath; t != NULL; t = t->beneath)
-    if (t->to_prepare_to_store != NULL)
-      {
-	t->to_prepare_to_store (t, regcache);
-	return;
-      }
+  t = ops->beneath;
+  t->to_prepare_to_store (t, regcache);
 }
 
 /* The branch trace frame cache.  */
@@ -1533,11 +1514,8 @@ record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
   /* As long as we're not replaying, just forward the request.  */
   if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
     {
-      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-	if (ops->to_resume != NULL)
-	  return ops->to_resume (ops, ptid, step, signal);
-
-      error (_("Cannot find target for stepping."));
+      ops = ops->beneath;
+      return ops->to_resume (ops, ptid, step, signal);
     }
 
   /* Compute the btrace thread flag for the requested move.  */
@@ -1760,11 +1738,8 @@ record_btrace_wait (struct target_ops *ops, ptid_t ptid,
   /* As long as we're not replaying, just forward the request.  */
   if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
     {
-      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-	if (ops->to_wait != NULL)
-	  return ops->to_wait (ops, ptid, status, options);
-
-      error (_("Cannot find target for waiting."));
+      ops = ops->beneath;
+      return ops->to_wait (ops, ptid, status, options);
     }
 
   /* Let's find a thread to move.  */
@@ -1826,12 +1801,8 @@ record_btrace_find_new_threads (struct target_ops *ops)
     return;
 
   /* Forward the request.  */
-  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-    if (ops->to_find_new_threads != NULL)
-      {
-	ops->to_find_new_threads (ops);
-	break;
-      }
+  ops = ops->beneath;
+  ops->to_find_new_threads (ops);
 }
 
 /* The to_thread_alive method of target record-btrace.  */
@@ -1844,11 +1815,8 @@ record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
     return find_thread_ptid (ptid) != NULL;
 
   /* Forward the request.  */
-  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
-    if (ops->to_thread_alive != NULL)
-      return ops->to_thread_alive (ops, ptid);
-
-  return 0;
+  ops = ops->beneath;
+  return ops->to_thread_alive (ops, ptid);
 }
 
 /* Set the replay branch trace instruction iterator.  If IT is NULL, replay
gdb/sol-thread.c
@@ -1084,8 +1084,7 @@ sol_find_new_threads (struct target_ops *ops)
   struct target_ops *beneath = find_target_beneath (ops);
 
   /* First Find any new LWP's.  */
-  if (beneath->to_find_new_threads != NULL)
-    beneath->to_find_new_threads (beneath);
+  beneath->to_find_new_threads (beneath);
 
   /* Then find any new user-level threads.  */
   p_td_ta_thr_iter (main_ta, sol_find_new_threads_callback, (void *) 0,
gdb/spu-multiarch.c
@@ -122,18 +122,12 @@ spu_region_ok_for_hw_watchpoint (struct target_ops *self,
 				 CORE_ADDR addr, int len)
 {
   struct target_ops *ops_beneath = find_target_beneath (&spu_ops);
-  while (ops_beneath && !ops_beneath->to_region_ok_for_hw_watchpoint)
-    ops_beneath = find_target_beneath (ops_beneath);
 
   /* We cannot watch SPU local store.  */
   if (SPUADDR_SPU (addr) != -1)
     return 0;
 
-  if (ops_beneath)
-    return ops_beneath->to_region_ok_for_hw_watchpoint (ops_beneath,
-							addr, len);
-
-  return 0;
+  return ops_beneath->to_region_ok_for_hw_watchpoint (ops_beneath, addr, len);
 }
 
 /* Override the to_fetch_registers routine.  */
@@ -150,10 +144,6 @@ spu_fetch_registers (struct target_ops *ops,
   /* This version applies only if we're currently in spu_run.  */
   if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
     {
-      while (ops_beneath && !ops_beneath->to_fetch_registers)
-	ops_beneath = find_target_beneath (ops_beneath);
-
-      gdb_assert (ops_beneath);
       ops_beneath->to_fetch_registers (ops_beneath, regcache, regno);
       return;
     }
@@ -208,10 +198,6 @@ spu_store_registers (struct target_ops *ops,
   /* This version applies only if we're currently in spu_run.  */
   if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
     {
-      while (ops_beneath && !ops_beneath->to_fetch_registers)
-	ops_beneath = find_target_beneath (ops_beneath);
-
-      gdb_assert (ops_beneath);
       ops_beneath->to_store_registers (ops_beneath, regcache, regno);
       return;
     }
@@ -254,9 +240,6 @@ spu_xfer_partial (struct target_ops *ops, enum target_object object,
 			  ULONGEST *xfered_len)
 {
   struct target_ops *ops_beneath = find_target_beneath (ops);
-  while (ops_beneath && !ops_beneath->to_xfer_partial)
-    ops_beneath = find_target_beneath (ops_beneath);
-  gdb_assert (ops_beneath);
 
   /* Use the "mem" spufs file to access SPU local store.  */
   if (object == TARGET_OBJECT_MEMORY)
@@ -308,12 +291,9 @@ spu_search_memory (struct target_ops* ops,
 		   CORE_ADDR *found_addrp)
 {
   struct target_ops *ops_beneath = find_target_beneath (ops);
-  while (ops_beneath && !ops_beneath->to_search_memory)
-    ops_beneath = find_target_beneath (ops_beneath);
 
-  /* For SPU local store, always fall back to the simple method.  Likewise
-     if we do not have any target-specific special implementation.  */
-  if (!ops_beneath || SPUADDR_SPU (start_addr) >= 0)
+  /* For SPU local store, always fall back to the simple method.  */
+  if (SPUADDR_SPU (start_addr) >= 0)
     return simple_search_memory (ops,
 				 start_addr, search_space_len,
 				 pattern, pattern_len, found_addrp);
@@ -378,10 +358,7 @@ static void
 spu_mourn_inferior (struct target_ops *ops)
 {
   struct target_ops *ops_beneath = find_target_beneath (ops);
-  while (ops_beneath && !ops_beneath->to_mourn_inferior)
-    ops_beneath = find_target_beneath (ops_beneath);
 
-  gdb_assert (ops_beneath);
   ops_beneath->to_mourn_inferior (ops_beneath);
   spu_multiarch_deactivate ();
 }
gdb/windows-nat.c
@@ -2519,11 +2519,9 @@ windows_xfer_partial (struct target_ops *ops, enum target_object object,
 				      writebuf, offset, len, xfered_len);
 
     default:
-      if (ops->beneath != NULL)
-	return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
-					      readbuf, writebuf, offset, len,
-					      xfered_len);
-      return TARGET_XFER_E_IO;
+      return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
+					    readbuf, writebuf, offset, len,
+					    xfered_len);
     }
 }
 