gdb/gdbserver/

2010-06-01  Pedro Alves  <pedro@codesourcery.com>
	    Stan Shebs  <stan@codesourcery.com>

	* Makefile.in (IPA_DEPFILES, extra_libraries): New.
	(all): Depend on $(extra_libraries).
	(install-only): Install the IPA.
	(IPA_OBJS, IPA_LIB): New.
	(clean): Remove the IPA lib.
	(IPAGENT_CFLAGS): New.
	(tracepoint-ipa.o, utils-ipa.o, remote-utils-ipa.o)
	(regcache-ipa.o, i386-linux-ipa.o, linux-i386-ipa.o)
	(linux-amd64-ipa.o, amd64-linux-ipa.o): New rules.
	* linux-amd64-ipa.c, linux-i386-ipa.c: New files.
	* configure.ac: Check for atomic builtins support in the compiler.
	(IPA_DEPFILES, extra_libraries): Define.
	* configure.srv (ipa_obj): Add description.
	(ipa_i386_linux_regobj, ipa_amd64_linux_regobj): Define.
	(i[34567]86-*-linux*): Set ipa_obj.
	(x86_64-*-linux*): Set ipa_obj.
	* linux-low.c (stabilizing_threads): New.
	(supports_fast_tracepoints): New.
	(linux_detach): Stabilize threads before detaching.
	(handle_tracepoints): Handle internal tracing breakpoints.  Assert
	the lwp is either not stabilizing, or is moving out of a jump pad.
	(linux_fast_tracepoint_collecting): New.
	(maybe_move_out_of_jump_pad): New.
	(enqueue_one_deferred_signal): New.
	(dequeue_one_deferred_signal): New.
	(linux_wait_for_event_1): If moving out of a jump pad, defer
	pending signals to later.
	(linux_stabilize_threads): New.
	(linux_wait_1): Check if threads need moving out of jump pads, and
	do it if so.
	(stuck_in_jump_pad_callback): New.
	(move_out_of_jump_pad_callback): New.
	(lwp_running): New.
	(linux_resume_one_lwp): Handle moving out of jump pads.
	(linux_set_resume_request): Dequeue deferred signals.
	(need_step_over_p): Also step over fast tracepoint jumps.
	(start_step_over): Also uninsert fast tracepoint jumps.
	(finish_step_over): Also reinsert fast tracepoint jumps.
	(linux_install_fast_tracepoint_jump): New.
	(linux_target_ops): Install linux_stabilize_threads and
	linux_install_fast_tracepoint_jump_pad.
	* linux-low.h (linux_target_ops) <get_thread_area,
	install_fast_tracepoint_jump_pad>: New fields.
	(struct lwp_info) <collecting_fast_tracepoint,
	pending_signals_to_report, exit_jump_pad_bkpt>: New fields.
	(linux_get_thread_area): Declare.
	* linux-x86-low.c (jump_insn): New.
	(x86_get_thread_area): New.
	(append_insns): New.
	(push_opcode): New.
	(amd64_install_fast_tracepoint_jump_pad): New.
	(i386_install_fast_tracepoint_jump_pad): New.
	(x86_install_fast_tracepoint_jump_pad): New.
	(the_low_target): Install x86_get_thread_area and
	x86_install_fast_tracepoint_jump_pad.
	* mem-break.c (set_raw_breakpoint_at): Use read_inferior_memory.
	(struct fast_tracepoint_jump): New.
	(fast_tracepoint_jump_insn): New.
	(fast_tracepoint_jump_shadow): New.
	(find_fast_tracepoint_jump_at): New.
	(fast_tracepoint_jump_here): New.
	(delete_fast_tracepoint_jump): New.
	(set_fast_tracepoint_jump): New.
	(uninsert_fast_tracepoint_jumps_at): New.
	(reinsert_fast_tracepoint_jumps_at): New.
	(set_breakpoint_at): Use write_inferior_memory.
	(uninsert_raw_breakpoint): Use write_inferior_memory.
	(check_mem_read): Mask out fast tracepoint jumps.
	(check_mem_write): Mask out fast tracepoint jumps.
	* mem-break.h (struct fast_tracepoint_jump): Forward declare.
	(set_fast_tracepoint_jump): Declare.
	(delete_fast_tracepoint_jump)
	(fast_tracepoint_jump_here, uninsert_fast_tracepoint_jumps_at)
	(reinsert_fast_tracepoint_jumps_at): Declare.
	* regcache.c: Don't compile many functions when building the
	in-process agent library.
	(init_register_cache) [IN_PROCESS_AGENT]: Don't allow allocating
	the register buffer in the heap.
	(free_register_cache): If the register buffer isn't owned by the
	regcache, don't free it.
	(set_register_cache) [IN_PROCESS_AGENT]: Don't re-allocate
	pre-existing register caches.
	* remote-utils.c (convert_int_to_ascii): Constify `from' parameter
	type.
	(convert_ascii_to_int): Constify `from' parameter type.
	(decode_M_packet, decode_X_packet): Replace the `to' parameter by
	a `to_p' pointer to pointer parameter.  If TO_P is NULL, malloc
	the needed buffer in-place.
	(relocate_instruction): New.
	* server.c (handle_query) <qSymbols>: If the target supports
	tracepoints, give it a chance of looking up symbols.  Report
	support for fast tracepoints.
	(handle_status): Stabilize threads.
	(process_serial_event): Adjust.
	* server.h (struct fast_tracepoint_jump): Forward declare.
	(struct process_info) <fast_tracepoint_jumps>: New field.
	(convert_ascii_to_int, convert_int_to_ascii): Adjust.
	(decode_X_packet, decode_M_packet): Adjust.
	(relocate_instruction): Declare.
	(in_process_agent_loaded): Declare.
	(tracepoint_look_up_symbols): Declare.
	(struct fast_tpoint_collect_status): Declare.
	(fast_tracepoint_collecting): Declare.
	(force_unlock_trace_buffer): Declare.
	(handle_tracepoint_bkpts): Declare.
	(initialize_low_tracepoint)
	(supply_fast_tracepoint_registers) [IN_PROCESS_AGENT]: Declare.
	* target.h (struct target_ops) <stabilize_threads,
	install_fast_tracepoint_jump_pad>: New fields.
	(stabilize_threads, install_fast_tracepoint_jump_pad): New.
	* tracepoint.c [HAVE_MALLOC_H]: Include malloc.h.
	[HAVE_STDINT_H]: Include stdint.h.
	(trace_debug_1): Rename to ...
	(trace_vdebug): ... this.
	(trace_debug): Rename to ...
	(trace_debug_1): ... this.  Add `level' parameter.
	(trace_debug): New.
	(ATTR_USED, ATTR_NOINLINE): New.
	(IP_AGENT_EXPORT): New.
	(gdb_tp_heap_buffer, gdb_jump_pad_buffer, gdb_jump_pad_buffer_end)
	(collecting, gdb_collect, stop_tracing, flush_trace_buffer)
	(about_to_request_buffer_space, trace_buffer_is_full)
	(stopping_tracepoint, expr_eval_result, error_tracepoint)
	(tracepoints, tracing, trace_buffer_ctrl, trace_buffer_ctrl_curr)
	(trace_buffer_lo, trace_buffer_hi, traceframe_read_count)
	(traceframe_write_count, traceframes_created)
	(trace_state_variables): New renaming defines.
	(struct ipa_sym_addresses): New.
	(STRINGIZE_1, STRINGIZE, IPA_SYM): New.
	(symbol_list): New.
	(ipa_sym_addrs): New.
	(all_tracepoint_symbols_looked_up): New.
	(in_process_agent_loaded): New.
	(write_e_ipa_not_loaded): New.
	(maybe_write_ipa_not_loaded): New.
	(tracepoint_look_up_symbols): New.
	(debug_threads) [IN_PROCESS_AGENT]: New.
	(read_inferior_memory) [IN_PROCESS_AGENT]: New.
	(UNKNOWN_SIDE_EFFECTS): New.
	(stop_tracing): New.
	(flush_trace_buffer): New.
	(stop_tracing_bkpt): New.
	(flush_trace_buffer_bkpt): New.
	(read_inferior_integer): New.
	(read_inferior_uinteger): New.
	(read_inferior_data_pointer): New.
	(write_inferior_data_pointer): New.
	(write_inferior_integer): New.
	(write_inferior_uinteger): New.
	(struct collect_static_trace_data_action): Delete.
	(enum tracepoint_type): New.
	(struct tracepoint) <type>: New field `type'.
	<actions_str, step_actions, step_actions_str>: Only include in GDBserver.
	<orig_size, obj_addr_on_target, adjusted_insn_addr>
	<adjusted_insn_addr_end, jump_pad, jump_pad_end>: New fields.
	(tracepoints): Use IP_AGENT_EXPORT.
	(last_tracepoint): Don't include in the IPA.
	(stopping_tracepoint): Use IP_AGENT_EXPORT.
	(trace_buffer_is_full): Use IP_AGENT_EXPORT.
	(alloced_trace_state_variables): New.
	(trace_state_variables): Use IP_AGENT_EXPORT.
	(traceframe_t): Delete unused variable.
	(circular_trace_buffer): Don't include in the IPA.
	(trace_buffer_start): Delete.
	(struct trace_buffer_control): New.
	(trace_buffer_free): Delete.
	(struct ipa_trace_buffer_control): New.
	(GDBSERVER_FLUSH_COUNT_MASK, GDBSERVER_FLUSH_COUNT_MASK_PREV)
	(GDBSERVER_FLUSH_COUNT_MASK_CURR, GDBSERVER_UPDATED_FLUSH_COUNT_BIT):
	New.
	(trace_buffer_ctrl): New.
	(TRACE_BUFFER_CTRL_CURR): New.
	(trace_buffer_start, trace_buffer_free, trace_buffer_end_free):
	Reimplement as macros.
	(trace_buffer_wrap): Delete.
	(traceframe_write_count, traceframe_read_count)
	(traceframes_created, tracing): Use IP_AGENT_EXPORT.
	(struct tracepoint_hit_ctx) <type>: New field.
	(struct fast_tracepoint_ctx): New.
	(memory_barrier): New.
	(cmpxchg): New.
	(record_tracepoint_error): Update atomically in the IPA.
	(clear_inferior_trace_buffer): New.
	(about_to_request_buffer_space): New.
	(trace_buffer_alloc): Handle GDBserver and the inferior
	simultaneously updating the same buffer.
	(add_tracepoint): Default the tracepoint's type to trap
	tracepoint, and orig_size to -1.
	(get_trace_state_variable) [IN_PROCESS_AGENT]: Handle allocated
	internal variables.
	(create_trace_state_variable): New parameter `gdb'.  Handle it.
	(clear_installed_tracepoints): Clear fast tracepoint jumps.
	(cmd_qtdp): Handle fast tracepoints.
	(cmd_qtdv): Adjust.
	(max_jump_pad_size): New.
	(gdb_jump_pad_head): New.
	(get_jump_space_head): New.
	(claim_jump_space): New.
	(sort_tracepoints): New.
	(MAX_JUMP_SIZE): New.
	(cmd_qtstart): Handle fast tracepoints.  Sync tracepoints with the
	IPA.
	(stop_tracing) [IN_PROCESS_AGENT]: Don't include the
	disconnected-tracing support.  Upload fast traceframes, and delete
	internal IPA breakpoints.
	(stop_tracing_handler): New.
	(flush_trace_buffer_handler): New.
	(cmd_qtstop): Upload fast traceframes.
	(response_tracepoint): Handle fast tracepoints.
	(tracepoint_finished_step): Upload fast traceframes.  Set the
	tracepoint hit context's tracepoint type.
	(handle_tracepoint_bkpts): New.
	(tracepoint_was_hit): Set the tracepoint hit context's tracepoint
	type.  Add comment about fast tracepoints.
	(collect_data_at_tracepoint) [IN_PROCESS_AGENT]: Don't access the
	non-existing action_str field.
	(get_context_regcache): Handle fast tracepoints.
	(do_action_at_tracepoint) [!IN_PROCESS_AGENT]: Don't write the PC
	to the regcache.
	(fast_tracepoint_from_jump_pad_address): New.
	(fast_tracepoint_from_ipa_tpoint_address): New.
	(collecting_t): New.
	(force_unlock_trace_buffer): New.
	(fast_tracepoint_collecting): New.
	(collecting): New.
	(gdb_collect): New.
	(write_inferior_data_ptr): New.
	(target_tp_heap): New.
	(target_malloc): New.
	(download_agent_expr): New.
	(UALIGN): New.
	(download_tracepoints): New.
	(download_trace_state_variables): New.
	(upload_fast_traceframes): New.
	(IPA_FIRST_TRACEFRAME): New.
	(IPA_NEXT_TRACEFRAME_1): New.
	(IPA_NEXT_TRACEFRAME): New.
	[IN_PROCESS_AGENT]: Include sys/mman.h and fcntl.h.
	[IN_PROCESS_AGENT] (gdb_tp_heap_buffer, gdb_jump_pad_buffer)
	(gdb_jump_pad_buffer_end): New.
	[IN_PROCESS_AGENT] (initialize_tracepoint_ftlib): New.
	(initialize_tracepoint): Adjust.
	[IN_PROCESS_AGENT]: Allocate the IPA heap, and jump pad scratch
	buffer.  Initialize the low module.
	* utils.c (PREFIX, TOOLNAME): New.
	(malloc_failure): Use PREFIX.
	(error): In the IPA, an error causes an exit.
	(fatal, warning): Use PREFIX.
	(internal_error): Use TOOLNAME.
	(NUMCELLS): Increase to 10.
	* configure, config.in: Regenerate.
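
The configure.ac check for atomic builtins and the new memory_barrier/cmpxchg
helpers in tracepoint.c exist because GDBserver and the in-process agent can
both be updating the trace buffer control word at the same time.  A minimal
sketch of that kind of compare-and-swap helper, built on the GCC __sync
builtins the configure test probes for; the names below are illustrative and
are not the actual tracepoint.c code:

#include <stdint.h>

/* Illustrative only: a compare-and-swap wrapper in the spirit of the
   cmpxchg helper added to tracepoint.c, using the GCC atomic builtins
   configure.ac now checks for.  Returns the previous contents of *MEM;
   the swap took place iff that value equals OLDVAL.  */

static uintptr_t
cmpxchg_sketch (volatile uintptr_t *mem, uintptr_t oldval, uintptr_t newval)
{
  return __sync_val_compare_and_swap (mem, oldval, newval);
}

/* Example: claim a control word that holds 0 while free.  */

static int
try_claim (volatile uintptr_t *word, uintptr_t token)
{
  return cmpxchg_sketch (word, 0, token) == 0;
}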

gdb/
2010-06-01  Pedro Alves  <pedro@codesourcery.com>

	* NEWS: Mention gdbserver fast tracepoints support.

gdb/doc/
2010-06-01  Pedro Alves  <pedro@codesourcery.com>

	* gdb.texinfo (Set Tracepoints): Mention tracepoints support in
	gdbserver, and add cross reference.
	(Tracepoints support in gdbserver): New subsection.
commit fa593d66d5 (parent d149dd1dab)
Pedro Alves <pedro@codesourcery.com>, 2010-06-01 13:20:52 +00:00
24 changed files with 4666 additions and 162 deletions

gdb/gdbserver/linux-x86-low.c

@@ -40,6 +40,8 @@ void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* Backward compatibility for gdb without XML support.  */
static const char *xmltarget_i386_linux_no_xml = "@<target>\
@@ -191,6 +193,53 @@ ps_get_thread_area (const struct ps_prochandle *ph,
      return PS_OK;
    }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
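
The comment above the function is the whole contract: the returned address is
never dereferenced, it only has to be stable and unique per thread.  A hedged
sketch of how a caller can use it as an opaque key, for instance to ask
whether a given LWP is the thread whose thread-area value was previously read
back out of the inferior; the function below is illustrative and not part of
the patch:

/* Illustrative only.  OWNER_AREA is a thread-area value read back from
   the inferior earlier, e.g. out of the collecting_t object published
   through the jump pad's lock word.  */

static int
lwp_matches_owner (int lwpid, CORE_ADDR owner_area)
{
  CORE_ADDR this_area;

  if (x86_get_thread_area (lwpid, &this_area) != 0)
    return 0;  /* Could not tell; assume it is not the owner.  */

  return this_area == owner_area;
}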

static int
i386_cannot_store_register (int regno)

@@ -1041,6 +1090,386 @@ x86_supports_tracepoints (void)
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
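
push_opcode above simply parses a space-separated string of hex bytes into
BUF and returns how many bytes it wrote, which keeps the instruction
encodings in the pad builders readable.  A small illustrative use, not part
of the patch:

/* Illustrative only: after the two calls below, buf holds
   48 83 ec 18 31 c0 ("sub $0x18,%rsp" then "xor %eax,%eax")
   and i is 6.  */

static void
push_opcode_example (void)
{
  unsigned char buf[16];
  int i = 0;

  i += push_opcode (&buf[i], "48 83 ec 18");  /* 4 bytes */
  i += push_opcode (&buf[i], "31 c0");        /* 2 bytes */
}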

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");  /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");        /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");  /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
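
Both jumps emitted above use the 5-byte rel32 form in jump_insn, so each
displacement is the target address minus the address of the byte following
the jump; the pad therefore has to sit within rel32 reach of the tracepoint
site.  A worked example of that arithmetic with made-up addresses (everything
below is illustrative):

/* Illustrative arithmetic only; the addresses are invented.  */

static void
jump_offset_example (void)
{
  CORE_ADDR tpaddr = 0x0804a3c0;     /* tracepoint site in the program */
  ULONGEST orig_size = 5;            /* bytes replaced at the site */
  CORE_ADDR jump_entry = 0x081b0000; /* start of the jump pad */
  CORE_ADDR buildaddr = 0x081b00c0;  /* where the final jmp is placed */
  int offset;

  /* Jump back from the pad to the instruction after the replaced
     bytes: 0x0804a3c5 - 0x081b00c5 = -0x165d00.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));

  /* Jump from the tracepoint site into the pad, written by the caller
     last so the tracepoint can be armed while threads run:
     0x081b0000 - 0x0804a3c5 = 0x165c3b.  */
  offset = jump_entry - (tpaddr + sizeof (jump_insn));
  (void) offset;
}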

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
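
The spin-lock sequence is the same in both pads: zero the accumulator, offer
the address of the on-stack collecting_t object with lock cmpxchg against the
word at lockaddr, and loop until the word was found free (zero); the unlock
after the gdb_collect call is a plain store of zero.  In rough C, with
illustrative names only:

/* Illustrative C equivalent of the pad's inline lock.  LOCK is the word
   at lockaddr; COLLECTING points at the collecting_t the pad builds on
   its own stack.  */

static void
pad_lock_sketch (volatile unsigned long *lock, void *collecting)
{
  /* lock cmpxchg; test; jne <again>  */
  while (__sync_val_compare_and_swap (lock, 0, (unsigned long) collecting)
         != 0)
    ;

  /* ... gdb_collect runs here ...  */

  /* xor %eax,%eax; mov %eax,lockaddr  */
  *lock = 0;
}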

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */
@@ -1073,5 +1502,7 @@ struct linux_target_ops the_low_target =
   x86_linux_new_thread,
   x86_linux_prepare_to_resume,
   x86_linux_process_qsupported,
-  x86_supports_tracepoints
+  x86_supports_tracepoints,
+  x86_get_thread_area,
+  x86_install_fast_tracepoint_jump_pad
 };