[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH for-6.1 v5 11/15] accel/tcg: Merge tb_find into its only caller
From: Richard Henderson
Subject: [PATCH for-6.1 v5 11/15] accel/tcg: Merge tb_find into its only caller
Date: Mon, 19 Jul 2021 15:17:56 -1000
We are going to want two things:
(1) check for breakpoints will want to break out of the loop here,
(2) cflags can only be calculated with pc in hand.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cpu-exec.c | 83 ++++++++++++++++++++++----------------------
1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 5bb099174f..cde7069eb7 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -500,41 +500,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
return;
}
-static inline TranslationBlock *tb_find(CPUState *cpu,
- TranslationBlock *last_tb,
- int tb_exit, uint32_t cflags)
-{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb;
- target_ulong cs_base, pc;
- uint32_t flags;
-
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
- if (tb == NULL) {
- mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
- mmap_unlock();
- /* We add the TB in the virtual pc hash table for the fast lookup */
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
- }
-#ifndef CONFIG_USER_ONLY
- /* We don't take care of direct jumps when address mapping changes in
- * system emulation. So it's not safe to make a direct jump to a TB
- * spanning two pages because the mapping for the second page can change.
- */
- if (tb->page_addr[1] != -1) {
- last_tb = NULL;
- }
-#endif
- /* See if we can patch the calling TB. */
- if (last_tb) {
- tb_add_jump(last_tb, tb_exit, tb);
- }
- return tb;
-}
-
static inline bool cpu_handle_halt(CPUState *cpu)
{
if (cpu->halted) {
@@ -868,22 +833,56 @@ int cpu_exec(CPUState *cpu)
int tb_exit = 0;
while (!cpu_handle_interrupt(cpu, &last_tb)) {
- uint32_t cflags = cpu->cflags_next_tb;
TranslationBlock *tb;
+ target_ulong cs_base, pc;
+ uint32_t flags, cflags;
- /* When requested, use an exact setting for cflags for the next
- execution. This is used for icount, precise smc, and stop-
- after-access watchpoints. Since this request should never
- have CF_INVALID set, -1 is a convenient invalid value that
- does not require tcg headers for cpu_common_reset. */
+ /*
+ * When requested, use an exact setting for cflags for the next
+ * execution. This is used for icount, precise smc, and stop-
+ * after-access watchpoints. Since this request should never
+ * have CF_INVALID set, -1 is a convenient invalid value that
+ * does not require tcg headers for cpu_common_reset.
+ */
+ cflags = cpu->cflags_next_tb;
if (cflags == -1) {
cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
- tb = tb_find(cpu, last_tb, tb_exit, cflags);
+ cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ if (tb == NULL) {
+ mmap_lock();
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ mmap_unlock();
+ /*
+ * We add the TB in the virtual pc hash table
+ * for the fast lookup
+ */
+ qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * We don't take care of direct jumps when address mapping
+ * changes in system emulation. So it's not safe to make a
+ * direct jump to a TB spanning two pages because the mapping
+ * for the second page can change.
+ */
+ if (tb->page_addr[1] != -1) {
+ last_tb = NULL;
+ }
+#endif
+ /* See if we can patch the calling TB. */
+ if (last_tb) {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);
--
2.25.1
- [PATCH for-6.1 v5 00/15] tcg: breakpoint reorg, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 01/15] accel/tcg: Reduce CF_COUNT_MASK to match TCG_MAX_INSNS, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 02/15] accel/tcg: Move curr_cflags into cpu-exec.c, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 03/15] target/alpha: Drop goto_tb path in gen_call_pal, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 04/15] accel/tcg: Add CF_NO_GOTO_TB and CF_NO_GOTO_PTR, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 05/15] accel/tcg: Drop CF_NO_GOTO_PTR from -d nochain, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 06/15] accel/tcg: Handle -singlestep in curr_cflags, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 07/15] accel/tcg: Use CF_NO_GOTO_{TB, PTR} in cpu_exec_step_atomic, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 11/15] accel/tcg: Merge tb_find into its only caller, Richard Henderson <=
- [PATCH for-6.1 v5 12/15] accel/tcg: Move breakpoint recognition outside translation, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 09/15] target/arm: Implement debug_check_breakpoint, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 10/15] target/i386: Implement debug_check_breakpoint, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 15/15] accel/tcg: Record singlestep_enabled in tb->cflags, Richard Henderson, 2021/07/19
- [PATCH for-6.1 v5 13/15] accel/tcg: Remove TranslatorOps.breakpoint_check, Richard Henderson, 2021/07/19