#define SEM_IN_SWITCH
#define WANT_CPU sh64
#define WANT_CPU_SH64
#include "sim-main.h"
#include "bfd.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "sim-assert.h"
/* Fill in the common fields of ABUF for an instruction at PC described by
   IDESC.  When the simulator cache is enabled, also record the semantic
   handler (FAST_P selects the fast vs. full variant via SEM_SET_CODE) and
   the instruction's address.  */
static INLINE void
sh64_compact_fill_argbuf (const SIM_CPU *cpu, ARGBUF *abuf, const IDESC *idesc,
PCADDR pc, int fast_p)
{
#if WITH_SCACHE
  SEM_SET_CODE (abuf, idesc, fast_p);
  ARGBUF_ADDR (abuf) = pc;
#endif
  ARGBUF_IDESC (abuf) = idesc;
}
/* Record per-insn trace/profile enable flags in ABUF.  Only meaningful in
   the non-fast (full-featured) execution path; the fast path never reads
   these fields (see extract).  */
static INLINE void
sh64_compact_fill_argbuf_tp (const SIM_CPU *cpu, ARGBUF *abuf,
int trace_p, int profile_p)
{
  ARGBUF_TRACE_P (abuf) = trace_p;
  ARGBUF_PROFILE_P (abuf) = profile_p;
}
#if WITH_SCACHE_PBB
/* Emit the X_BEFORE virtual insn into scache slot SC.  It runs before each
   real insn in full-featured (non-fast) mode to do per-insn bookkeeping;
   FIRST_P marks the first insn of the pseudo-basic-block (stored so
   sh64_compact_pbb_before can skip "finish previous insn" work).  */
static INLINE void
sh64_compact_emit_before (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc, int first_p)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEFORE];
  abuf->fields.before.first_p = first_p;
  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
}
/* Emit the X_AFTER virtual insn into scache slot SC.  It runs after the
   last real insn of a pseudo-basic-block in full-featured mode to finish
   its model profiling and tracing (see sh64_compact_pbb_after).  */
static INLINE void
sh64_compact_emit_after (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_AFTER];
  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
}
#endif
/* Decode insn INSN at PC into ABUF and return its IDESC.  In the
   non-fast path, additionally latch whether PC falls inside the
   user-selected trace and profile address ranges, so execution-time
   checks reduce to a flag test.  */
static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
int fast_p)
{
  const IDESC *id = sh64_compact_decode (current_cpu, pc, insn, insn, abuf);
  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  /* Trace/profile flags are only consulted by the full engine; the fast
     engine skips the extra stores.  */
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      sh64_compact_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}
/* Execute the single (possibly virtual) instruction cached in SC and
   return the next semantic PC.  FAST_P selects the fast handler (no
   tracing/profiling) vs. the full handler wrapped with profile counting,
   model timing and insn tracing.  When the corresponding semantics are
   compiled as a switch (WITH_SEM_SWITCH_*), this function must not be
   reached for that mode — hence the abort()s.  */
static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;
  if (fast_p)
    {
#if ! WITH_SEM_SWITCH_FAST
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, &sc->argbuf);
#endif
#else
      abort ();
#endif
    }
  else
    {
#if ! WITH_SEM_SWITCH_FULL
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
      /* Virtual insns (chain, before/after, etc.) are engine plumbing:
	 they are neither counted, modeled, nor traced.  */
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif
      if (! virtual_p)
	{
	  /* Pre-execution bookkeeping: insn count, model-profiling prolog,
	     trace prolog.  */
	  if (ARGBUF_PROFILE_P (abuf))
	    PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
	  if (PROFILE_MODEL_P (current_cpu)
	      && ARGBUF_PROFILE_P (abuf))
	    sh64_compact_model_insn_before (current_cpu, 1 );
	  TRACE_INSN_INIT (current_cpu, abuf, 1);
	  TRACE_INSN (current_cpu, idesc->idata,
		      (const struct argbuf *) abuf, abuf->addr);
	}
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
	{
	  /* Post-execution bookkeeping: charge model cycles, trace epilog.  */
	  if (PROFILE_MODEL_P (current_cpu)
	      && ARGBUF_PROFILE_P (abuf))
	    {
	      int cycles;
	      cycles = (*idesc->timing->model_fn) (current_cpu, sc);
	      sh64_compact_model_insn_after (current_cpu, 1 , cycles);
	    }
	  TRACE_INSN_FINI (current_cpu, abuf, 1);
	}
#else
      abort ();
#endif
    }
  return vpc;
}
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)
/* Start executing a pseudo-basic-block at the current PC.  Look the PC up
   in the simulator cache; on a miss, extract insns from PC into
   consecutive scache entries until a control-transfer insn (plus its
   delay slot, if any) or the per-block insn limit is reached, then append
   an X_CHAIN or X_CTI_CHAIN virtual insn to link to the next block.
   Returns the semantic PC of the (possibly freshly compiled) block.  */
INLINE SEM_PC
sh64_compact_pbb_begin (SIM_CPU *current_cpu, int FAST_P)
{
  SEM_PC new_vpc;
  PCADDR pc;
  SCACHE *sc;
  int max_insns = CPU_SCACHE_MAX_CHAIN_LENGTH (current_cpu);
  pc = GET_H_PC ();
  new_vpc = scache_lookup_or_alloc (current_cpu, pc, max_insns, &sc);
  if (! new_vpc)
    {
      /* Cache miss: compile the block starting at PC into SC.  */
      int _insn_count = 0;
      SCACHE *orig_sc = sc;
      SCACHE *_cti_sc = NULL;
      int slice_insns = CPU_MAX_SLICE_INSNS (current_cpu);
      if (slice_insns == 1)
	{
	  /* Single-stepping: compile exactly one insn.  */
	  max_insns = 1;
	}
      else
	{
	  /* Reserve scache slots for the trailing chain insn and, in the
	     full-featured path, the before/after virtual insns that may be
	     interleaved (sized by MAX_PARALLEL_INSNS).  */
	  max_insns -= (1
			+ (FAST_P
			   ? 0
			   : (1 + MAX_PARALLEL_INSNS) )
			+ (MAX_PARALLEL_INSNS > 1
			   ? (MAX_PARALLEL_INSNS * 2)
			   : 0));
	  /* NOTE(review): the factor of 3 presumably accounts for the
	     before/after virtual insns emitted around each real insn in
	     full mode — confirm against cgen engine docs.  */
	  if (! FAST_P)
	    slice_insns *= 3;
	  if (slice_insns > 0
	      && slice_insns < max_insns)
	    max_insns = slice_insns;
	}
      new_vpc = sc;
      /* Extraction loop: decode insns until a CTI or the slot budget runs
	 out.  */
      {
	const IDESC *idesc;
	int icount = 0;
	while (max_insns > 0)
	  {
	    UHI insn = GETIMEMUHI (current_cpu, pc);
	    idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
	    SEM_SKIP_COMPILE (current_cpu, sc, 1);
	    ++sc;
	    --max_insns;
	    ++icount;
	    pc += idesc->length;
	    if (IDESC_CTI_P (idesc))
	      {
		/* Remember where the CTI insn landed so the block can be
		   terminated with X_CTI_CHAIN below.  */
		SET_CTI_VPC (sc - 1);
		if (CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_DELAY_SLOT))
		  {
		    /* Compile the delay-slot insn as part of this block.  */
		    USI insn = GETIMEMUHI (current_cpu, pc);
		    idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
		    /* A CTI or an insn illegal in a delay slot here means a
		       malformed program: stop the simulation.  */
		    if (IDESC_CTI_P (idesc) ||
			CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_ILLSLOT))
		      {
			SIM_DESC sd = CPU_STATE (current_cpu);
			sim_io_eprintf (CPU_STATE (current_cpu),
					"malformed program, `%s' insn in delay slot\n",
					CGEN_INSN_NAME (idesc->idata));
			sim_engine_halt (sd, current_cpu, NULL, pc,
					 sim_stopped, SIM_SIGILL);
		      }
		    else
		      {
			++sc;
			--max_insns;
			++icount;
			pc += idesc->length;
		      }
		  }
		break;
	      }
	  }
	/* NOTE(review): Finish is presumably the target of a goto inside
	   the SEM_SKIP_COMPILE macro — do not remove.  */
      Finish:
	SET_INSN_COUNT (icount);
      }
      /* Terminate the block with a chain insn that transfers to the next
	 block; a CTI-terminated block needs the branch-aware variant.  */
      {
	const IDESC *id;
	if (_cti_sc)
	  {
	    id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CTI_CHAIN];
	  }
	else
	  {
	    id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CHAIN];
	  }
	SEM_SET_CODE (&sc->argbuf, id, FAST_P);
	sc->argbuf.idesc = id;
	sc->argbuf.addr = pc;
	sc->argbuf.fields.chain.insn_count = _insn_count;
	/* next/branch_target are lazily resolved scache links, filled in by
	   sh64_compact_pbb_chain / sh64_compact_pbb_cti_chain.  */
	sc->argbuf.fields.chain.next = 0;
	sc->argbuf.fields.chain.branch_target = 0;
	++sc;
      }
      CPU_SCACHE_NEXT_FREE (current_cpu) = sc;
      if (! FAST_P)
	PROFILE_COUNT_SCACHE_CHAIN_LENGTH (current_cpu, sc - orig_sc);
    }
  return new_vpc;
}
/* Chain from one pseudo-basic-block to the next (fall-through case).
   Updates the simulated PC, yields control if the slice limit is in
   effect or events are pending, and returns the next block's semantic PC
   — using and lazily filling the cached `next' link, falling back to a
   scache lookup and finally to recompilation via the pbb-begin insn.  */
INLINE SEM_PC
sh64_compact_pbb_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg)
{
  ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);
  SET_H_PC (abuf->addr);
  /* Leave the inner execution loop if we're slicing or work is pending.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* Link not yet resolved: look it up once and cache it.  */
  abuf->fields.chain.next = scache_lookup (current_cpu, abuf->addr);
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* Not in the cache: go compile a new block.  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
/* Chain from a CTI-terminated pseudo-basic-block.  BR_TYPE says whether
   the branch was taken, untaken, or uncacheable; NEW_PC is the branch
   target.  Caches the taken/untaken successor links separately in the
   chain argbuf.  */
INLINE SEM_PC
sh64_compact_pbb_cti_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg,
SEM_BRANCH_TYPE br_type, PCADDR new_pc)
{
  SEM_PC *new_vpc_ptr;
  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);
  /* NOTE(review): a set low bit in NEW_PC appears to signal an
     ISA-mode switch (SHmedia vs. SHcompact), forcing semantic table
     re-initialization — confirm against the sh64 sim sources.  */
  if (new_pc & 1)
    {
      CPU_IDESC_SEM_INIT_P (current_cpu) = 0;
      CPU_RUNNING_P (current_cpu) = 0;
    }
  /* Leave the inner execution loop if we're slicing or work is pending.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;
  if (br_type == SEM_BRANCH_UNCACHEABLE)
    {
      /* Target varies at runtime; never cache the link.  */
      SET_H_PC (new_pc);
      return CPU_SCACHE_PBB_BEGIN (current_cpu);
    }
  if (br_type == SEM_BRANCH_UNTAKEN)
    {
      /* Fall through: successor is the insn after the block.  */
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      new_pc = abuf->addr;
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.next;
    }
  else
    {
      /* Taken: successor is the branch target.  */
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.branch_target;
    }
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* Link not yet resolved: look it up once and cache it.  */
  *new_vpc_ptr = scache_lookup (current_cpu, new_pc);
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* Not in the cache: go compile a new block.  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
/* Semantics of the X_BEFORE virtual insn (full-featured mode only).
   SC is the X_BEFORE entry; SC+1 holds the real insn about to execute and
   SC-1 the previously executed one.  Finishes the previous insn's model
   profiling and trace (unless this is the first insn of the block), then
   starts profiling/tracing for the upcoming insn.  */
void
sh64_compact_pbb_before (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  int first_p = abuf->fields.before.first_p;
  /* The insn that is about to execute.  */
  const ARGBUF *cur_abuf = SEM_ARGBUF (sc + 1);
  const IDESC *cur_idesc = cur_abuf->idesc;
  PCADDR pc = cur_abuf->addr;
  if (ARGBUF_PROFILE_P (cur_abuf))
    PROFILE_COUNT_INSN (current_cpu, pc, cur_idesc->num);
  /* Finish the previous insn, if any.  */
  if (! first_p)
    {
      if (PROFILE_MODEL_P (current_cpu))
	{
	  const SEM_ARG prev_sem_arg = sc - 1;
	  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
	  const IDESC *prev_idesc = prev_abuf->idesc;
	  int cycles;
	  if (ARGBUF_PROFILE_P (prev_abuf))
	    {
	      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
	      sh64_compact_model_insn_after (current_cpu, 0 , cycles);
	    }
	}
      TRACE_INSN_FINI (current_cpu, cur_abuf, 0 );
    }
  /* Start the current insn.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (cur_abuf))
    sh64_compact_model_insn_before (current_cpu, first_p);
  TRACE_INSN_INIT (current_cpu, cur_abuf, first_p);
  TRACE_INSN (current_cpu, cur_idesc->idata, cur_abuf, pc);
}
/* Semantics of the X_AFTER virtual insn (full-featured mode only).
   SC is the X_AFTER entry; SC-1 holds the block's last real insn.
   Charges its model cycles and finishes its trace.  */
void
sh64_compact_pbb_after (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  const SEM_ARG prev_sem_arg = sc - 1;
  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (prev_abuf))
    {
      const IDESC *prev_idesc = prev_abuf->idesc;
      int cycles;
      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
      sh64_compact_model_insn_after (current_cpu, 1 , cycles);
    }
  TRACE_INSN_FINI (current_cpu, prev_abuf, 1 );
}
#define FAST_P 0
/* Main engine loop, full-featured mode (tracing/profiling enabled;
   FAST_P == 0).  On first entry, initializes the semantic handler table —
   either computed-goto labels harvested from the textually included
   sem-compact-switch.c, or the function-pointer table — and primes the
   X_BEGIN virtual insn.  Then repeatedly executes pseudo-basic-blocks
   until CPU_RUNNING_P is cleared (slice exhausted, pending events, halt).  */
void
sh64_compact_engine_run_full (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FULL
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif
  /* One-time (per ISA-mode switch) semantic table initialization.  */
  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
#if WITH_SEM_SWITCH_FULL
#if defined (__GNUC__)
/* The included file records the switch's label addresses in the idesc
   table when DEFINE_LABELS is defined.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_sem_init_idesc_table (current_cpu);
#endif
      /* Point the distinguished pbb-begin scache entry at X_BEGIN.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FULL_CODE (SEM_ARGBUF (vpc),
			 & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];
      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }
  CPU_RUNNING_P (current_cpu) = 1;
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);
  do
    {
      /* Dispatch: either the big semantic switch (textually included) or
	 indirect calls through the scache entry.  */
      {
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
	vpc = execute (current_cpu, vpc, FAST_P);
#endif
      }
    }
  while (CPU_RUNNING_P (current_cpu));
}
#undef FAST_P
#define FAST_P 1
/* Main engine loop, fast mode (no tracing/profiling; FAST_P == 1).
   Mirror image of sh64_compact_engine_run_full using the fast semantic
   handlers: initializes the fast semantic table on first entry, primes
   the X_BEGIN virtual insn, then executes pseudo-basic-blocks until
   CPU_RUNNING_P is cleared.  */
void
sh64_compact_engine_run_fast (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FAST
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif
  /* One-time (per ISA-mode switch) semantic table initialization.  */
  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
#if WITH_SEM_SWITCH_FAST
#if defined (__GNUC__)
/* The included file records the switch's label addresses in the idesc
   table when DEFINE_LABELS is defined.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_semf_init_idesc_table (current_cpu);
#endif
      /* Point the distinguished pbb-begin scache entry at X_BEGIN.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FAST_CODE (SEM_ARGBUF (vpc),
			 & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];
      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }
  CPU_RUNNING_P (current_cpu) = 1;
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);
  do
    {
      /* Dispatch: either the big semantic switch (textually included) or
	 indirect calls through the scache entry.  */
      {
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
	vpc = execute (current_cpu, vpc, FAST_P);
#endif
      }
    }
  while (CPU_RUNNING_P (current_cpu));
}
#undef FAST_P