#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "df.h"
#ifdef ENABLE_LLVM
#undef INSN_SCHEDULING
#endif
#ifdef INSN_SCHEDULING
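/* Registers that the insn currently being analyzed sets, clobbers or
   uses.  These are accumulated while walking the insn's pattern and
   committed to the deps lists at the end of sched_analyze_insn.  */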
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
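/* What kind of dependencies a scheduling barrier should produce:
   anti-dependencies for a MOVE_BARRIER, true dependencies for a
   TRUE_BARRIER.  */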
enum reg_pending_barrier_mode
{
NOT_A_BARRIER = 0,
MOVE_BARRIER,
TRUE_BARRIER
};
static enum reg_pending_barrier_mode reg_pending_barrier;
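/* To speed up the test for duplicate dependency links we keep bitmap
   caches of the dependencies created so far, indexed by the LUIDs of
   the two insns.  The caches are only allocated when the average
   basic block is very large; see init_dependency_caches.  */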
static bitmap_head *true_dependency_cache;
static bitmap_head *output_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *spec_dependency_cache;
static int cache_size;
#ifdef ENABLE_CHECKING
static bitmap_head *forward_dependency_cache;
#endif
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);
static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);
static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);
static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
enum reg_note, ds_t, rtx, rtx, rtx **);
static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
enum reg_note, ds_t, rtx, rtx, rtx **);
static void add_back_dep (rtx, rtx, enum reg_note, ds_t);
static void adjust_add_sorted_back_dep (rtx, rtx, rtx *);
static void adjust_back_add_forw_dep (rtx, rtx *);
static void delete_forw_dep (rtx, rtx);
static dw_t estimate_dep_weak (rtx, rtx);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep_status (enum reg_note, ds_t, bool);
#endif
#endif
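/* Return nonzero if a load of the memory reference MEM can cause a trap.  */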
static int
deps_may_trap_p (rtx mem)
{
rtx addr = XEXP (mem, 0);
if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
{
rtx t = get_reg_known_value (REGNO (addr));
if (t)
addr = t;
}
return rtx_addr_can_trap_p (addr);
}
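/* Return the list node in LIST whose element is INSN, or 0 if there
   is none.  */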
rtx
find_insn_list (rtx insn, rtx list)
{
while (list)
{
if (XEXP (list, 0) == insn)
return list;
list = XEXP (list, 1);
}
return 0;
}
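/* Find the condition under which INSN is executed.  Return 0 if INSN
   is unconditional or the condition cannot be determined.  */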
static rtx
sched_get_condition (rtx insn)
{
rtx pat = PATTERN (insn);
rtx src;
if (pat == 0)
return 0;
if (GET_CODE (pat) == COND_EXEC)
return COND_EXEC_TEST (pat);
if (!any_condjump_p (insn) || !onlyjump_p (insn))
return 0;
src = SET_SRC (pc_set (insn));
if (XEXP (src, 2) == pc_rtx)
return XEXP (src, 0);
else if (XEXP (src, 1) == pc_rtx)
{
rtx cond = XEXP (src, 0);
enum rtx_code revcode = reversed_comparison_code (cond, insn);
if (revcode == UNKNOWN)
return 0;
return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
XEXP (cond, 1));
}
return 0;
}
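/* Return nonzero if conditions COND1 and COND2 can never be both true.  */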
static int
conditions_mutex_p (rtx cond1, rtx cond2)
{
if (COMPARISON_P (cond1)
&& COMPARISON_P (cond2)
&& GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
&& XEXP (cond1, 0) == XEXP (cond2, 0)
&& XEXP (cond1, 1) == XEXP (cond2, 1))
return 1;
return 0;
}
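/* Return true if INSN1 and INSN2 can never depend on one another
   because the conditions under which they are executed are mutually
   exclusive.  */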
bool
sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
{
rtx cond1, cond2;
if (!CALL_P (insn1) && !CALL_P (insn2))
{
cond1 = sched_get_condition (insn1);
cond2 = sched_get_condition (insn2);
if (cond1 && cond2
&& conditions_mutex_p (cond1, cond2)
&& !modified_in_p (cond1, insn2)
&& !modified_in_p (cond2, insn1))
return true;
}
return false;
}
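/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if it is not already present.  DS, if nonzero,
   describes the speculations through which the dependence can be
   overcome; MEM1 and MEM2, if non-null, are the memory locations of a
   data-speculative dependence.  A self-dependence (INSN == ELEM) is
   not recorded, but it marks INSN as having an internal dependence
   when speculation is enabled.  Returns a DEPS_ADJUST_RESULT telling
   whether a link was added or changed; on change, *CHANGED_LINKPP is
   set to the address of the changed link.  */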
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
ds_t ds, rtx mem1, rtx mem2,
rtx **changed_linkpp)
{
gcc_assert (INSN_P (insn) && INSN_P (elem));
if (insn == elem)
{
#ifdef INSN_SCHEDULING
if (current_sched_info->flags & DO_SPECULATION)
HAS_INTERNAL_DEP (insn) = 1;
#endif
return 0;
}
return add_or_update_back_dep_1 (insn, elem, dep_type,
ds, mem1, mem2, changed_linkpp);
}
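/* Worker for maybe_add_or_update_back_dep_1.  Consults the dependency
   caches first: if an equal or stronger dependence already exists,
   nothing is done; otherwise an existing LOG_LINKS entry is updated in
   place, or a new backward dependence is created with add_back_dep.  */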
static enum DEPS_ADJUST_RESULT
add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
ds_t ds ATTRIBUTE_UNUSED,
rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
rtx **changed_linkpp ATTRIBUTE_UNUSED)
{
bool maybe_present_p = true, present_p = false;
gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
check_dep_status (dep_type, ds, mem1 != NULL);
#endif
if (true_dependency_cache != NULL)
{
enum reg_note present_dep_type;
gcc_assert (output_dependency_cache);
gcc_assert (anti_dependency_cache);
if (!(current_sched_info->flags & USE_DEPS_LIST))
{
if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_TRUE;
else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_OUTPUT;
else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_ANTI;
else
maybe_present_p = false;
if (maybe_present_p)
{
if ((int) dep_type >= (int) present_dep_type)
return DEP_PRESENT;
present_p = true;
}
}
else
{
ds_t present_dep_types = 0;
if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_types |= DEP_TRUE;
if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_types |= DEP_OUTPUT;
if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_types |= DEP_ANTI;
if (present_dep_types)
{
if (!(current_sched_info->flags & DO_SPECULATION)
|| !bitmap_bit_p (&spec_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
{
if ((present_dep_types | (ds & DEP_TYPES))
== present_dep_types)
return DEP_PRESENT;
}
else
{
gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
== present_dep_types);
}
present_p = true;
}
else
maybe_present_p = false;
}
}
#endif
if (maybe_present_p)
{
rtx *linkp;
for (linkp = &LOG_LINKS (insn); *linkp; linkp = &XEXP (*linkp, 1))
{
rtx link = *linkp;
gcc_assert (true_dependency_cache == 0 || present_p);
if (XEXP (link, 0) == elem)
{
enum DEPS_ADJUST_RESULT changed_p = DEP_PRESENT;
#ifdef INSN_SCHEDULING
if (current_sched_info->flags & USE_DEPS_LIST)
{
ds_t new_status = ds | DEP_STATUS (link);
if (new_status & SPECULATIVE)
{
if (!(ds & SPECULATIVE)
|| !(DEP_STATUS (link) & SPECULATIVE))
{
new_status &= ~SPECULATIVE;
if (true_dependency_cache
&& (DEP_STATUS (link) & SPECULATIVE))
bitmap_clear_bit (&spec_dependency_cache
[INSN_LUID (insn)],
INSN_LUID (elem));
}
else
{
if (mem1)
{
dw_t dw;
dw = estimate_dep_weak (mem1, mem2);
ds = set_dep_weak (ds, BEGIN_DATA, dw);
}
new_status = ds_merge (DEP_STATUS (link), ds);
}
}
ds = new_status;
}
if (true_dependency_cache != NULL
&& !(current_sched_info->flags & USE_DEPS_LIST))
{
enum reg_note kind = REG_NOTE_KIND (link);
switch (kind)
{
case REG_DEP_OUTPUT:
bitmap_clear_bit (&output_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
break;
case REG_DEP_ANTI:
bitmap_clear_bit (&anti_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
break;
default:
gcc_unreachable ();
}
}
if ((current_sched_info->flags & USE_DEPS_LIST)
&& DEP_STATUS (link) != ds)
{
DEP_STATUS (link) = ds;
changed_p = DEP_CHANGED;
}
#endif
if ((int) dep_type < (int) REG_NOTE_KIND (link))
{
PUT_REG_NOTE_KIND (link, dep_type);
changed_p = DEP_CHANGED;
}
#ifdef INSN_SCHEDULING
if (true_dependency_cache != NULL)
{
if (!(current_sched_info->flags & USE_DEPS_LIST))
{
if (REG_NOTE_KIND (link) == REG_DEP_TRUE)
bitmap_set_bit (&true_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
bitmap_set_bit (&output_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
bitmap_set_bit (&anti_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
}
else
{
if (ds & DEP_TRUE)
bitmap_set_bit (&true_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
if (ds & DEP_OUTPUT)
bitmap_set_bit (&output_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
if (ds & DEP_ANTI)
bitmap_set_bit (&anti_dependency_cache
[INSN_LUID (insn)], INSN_LUID (elem));
}
}
if (changed_linkpp && changed_p == DEP_CHANGED)
*changed_linkpp = linkp;
#endif
return changed_p;
}
}
gcc_assert (!present_p);
}
if (mem1)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION);
ds = set_dep_weak (ds, BEGIN_DATA, estimate_dep_weak (mem1, mem2));
}
add_back_dep (insn, elem, dep_type, ds);
return DEP_CREATED;
}
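/* Unconditionally make a link between INSN and ELEM at the head of
   INSN's LOG_LINKS and record it in the dependency caches.  DS
   describes the speculative status of the link.  */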
static void
add_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
if (current_sched_info->flags & USE_DEPS_LIST)
LOG_LINKS (insn) = alloc_DEPS_LIST (elem, LOG_LINKS (insn), ds);
else
LOG_LINKS (insn) = alloc_INSN_LIST (elem, LOG_LINKS (insn));
PUT_REG_NOTE_KIND (LOG_LINKS (insn), dep_type);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
check_dep_status (dep_type, ds, false);
#endif
if (true_dependency_cache != NULL)
{
if (!(current_sched_info->flags & USE_DEPS_LIST))
{
if (dep_type == REG_DEP_TRUE)
bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
else if (dep_type == REG_DEP_OUTPUT)
bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
else if (dep_type == REG_DEP_ANTI)
bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
}
else
{
if (ds & DEP_TRUE)
bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
if (ds & DEP_OUTPUT)
bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
if (ds & DEP_ANTI)
bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
if (ds & SPECULATIVE)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION);
bitmap_set_bit (&spec_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
}
}
}
#endif
}
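/* Add a dependence of INSN on every insn in LIST, of kind DEP_TYPE.
   If UNCOND is zero, insns whose conditions are mutually exclusive
   with INSN's are skipped.  */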
static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
for (; list; list = XEXP (list, 1))
{
if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
add_dependence (insn, XEXP (list, 0), dep_type);
}
}
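/* Similar, but free *LISTP as we go.  */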
static void
add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
enum reg_note dep_type)
{
rtx list, next;
for (list = *listp, *listp = NULL; list ; list = next)
{
next = XEXP (list, 1);
if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
add_dependence (insn, XEXP (list, 0), dep_type);
free_INSN_LIST_node (list);
}
}
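/* Clear all cached and listed dependencies of INSN.  */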
static void
delete_all_dependences (rtx insn)
{
#ifdef INSN_SCHEDULING
if (true_dependency_cache != NULL)
{
bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
if (current_sched_info->flags & DO_SPECULATION)
bitmap_clear (&spec_dependency_cache[INSN_LUID (insn)]);
}
#endif
if (!(current_sched_info->flags & USE_DEPS_LIST))
free_INSN_LIST_list (&LOG_LINKS (insn));
else
free_DEPS_LIST_list (&LOG_LINKS (insn));
}
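/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first insn not in the group by walking the dependence chains
   backwards, move INSN's other dependencies to it, and then make INSN
   depend on the previous nonnote insn.  */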
static void
fixup_sched_groups (rtx insn)
{
rtx link, prev_nonnote;
for (link = LOG_LINKS (insn); link ; link = XEXP (link, 1))
{
rtx i = insn;
do
{
i = prev_nonnote_insn (i);
if (XEXP (link, 0) == i)
goto next_link;
} while (SCHED_GROUP_P (i));
if (! sched_insns_conditions_mutex_p (i, XEXP (link, 0)))
add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
next_link:;
}
delete_all_dependences (insn);
prev_nonnote = prev_nonnote_insn (insn);
if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
&& ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
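/* Save an INSN and MEM reference pair on the pending lists pointed to
   by INSN_LIST and MEM_LIST, so that memory aliasing can later be run
   on MEM.  */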
static void
add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
rtx insn, rtx mem)
{
rtx link;
link = alloc_INSN_LIST (insn, *insn_list);
*insn_list = link;
if (current_sched_info->use_cselib)
{
mem = shallow_copy_rtx (mem);
XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
}
link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
deps->pending_lists_length++;
}
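/* Make a dependency between every memory reference on the pending
   lists and INSN, thus flushing the pending lists.  FOR_READ is true
   if emitting dependencies for a read operation, similarly with
   FOR_WRITE.  */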
static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
int for_write)
{
if (for_write)
{
add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
REG_DEP_ANTI);
free_EXPR_LIST_list (&deps->pending_read_mems);
}
add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
free_EXPR_LIST_list (&deps->pending_write_mems);
deps->pending_lists_length = 0;
add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
deps->pending_flush_length = 1;
}
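/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER or USE.  */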
static void
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
enum rtx_code ref, rtx insn)
{
if (regno < FIRST_PSEUDO_REGISTER)
{
int i = hard_regno_nregs[regno][mode];
if (ref == SET)
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_sets, regno + i);
}
else if (ref == USE)
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_uses, regno + i);
}
else
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
}
}
else if (regno >= deps->max_reg)
{
enum rtx_code code = GET_CODE (PATTERN (insn));
gcc_assert (code == USE || code == CLOBBER);
}
else
{
if (ref == SET)
SET_REGNO_REG_SET (reg_pending_sets, regno);
else if (ref == USE)
SET_REGNO_REG_SET (reg_pending_uses, regno);
else
SET_REGNO_REG_SET (reg_pending_clobbers, regno);
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
if (REG_N_CALLS_CROSSED (regno) == 0)
{
if (ref == USE)
deps->sched_before_next_call
= alloc_INSN_LIST (insn, deps->sched_before_next_call);
else
add_dependence_list (insn, deps->last_function_call, 1,
REG_DEP_ANTI);
}
}
}
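/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or
   POST_INC rtx X in INSN, creating all dependencies generated by the
   write to the destination of X, and reads of everything mentioned.  */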
static void
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
if (dest == 0)
return;
if (GET_CODE (dest) == PARALLEL)
{
int i;
for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
sched_analyze_1 (deps,
gen_rtx_CLOBBER (VOIDmode,
XEXP (XVECEXP (dest, 0, i), 0)),
insn);
if (GET_CODE (x) == SET)
sched_analyze_2 (deps, SET_SRC (x), insn);
return;
}
while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT)
{
if (GET_CODE (dest) == STRICT_LOW_PART
|| GET_CODE (dest) == ZERO_EXTRACT
|| df_read_modify_subreg_p (dest))
{
sched_analyze_2 (deps, XEXP (dest, 0), insn);
}
if (GET_CODE (dest) == ZERO_EXTRACT)
{
sched_analyze_2 (deps, XEXP (dest, 1), insn);
sched_analyze_2 (deps, XEXP (dest, 2), insn);
}
dest = XEXP (dest, 0);
}
if (REG_P (dest))
{
int regno = REGNO (dest);
enum machine_mode mode = GET_MODE (dest);
sched_analyze_reg (deps, regno, mode, code, insn);
#ifdef STACK_REGS
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
}
#endif
}
else if (MEM_P (dest))
{
rtx t = dest;
if (current_sched_info->use_cselib)
{
t = shallow_copy_rtx (dest);
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
t = canon_rtx (t);
if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
{
flush_pending_lists (deps, insn, false, true);
}
else
{
rtx pending, pending_mem;
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
if (anti_dependence (XEXP (pending_mem, 0), t)
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
pending = deps->pending_write_insns;
pending_mem = deps->pending_write_mems;
while (pending)
{
if (output_dependence (XEXP (pending_mem, 0), t)
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
REG_DEP_ANTI);
add_insn_mem_dependence (deps, &deps->pending_write_insns,
&deps->pending_write_mems, insn, dest);
}
sched_analyze_2 (deps, XEXP (dest, 0), insn);
}
if (GET_CODE (x) == SET)
sched_analyze_2 (deps, SET_SRC (x), insn);
}
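/* Analyze the uses of memory and registers in rtx X in INSN.  */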
static void
sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
int i;
int j;
enum rtx_code code;
const char *fmt;
if (x == 0)
return;
code = GET_CODE (x);
switch (code)
{
case CONST_INT:
case CONST_DOUBLE:
case CONST_VECTOR:
case SYMBOL_REF:
case CONST:
case LABEL_REF:
return;
#ifdef HAVE_cc0
case CC0:
SCHED_GROUP_P (insn) = 1;
CANT_MOVE (prev_nonnote_insn (insn)) = 1;
return;
#endif
case REG:
{
int regno = REGNO (x);
enum machine_mode mode = GET_MODE (x);
sched_analyze_reg (deps, regno, mode, USE, insn);
#ifdef STACK_REGS
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
}
#endif
return;
}
case MEM:
{
rtx u;
rtx pending, pending_mem;
rtx t = x;
if (current_sched_info->use_cselib)
{
t = shallow_copy_rtx (t);
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
t = canon_rtx (t);
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
if (read_dependence (XEXP (pending_mem, 0), t)
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
pending = deps->pending_write_insns;
pending_mem = deps->pending_write_mems;
while (pending)
{
if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
t, rtx_varies_p)
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
{
if (current_sched_info->flags & DO_SPECULATION)
maybe_add_or_update_back_dep_1 (insn, XEXP (pending, 0),
REG_DEP_TRUE,
BEGIN_DATA | DEP_TRUE,
XEXP (pending_mem, 0), t, 0);
else
add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
}
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
add_insn_mem_dependence (deps, &deps->pending_read_insns,
&deps->pending_read_mems, insn, x);
sched_analyze_2 (deps, XEXP (x, 0), insn);
return;
}
case TRAP_IF:
flush_pending_lists (deps, insn, true, false);
break;
case ASM_OPERANDS:
case ASM_INPUT:
case UNSPEC_VOLATILE:
{
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
reg_pending_barrier = TRUE_BARRIER;
if (code == ASM_OPERANDS)
{
for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
return;
}
break;
}
case PRE_DEC:
case POST_DEC:
case PRE_INC:
case POST_INC:
sched_analyze_2 (deps, XEXP (x, 0), insn);
sched_analyze_1 (deps, x, insn);
return;
case POST_MODIFY:
case PRE_MODIFY:
sched_analyze_2 (deps, XEXP (x, 0), insn);
sched_analyze_2 (deps, XEXP (x, 1), insn);
sched_analyze_1 (deps, x, insn);
return;
default:
break;
}
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
sched_analyze_2 (deps, XEXP (x, i), insn);
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
}
}
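/* Analyze an INSN with pattern X to find all dependencies.  */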
static void
sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
unsigned i;
reg_set_iterator rsi;
if (code == COND_EXEC)
{
sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
x = COND_EXEC_CODE (x);
code = GET_CODE (x);
}
if (code == SET || code == CLOBBER)
{
sched_analyze_1 (deps, x, insn);
if (code == CLOBBER)
add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
}
else if (code == PARALLEL)
{
for (i = XVECLEN (x, 0); i--;)
{
rtx sub = XVECEXP (x, 0, i);
code = GET_CODE (sub);
if (code == COND_EXEC)
{
sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
sub = COND_EXEC_CODE (sub);
code = GET_CODE (sub);
}
if (code == SET || code == CLOBBER)
sched_analyze_1 (deps, sub, insn);
else
sched_analyze_2 (deps, sub, insn);
}
}
else
sched_analyze_2 (deps, x, insn);
if (CALL_P (insn))
{
for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
{
if (GET_CODE (XEXP (link, 0)) == CLOBBER)
sched_analyze_1 (deps, XEXP (link, 0), insn);
else
sched_analyze_2 (deps, XEXP (link, 0), insn);
}
if (find_reg_note (insn, REG_SETJMP, NULL))
reg_pending_barrier = MOVE_BARRIER;
}
if (JUMP_P (insn))
{
rtx next;
next = next_nonnote_insn (insn);
if (next && BARRIER_P (next))
reg_pending_barrier = TRUE_BARRIER;
else
{
rtx pending, pending_mem;
regset_head tmp_uses, tmp_sets;
INIT_REG_SET (&tmp_uses);
INIT_REG_SET (&tmp_sets);
(*current_sched_info->compute_jump_reg_dependencies)
(insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
reg_last->uses_length++;
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
}
IOR_REG_SET (reg_pending_sets, &tmp_sets);
CLEAR_REG_SET (&tmp_uses);
CLEAR_REG_SET (&tmp_sets);
pending = deps->pending_write_insns;
pending_mem = deps->pending_write_mems;
while (pending)
{
if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
REG_DEP_ANTI);
}
}
if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
|| (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
reg_pending_barrier = MOVE_BARRIER;
if (reg_pending_barrier)
{
if (sched_get_condition (insn))
{
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
add_dependence_list
(insn, reg_last->sets, 0,
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
add_dependence_list
(insn, reg_last->clobbers, 0,
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
}
}
else
{
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (insn, &reg_last->uses, 0,
                              REG_DEP_ANTI);
add_dependence_list_and_free
  (insn, &reg_last->sets, 0,
   reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
add_dependence_list_and_free
  (insn, &reg_last->clobbers, 0,
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
}
}
for (i = 0; i < (unsigned)deps->max_reg; i++)
{
struct deps_reg *reg_last = &deps->reg_last[i];
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}
flush_pending_lists (deps, insn, true, true);
CLEAR_REG_SET (&deps->reg_conditional_sets);
reg_pending_barrier = NOT_A_BARRIER;
}
else
{
if (sched_get_condition (insn))
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
reg_last->uses_length++;
}
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
reg_last->clobbers_length++;
}
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
}
}
else
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
reg_last->uses_length++;
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
}
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
|| reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
{
add_dependence_list_and_free (insn, &reg_last->sets, 0,
                              REG_DEP_OUTPUT);
add_dependence_list_and_free (insn, &reg_last->uses, 0,
                              REG_DEP_ANTI);
add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
REG_DEP_OUTPUT);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
reg_last->clobbers_length = 0;
reg_last->uses_length = 0;
}
else
{
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
}
reg_last->clobbers_length++;
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
}
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (insn, &reg_last->sets, 0,
                              REG_DEP_OUTPUT);
add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
                              REG_DEP_OUTPUT);
add_dependence_list_and_free (insn, &reg_last->uses, 0,
REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
}
}
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
}
CLEAR_REG_SET (reg_pending_uses);
CLEAR_REG_SET (reg_pending_clobbers);
CLEAR_REG_SET (reg_pending_sets);
if (deps->libcall_block_tail_insn)
{
SCHED_GROUP_P (insn) = 1;
CANT_MOVE (insn) = 1;
}
if (deps->in_post_call_group_p)
{
rtx tmp, set = single_set (insn);
int src_regno, dest_regno;
if (set == NULL)
goto end_call_group;
tmp = SET_DEST (set);
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
if (REG_P (tmp))
dest_regno = REGNO (tmp);
else
goto end_call_group;
tmp = SET_SRC (set);
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
if ((GET_CODE (tmp) == PLUS
|| GET_CODE (tmp) == MINUS)
&& REG_P (XEXP (tmp, 0))
&& REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
&& dest_regno == STACK_POINTER_REGNUM)
src_regno = STACK_POINTER_REGNUM;
else if (REG_P (tmp))
src_regno = REGNO (tmp);
else
goto end_call_group;
if (src_regno < FIRST_PSEUDO_REGISTER
|| dest_regno < FIRST_PSEUDO_REGISTER)
{
if (deps->in_post_call_group_p == post_call_initial)
deps->in_post_call_group_p = post_call;
SCHED_GROUP_P (insn) = 1;
CANT_MOVE (insn) = 1;
}
else
{
end_call_group:
deps->in_post_call_group_p = not_post_call;
}
}
if (SCHED_GROUP_P (insn))
fixup_sched_groups (insn);
}
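/* Analyze every insn between HEAD and TAIL inclusive, creating
   LOG_LINKS backward dependencies for each of them.  */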
void
sched_analyze (struct deps *deps, rtx head, rtx tail)
{
rtx insn;
if (current_sched_info->use_cselib)
cselib_init (true);
if (! reload_completed && !LABEL_P (head))
{
insn = prev_nonnote_insn (head);
if (insn && CALL_P (insn))
deps->in_post_call_group_p = post_call_initial;
}
for (insn = head;; insn = NEXT_INSN (insn))
{
rtx link, end_seq, r0, set;
if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
{
free_INSN_LIST_list (&LOG_LINKS (insn));
if (JUMP_P (insn))
{
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
flush_pending_lists (deps, insn, true, true);
else
deps->last_pending_memory_flush
= alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
}
sched_analyze_insn (deps, PATTERN (insn), insn);
}
else if (CALL_P (insn))
{
int i;
CANT_MOVE (insn) = 1;
free_INSN_LIST_list (&LOG_LINKS (insn));
if (find_reg_note (insn, REG_SETJMP, NULL))
{
reg_pending_barrier = MOVE_BARRIER;
}
else
{
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (global_regs[i])
{
SET_REGNO_REG_SET (reg_pending_sets, i);
SET_REGNO_REG_SET (reg_pending_uses, i);
}
else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
|| TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
else if (fixed_regs[i])
SET_REGNO_REG_SET (reg_pending_uses, i);
else if (i == FRAME_POINTER_REGNUM
|| (i == HARD_FRAME_POINTER_REGNUM
&& (! reload_completed || frame_pointer_needed)))
SET_REGNO_REG_SET (reg_pending_uses, i);
}
add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
REG_DEP_ANTI);
sched_analyze_insn (deps, PATTERN (insn), insn);
flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
free_INSN_LIST_list (&deps->last_function_call);
deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
if (! reload_completed)
deps->in_post_call_group_p = post_call;
}
if (NOTE_P (insn))
gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);
if (current_sched_info->use_cselib)
cselib_process_insn (insn);
if (!reload_completed
&& deps->libcall_block_tail_insn == 0
&& NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == CLOBBER
&& (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
&& (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
&& (end_seq = XEXP (link, 0)) != 0
&& (set = single_set (end_seq)) != 0
&& SET_DEST (set) == r0 && SET_SRC (set) == r0
&& find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
&& find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
deps->libcall_block_tail_insn = XEXP (link, 0);
if (deps->libcall_block_tail_insn == insn)
deps->libcall_block_tail_insn = 0;
if (insn == tail)
{
if (current_sched_info->use_cselib)
cselib_finish ();
return;
}
}
gcc_unreachable ();
}
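/* Given a backward dependence LINK of insn TO, record the
   corresponding forward dependence from the insn LINK points at to
   TO.  The forward dependence must not already exist.  */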
void
add_forw_dep (rtx to, rtx link)
{
rtx new_link, from;
from = XEXP (link, 0);
#ifdef ENABLE_CHECKING
gcc_assert (INSN_P (from));
gcc_assert (!INSN_DELETED_P (from));
if (true_dependency_cache)
{
gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
INSN_LUID (to)));
bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
INSN_LUID (to));
}
else
gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));
#endif
if (!(current_sched_info->flags & USE_DEPS_LIST))
new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
else
new_link = alloc_DEPS_LIST (to, INSN_DEPEND (from), DEP_STATUS (link));
PUT_REG_NOTE_KIND (new_link, REG_NOTE_KIND (link));
INSN_DEPEND (from) = new_link;
INSN_DEP_COUNT (to) += 1;
}
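/* Convert the backward dependences in LOG_LINKS of all insns between
   HEAD and TAIL inclusive into forward dependences in INSN_DEPEND.  */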
void
compute_forward_dependences (rtx head, rtx tail)
{
rtx insn;
rtx next_tail;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
rtx link;
if (! INSN_P (insn))
continue;
if (current_sched_info->flags & DO_SPECULATION)
{
rtx new = 0, link, next;
for (link = LOG_LINKS (insn); link; link = next)
{
next = XEXP (link, 1);
adjust_add_sorted_back_dep (insn, link, &new);
}
LOG_LINKS (insn) = new;
}
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
add_forw_dep (insn, link);
}
}
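/* Initialize variables for data dependence analysis in DEPS.  */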
void
init_deps (struct deps *deps)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
INIT_REG_SET (&deps->reg_conditional_sets);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
deps->pending_write_insns = 0;
deps->pending_write_mems = 0;
deps->pending_lists_length = 0;
deps->pending_flush_length = 0;
deps->last_pending_memory_flush = 0;
deps->last_function_call = 0;
deps->sched_before_next_call = 0;
deps->in_post_call_group_p = not_post_call;
deps->libcall_block_tail_insn = 0;
}
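/* Free insn lists found in DEPS.  */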
void
free_deps (struct deps *deps)
{
unsigned i;
reg_set_iterator rsi;
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->pending_write_insns);
free_EXPR_LIST_list (&deps->pending_write_mems);
free_INSN_LIST_list (&deps->last_pending_memory_flush);
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
if (reg_last->uses)
free_INSN_LIST_list (&reg_last->uses);
if (reg_last->sets)
  free_INSN_LIST_list (&reg_last->sets);
if (reg_last->clobbers)
  free_INSN_LIST_list (&reg_last->clobbers);
}
CLEAR_REG_SET (&deps->reg_last_in_use);
CLEAR_REG_SET (&deps->reg_conditional_sets);
free (deps->reg_last);
}
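/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be
   scheduled; it is used in the estimate of profitability.  */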
void
init_dependency_caches (int luid)
{
if (luid / n_basic_blocks > 100 * 5)
{
cache_size = 0;
extend_dependency_caches (luid, true);
}
}
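/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */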
void
extend_dependency_caches (int n, bool create_p)
{
if (create_p || true_dependency_cache)
{
int i, luid = cache_size + n;
true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
luid);
output_dependency_cache = XRESIZEVEC (bitmap_head,
output_dependency_cache, luid);
anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
luid);
#ifdef ENABLE_CHECKING
forward_dependency_cache = XRESIZEVEC (bitmap_head,
forward_dependency_cache, luid);
#endif
if (current_sched_info->flags & DO_SPECULATION)
spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
luid);
for (i = cache_size; i < luid; i++)
{
bitmap_initialize (&true_dependency_cache[i], 0);
bitmap_initialize (&output_dependency_cache[i], 0);
bitmap_initialize (&anti_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
if (current_sched_info->flags & DO_SPECULATION)
bitmap_initialize (&spec_dependency_cache[i], 0);
}
cache_size = luid;
}
}
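/* Free the caches allocated in init_dependency_caches.  */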
void
free_dependency_caches (void)
{
if (true_dependency_cache)
{
int i;
for (i = 0; i < cache_size; i++)
{
bitmap_clear (&true_dependency_cache[i]);
bitmap_clear (&output_dependency_cache[i]);
bitmap_clear (&anti_dependency_cache[i]);
#ifdef ENABLE_CHECKING
bitmap_clear (&forward_dependency_cache[i]);
#endif
if (current_sched_info->flags & DO_SPECULATION)
bitmap_clear (&spec_dependency_cache[i]);
}
free (true_dependency_cache);
true_dependency_cache = NULL;
free (output_dependency_cache);
output_dependency_cache = NULL;
free (anti_dependency_cache);
anti_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
free (forward_dependency_cache);
forward_dependency_cache = NULL;
#endif
if (current_sched_info->flags & DO_SPECULATION)
{
free (spec_dependency_cache);
spec_dependency_cache = NULL;
}
}
}
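/* Initialize some global variables needed by the dependency analysis
   code.  */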
void
init_deps_global (void)
{
reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
reg_pending_barrier = NOT_A_BARRIER;
}
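/* Free everything used by the dependency analysis code.  */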
void
finish_deps_global (void)
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);
FREE_REG_SET (reg_pending_uses);
}
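/* Insert LINK into the dependence chain pointed to by LINKP,
   maintaining the sort order: non-speculative dependencies precede
   speculative ones.  A speculative dependence of an insn that has an
   internal dependence is first demoted to a non-speculative one.  */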
static void
adjust_add_sorted_back_dep (rtx insn, rtx link, rtx *linkp)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION);
if (HAS_INTERNAL_DEP (insn)
&& (DEP_STATUS (link) & SPECULATIVE))
{
DEP_STATUS (link) &= ~SPECULATIVE;
if (true_dependency_cache)
bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
INSN_LUID (XEXP (link, 0)));
}
if (DEP_STATUS (link) & SPECULATIVE)
while (*linkp && !(DEP_STATUS (*linkp) & SPECULATIVE))
linkp = &XEXP (*linkp, 1);
XEXP (link, 1) = *linkp;
*linkp = link;
}
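/* Move the dependence pointed to by LINKP to the back dependence list
   of INSN, and also add this dependence to the forward ones.  All
   LOG_LINKS except the one pointed to by LINKP must already be
   sorted.  */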
static void
adjust_back_add_forw_dep (rtx insn, rtx *linkp)
{
rtx link;
gcc_assert (current_sched_info->flags & DO_SPECULATION);
link = *linkp;
*linkp = XEXP (*linkp, 1);
adjust_add_sorted_back_dep (insn, link, &LOG_LINKS (insn));
add_forw_dep (insn, link);
}
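/* Remove the forward dependence of INSN on ELEM from ELEM's
   INSN_DEPEND list and decrement INSN's dependence count.  */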
static void
delete_forw_dep (rtx insn, rtx elem)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION);
#ifdef ENABLE_CHECKING
if (true_dependency_cache)
bitmap_clear_bit (&forward_dependency_cache[INSN_LUID (elem)],
INSN_LUID (insn));
#endif
remove_free_DEPS_LIST_elem (insn, &INSN_DEPEND (elem));
INSN_DEP_COUNT (insn)--;
}
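/* Estimate the weakness of a dependence between memory references
   MEM1 and MEM2.  */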
static dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
rtx r1, r2;
if (mem1 == mem2)
return MIN_DEP_WEAK;
r1 = XEXP (mem1, 0);
r2 = XEXP (mem2, 0);
if (r1 == r2
|| (REG_P (r1) && REG_P (r2)
&& REGNO (r1) == REGNO (r2)))
return MIN_DEP_WEAK;
else if ((REG_P (r1) && !REG_P (r2))
|| (!REG_P (r1) && REG_P (r2)))
return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
else
return UNCERTAIN_DEP_WEAK;
}
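/* Make a dependence of INSN on ELEM of kind DEP_TYPE, mapping the
   kind onto the corresponding dep_status bits.  */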
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
ds_t ds;
if (dep_type == REG_DEP_TRUE)
ds = DEP_TRUE;
else if (dep_type == REG_DEP_OUTPUT)
ds = DEP_OUTPUT;
else if (dep_type == REG_DEP_ANTI)
ds = DEP_ANTI;
else
gcc_unreachable ();
maybe_add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
}
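/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE and status DS.  This function is a convenience wrapper.  */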
enum DEPS_ADJUST_RESULT
add_or_update_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
return add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
}
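/* Add or update both backward and forward dependencies between INSN
   and ELEM with type DEP_TYPE and status DS.  */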
void
add_or_update_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type,
ds_t ds)
{
enum DEPS_ADJUST_RESULT res;
rtx *linkp;
res = add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, &linkp);
if (res == DEP_CHANGED || res == DEP_CREATED)
{
if (res == DEP_CHANGED)
delete_forw_dep (insn, elem);
else if (res == DEP_CREATED)
linkp = &LOG_LINKS (insn);
adjust_back_add_forw_dep (insn, linkp);
}
}
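/* Add both backward and forward dependencies between INSN and ELEM.  */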
void
add_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
add_back_dep (insn, elem, dep_type, ds);
adjust_back_add_forw_dep (insn, &LOG_LINKS (insn));
}
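/* Remove both backward and forward dependencies between INSN and
   ELEM.  */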
void
delete_back_forw_dep (rtx insn, rtx elem)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION);
if (true_dependency_cache != NULL)
{
bitmap_clear_bit (&true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
}
remove_free_DEPS_LIST_elem (elem, &LOG_LINKS (insn));
delete_forw_dep (insn, elem);
}
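/* Return the weakness of speculation type TYPE stored in dep_status
   DS.  */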
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
ds = ds & type;
switch (type)
{
case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
default: gcc_unreachable ();
}
gcc_assert (MIN_DEP_WEAK <= ds && ds <= MAX_DEP_WEAK);
return (dw_t) ds;
}
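/* Return DS with the weakness of speculation type TYPE set to DW.  */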
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
ds &= ~type;
switch (type)
{
case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
default: gcc_unreachable ();
}
return ds;
}
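/* Return the join of two dep_statuses DS1 and DS2: the union of the
   dependence types, with the weakness of each speculation type
   present in both combined by multiplying the two probabilities
   (scaled by MAX_DEP_WEAK and clamped at MIN_DEP_WEAK).  */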
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
ds_t ds, t;
gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
t = FIRST_SPEC_TYPE;
do
{
if ((ds1 & t) && !(ds2 & t))
ds |= ds1 & t;
else if (!(ds1 & t) && (ds2 & t))
ds |= ds2 & t;
else if ((ds1 & t) && (ds2 & t))
{
ds_t dw;
dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
dw /= MAX_DEP_WEAK;
if (dw < MIN_DEP_WEAK)
dw = MIN_DEP_WEAK;
ds = set_dep_weak (ds, t, (dw_t) dw);
}
if (t == LAST_SPEC_TYPE)
break;
t <<= SPEC_TYPE_SHIFT;
}
while (1);
return ds;
}
#endif
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
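/* Check that dependence type DT and status DS are consistent.  If
   RELAXED_P is true, skip the dep_weakness checks.  */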
static void
check_dep_status (enum reg_note dt, ds_t ds, bool relaxed_p)
{
if (dt == REG_DEP_TRUE)
gcc_assert (ds & DEP_TRUE);
else if (dt == REG_DEP_OUTPUT)
gcc_assert ((ds & DEP_OUTPUT)
&& !(ds & DEP_TRUE));
else
gcc_assert ((dt == REG_DEP_ANTI)
&& (ds & DEP_ANTI)
&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
gcc_assert (!(ds & HARD_DEP));
if (!(current_sched_info->flags & DO_SPECULATION))
gcc_assert (!(ds & SPECULATIVE));
else if (ds & SPECULATIVE)
{
if (!relaxed_p)
{
ds_t type = FIRST_SPEC_TYPE;
do
{
if (ds & type)
get_dep_weak (ds, type);
if (type == LAST_SPEC_TYPE)
break;
type <<= SPEC_TYPE_SHIFT;
}
while (1);
}
if (ds & BEGIN_SPEC)
{
if (ds & BEGIN_DATA)
gcc_assert (ds & DEP_TRUE);
if (ds & BEGIN_CONTROL)
gcc_assert (ds & DEP_ANTI);
}
else
{
gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
}
if (ds & DEP_TRUE)
gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
gcc_assert (!(ds & DEP_OUTPUT));
if (ds & DEP_ANTI)
gcc_assert (ds & BEGIN_CONTROL);
}
}
#endif
#endif