#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
/* The number of insns scheduled so far (incremented in
   begin_schedule_ready).  */
static int sched_n_insns;

/* The number of insns to be scheduled in the current region (set by
   schedule_ebb from set_priorities, adjusted by add_remove_insn).  */
static int n_insns;

/* Blocks whose dependencies must not be (re)computed by schedule_ebb.
   Bits are set in add_block1 for blocks added after the exit block.  */
static bitmap_head dont_calc_deps;

/* First and last blocks of each scheduled ebb; consulted after
   scheduling to decide how register life information is updated, and by
   ebb_head_or_leaf_p when checking is enabled.  */
static bitmap_head ebb_head, ebb_tail;

/* The last basic block in the current ebb; maintained by
   begin_schedule_ready, add_block1 and fix_recovery_cfg.  */
static basic_block last_bb;

/* Forward declarations for the scheduler hooks and helpers below.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
static void add_remove_insn (rtx, int);
static void add_block1 (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void fix_recovery_cfg (int, int, int);
#ifdef ENABLE_CHECKING
static int ebb_head_or_leaf_p (basic_block, int);
#endif
/* Return nonzero if there are still insns left to schedule in the
   current region.  */
static int
schedule_more_p (void)
{
  return n_insns > sched_n_insns;
}
/* Add all insns of the current region to the ready list, and verify
   that the count matches the precomputed N_INSNS.  Called once before
   scheduling a set of insns.  */
static void
init_ready_list (void)
{
  int count = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn = NEXT_INSN (prev_head);

  sched_n_insns = 0;

#if 0
  if (sched_verbose >= 5)
    debug_dependencies ();
#endif

  /* Hand every insn between PREV_HEAD and NEXT_TAIL (exclusive) to the
     scheduler.  */
  while (insn != next_tail)
    {
      try_ready (insn);
      count++;
      insn = NEXT_INSN (insn);
    }

  gcc_assert (count == n_insns);
}
/* INSN is being scheduled after LAST.  Update the insn counter, and if
   INSN is a control-flow insn from the last block of the ebb that ended
   up somewhere other than directly after LAST, split off the insns that
   follow it into a new basic block appended to the ebb.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      && control_flow_insn_p (insn)
      && last != PREV_INSN (insn))
    {
      edge e;
      edge_iterator ei;
      basic_block bb;

      /* Look for a fallthru successor of the last block, if any.  */
      FOR_EACH_EDGE (e, ei, last_bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  break;

#ifdef ENABLE_CHECKING
      /* A fallthru edge out of LAST_BB must not be complex.  */
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      /* INSN must be the final insn of LAST_BB (but not its head), and
	 must not be a speculation check.  */
      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
		  && !IS_SPECULATION_CHECK_P (insn)
		  && BB_HEAD (last_bb) != insn
		  && BB_END (last_bb) == insn);

      {
	rtx x;

	x = NEXT_INSN (insn);
	if (e)
	  /* With a fallthru successor, INSN is followed by a note or a
	     label.  */
	  gcc_assert (NOTE_P (x) || LABEL_P (x));
	else
	  /* Without one, the jump must be followed by a barrier.  */
	  gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
	{
	  /* Split the fallthru edge; the new empty block holds only its
	     basic-block note.  */
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	/* No fallthru successor: create a fresh block right after
	   LAST_BB, starting at the insn following INSN.  */
	bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* Extend the scheduling region to cover the new block.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append the new block to the ebb; add_block1 then makes it the
	 new LAST_BB.  */
      add_block (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
/* Return a string that contains the insn uid of INSN, for use in
   scheduler debug dumps.  The returned buffer is static and is
   overwritten by the next call.  */
static const char *
ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char buffer[80];

  sprintf (buffer, "%4d", INSN_UID (insn));
  return buffer;
}
/* Compare the priority of INSN1 and INSN2 by the profile data of their
   blocks.  Return a negative value if INSN1's block is hotter (higher
   count or frequency), a positive value if INSN2's block is hotter, and
   zero otherwise.  */
static int
rank (rtx insn1, rtx insn2)
{
  basic_block first = BLOCK_FOR_INSN (insn1);
  basic_block second = BLOCK_FOR_INSN (insn2);

  if (first->count > second->count || first->frequency > second->frequency)
    return -1;

  if (first->count < second->count || first->frequency < second->frequency)
    return 1;

  return 0;
}
/* NEXT is an insn that depends on INSN.  Return nonzero if the
   dependence should be taken into account when computing INSN's
   priority; in ebb scheduling every dependence counts.  */
static int
contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
			 rtx insn ATTRIBUTE_UNUSED)
{
  /* All dependencies contribute.  */
  return 1;
}
/* INSN is a jump.  For its fallthru successor, narrow the conditionally
   set registers COND_SET to those live at the successor's start and
   store them in SET; for every other successor, accumulate the
   registers live at the destination's start into USED.  */
static void
compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
			       regset set)
{
  basic_block jump_bb = BLOCK_FOR_INSN (insn);
  edge succ;
  edge_iterator ei;

  FOR_EACH_EDGE (succ, ei, jump_bb->succs)
    {
      if (succ->flags & EDGE_FALLTHRU)
	/* Only registers live at the start of the fallthru block can
	   really be set by this jump.  */
	bitmap_and (set, glat_start[succ->dest->index], cond_set);
      else
	bitmap_ior_into (used, glat_start[succ->dest->index]);
    }
}
/* Hooks and parameters handed to the Haifa scheduler for scheduling
   extended basic blocks; installed as current_sched_info by
   schedule_ebbs.  NOTE(review): this is a positional initializer — the
   field layout is declared in sched-int.h and is not visible in this
   file; confirm field meanings there.  */
static struct sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  contributes_to_priority,
  compute_jump_reg_dependencies,

  NULL, NULL,
  NULL, NULL,
  0, 1, 0,

  add_remove_insn,
  begin_schedule_ready,
  add_block1,
  advance_target_bb,
  fix_recovery_cfg,
#ifdef ENABLE_CHECKING
  ebb_head_or_leaf_p,
#endif
  /* DETACH_LIFE_INFO lets begin_schedule_ready create new basic blocks
     during scheduling (see the flag assertion in schedule_ebbs).  */
  SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO
};
/* Return the block with the smallest index that contains a load INSN2
   such that: INSN2 is classified PFREE_CANDIDATE, INSN2's block is not
   on the ->aux chain rooted at LAST_BLOCK (see add_deps_for_risky_insns),
   and both LOAD_INSN and INSN2 depend on a common insn INSN1.  Return
   NULL if no such load exists.  */
static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  rtx back_link;
  basic_block bb, earliest_block = NULL;

  /* Walk the backward dependence links of LOAD_INSN.  */
  for (back_link = LOG_LINKS (load_insn);
       back_link;
       back_link = XEXP (back_link, 1))
    {
      rtx insn1 = XEXP (back_link, 0);

      /* NOTE(review): a VOIDmode link appears to denote a true (data)
	 dependence — confirm against the dependence encoding in
	 sched-int.h.  */
      if (GET_MODE (back_link) == VOIDmode)
	{
	  /* INSN1 feeds LOAD_INSN; now look at the other insns that
	     depend on INSN1.  */
	  rtx fore_link;

	  for (fore_link = INSN_DEPEND (insn1);
	       fore_link;
	       fore_link = XEXP (fore_link, 1))
	    {
	      rtx insn2 = XEXP (fore_link, 0);
	      basic_block insn2_block = BLOCK_FOR_INSN (insn2);

	      if (GET_MODE (fore_link) == VOIDmode)
		{
		  /* Keep only the candidate in the earliest block seen
		     so far.  */
		  if (earliest_block != NULL
		      && earliest_block->index < insn2_block->index)
		    continue;

		  /* The other consumer must itself be a PFREE load
		     candidate.  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    continue;

		  /* Reject blocks chained through ->aux starting at
		     LAST_BLOCK.  */
		  for (bb = last_block; bb; bb = bb->aux)
		    if (insn2_block == bb)
		      break;

		  if (!bb)
		    earliest_block = insn2_block;
		}
	    }
	}
    }

  return earliest_block;
}
/* Add dependencies between jumps and risky insns in the region from HEAD
   to TAIL, so a risky insn cannot be moved across the last jump that
   precedes it.  For PFREE candidates with -fschedule-speculative-load,
   the dependence may instead be taken on the end of the predecessor of
   the earliest block containing a similar load.  While scanning, the
   blocks containing jumps are chained through their ->aux fields; the
   chain is cleared before returning.

   Fix: the local classification variable was named `class', which is a
   C++ keyword and so breaks compilation of this file as C++; renamed to
   `insn_class'.  The fallthrough in the switch is now marked
   explicitly.  */
static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int insn_class;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
	/* Chain the blocks containing jumps through ->aux.  */
	bb = BLOCK_FOR_INSN (insn);
	bb->aux = last_block;
	last_block = bb;
	last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
	insn_class = haifa_classify_insn (insn);
	prev = last_jump;

	switch (insn_class)
	  {
	  case PFREE_CANDIDATE:
	    if (flag_schedule_speculative_load)
	      {
		bb = earliest_block_with_similiar_load (last_block, insn);
		if (bb)
		  {
		    bb = bb->aux;
		    if (!bb)
		      break;
		    /* Depend on the end of the predecessor block
		       instead of the last jump.  */
		    prev = BB_END (bb);
		  }
	      }
	    /* Fall through: a PFREE candidate is still handled as a
	       risky insn when no earlier anchor was found.  */
	  case TRAP_RISKY:
	  case IRISKY:
	  case PRISKY_CANDIDATE:
	    /* We cannot change the mode of the backward dependency
	       because REG_DEP_ANTI has the lowest rank.  */
	    if (! sched_insns_conditions_mutex_p (insn, prev))
	      {
		if (!(current_sched_info->flags & DO_SPECULATION))
		  {
		    enum DEPS_ADJUST_RESULT res;

		    res = add_or_update_back_dep (insn, prev,
						  REG_DEP_ANTI, DEP_ANTI);

		    if (res == DEP_CREATED)
		      add_forw_dep (insn, LOG_LINKS (insn));
		    else
		      gcc_assert (res != DEP_CHANGED);
		  }
		else
		  add_or_update_back_forw_dep (insn, prev, REG_DEP_ANTI,
					       set_dep_weak (DEP_ANTI,
							     BEGIN_CONTROL,
							     MAX_DEP_WEAK));
	      }
	    break;

	  default:
	    break;
	  }
      }

  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
/* Schedule a single extended basic block, delimited by the insns HEAD
   and TAIL.  Return the last (possibly newly created) basic block of
   the scheduled region.  */
static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  /* Nothing to do for regions without real insns.  */
  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute backward dependencies.  */
      init_deps (&tmp_deps);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      /* Compute forward dependencies.  */
      compute_forward_dependences (head, tail);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
	targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Regions flagged in dont_calc_deps (see add_block1) must consist
       of a single block.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities; sched_max_insns_priority ends up one above the
     maximum priority found.  */
  current_sched_info->sched_max_insns_priority = 0;
  n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  /* When emitting debug info, stash the line notes away and strip them
     from the region; they are restored after scheduling.  */
  if (write_symbols != NO_DEBUG)
    {
      save_line_notes (first_bb->index, head, tail);
      rm_line_notes (head, tail);
    }

  /* Remove REG_SAVE_NOTEs from the first insn of the region; they are
     not handled by the note removal below.  */
  if (INSN_P (head))
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	  remove_note (head, note);
    }

  /* Remove remaining note insns from the block.  */
  rm_other_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  current_sched_info->queue_must_finish_empty = 1;

  target_bb = first_bb;
  schedule_block (&target_bb, n_insns);

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_n_insns == n_insns);

  /* Scheduling may have moved the region boundaries.  */
  head = current_sched_info->head;
  tail = current_sched_info->tail;

  if (write_symbols != NO_DEBUG)
    restore_line_notes (head, tail);

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB became unreachable (no predecessors left): delete it.  */
    {
      gcc_assert (first_bb != last_bb
		  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}
/* The main entry point in this file.  Form extended basic blocks by
   chaining likely-taken fallthru edges, schedule each one, and then
   update register life information.  */
void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;
  sbitmap large_region_blocks, blocks;
  int any_large_regions;

  /* Select the minimum fallthru-edge probability required to extend an
     ebb across a block boundary; with profile feedback a separate
     parameter applies.  */
  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Nothing to schedule when only the fixed blocks exist.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  current_sched_info = &ebb_sched_info;

  sched_init ();

  compute_bb_for_insn ();

  /* Initialize the bookkeeping bitmaps.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
  bitmap_initialize (&ebb_head, 0);
  bitmap_clear (&ebb_head);
  bitmap_initialize (&ebb_tail, 0);
  bitmap_clear (&ebb_tail);

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      /* Extend the ebb along fallthru edges, stopping at labels, at the
	 exit block, and at edges that are too improbable.  */
      for (;;)
	{
	  edge e;
	  edge_iterator ei;
	  tail = BB_END (bb);
	  if (bb->next_bb == EXIT_BLOCK_PTR
	      || LABEL_P (BB_HEAD (bb->next_bb)))
	    break;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if ((e->flags & EDGE_FALLTHRU) != 0)
	      break;
	  if (! e)
	    break;
	  if (e->probability <= probability_cutoff)
	    break;
	  bb = bb->next_bb;
	}

      /* Trim leading notes/labels and trailing notes off the region
	 before handing it to schedule_ebb.  */
      while (head != tail)
	{
	  if (NOTE_P (head))
	    head = NEXT_INSN (head);
	  else if (NOTE_P (tail))
	    tail = PREV_INSN (tail);
	  else if (LABEL_P (head))
	    head = NEXT_INSN (head);
	  else
	    break;
	}

      /* Record the ebb boundaries for the life-update pass below.  */
      bitmap_set_bit (&ebb_head, BLOCK_NUM (head));
      bb = schedule_ebb (head, tail);
      bitmap_set_bit (&ebb_tail, bb->index);
    }
  bitmap_clear (&dont_calc_deps);

  gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO);
  /* Life info was detached during scheduling (DETACH_LIFE_INFO);
     reattach it now that all blocks — including any created during
     scheduling — exist.  */
  attach_life_info ();

  allocate_reg_life_data ();

  /* Start by assuming every block needs the global update.  */
  any_large_regions = 0;
  large_region_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (large_region_blocks);
  FOR_EACH_BB (bb)
    SET_BIT (large_region_blocks, bb->index);

  blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (blocks);

  /* A block that both starts and ends an ebb and has glat info can have
     its life info updated locally; all other blocks take part in the
     global update.  */
  FOR_EACH_BB (bb)
    {
      int bbi;

      bbi = bb->index;

      if (!bitmap_bit_p (&ebb_head, bbi)
	  || !bitmap_bit_p (&ebb_tail, bbi)
	  || !glat_start[bbi])
	any_large_regions = 1;
      else
	{
	  SET_BIT (blocks, bbi);
	  RESET_BIT (large_region_blocks, bbi);
	}
    }

  update_life_info (blocks, UPDATE_LIFE_LOCAL, 0);
  sbitmap_free (blocks);

  if (any_large_regions)
    {
      update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, 0);

#ifdef ENABLE_CHECKING
      /* NOTE(review): no consistency check is performed here even with
	 checking enabled — intentional?  */
#endif
    }
  sbitmap_free (large_region_blocks);

  bitmap_clear (&ebb_head);
  bitmap_clear (&ebb_tail);

  /* Reposition the prologue and epilogue notes in case scheduling moved
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  sched_finish ();
}
/* INSN has been added to (REMOVE_P == 0) or removed from (REMOVE_P != 0)
   the current region; keep the region insn counter in sync.  */
static void
add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  n_insns += remove_p ? -1 : 1;
}
/* BB was added after AFTER.  A block inserted after the exit block must
   skip dependency calculation; a block appended after the ebb's last
   block becomes the new LAST_BB.  */
static void
add_block1 (basic_block bb, basic_block after)
{
  if (after == EXIT_BLOCK_PTR)
    {
      /* Don't (re)compute dependencies for this block later.  */
      bitmap_set_bit (&dont_calc_deps, bb->index);
      return;
    }

  if (after == last_bb)
    last_bb = bb;
}
/* Decide which basic block scheduled insns should be placed into next.
   INSN is the insn just scheduled into BB, or NULL when the scheduler
   asks for the next target block.  NOTE(review): contract inferred from
   the hook's use — confirm against the advance_target_bb field in
   sched-int.h.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      /* A control-flow insn taken from a different block — unless it is
	 a branchy speculation check, or follows one at the end of BB —
	 keeps BB as the target.  */
      if (BLOCK_FOR_INSN (insn) != bb
	  && control_flow_insn_p (insn)
	  && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
	  && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
	{
	  /* BB itself must not end in a jump, and its successor must
	     start with the basic-block note.  */
	  gcc_assert (!control_flow_insn_p (BB_END (bb))
		      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
	  return bb;
	}
      else
	return 0;
    }
  else
    {
      /* Advance to the next block that contains something beyond its
	 basic-block note; we must not run past the ebb's last block.  */
      do
	{
	  gcc_assert (bb != last_bb);

	  bb = bb->next_bb;
	}
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}
/* CFG fixup hook invoked during recovery-block creation.  If the block
   with index JUMP_BB_NEXTI was the last block of the current ebb, the
   block with index JUMP_BBI takes its place as LAST_BB.  BBI must never
   be the last block itself.  */
static void
fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (last_bb->index == jump_bb_nexti)
    last_bb = BASIC_BLOCK (jump_bbi);
}
#ifdef ENABLE_CHECKING
/* Return nonzero if BB is the first (LEAF_P == 0) or the last
   (LEAF_P != 0) block of one of the scheduled ebbs; used only for
   consistency checking.  */
static int
ebb_head_or_leaf_p (basic_block bb, int leaf_p)
{
  bitmap_head *boundary_set = leaf_p ? &ebb_tail : &ebb_head;

  return bitmap_bit_p (boundary_set, bb->index);
}
#endif