Remove dead vect_recog_mixed_size_cond_pattern

vect_recog_mixed_size_cond_pattern only applies to COND_EXPRs whose
rhs1 is a COMPARISON_CLASS_P tree, a form that no longer appears - the
following removes it.  Its testcases still pass; I believe the
situation is mitigated by the bool pattern handling of the compare
use in COND_EXPRs.
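
To illustrate (a minimal sketch reusing the type names from the removed
pattern's documentation; the _b temporary is hypothetical):

    /* Form the pattern used to match: the comparison is embedded
       directly as rhs1 of the COND_EXPR.  */
    a_T = x_t CMP y_t ? b_T : c_T;

    /* Form GIMPLE produces today: the compare lives in a separate
       bool-typed stmt, and its use in the COND_EXPR is picked up
       by vect_recog_bool_pattern.  */
    _b = x_t CMP y_t;
    a_T = _b ? b_T : c_T;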

	* tree-vect-patterns.cc (type_conversion_p): Remove.
	(vect_recog_mixed_size_cond_pattern): Likewise.
	(vect_vect_recog_func_ptrs): Remove vect_recog_mixed_size_cond_pattern
	entry.
commit 4cfff6d413
parent c738a15c50
Author:    Richard Biener
Committer: Richard Biener
Date:      2024-10-26 14:27:14 +02:00

@@ -313,55 +313,6 @@ vect_get_internal_def (vec_info *vinfo, tree op)
   return NULL;
 }
 
-/* Check whether NAME, an ssa-name used in STMT_VINFO,
-   is a result of a type promotion, such that:
-     DEF_STMT: NAME = NOP (name0)
-   If CHECK_SIGN is TRUE, check that either both types are signed or both are
-   unsigned.  */
-
-static bool
-type_conversion_p (vec_info *vinfo, tree name, bool check_sign,
-                   tree *orig_type, gimple **def_stmt, bool *promotion)
-{
-  tree type = TREE_TYPE (name);
-  tree oprnd0;
-  enum vect_def_type dt;
-  stmt_vec_info def_stmt_info;
-
-  if (!vect_is_simple_use (name, vinfo, &dt, &def_stmt_info, def_stmt))
-    return false;
-
-  if (dt != vect_internal_def
-      && dt != vect_external_def && dt != vect_constant_def)
-    return false;
-
-  if (!*def_stmt)
-    return false;
-
-  if (!is_gimple_assign (*def_stmt))
-    return false;
-
-  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (*def_stmt)))
-    return false;
-
-  oprnd0 = gimple_assign_rhs1 (*def_stmt);
-
-  *orig_type = TREE_TYPE (oprnd0);
-  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*orig_type)
-      || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*orig_type)) && check_sign))
-    return false;
-
-  if (TYPE_PRECISION (type) >= (TYPE_PRECISION (*orig_type) * 2))
-    *promotion = true;
-  else
-    *promotion = false;
-
-  if (!vect_is_simple_use (oprnd0, vinfo, &dt))
-    return false;
-
-  return true;
-}
-
 /* Holds information about an input operand after some sign changes
    and type promotions have been peeled away.  */
 class vect_unpromoted_value {
@@ -5408,154 +5359,6 @@ vect_recog_mod_var_pattern (vec_info *vinfo,
   return pattern_stmt;
 }
 
-/* Function vect_recog_mixed_size_cond_pattern
-
-   Try to find the following pattern:
-
-     type x_t, y_t;
-     TYPE a_T, b_T, c_T;
-   loop:
-     S1  a_T = x_t CMP y_t ? b_T : c_T;
-
-   where type 'TYPE' is an integral type which has different size
-   from 'type'.  b_T and c_T are either constants (and if 'TYPE' is wider
-   than 'type', the constants need to fit into an integer type
-   with the same width as 'type') or results of conversion from 'type'.
-
-   Input:
-
-   * STMT_VINFO: The stmt from which the pattern search begins.
-
-   Output:
-
-   * TYPE_OUT: The type of the output of this pattern.
-
-   * Return value: A new stmt that will be used to replace the pattern.
-     Additionally a def_stmt is added.
-
-     a_it = x_t CMP y_t ? b_it : c_it;
-     a_T = (TYPE) a_it;  */
-
-static gimple *
-vect_recog_mixed_size_cond_pattern (vec_info *vinfo,
-                                    stmt_vec_info stmt_vinfo, tree *type_out)
-{
-  gimple *last_stmt = stmt_vinfo->stmt;
-  tree cond_expr, then_clause, else_clause;
-  tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
-  gimple *pattern_stmt, *def_stmt;
-  tree orig_type0 = NULL_TREE, orig_type1 = NULL_TREE;
-  gimple *def_stmt0 = NULL, *def_stmt1 = NULL;
-  bool promotion;
-  tree comp_scalar_type;
-
-  if (!is_gimple_assign (last_stmt)
-      || gimple_assign_rhs_code (last_stmt) != COND_EXPR
-      || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
-    return NULL;
-
-  cond_expr = gimple_assign_rhs1 (last_stmt);
-  then_clause = gimple_assign_rhs2 (last_stmt);
-  else_clause = gimple_assign_rhs3 (last_stmt);
-
-  if (!COMPARISON_CLASS_P (cond_expr))
-    return NULL;
-
-  comp_scalar_type = TREE_TYPE (TREE_OPERAND (cond_expr, 0));
-  comp_vectype = get_vectype_for_scalar_type (vinfo, comp_scalar_type);
-  if (comp_vectype == NULL_TREE)
-    return NULL;
-
-  type = TREE_TYPE (gimple_assign_lhs (last_stmt));
-  if (types_compatible_p (type, comp_scalar_type)
-      || ((TREE_CODE (then_clause) != INTEGER_CST
-           || TREE_CODE (else_clause) != INTEGER_CST)
-          && !INTEGRAL_TYPE_P (comp_scalar_type))
-      || !INTEGRAL_TYPE_P (type))
-    return NULL;
-
-  if ((TREE_CODE (then_clause) != INTEGER_CST
-       && !type_conversion_p (vinfo, then_clause, false,
-                              &orig_type0, &def_stmt0, &promotion))
-      || (TREE_CODE (else_clause) != INTEGER_CST
-          && !type_conversion_p (vinfo, else_clause, false,
-                                 &orig_type1, &def_stmt1, &promotion)))
-    return NULL;
-
-  if (orig_type0 && orig_type1
-      && !types_compatible_p (orig_type0, orig_type1))
-    return NULL;
-
-  if (orig_type0)
-    {
-      if (!types_compatible_p (orig_type0, comp_scalar_type))
-        return NULL;
-      then_clause = gimple_assign_rhs1 (def_stmt0);
-      itype = orig_type0;
-    }
-
-  if (orig_type1)
-    {
-      if (!types_compatible_p (orig_type1, comp_scalar_type))
-        return NULL;
-      else_clause = gimple_assign_rhs1 (def_stmt1);
-      itype = orig_type1;
-    }
-
-  HOST_WIDE_INT cmp_mode_size
-    = GET_MODE_UNIT_BITSIZE (TYPE_MODE (comp_vectype));
-
-  scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
-  if (GET_MODE_BITSIZE (type_mode) == cmp_mode_size)
-    return NULL;
-
-  vectype = get_vectype_for_scalar_type (vinfo, type);
-  if (vectype == NULL_TREE)
-    return NULL;
-
-  if (expand_vec_cond_expr_p (vectype, comp_vectype, TREE_CODE (cond_expr)))
-    return NULL;
-
-  if (itype == NULL_TREE)
-    itype = build_nonstandard_integer_type (cmp_mode_size,
-                                            TYPE_UNSIGNED (type));
-
-  if (itype == NULL_TREE
-      || GET_MODE_BITSIZE (SCALAR_TYPE_MODE (itype)) != cmp_mode_size)
-    return NULL;
-
-  vecitype = get_vectype_for_scalar_type (vinfo, itype);
-  if (vecitype == NULL_TREE)
-    return NULL;
-
-  if (!expand_vec_cond_expr_p (vecitype, comp_vectype, TREE_CODE (cond_expr)))
-    return NULL;
-
-  if (GET_MODE_BITSIZE (type_mode) > cmp_mode_size)
-    {
-      if ((TREE_CODE (then_clause) == INTEGER_CST
-           && !int_fits_type_p (then_clause, itype))
-          || (TREE_CODE (else_clause) == INTEGER_CST
-              && !int_fits_type_p (else_clause, itype)))
-        return NULL;
-    }
-
-  def_stmt = gimple_build_assign (vect_recog_temp_ssa_var (itype, NULL),
-                                  COND_EXPR, unshare_expr (cond_expr),
-                                  fold_convert (itype, then_clause),
-                                  fold_convert (itype, else_clause));
-  pattern_stmt = gimple_build_assign (vect_recog_temp_ssa_var (type, NULL),
-                                      NOP_EXPR, gimple_assign_lhs (def_stmt));
-
-  append_pattern_def_seq (vinfo, stmt_vinfo, def_stmt, vecitype);
-  *type_out = vectype;
-
-  vect_pattern_detected ("vect_recog_mixed_size_cond_pattern", last_stmt);
-
-  return pattern_stmt;
-}
-
 /* Helper function of vect_recog_bool_pattern.  Called recursively, return
    true if bool VAR can and should be optimized that way.  Assume it shouldn't
@@ -7398,7 +7201,6 @@ static vect_recog_func vect_vect_recog_func_ptrs[] = {
   { vect_recog_sat_add_pattern, "sat_add" },
   { vect_recog_sat_sub_pattern, "sat_sub" },
   { vect_recog_sat_trunc_pattern, "sat_trunc" },
-  { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
   { vect_recog_gcond_pattern, "gcond" },
   { vect_recog_bool_pattern, "bool" },
   /* This must come before mask conversion, and includes the parts