treewide: Drop nir_ssa_for_src users
Via Coccinelle patch:

@@
expression b, s, n;
@@
-nir_ssa_for_src(b, *s, n)
+s->ssa

@@
expression b, s, n;
@@
-nir_ssa_for_src(b, s, n)
+s.ssa
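
The second rule differs from the first only in that the source is passed by value rather than through a pointer. A patch like this is applied treewide with Coccinelle's spatch tool, e.g. spatch --sp-file drop_nir_ssa_for_src.cocci --in-place src/ (the .cocci file name here is illustrative, not necessarily the one actually used).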
Reviewed-by: Christian Gmeiner <cgmeiner@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25247>
72 changed files with 135 additions and 145 deletions
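
Why the rewrite is a pure simplification: NIR sources are now always SSA, so nir_ssa_for_src had degenerated into a plain field read. A minimal sketch of the helper's final shape, reconstructed for illustration rather than quoted from the tree:

/* Every nir_src now holds an SSA def, so neither the builder nor the
 * requested component count affects the result; the helper just returns
 * the def behind the source.  (Reconstructed sketch, not the tree's code.)
 */
static inline nir_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
{
   return src.ssa;
}

Hence each hunk below replaces a nir_ssa_for_src(...) call with a direct .ssa (or ->ssa) access and drops the now-unused builder and component-count arguments.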
@@ -485,7 +485,8 @@ ac_nir_calc_io_offset(nir_builder *b,
     * so the instruction effectively reads/writes another input/output
     * when it has an offset
     */
-   nir_def *offset_op = nir_imul(b, base_stride, nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));
+   nir_def *offset_op = nir_imul(b, base_stride,
+                                 nir_get_io_offset_src(intrin)->ssa);

    /* component is in bytes */
    unsigned const_op = nir_intrinsic_component(intrin) * component_stride;

@@ -334,7 +334,7 @@ hs_output_lds_offset(nir_builder *b,
    }

    if (per_vertex) {
-      nir_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
+      nir_def *vertex_index = nir_get_io_arrayed_index_src(intrin)->ssa;
       nir_def *vertex_index_off = nir_imul_imm(b, vertex_index, output_vertex_size);

       off = nir_iadd_nuw(b, off, vertex_index_off);

@@ -361,7 +361,7 @@ hs_per_vertex_output_vmem_offset(nir_builder *b,
    nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
    nir_def *patch_offset = nir_imul(b, rel_patch_id, nir_imul_imm(b, out_vertices_per_patch, 16u));

-   nir_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
+   nir_def *vertex_index = nir_get_io_arrayed_index_src(intrin)->ssa;
    nir_def *vertex_index_off = nir_imul_imm(b, vertex_index, 16u);

    return nir_iadd_nuw(b, nir_iadd_nuw(b, patch_offset, vertex_index_off), io_offset);

@@ -51,7 +51,7 @@ radv_nir_lower_primitive_shading_rate(nir_shader *nir, enum amd_gfx_level gfx_le

          b.cursor = nir_before_instr(instr);

-         nir_def *val = nir_ssa_for_src(&b, intr->src[1], 1);
+         nir_def *val = intr->src[1].ssa;

          /* x_rate = (shadingRate & (Horizontal2Pixels | Horizontal4Pixels)) ? 0x1 : 0x0; */
          nir_def *x_rate = nir_iand_imm(&b, val, 12);

@@ -992,8 +992,8 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
             continue;

          b->cursor = nir_instr_remove(&intrin->instr);
-         nir_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
-         nir_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
+         nir_def *hit_t = intrin->src[0].ssa;
+         nir_def *hit_kind = intrin->src[1].ssa;
          nir_def *min_t = nir_load_ray_t_min(b);
          nir_def *max_t = nir_load_ray_t_max(b);

@@ -25,7 +25,7 @@ pass(struct nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
       return false;

    b->cursor = nir_before_instr(&intr->instr);
-   nir_src_rewrite(offset, nir_u2u16(b, nir_ssa_for_src(b, *offset, 1)));
+   nir_src_rewrite(offset, nir_u2u16(b, offset->ssa));
    return true;
 }

@@ -84,8 +84,7 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
    /* Add LOD offset to first level to get the interesting LOD */
    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
    if (lod_idx >= 0) {
-      lod = nir_iadd(
-         b, lod, nir_u2u32(b, nir_ssa_for_src(b, tex->src[lod_idx].src, 1)));
+      lod = nir_iadd(b, lod, nir_u2u32(b, tex->src[lod_idx].src.ssa));
    }

    if (tex->sampler_dim == GLSL_SAMPLER_DIM_2D && tex->is_array) {

@@ -104,7 +104,7 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
    b->cursor = nir_before_instr(&instr->instr);

    nir_def *color = nir_trim_vector(b,
-                                    nir_ssa_for_src(b, instr->src[3], 4),
+                                    instr->src[3].ssa,
                                     num_components);
    nir_def *formatted = NULL;

@@ -140,8 +140,7 @@ v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,

    int start_comp = nir_intrinsic_component(intr);
    unsigned location = nir_intrinsic_io_semantics(intr).location;
-   nir_def *src = nir_ssa_for_src(b, intr->src[0],
-                                  intr->num_components);
+   nir_def *src = intr->src[0].ssa;
    /* Save off the components of the position for the setup of VPM inputs
     * read by fixed function HW.
     */

@@ -181,7 +181,7 @@ lower_store_bitsize(nir_builder *b,
    if (nir_src_bit_size(intr->src[value_idx]) == 32)
       return false;

-   nir_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
+   nir_def *value = intr->src[value_idx].ssa;

    b->cursor = nir_before_instr(&intr->instr);

@@ -38,7 +38,7 @@ static nir_def *
 v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
 {
    bool is_store = instr->intrinsic == nir_intrinsic_store_scratch;
-   nir_def *offset = nir_ssa_for_src(b, instr->src[is_store ? 1 : 0], 1);
+   nir_def *offset = instr->src[is_store ? 1 : 0].ssa;

    assert(nir_intrinsic_align_mul(instr) >= 4);
    assert(nir_intrinsic_align_offset(instr) == 0);

@@ -88,8 +88,7 @@ v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
    b->cursor = nir_before_instr(&instr->instr);

    nir_def *offset = v3d_nir_scratch_offset(b, instr);
-   nir_def *value = nir_ssa_for_src(b, instr->src[0],
-                                    instr->num_components);
+   nir_def *value = instr->src[0].ssa;

    for (int i = 0; i < instr->num_components; i++) {
       if (!(nir_intrinsic_write_mask(instr) & (1 << i)))

@@ -622,7 +622,7 @@ lower_tex_src(nir_builder *b,
       }

       index = nir_iadd(b, index,
-                       nir_imul_imm(b, nir_ssa_for_src(b, deref->arr.index, 1),
+                       nir_imul_imm(b, deref->arr.index.ssa,
                                     array_elements));
    }

@@ -760,7 +760,7 @@ lower_image_deref(nir_builder *b,
       }

       index = nir_iadd(b, index,
-                       nir_imul_imm(b, nir_ssa_for_src(b, deref->arr.index, 1),
+                       nir_imul_imm(b, deref->arr.index.ssa,
                                     array_elements));
    }

@@ -56,7 +56,7 @@ get_block_array_index(nir_builder *b, nir_deref_instr *deref,

          const_array_offset += arr_index * array_elements;
       } else {
-         nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+         nir_def *arr_index = deref->arr.index.ssa;
          arr_index = nir_umin(b, arr_index, nir_imm_int(b, arr_size - 1));
          nir_def *arr_offset = nir_amul_imm(b, arr_index, array_elements);
          if (nonconst_index)

@@ -226,7 +226,7 @@ lower_deref(nir_builder *b, struct lower_samplers_as_deref_state *state,
       assert((*p)->deref_type == nir_deref_type_array);

       new_deref = nir_build_deref_array(b, new_deref,
-                                        nir_ssa_for_src(b, (*p)->arr.index, 1));
+                                        (*p)->arr.index.ssa);
    }

    return new_deref;

@@ -351,7 +351,7 @@ nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref,
       switch ((*p)->deref_type) {
       case nir_deref_type_array:
       case nir_deref_type_ptr_as_array: {
-         nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+         nir_def *index = (*p)->arr.index.ssa;
          int stride = type_get_array_stride((*p)->type, size_align);
          offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
          break;

@@ -179,8 +179,7 @@ static bool inline_functions_pass(nir_builder *b,
    const unsigned num_params = call->num_params;
    NIR_VLA(nir_def *, params, num_params);
    for (unsigned i = 0; i < num_params; i++) {
-      params[i] = nir_ssa_for_src(b, call->params[i],
-                                  call->callee->params[i].num_components);
+      params[i] = call->params[i].ssa;
    }

    nir_inline_function_impl(b, call->callee->impl, params, NULL);

@@ -85,10 +85,10 @@ nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
          if (alpha_to_one) {
             alpha = nir_imm_float(&b, 1.0);
          } else if (intr->intrinsic == nir_intrinsic_store_deref) {
-            alpha = nir_channel(&b, nir_ssa_for_src(&b, intr->src[1], 4),
+            alpha = nir_channel(&b, intr->src[1].ssa,
                                 3);
          } else {
-            alpha = nir_channel(&b, nir_ssa_for_src(&b, intr->src[0], 4),
+            alpha = nir_channel(&b, intr->src[0].ssa,
                                 3);
          }

@@ -122,7 +122,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
             if (!(options & nir_lower_indirect_array_deref_of_vec_store))
                continue;

-            nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
+            nir_def *index = deref->arr.index.ssa;
             build_write_masked_stores(&b, vec_deref, value, index,
                                       0, num_components);
          }

@@ -143,7 +143,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
             intrin->def.num_components = num_components;
             intrin->num_components = num_components;

-            nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
+            nir_def *index = deref->arr.index.ssa;
             nir_def *scalar =
                nir_vector_extract(&b, &intrin->def, index);
             if (scalar->parent_instr->type == nir_instr_type_undef) {

@@ -80,7 +80,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, nir_shader *shader)
    if (is_color_output(shader, out)) {
       b->cursor = nir_before_instr(&intr->instr);
       int src = intr->intrinsic == nir_intrinsic_store_deref ? 1 : 0;
-      s = nir_ssa_for_src(b, intr->src[src], intr->num_components);
+      s = intr->src[src].ssa;
       s = nir_fsat(b, s);
       nir_src_rewrite(&intr->src[src], s);
    }

@@ -90,7 +90,7 @@ lower_clip_plane_store(nir_builder *b, nir_intrinsic_instr *instr,
          if (!(clip_plane_enable & (1 << (start + i))))
             components[i] = nir_imm_int(b, 0);
          else
-            components[i] = nir_channel(b, nir_ssa_for_src(b, instr->src[1], nir_src_num_components(instr->src[1])), i);
+            components[i] = nir_channel(b, instr->src[1].ssa, i);
       } else
          components[i] = nir_undef(b, 1, 32);
    }

@@ -106,7 +106,7 @@ lower_clip_plane_store(nir_builder *b, nir_intrinsic_instr *instr,
       nir_store_deref(b, deref, nir_imm_int(b, 0), 1);
    } else {
       /* storing using a variable index */
-      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = deref->arr.index.ssa;
       unsigned length = glsl_get_length(nir_deref_instr_parent(deref)->type);

       recursive_if_chain(b, deref, instr->src[1].ssa, clip_plane_enable, index, 0, length);

@@ -37,7 +37,7 @@ lower_pos_write(nir_builder *b, nir_intrinsic_instr *intr,

    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *pos = intr->src[1].ssa;
    nir_def *def = nir_vec4(b,
                            nir_channel(b, pos, 0),
                            nir_channel(b, pos, 1),

@@ -48,7 +48,7 @@ lower_discard_if(nir_builder *b, nir_intrinsic_instr *instr, void *cb_data)

    b->cursor = nir_before_instr(&instr->instr);

-   nir_if *if_stmt = nir_push_if(b, nir_ssa_for_src(b, instr->src[0], 1));
+   nir_if *if_stmt = nir_push_if(b, instr->src[0].ssa);
    switch (instr->intrinsic) {
    case nir_intrinsic_discard_if:
       nir_discard(b);

@@ -201,7 +201,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
     */
    if (array_index != NULL) {
       assert((*p)->deref_type == nir_deref_type_array);
-      *array_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+      *array_index = (*p)->arr.index.ssa;
       p++;
    }

@@ -225,7 +225,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
          unsigned size = type_size((*p)->type, bts);

          nir_def *mul =
-            nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
+            nir_amul_imm(b, (*p)->arr.index.ssa, size);

          offset = nir_iadd(b, offset, mul);
       } else if ((*p)->deref_type == nir_deref_type_struct) {

@@ -1847,7 +1847,7 @@ nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
       assert(stride > 0);

       unsigned offset_bit_size = addr_get_offset_bit_size(base_addr, addr_format);
-      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = deref->arr.index.ssa;
       nir_def *offset;

       /* If the access chain has been declared in-bounds, then we know it doesn't

@@ -47,7 +47,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref, nir_variable *var,
     * inputs), skip the outermost array index. Process the rest normally.
     */
    if (nir_is_arrayed_io(var, b->shader->info.stage)) {
-      *array_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+      *array_index = (*p)->arr.index.ssa;
       p++;
    }

@@ -130,7 +130,7 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *value = intr->src[0].ssa;

    for (unsigned i = 0; i < intr->num_components; i++) {
       if (!(nir_intrinsic_write_mask(intr) & (1 << i)))

@@ -195,7 +195,7 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *value = intr->src[0].ssa;
    nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;

    /* iterate wrmask instead of num_components to handle split components */

@@ -345,7 +345,7 @@ clone_deref_array(nir_builder *b, nir_deref_instr *dst_tail,
       dst_tail = clone_deref_array(b, dst_tail, parent);

    return nir_build_deref_array(b, dst_tail,
-                                nir_ssa_for_src(b, src_head->arr.index, 1));
+                                src_head->arr.index.ssa);
 }

 static void

@@ -412,7 +412,7 @@ lower_store_output_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+   nir_def *value = intr->src[1].ssa;

    nir_variable **chan_vars = get_channel_variables(split_outputs, var);
    for (unsigned i = 0; i < intr->num_components; i++) {

@@ -172,7 +172,7 @@ get_deref_reg_location(nir_deref_instr *deref,
          base_offset = 0;
       }

-      nir_def *index = nir_i2iN(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
+      nir_def *index = nir_i2iN(b, d->arr.index.ssa, 32);
       nir_def *offset = nir_imul_imm(b, index, inner_array_size);

       /* Avoid emitting iadd with 0, which is otherwise common, since this

@@ -712,8 +712,7 @@ nir_legalize_16bit_sampler_srcs(nir_shader *nir,

          b.cursor = nir_before_instr(&tex->instr);
          nir_def *conv =
-            convert(&b, nir_ssa_for_src(&b, tex->src[i].src,
-                                        tex->src[i].src.ssa->num_components));
+            convert(&b, tex->src[i].src.ssa);
          nir_src_rewrite(&tex->src[i].src, conv);
          changed = true;
       }

@@ -76,7 +76,7 @@ lower_tex_src_to_offset(nir_builder *b,

       index = nir_iadd(b, index,
                        nir_imul_imm(b,
-                                    nir_ssa_for_src(b, deref->arr.index, 1),
+                                    deref->arr.index.ssa,
                                     array_elements));
    }

@@ -88,7 +88,7 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)

    nir_src index = intr->src[is_store ? 1 : 0];
    nir_src *offset_src = nir_get_io_offset_src(intr);
-   nir_def *offset = nir_ssa_for_src(b, *offset_src, 1);
+   nir_def *offset = offset_src->ssa;

    nir_def *address =
       nir_iadd(b,

@@ -110,8 +110,7 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
    /* This is safe to call on scalar things but it would be silly */
    assert(intrin->def.num_components > 1);

-   nir_def *value = nir_ssa_for_src(b, intrin->src[0],
-                                    intrin->num_components);
+   nir_def *value = intrin->src[0].ssa;
    nir_def *reads[NIR_MAX_VEC_COMPONENTS];

    for (unsigned i = 0; i < intrin->num_components; i++) {

@@ -623,7 +622,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
    case nir_intrinsic_vote_any:
    case nir_intrinsic_vote_all:
       if (options->lower_vote_trivial)
-         return nir_ssa_for_src(b, intrin->src[0], 1);
+         return intrin->src[0].ssa;
       break;

    case nir_intrinsic_vote_feq:

@@ -116,7 +116,7 @@ project_src(nir_builder *b, nir_tex_instr *tex)
          continue;
       }
       nir_def *unprojected =
-         nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
+         tex->src[i].src.ssa;
       nir_def *projected = nir_fmul(b, unprojected, inv_proj);

       /* Array indices don't get projected, so make an new vector with the

@@ -225,7 +225,7 @@ lower_rect(nir_builder *b, nir_tex_instr *tex)

    if (coord_index != -1) {
       nir_def *coords =
-         nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
+         tex->src[coord_index].src.ssa;
       nir_src_rewrite(&tex->src[coord_index].src, nir_fmul(b, coords, scale));
    }
 }

@@ -241,7 +241,7 @@ lower_rect_tex_scale(nir_builder *b, nir_tex_instr *tex)

    if (coord_index != -1) {
       nir_def *coords =
-         nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
+         tex->src[coord_index].src.ssa;
       nir_src_rewrite(&tex->src[coord_index].src, nir_fmul(b, coords, scale));
    }
 }

@@ -916,7 +916,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)

    int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);
    assert(bias_idx >= 0);
-   lod = nir_fadd(b, nir_channel(b, lod, 1), nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
+   lod = nir_fadd(b, nir_channel(b, lod, 1), tex->src[bias_idx].src.ssa);
    txl->src[tex->num_srcs - 1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);

    nir_def_init(&txl->instr, &txl->def,

@@ -941,7 +941,7 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)

    if (coord_index != -1) {
       nir_def *src =
-         nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
+         tex->src[coord_index].src.ssa;

       /* split src into components: */
       nir_def *comp[4];

@@ -1245,7 +1245,7 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
    unsigned dest_size = nir_tex_instr_dest_size(tex);

    b->cursor = nir_before_instr(&tex->instr);
-   nir_def *lod = nir_ssa_for_src(b, tex->src[lod_idx].src, 1);
+   nir_def *lod = tex->src[lod_idx].src.ssa;

    /* Replace the non-0-LOD in the initial TXS operation by a 0-LOD. */
    nir_src_rewrite(&tex->src[lod_idx].src, nir_imm_int(b, 0));

@@ -45,7 +45,7 @@ get_io_index(nir_builder *b, nir_deref_instr *deref)
          unsigned size = glsl_get_length((*p)->type);

          nir_def *mul =
-            nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
+            nir_amul_imm(b, (*p)->arr.index.ssa, size);

          offset = nir_iadd(b, offset, mul);
       } else

@@ -87,7 +87,7 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)

    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

-   nir_def *byte_offset = nir_ssa_for_src(b, intr->src[1], 1);
+   nir_def *byte_offset = intr->src[1].ssa;
    nir_def *vec4_offset = nir_ushr_imm(b, byte_offset, 4);

    unsigned align_mul = nir_intrinsic_align_mul(intr);

@@ -58,7 +58,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
    /* Increase all UBO binding points by 1. */
    if (intr->intrinsic == nir_intrinsic_load_ubo &&
        !b->shader->info.first_ubo_is_default_ubo) {
-      nir_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
+      nir_def *old_idx = intr->src[0].ssa;
       nir_def *new_idx = nir_iadd_imm(b, old_idx, 1);
       nir_src_rewrite(&intr->src[0], new_idx);
       return true;

@@ -66,7 +66,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)

    if (intr->intrinsic == nir_intrinsic_load_uniform) {
       nir_def *ubo_idx = nir_imm_int(b, 0);
-      nir_def *uniform_offset = nir_ssa_for_src(b, intr->src[0], 1);
+      nir_def *uniform_offset = intr->src[0].ssa;

       assert(intr->def.bit_size >= 8);
       nir_def *load_result;

@@ -55,7 +55,7 @@ lower_viewport_transform_instr(nir_builder *b, nir_intrinsic_instr *intr,
    b->cursor = nir_before_instr(&intr->instr);

    /* Grab the source and viewport */
-   nir_def *input_point = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *input_point = intr->src[1].ssa;
    nir_def *scale = nir_load_viewport_scale(b);
    nir_def *offset = nir_load_viewport_offset(b);

@@ -257,7 +257,7 @@ lower_interp_deref_or_load_baryc_at_offset(lower_wpos_ytransform_state *state,

    b->cursor = nir_before_instr(&intr->instr);

-   offset = nir_ssa_for_src(b, intr->src[offset_src], 2);
+   offset = intr->src[offset_src].ssa;
    flip_y = nir_fmul(b, nir_channel(b, offset, 1),
                      nir_channel(b, get_transform(state), 0));
    nir_src_rewrite(&intr->src[offset_src],

@@ -103,15 +103,14 @@ split_wrmask(nir_builder *b, nir_intrinsic_instr *intr)
    unsigned num_srcs = info->num_srcs;
    unsigned value_idx = value_src(intr->intrinsic);
    unsigned offset_idx = offset_src(intr->intrinsic);
-   unsigned num_comp = nir_intrinsic_src_components(intr, value_idx);

    unsigned wrmask = nir_intrinsic_write_mask(intr);
    while (wrmask) {
       unsigned first_component = ffs(wrmask) - 1;
       unsigned length = ffs(~(wrmask >> first_component)) - 1;

-      nir_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
-      nir_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
+      nir_def *value = intr->src[value_idx].ssa;
+      nir_def *offset = intr->src[offset_idx].ssa;

       /* swizzle out the consecutive components that we'll store
        * in this iteration:

@@ -45,7 +45,7 @@ normalize_cubemap_coords(nir_builder *b, nir_instr *instr, void *data)
       return false;

    nir_def *orig_coord =
-      nir_ssa_for_src(b, tex->src[idx].src, nir_tex_instr_src_size(tex, idx));
+      tex->src[idx].src.ssa;
    assert(orig_coord->num_components >= 3);

    nir_def *orig_xyz = nir_trim_vector(b, orig_coord, 3);

@@ -312,7 +312,7 @@ build_small_constant_load(nir_builder *b, nir_deref_instr *deref,
    nir_def *imm = nir_imm_intN_t(b, constant->data, constant->bit_size);

    assert(deref->deref_type == nir_deref_type_array);
-   nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+   nir_def *index = deref->arr.index.ssa;

    nir_def *shift = nir_imul_imm(b, index, constant->bit_stride);

@@ -111,7 +111,7 @@ get_linear_array_offset(nir_builder *b, nir_deref_instr *deref)
    for (nir_deref_instr **p = &path.path[1]; *p; p++) {
       switch ((*p)->deref_type) {
       case nir_deref_type_array: {
-         nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+         nir_def *index = (*p)->arr.index.ssa;
          int stride = glsl_array_size((*p)->type);
          if (stride >= 0)
             offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));

@@ -309,7 +309,7 @@ ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_d
    b->cursor = nir_before_instr(&tex->instr);

    unsigned ncomp = tex->coord_components;
-   nir_def *src = nir_ssa_for_src(b, tex->src[coord_idx].src, ncomp);
+   nir_def *src = tex->src[coord_idx].src.ssa;

    assume(ncomp >= 1);
    nir_def *ai = nir_channel(b, src, ncomp - 1);

@@ -287,7 +287,7 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
       return false;
    }

-   nir_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
+   nir_def *ubo_offset = instr->src[1].ssa;
    int const_offset = 0;

    handle_partial_const(b, &ubo_offset, &const_offset);

@@ -534,7 +534,7 @@ fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)

    b->cursor = nir_before_instr(instr);

-   nir_def *offset = nir_ssa_for_src(b, intr->src[0], 1);
+   nir_def *offset = intr->src[0].ssa;

    /* We'd like to avoid a sequence like:
     *

@@ -606,7 +606,7 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
    unsigned base = nir_intrinsic_base(instr);
    nir_def *index = nir_imm_int(b, const_state->constant_data_ubo);
    nir_def *offset =
-      nir_iadd_imm(b, nir_ssa_for_src(b, instr->src[0], 1), base);
+      nir_iadd_imm(b, instr->src[0].ssa, base);

    nir_def *result =
       nir_load_ubo(b, num_components, 32, index, offset,

@@ -78,8 +78,8 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
       unsigned num_comp = nir_intrinsic_src_components(intr, 0);
       unsigned wrmask = nir_intrinsic_has_write_mask(intr) ?
          nir_intrinsic_write_mask(intr) : BITSET_MASK(num_comp);
-      nir_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
-      nir_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
+      nir_def *val = intr->src[0].ssa;
+      nir_def *off = intr->src[offset_src_idx].ssa;

       for (unsigned i = 0; i < num_comp; i++) {
          if (!(wrmask & BITFIELD_BIT(i)))

@@ -115,7 +115,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
       assert(num_comp == 1);

       nir_def *offset = nir_iadd_imm(b,
-         nir_ssa_for_src(b, intr->src[0], 1), 4);
+         intr->src[0].ssa, 4);

       nir_def *upper = nir_load_kernel_input(b, 1, 32, offset);

@@ -136,7 +136,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
       offset_src_idx = 0;
    }

-   nir_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
+   nir_def *off = intr->src[offset_src_idx].ssa;

    for (unsigned i = 0; i < num_comp; i++) {
       nir_intrinsic_instr *load =

@@ -247,7 +247,7 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    bool load = intr->intrinsic != nir_intrinsic_store_global;

-   nir_def *addr64 = nir_ssa_for_src(b, intr->src[load ? 0 : 1], 1);
+   nir_def *addr64 = intr->src[load ? 0 : 1].ssa;
    nir_def *addr = nir_unpack_64_2x32(b, addr64);

    /*

@@ -283,7 +283,7 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
       return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
    } else {
       unsigned num_comp = nir_intrinsic_src_components(intr, 0);
-      nir_def *value = nir_ssa_for_src(b, intr->src[0], num_comp);
+      nir_def *value = intr->src[0].ssa;
       for (unsigned off = 0; off < num_comp; off += 4) {
          unsigned c = MIN2(num_comp - off, 4);
          nir_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);

@@ -57,8 +57,8 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
    if (is_intrinsic_store(intr->intrinsic)) {
       unsigned num_comp = nir_intrinsic_src_components(intr, 0);
       unsigned wrmask = nir_intrinsic_write_mask(intr);
-      nir_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
-      nir_def *addr = nir_ssa_for_src(b, intr->src[1], 1);
+      nir_def *val = intr->src[0].ssa;
+      nir_def *addr = intr->src[1].ssa;

       for (unsigned off = 0; off < num_comp; off += 4) {
          unsigned c = MIN2(num_comp - off, 4);

@@ -82,7 +82,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
    } else {
       unsigned num_comp = nir_intrinsic_dest_components(intr);
       unsigned bit_size = intr->def.bit_size;
-      nir_def *addr = nir_ssa_for_src(b, intr->src[0], 1);
+      nir_def *addr = intr->src[0].ssa;
       nir_def *components[num_comp];

       for (unsigned off = 0; off < num_comp;) {

@@ -374,7 +374,7 @@ build_bindless(struct tu_device *dev, nir_builder *b,
    if (deref->deref_type == nir_deref_type_var)
       return nir_imm_int(b, idx);

-   nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+   nir_def *arr_index = deref->arr.index.ssa;
    return nir_iadd_imm(b, nir_imul_imm(b, arr_index, 2), idx);
 }

@@ -398,7 +398,7 @@ build_bindless(struct tu_device *dev, nir_builder *b,
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);

-      nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *arr_index = deref->arr.index.ssa;
       desc_offset = nir_iadd(b, desc_offset,
                              nir_imul_imm(b, arr_index, descriptor_stride));
    }

@@ -255,7 +255,7 @@ get_aoa_deref_offset(nir_builder *b,
       assert(deref->deref_type == nir_deref_type_array);

       /* This level's element size is the previous level's array size */
-      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = deref->arr.index.ssa;
       assert(deref->arr.index.ssa);
       offset = nir_iadd(b, offset,
                         nir_imul_imm(b, index, array_size));

@@ -477,7 +477,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
        */
       b.cursor = nir_before_instr(instr);
       nir_def *offset =
-         nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
+         nir_iadd_imm(&b, intrin->src[0].ssa,
                       nir_intrinsic_base(intrin));

       if (temp_const_ubo_name == NULL)

@@ -71,7 +71,7 @@ lower_pos_write(nir_builder *b, struct nir_instr *instr, nir_variable **flip)

    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *pos = intr->src[1].ssa;
    nir_def *flip_y = d3d12_get_state_var(b, D3D12_STATE_VAR_Y_FLIP, "d3d12_FlipY",
                                          glsl_float_type(), flip);
    nir_def *def = nir_vec4(b,

@@ -224,7 +224,7 @@ lower_uint_color_write(nir_builder *b, struct nir_instr *instr, bool is_signed)

    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *col = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+   nir_def *col = intr->src[1].ssa;
    nir_def *def = is_signed ? nir_format_float_to_snorm(b, col, bits) :
                               nir_format_float_to_unorm(b, col, bits);
    if (is_signed)

@@ -342,7 +342,7 @@ invert_depth_impl(nir_builder *b, struct invert_depth_state *state)

    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *pos = intr->src[1].ssa;

    if (state->viewport_index) {
       nir_push_if(b, nir_test_mask(b, nir_ishl(b, nir_imm_int(b, 1), state->viewport_index), state->viewport_mask));

@@ -652,7 +652,7 @@ lower_triangle_strip_store(nir_builder *b, nir_intrinsic_instr *intr,
       return;

    nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, varyings[var->data.location]), index);
-   nir_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+   nir_def *value = intr->src[1].ssa;
    nir_store_deref(b, deref, value, 0xf);
    nir_instr_remove(&intr->instr);
 }

@@ -57,7 +57,7 @@ lower_ubo_to_uniform(nir_builder *b, nir_instr *instr, void *_data)
    b->cursor = nir_before_instr(instr);

    /* Undo the operations done in nir_lower_uniforms_to_ubo. */
-   nir_def *ubo_offset = nir_ssa_for_src(b, intr->src[1], 1);
+   nir_def *ubo_offset = intr->src[1].ssa;
    nir_def *range_base = nir_imm_int(b, nir_intrinsic_range_base(intr));

    nir_def *uniform_offset =

@@ -61,7 +61,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
    }

    unsigned set = ir3_shader_descriptor_set(b->shader->info.stage);
-   nir_def *src = nir_ssa_for_src(b, intr->src[buffer_src], 1);
+   nir_def *src = intr->src[buffer_src].ssa;
    src = nir_iadd_imm(b, src, desc_offset);
    /* An out-of-bounds index into an SSBO/image array can cause a GPU fault
     * on access to the descriptor (I don't see any hw mechanism to bound the

@@ -236,7 +236,7 @@ get_aoa_deref_offset(nir_builder *b,
       assert(deref->deref_type == nir_deref_type_array);

       /* This level's element size is the previous level's array size */
-      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = deref->arr.index.ssa;
       assert(deref->arr.index.ssa);
       offset = nir_iadd(b, offset,
                         nir_imul_imm(b, index, array_size));

@@ -494,7 +494,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
       b.cursor = nir_instr_remove(&intrin->instr);

       nir_def *offset =
-         nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
+         nir_iadd_imm(&b, intrin->src[0].ssa,
                       nir_intrinsic_base(intrin));

       assert(load_size < b.shader->constant_data_size);

@@ -101,9 +101,8 @@ lima_nir_lower_txp_instr(nir_builder *b, nir_instr *instr,
     * step back and use load_input SSA instead of mov as a source for
     * newly constructed vec4
     */
-   nir_def *proj_ssa = nir_ssa_for_src(b, tex->src[proj_idx].src, 1);
-   nir_def *coords_ssa = nir_ssa_for_src(b, tex->src[coords_idx].src,
-                                         nir_tex_instr_src_size(tex, coords_idx));
+   nir_def *proj_ssa = tex->src[proj_idx].src.ssa;
+   nir_def *coords_ssa = tex->src[coords_idx].src.ssa;

    int proj_idx_in_vec = -1;
    nir_def *load_input = get_proj_index(coords_ssa->parent_instr,

@@ -398,12 +398,12 @@ LowerSplit64BitVar::split_load_deref_array(nir_intrinsic_instr *intr, nir_src& i
    auto vars = get_var_pair(old_var);

    auto deref1 = nir_build_deref_var(b, vars.first);
-   auto deref_array1 = nir_build_deref_array(b, deref1, nir_ssa_for_src(b, index, 1));
+   auto deref_array1 = nir_build_deref_array(b, deref1, index.ssa);
    auto load1 =
       nir_build_load_deref(b, 2, 64, &deref_array1->def, (enum gl_access_qualifier)0);

    auto deref2 = nir_build_deref_var(b, vars.second);
-   auto deref_array2 = nir_build_deref_array(b, deref2, nir_ssa_for_src(b, index, 1));
+   auto deref_array2 = nir_build_deref_array(b, deref2, index.ssa);

    auto load2 = nir_build_load_deref(
       b, old_components - 2, 64, &deref_array2->def, (enum gl_access_qualifier)0);

@@ -426,13 +426,13 @@ LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr,

    auto deref1 = nir_build_deref_var(b, vars.first);
    auto deref_array1 =
-      nir_build_deref_array(b, deref1, nir_ssa_for_src(b, deref->arr.index, 1));
+      nir_build_deref_array(b, deref1, deref->arr.index.ssa);

    nir_build_store_deref(b, &deref_array1->def, src_xy, 3);

    auto deref2 = nir_build_deref_var(b, vars.second);
    auto deref_array2 =
-      nir_build_deref_array(b, deref2, nir_ssa_for_src(b, deref->arr.index, 1));
+      nir_build_deref_array(b, deref2, deref->arr.index.ssa);

    if (old_components == 3)
       nir_build_store_deref(b,

@@ -669,11 +669,11 @@ LowerSplit64BitVar::split_reduction3(nir_alu_instr *alu,
 {
    nir_def *src[2][2];

-   src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2);
-   src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2);
+   src[0][0] = nir_trim_vector(b, alu->src[0].src.ssa, 2);
+   src[0][1] = nir_trim_vector(b, alu->src[1].src.ssa, 2);

-   src[1][0] = nir_channel(b, nir_ssa_for_src(b, alu->src[0].src, 3), 2);
-   src[1][1] = nir_channel(b, nir_ssa_for_src(b, alu->src[1].src, 3), 2);
+   src[1][0] = nir_channel(b, alu->src[0].src.ssa, 2);
+   src[1][1] = nir_channel(b, alu->src[1].src.ssa, 2);

    return split_reduction(src, op1, op2, reduction);
 }

@@ -686,11 +686,11 @@ LowerSplit64BitVar::split_reduction4(nir_alu_instr *alu,
 {
    nir_def *src[2][2];

-   src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2);
-   src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2);
+   src[0][0] = nir_trim_vector(b, alu->src[0].src.ssa, 2);
+   src[0][1] = nir_trim_vector(b, alu->src[1].src.ssa, 2);

-   src[1][0] = nir_channels(b, nir_ssa_for_src(b, alu->src[0].src, 4), 0xc);
-   src[1][1] = nir_channels(b, nir_ssa_for_src(b, alu->src[1].src, 4), 0xc);
+   src[1][0] = nir_channels(b, alu->src[0].src.ssa, 0xc);
+   src[1][1] = nir_channels(b, alu->src[1].src.ssa, 0xc);

    return split_reduction(src, op1, op2, reduction);
 }

@@ -324,7 +324,7 @@ NirLowerIOToVector::clone_deref_array(nir_builder *b,

    dst_tail = clone_deref_array(b, dst_tail, parent);

-   return nir_build_deref_array(b, dst_tail, nir_ssa_for_src(b, src_head->arr.index, 1));
+   return nir_build_deref_array(b, dst_tail, src_head->arr.index.ssa);
 }

 NirLowerFSOutToVector::NirLowerFSOutToVector():

@@ -159,14 +159,14 @@ lower_txl_txf_array_or_cube(nir_builder *b, nir_tex_instr *tex)
    assert(lod_idx >= 0 || bias_idx >= 0);

    nir_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
-   nir_def *lod = (lod_idx >= 0) ? nir_ssa_for_src(b, tex->src[lod_idx].src, 1)
+   nir_def *lod = (lod_idx >= 0) ? tex->src[lod_idx].src.ssa
                                  : nir_get_texture_lod(b, tex);

    if (bias_idx >= 0)
-      lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
+      lod = nir_fadd(b, lod, tex->src[bias_idx].src.ssa);

    if (min_lod_idx >= 0)
-      lod = nir_fmax(b, lod, nir_ssa_for_src(b, tex->src[min_lod_idx].src, 1));
+      lod = nir_fmax(b, lod, tex->src[min_lod_idx].src.ssa);

    /* max lod? */

@@ -282,11 +282,11 @@ r600_nir_lower_cube_to_2darray_impl(nir_builder *b, nir_instr *instr, void *_opt
    if (tex->op == nir_texop_txd) {
       int ddx_idx = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
       nir_src_rewrite(&tex->src[ddx_idx].src,
-                      nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddx_idx].src, 3), 0.5));
+                      nir_fmul_imm(b, tex->src[ddx_idx].src.ssa, 0.5));

       int ddy_idx = nir_tex_instr_src_index(tex, nir_tex_src_ddy);
       nir_src_rewrite(&tex->src[ddy_idx].src,
-                      nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddy_idx].src, 3), 0.5));
+                      nir_fmul_imm(b, tex->src[ddy_idx].src.ssa, 0.5));
    }

    auto new_coord = nir_vec3(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1), z);

@@ -51,7 +51,7 @@ r600_clone_deref_array(nir_builder *b,

    dst_tail = r600_clone_deref_array(b, dst_tail, parent);

-   return nir_build_deref_array(b, dst_tail, nir_ssa_for_src(b, src_head->arr.index, 1));
+   return nir_build_deref_array(b, dst_tail, src_head->arr.index.ssa);
 }

 static bool

@@ -441,7 +441,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
    nir_def *lod = nir_get_texture_lod(b, tex);

    if (bias_idx >= 0)
-      lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
+      lod = nir_fadd(b, lod, tex->src[bias_idx].src.ssa);
    lod = nir_fadd_imm(b, lod, -1.0);
    txl->src[s] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);

@@ -72,8 +72,8 @@ static nir_def *lower_vri_intrin_vrri(struct nir_builder *b,
                                       nir_instr *instr, void *data_cb)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-   nir_def *old_index = nir_ssa_for_src(b, intrin->src[0], 3);
-   nir_def *delta = nir_ssa_for_src(b, intrin->src[1], 1);
+   nir_def *old_index = intrin->src[0].ssa;
+   nir_def *delta = intrin->src[1].ssa;
    return nir_vec3(b, nir_channel(b, old_index, 0),
                    nir_iadd(b, nir_channel(b, old_index, 1), delta),
                    nir_channel(b, old_index, 2));

@@ -83,7 +83,7 @@ static nir_def *lower_vri_intrin_lvd(struct nir_builder *b,
                                      nir_instr *instr, void *data_cb)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-   return nir_ssa_for_src(b, intrin->src[0], 3);
+   return intrin->src[0].ssa;
 }

 static nir_def *

@@ -193,7 +193,7 @@ static nir_def *lower_vri_instr(struct nir_builder *b,
    case nir_intrinsic_get_ssbo_size: {
       /* Ignore the offset component. */
       b->cursor = nir_before_instr(instr);
-      nir_def *resource = nir_ssa_for_src(b, intrin->src[0], 2);
+      nir_def *resource = intrin->src[0].ssa;
       nir_src_rewrite(&intrin->src[0], resource);
       return NULL;
    }

@@ -97,7 +97,7 @@ rusticl_lower_input_instr(struct nir_builder *b, nir_instr *instr, void *_)
       return NULL;

    nir_def *ubo_idx = nir_imm_int(b, 0);
-   nir_def *uniform_offset = nir_ssa_for_src(b, intrins->src[0], 1);
+   nir_def *uniform_offset = intrins->src[0].ssa;

    assert(intrins->def.bit_size >= 8);
    nir_def *load_result =

@@ -1429,7 +1429,7 @@ brw_pack_primitive_indices_instr(nir_builder *b, nir_intrinsic_instr *intrin,

    nir_src *data_src = &intrin->src[1];
    nir_def *data_def =
-      nir_ssa_for_src(b, *data_src, vertices_per_primitive);
+      data_src->ssa;

    nir_def *new_data =
       nir_ior(b, nir_ishl_imm(b, nir_channel(b, data_def, 0), 0),

@@ -215,7 +215,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
             /* Multiply by the number of per-vertex slots. */
             nir_def *vertex_offset =
                nir_imul(b,
-                        nir_ssa_for_src(b, *vertex, 1),
+                        vertex->ssa,
                         nir_imm_int(b,
                                     vue_map->num_per_vertex_slots));

@@ -223,7 +223,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
             nir_src *offset = nir_get_io_offset_src(intrin);
             nir_def *total_offset =
                nir_iadd(b, vertex_offset,
-                        nir_ssa_for_src(b, *offset, 1));
+                        offset->ssa);

             nir_src_rewrite(offset, total_offset);
          }

@@ -193,8 +193,8 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
          switch (intrin->intrinsic) {
          case nir_intrinsic_report_ray_intersection: {
             b->cursor = nir_instr_remove(&intrin->instr);
-            nir_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
-            nir_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
+            nir_def *hit_t = intrin->src[0].ssa;
+            nir_def *hit_kind = intrin->src[1].ssa;
             nir_def *min_t = nir_load_ray_t_min(b);
             nir_def *max_t = nir_load_global(b, t_addr, 4, 1, 32);

@@ -131,7 +131,7 @@ get_ray_query_shadow_addr(nir_builder *b,
    nir_deref_instr **p = &path.path[1];
    for (; *p; p++) {
       if ((*p)->deref_type == nir_deref_type_array) {
-         nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+         nir_def *index = (*p)->arr.index.ssa;

          /**/
          *out_state_deref = nir_build_deref_array(b, *out_state_deref, index);

@@ -1496,7 +1496,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
     * by constant folding.
     */
    assert(!nir_src_is_const(intrin->src[0]));
-   nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
+   nir_def *offset = nir_iadd_imm(b, intrin->src[0].ssa,
                                   nir_intrinsic_base(intrin));

    unsigned load_size = intrin->def.num_components *

@@ -1561,7 +1561,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);

-      array_index = nir_ssa_for_src(b, deref->arr.index, 1);
+      array_index = deref->arr.index.ssa;
    } else {
       array_index = nir_imm_int(b, 0);
    }

@@ -878,7 +878,7 @@ lower_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_def *index = NULL;
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
-      index = nir_ssa_for_src(b, deref->arr.index, 1);
+      index = deref->arr.index.ssa;
    } else {
       index = nir_imm_int(b, 0);
    }

@@ -900,7 +900,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
     * by constant folding.
     */
    assert(!nir_src_is_const(intrin->src[0]));
-   nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
+   nir_def *offset = nir_iadd_imm(b, intrin->src[0].ssa,
                                   nir_intrinsic_base(intrin));

    nir_def *data;

@@ -1036,7 +1036,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
           */
          assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

-         index = nir_ssa_for_src(b, deref->arr.index, 1);
+         index = deref->arr.index.ssa;
       }
    }
 }

@@ -516,7 +516,7 @@ st_nir_lower_atifs_samplers_instr(nir_builder *b, nir_instr *instr, void *data)
     * accidentally enables a cube array).
     */
    if (coord_components != tex->coord_components) {
-      nir_def *coords = nir_ssa_for_src(b, tex->src[coords_idx].src, tex->coord_components);
+      nir_def *coords = tex->src[coords_idx].src.ssa;
       nir_src_rewrite(&tex->src[coords_idx].src,
                       nir_resize_vector(b, coords, coord_components));
       tex->coord_components = coord_components;

@@ -94,7 +94,7 @@ st_nir_lower_fog_instr(nir_builder *b, nir_instr *instr, void *_state)

    b->cursor = nir_before_instr(instr);

-   nir_def *color = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *color = intr->src[0].ssa;
    color = nir_resize_vector(b, color, 4);

    nir_def *fog = fog_result(b, color, state->fog_mode, state->paramList);

@@ -360,7 +360,7 @@ clc_lower_nonnormalized_samplers(nir_shader *nir,
          int coords_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
          assert(coords_idx != -1);
          nir_def *coords =
-            nir_ssa_for_src(&b, tex->src[coords_idx].src, tex->coord_components);
+            tex->src[coords_idx].src.ssa;

          nir_def *txs = nir_i2f32(&b, nir_get_texture_size(&b, tex));

@@ -1662,7 +1662,7 @@ lower_fquantize2f16(struct nir_builder *b, nir_instr *instr, void *data)
     */
    nir_alu_instr *alu = nir_instr_as_alu(instr);
    nir_def *src =
-      nir_ssa_for_src(b, alu->src[0].src, nir_src_num_components(alu->src[0].src));
+      alu->src[0].src.ssa;

    nir_def *neg_inf_cond =
       nir_flt_imm(b, src, -65504.0f);

@@ -335,7 +335,7 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
                             nir_address_format_bit_size(ubo_format),
                             index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

-   nir_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
+   nir_def *offset = intrin->src[0].ssa;
    nir_def *load_data = nir_load_ubo(
       builder,
       intrin->def.num_components,

@@ -406,7 +406,7 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,

    const struct dxil_spirv_runtime_conf *rt_conf = data->rt_conf;

-   nir_def *pos = nir_ssa_for_src(builder, intrin->src[1], 4);
+   nir_def *pos = intrin->src[1].ssa;
    nir_def *y_pos = nir_channel(builder, pos, 1);
    nir_def *z_pos = nir_channel(builder, pos, 2);
    nir_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;

@@ -4584,11 +4584,9 @@ bi_lower_sample_mask_writes(nir_builder *b, nir_intrinsic_instr *intr,

    nir_def *orig = nir_load_sample_mask(b);

-   nir_src_rewrite(
-      &intr->src[0],
-      nir_b32csel(b, nir_load_multisampled_pan(b),
-                  nir_iand(b, orig, nir_ssa_for_src(b, intr->src[0], 1)),
-                  orig));
+   nir_src_rewrite(&intr->src[0],
+                   nir_b32csel(b, nir_load_multisampled_pan(b),
+                               nir_iand(b, orig, intr->src[0].ssa), orig));
    return true;
 }

@@ -67,7 +67,7 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data)
       if (tex->src[i].src_type != nir_tex_src_lod)
         continue;

-      nir_def *lod = nir_ssa_for_src(b, tex->src[i].src, 1);
+      nir_def *lod = tex->src[i].src.ssa;

       nir_def *biased = nir_fadd(b, lod, lod_bias);
       nir_def *clamped = nir_fmin(b, nir_fmax(b, biased, min_lod), max_lod);

@@ -46,8 +46,7 @@ nir_lower_image_bitsize(nir_builder *b, nir_intrinsic_instr *intr,

    b->cursor = nir_before_instr(&intr->instr);

-   nir_def *coord =
-      nir_ssa_for_src(b, intr->src[1], nir_src_num_components(intr->src[1]));
+   nir_def *coord = intr->src[1].ssa;

    nir_def *coord16 = nir_u2u16(b, coord);

@@ -498,7 +498,7 @@ pan_lower_fb_store(nir_builder *b, nir_intrinsic_instr *intr,
                    bool reorder_comps, unsigned nr_samples)
 {
    /* For stores, add conversion before */
-   nir_def *unpacked = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *unpacked = intr->src[0].ssa;
    unpacked = nir_pad_vec4(b, unpacked);

    /* Re-order the components */