spirv: Use linear_alloc for parsing-only data

All the vtn_* structures and arrays are used only during the lifetime of
spirv_to_nir(): we don't need to free them individually or steal
them out, and some of them are smaller than the 5-pointer header
required for ralloc allocations.

These properties make them a good candidate for using an
arena-style allocation.

Change the code to create a linear context and use that for all the vtn_*
allocations.  Note that NIR data structures still go through ralloc,
since we steal them (through the nir_shader) at the end, i.e. they
outlive the parsing.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25279>
This commit is contained in:
Caio Oliveira 2023-09-16 22:59:05 -07:00 committed by Marge Bot
parent 89afcc94ea
commit d5b4b7356e
7 changed files with 79 additions and 60 deletions

View file

@ -263,7 +263,7 @@ void _vtn_fail_value_not_pointer(struct vtn_builder *b,
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
struct vtn_ssa_value *val = vtn_zalloc(b, struct vtn_ssa_value);
val->type = glsl_get_bare_type(type);
if (glsl_type_is_cmat(type)) {
@ -275,7 +275,7 @@ vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
val->def = nir_undef(&b->nb, num_components, bit_size);
} else {
unsigned elems = glsl_get_length(val->type);
val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
val->elems = vtn_alloc_array(b, struct vtn_ssa_value *, elems);
if (glsl_type_is_array_or_matrix(type)) {
const struct glsl_type *elem_type = glsl_get_array_element(type);
for (unsigned i = 0; i < elems; i++)
@ -296,7 +296,7 @@ struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
const struct glsl_type *type)
{
struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
struct vtn_ssa_value *val = vtn_zalloc(b, struct vtn_ssa_value);
val->type = glsl_get_bare_type(type);
if (glsl_type_is_cmat(type)) {
@ -313,7 +313,7 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
constant->values);
} else {
unsigned elems = glsl_get_length(val->type);
val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
val->elems = vtn_alloc_array(b, struct vtn_ssa_value *, elems);
if (glsl_type_is_array_or_matrix(type)) {
const struct glsl_type *elem_type = glsl_get_array_element(type);
for (unsigned i = 0; i < elems; i++) {
@ -535,7 +535,7 @@ vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
*/
#if UTIL_ARCH_BIG_ENDIAN
{
uint32_t *copy = ralloc_array(b, uint32_t, word_count);
uint32_t *copy = vtn_alloc_array(b, uint32_t, word_count);
for (unsigned i = 0; i < word_count; i++)
copy[i] = util_bswap32(words[i]);
words = copy;
@ -744,7 +744,7 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
case SpvOpExecutionModeId: {
struct vtn_value *val = vtn_untyped_value(b, target);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
struct vtn_decoration *dec = vtn_zalloc(b, struct vtn_decoration);
switch (opcode) {
case SpvOpDecorate:
case SpvOpDecorateId:
@ -776,7 +776,7 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
case SpvOpMemberName: {
struct vtn_value *val = vtn_untyped_value(b, target);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
struct vtn_decoration *dec = vtn_zalloc(b, struct vtn_decoration);
dec->scope = VTN_DEC_STRUCT_MEMBER_NAME0 - *(w++);
@ -794,7 +794,7 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
for (; w < w_end; w++) {
struct vtn_value *val = vtn_untyped_value(b, *w);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
struct vtn_decoration *dec = vtn_zalloc(b, struct vtn_decoration);
dec->group = group;
if (opcode == SpvOpGroupDecorate) {
@ -917,7 +917,7 @@ vtn_type_without_array(struct vtn_type *type)
static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
struct vtn_type *dest = ralloc(b, struct vtn_type);
struct vtn_type *dest = vtn_alloc(b, struct vtn_type);
*dest = *src;
switch (src->base_type) {
@ -938,17 +938,17 @@ vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
break;
case vtn_base_type_struct:
dest->members = ralloc_array(b, struct vtn_type *, src->length);
dest->members = vtn_alloc_array(b, struct vtn_type *, src->length);
memcpy(dest->members, src->members,
src->length * sizeof(src->members[0]));
dest->offsets = ralloc_array(b, unsigned, src->length);
dest->offsets = vtn_alloc_array(b, unsigned, src->length);
memcpy(dest->offsets, src->offsets,
src->length * sizeof(src->offsets[0]));
break;
case vtn_base_type_function:
dest->params = ralloc_array(b, struct vtn_type *, src->length);
dest->params = vtn_alloc_array(b, struct vtn_type *, src->length);
memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
break;
}
@ -1541,7 +1541,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
val = vtn_push_value(b, w[1], vtn_value_type_type);
vtn_fail_if(val->type != NULL,
"Only pointers can have forward declarations");
val->type = rzalloc(b, struct vtn_type);
val->type = vtn_zalloc(b, struct vtn_type);
val->type->id = w[1];
}
@ -1643,8 +1643,8 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
unsigned num_fields = count - 2;
val->type->base_type = vtn_base_type_struct;
val->type->length = num_fields;
val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
val->type->offsets = ralloc_array(b, unsigned, num_fields);
val->type->members = vtn_alloc_array(b, struct vtn_type *, num_fields);
val->type->offsets = vtn_alloc_array(b, unsigned, num_fields);
val->type->packed = false;
NIR_VLA(struct glsl_struct_field, fields, count);
@ -1714,7 +1714,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
const unsigned num_params = count - 3;
val->type->length = num_params;
val->type->params = ralloc_array(b, struct vtn_type *, num_params);
val->type->params = vtn_alloc_array(b, struct vtn_type *, num_params);
for (unsigned i = 0; i < count - 3; i++) {
val->type->params[i] = vtn_get_type(b, w[i + 3]);
}
@ -1743,7 +1743,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
bool has_forward_pointer = false;
if (val->value_type == vtn_value_type_invalid) {
val->value_type = vtn_value_type_type;
val->type = rzalloc(b, struct vtn_type);
val->type = vtn_zalloc(b, struct vtn_type);
val->type->id = w[1];
val->type->base_type = vtn_base_type_pointer;
val->type->storage_class = storage_class;
@ -2701,13 +2701,13 @@ vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
* to a SPIR-V value has the right type. Using bare types everywhere
* ensures that we can pointer-compare.
*/
struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
struct vtn_ssa_value *val = vtn_zalloc(b, struct vtn_ssa_value);
val->type = glsl_get_bare_type(type);
if (!glsl_type_is_vector_or_scalar(type)) {
unsigned elems = glsl_get_length(val->type);
val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
val->elems = vtn_alloc_array(b, struct vtn_ssa_value *, elems);
if (glsl_type_is_array_or_matrix(type) || glsl_type_is_cmat(type)) {
const struct glsl_type *elem_type = glsl_get_array_element(type);
for (unsigned i = 0; i < elems; i++)
@ -3484,7 +3484,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
if (opcode == SpvOpImageTexelPointer) {
struct vtn_value *val =
vtn_push_value(b, w[2], vtn_value_type_image_pointer);
val->image = ralloc(b, struct vtn_image_pointer);
val->image = vtn_alloc(b, struct vtn_image_pointer);
val->image->image = vtn_nir_deref(b, w[3]);
val->image->coord = get_image_coord(b, w[4]);
@ -4218,11 +4218,11 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
}
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
vtn_composite_copy(struct vtn_builder *b, struct vtn_ssa_value *src)
{
assert(!src->is_variable);
struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
struct vtn_ssa_value *dest = vtn_zalloc(b, struct vtn_ssa_value);
dest->type = src->type;
if (glsl_type_is_vector_or_scalar(src->type)) {
@ -4230,9 +4230,9 @@ vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
} else {
unsigned elems = glsl_get_length(src->type);
dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
dest->elems = vtn_alloc_array(b, struct vtn_ssa_value *, elems);
for (unsigned i = 0; i < elems; i++)
dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
dest->elems[i] = vtn_composite_copy(b, src->elems[i]);
}
return dest;
@ -4358,7 +4358,7 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
vtn_vector_construct(b, glsl_get_vector_elements(type->type),
elems, srcs);
} else {
ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
ssa->elems = vtn_alloc_array(b, struct vtn_ssa_value *, elems);
for (unsigned i = 0; i < elems; i++)
ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
}
@ -4601,7 +4601,7 @@ vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
unsigned name_words;
entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
gl_shader_stage stage = vtn_stage_for_execution_model(w[1]);
gl_shader_stage stage = vtn_stage_for_execution_model(w[1]);
vtn_fail_if(stage == MESA_SHADER_NONE,
"Unsupported execution model: %s (%u)",
spirv_executionmodel_to_string(w[1]), w[1]);
@ -4615,7 +4615,7 @@ vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
/* Entry points enumerate which global variables are used. */
size_t start = 3 + name_words;
b->interface_ids_count = count - start;
b->interface_ids = ralloc_array(b, uint32_t, b->interface_ids_count);
b->interface_ids = vtn_alloc_array(b, uint32_t, b->interface_ids_count);
memcpy(b->interface_ids, &w[start], b->interface_ids_count * 4);
qsort(b->interface_ids, b->interface_ids_count, 4, cmp_uint32_t);
}
@ -5058,7 +5058,7 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
case SpvCapabilityFragmentBarycentricKHR:
spv_check_supported(fragment_barycentric, cap);
break;
case SpvCapabilityShaderEnqueueAMDX:
spv_check_supported(shader_enqueue, cap);
break;
@ -5743,7 +5743,7 @@ static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
struct vtn_ssa_value *dest = vtn_zalloc(b, struct vtn_ssa_value);
dest->type = src1->type;
if (src1->is_variable || src2->is_variable) {
@ -5771,7 +5771,7 @@ vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
} else {
unsigned elems = glsl_get_length(src1->type);
dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
dest->elems = vtn_alloc_array(b, struct vtn_ssa_value *, elems);
for (unsigned i = 0; i < elems; i++) {
dest->elems[i] = vtn_nir_select(b, src0,
src1->elems[i], src2->elems[i]);
@ -6696,8 +6696,14 @@ vtn_create_builder(const uint32_t *words, size_t word_count,
{
/* Initialize the vtn_builder object */
struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
/* Allocate all the data that can be dropped after parsing using
* a cheaper allocation strategy.
*/
b->lin_ctx = linear_context(b);
struct spirv_to_nir_options *dup_options =
ralloc(b, struct spirv_to_nir_options);
vtn_alloc(b, struct spirv_to_nir_options);
*dup_options = *options;
b->spirv = words;
@ -6782,7 +6788,7 @@ vtn_create_builder(const uint32_t *words, size_t word_count,
}
b->value_id_bound = value_id_bound;
b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
b->values = vtn_zalloc_array(b, struct vtn_value, value_id_bound);
if (b->options->environment == NIR_SPIRV_VULKAN && b->version < 0x10400)
b->vars_used_indirectly = _mesa_pointer_set_create(b);

View file

@ -42,9 +42,9 @@ wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
if (glsl_type_is_matrix(val->type))
return val;
struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
struct vtn_ssa_value *dest = vtn_zalloc(b, struct vtn_ssa_value);
dest->type = glsl_get_bare_type(val->type);
dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
dest->elems = vtn_alloc_array(b, struct vtn_ssa_value *, 1);
dest->elems[0] = val;
return dest;

View file

@ -172,7 +172,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
switch (opcode) {
case SpvOpFunction: {
vtn_assert(b->func == NULL);
b->func = rzalloc(b, struct vtn_function);
b->func = vtn_zalloc(b, struct vtn_function);
list_inithead(&b->func->body);
b->func->linkage = SpvLinkageTypeMax;
@ -271,7 +271,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
case SpvOpLabel: {
vtn_assert(b->block == NULL);
b->block = rzalloc(b, struct vtn_block);
b->block = vtn_zalloc(b, struct vtn_block);
b->block->label = w;
vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;
@ -367,7 +367,7 @@ vtn_parse_switch(struct vtn_builder *b,
if (case_entry) {
cse = case_entry->data;
} else {
cse = rzalloc(b, struct vtn_case);
cse = vtn_zalloc(b, struct vtn_case);
cse->block = case_block;
cse->block->switch_case = cse;
util_dynarray_init(&cse->values, b);

View file

@ -386,7 +386,7 @@ static const char *remap_clc_opcode(enum OpenCLstd_Entrypoints opcode)
static struct vtn_type *
get_vtn_type_for_glsl_type(struct vtn_builder *b, const struct glsl_type *type)
{
struct vtn_type *ret = rzalloc(b, struct vtn_type);
struct vtn_type *ret = vtn_zalloc(b, struct vtn_type);
assert(glsl_type_is_vector_or_scalar(type));
ret->type = type;
ret->length = glsl_get_vector_elements(type);
@ -397,7 +397,7 @@ get_vtn_type_for_glsl_type(struct vtn_builder *b, const struct glsl_type *type)
static struct vtn_type *
get_pointer_type(struct vtn_builder *b, struct vtn_type *t, SpvStorageClass storage_class)
{
struct vtn_type *ret = rzalloc(b, struct vtn_type);
struct vtn_type *ret = vtn_zalloc(b, struct vtn_type);
ret->type = nir_address_format_to_glsl_type(
vtn_mode_to_address_format(
b, vtn_storage_class_to_mode(b, storage_class, NULL, NULL)));

View file

@ -121,6 +121,17 @@ _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
vtn_fail("%s", #expr); \
} while (0)
/* These are used to allocate data that can be dropped at the end of
* the parsing. Any NIR data structure should keep using the ralloc,
* since they will outlive the parsing.
*/
#define vtn_alloc(B, TYPE) linear_alloc(B->lin_ctx, TYPE)
#define vtn_zalloc(B, TYPE) linear_zalloc(B->lin_ctx, TYPE)
#define vtn_alloc_array(B, TYPE, ELEMS) linear_alloc_array(B->lin_ctx, TYPE, ELEMS)
#define vtn_zalloc_array(B, TYPE, ELEMS) linear_zalloc_array(B->lin_ctx, TYPE, ELEMS)
#define vtn_alloc_size(B, SIZE) linear_alloc_child(B->lin_ctx, SIZE)
#define vtn_zalloc_size(B, SIZE) linear_zalloc_child(B->lin_ctx, SIZE)
enum vtn_value_type {
vtn_value_type_invalid = 0,
vtn_value_type_undef,
@ -617,6 +628,8 @@ struct vtn_decoration {
struct vtn_builder {
nir_builder nb;
linear_ctx *lin_ctx;
/* Used by vtn_fail to jump back to the beginning of SPIR-V compilation */
jmp_buf fail_jump;

View file

@ -304,14 +304,14 @@ structured_post_order_traversal(struct vtn_builder *b, struct vtn_block *block)
switch (branch[0] & SpvOpCodeMask) {
case SpvOpBranch:
block->successors_count = 1;
block->successors = rzalloc(b, struct vtn_successor);
block->successors = vtn_zalloc(b, struct vtn_successor);
block->successors[0].block = vtn_block(b, branch[1]);
structured_post_order_traversal(b, block->successors[0].block);
break;
case SpvOpBranchConditional:
block->successors_count = 2;
block->successors = rzalloc_array(b, struct vtn_successor, 2);
block->successors = vtn_zalloc_array(b, struct vtn_successor, 2);
block->successors[0].block = vtn_block(b, branch[2]);
block->successors[1].block = vtn_block(b, branch[3]);
@ -343,7 +343,7 @@ structured_post_order_traversal(struct vtn_builder *b, struct vtn_block *block)
vtn_parse_switch(b, block->branch, &cases);
block->successors_count = list_length(&cases);
block->successors = rzalloc_array(b, struct vtn_successor, block->successors_count);
block->successors = vtn_zalloc_array(b, struct vtn_successor, block->successors_count);
/* The 'Rules for Structured Control-flow constructs' already guarantee
* that the labels of the targets are ordered in a way that if
@ -389,7 +389,7 @@ structured_post_order_traversal(struct vtn_builder *b, struct vtn_block *block)
case SpvOpEmitMeshTasksEXT:
case SpvOpUnreachable:
block->successors_count = 1;
block->successors = rzalloc(b, struct vtn_successor);
block->successors = vtn_zalloc(b, struct vtn_successor);
break;
default:
@ -403,7 +403,7 @@ static void
sort_blocks(struct vtn_builder *b)
{
struct vtn_block **ordered_blocks =
rzalloc_array(b, struct vtn_block *, b->func->block_count);
vtn_zalloc_array(b, struct vtn_block *, b->func->block_count);
b->func->ordered_blocks = ordered_blocks;
@ -524,7 +524,7 @@ cmp_succ_block_pos(const void *pa, const void *pb)
static void
create_constructs(struct vtn_builder *b)
{
struct vtn_construct *func_construct = rzalloc(b, struct vtn_construct);
struct vtn_construct *func_construct = vtn_zalloc(b, struct vtn_construct);
func_construct->type = vtn_construct_type_function;
func_construct->start_pos = 0;
func_construct->end_pos = b->func->ordered_blocks_count;
@ -539,7 +539,7 @@ create_constructs(struct vtn_builder *b)
const unsigned end_pos = vtn_block(b, block->merge[1])->pos;
if (merge_op == SpvOpLoopMerge) {
struct vtn_construct *loop = rzalloc(b, struct vtn_construct);
struct vtn_construct *loop = vtn_zalloc(b, struct vtn_construct);
loop->type = vtn_construct_type_loop;
loop->start_pos = block->pos;
loop->end_pos = end_pos;
@ -551,7 +551,7 @@ create_constructs(struct vtn_builder *b)
loop->continue_pos = continue_block->pos;
if (!vtn_is_single_block_loop(loop)) {
struct vtn_construct *cont = rzalloc(b, struct vtn_construct);
struct vtn_construct *cont = vtn_zalloc(b, struct vtn_construct);
cont->type = vtn_construct_type_continue;
cont->parent = loop;
cont->start_pos = loop->continue_pos;
@ -579,7 +579,7 @@ create_constructs(struct vtn_builder *b)
"an OpBranchConditional instruction that has different "
"True Label and False Label operands where neither are "
"declared merge blocks or Continue Targets.");
struct vtn_construct *sel = rzalloc(b, struct vtn_construct);
struct vtn_construct *sel = vtn_zalloc(b, struct vtn_construct);
sel->type = vtn_construct_type_selection;
sel->start_pos = loop->start_pos;
sel->end_pos = loop->continue_pos;
@ -593,7 +593,7 @@ create_constructs(struct vtn_builder *b)
} else if (branch_op == SpvOpSwitch) {
vtn_assert(merge_op == SpvOpSelectionMerge);
struct vtn_construct *swtch = rzalloc(b, struct vtn_construct);
struct vtn_construct *swtch = vtn_zalloc(b, struct vtn_construct);
swtch->type = vtn_construct_type_switch;
swtch->start_pos = block->pos;
swtch->end_pos = end_pos;
@ -608,7 +608,7 @@ create_constructs(struct vtn_builder *b)
vtn_foreach_case_safe(cse, &cases) {
if (cse->block->pos < end_pos) {
struct vtn_block *case_block = cse->block;
struct vtn_construct *c = rzalloc(b, struct vtn_construct);
struct vtn_construct *c = vtn_zalloc(b, struct vtn_construct);
c->type = vtn_construct_type_case;
c->parent = swtch;
c->start_pos = case_block->pos;
@ -651,7 +651,7 @@ create_constructs(struct vtn_builder *b)
vtn_assert(merge_op == SpvOpSelectionMerge);
vtn_assert(branch_op == SpvOpBranchConditional);
struct vtn_construct *sel = rzalloc(b, struct vtn_construct);
struct vtn_construct *sel = vtn_zalloc(b, struct vtn_construct);
sel->type = vtn_construct_type_selection;
sel->start_pos = block->pos;
sel->end_pos = end_pos;

View file

@ -53,7 +53,7 @@ vtn_align_pointer(struct vtn_builder *b, struct vtn_pointer *ptr,
if (addr_format == nir_address_format_logical)
return ptr;
struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
struct vtn_pointer *copy = vtn_alloc(b, struct vtn_pointer);
*copy = *ptr;
copy->deref = nir_alignment_deref_cast(&b->nb, ptr->deref, alignment, 0);
@ -115,7 +115,7 @@ vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
* leaking them any further than actually specified in the SPIR-V.
*/
if (aa.access & ~ptr->access) {
struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
struct vtn_pointer *copy = vtn_alloc(b, struct vtn_pointer);
*copy = *ptr;
copy->access |= aa.access;
return copy;
@ -177,7 +177,7 @@ vtn_access_chain_create(struct vtn_builder *b, unsigned length)
/* Subtract 1 from the length since there's already one built in */
size_t size = sizeof(*chain) +
(MAX2(length, 1) - 1) * sizeof(chain->link[0]);
chain = rzalloc_size(b, size);
chain = vtn_zalloc_size(b, size);
chain->length = length;
return chain;
@ -404,7 +404,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
* a pointer which just has a block index and a later access chain
* will dereference deeper.
*/
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
struct vtn_pointer *ptr = vtn_zalloc(b, struct vtn_pointer);
ptr->mode = base->mode;
ptr->type = type;
ptr->block_index = block_index;
@ -489,7 +489,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
access |= type->access;
}
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
struct vtn_pointer *ptr = vtn_zalloc(b, struct vtn_pointer);
ptr->mode = base->mode;
ptr->type = type;
ptr->var = base->var;
@ -1922,7 +1922,7 @@ vtn_pointer_from_ssa(struct vtn_builder *b, nir_def *ssa,
{
vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
struct vtn_pointer *ptr = vtn_zalloc(b, struct vtn_pointer);
struct vtn_type *without_array =
vtn_type_without_array(ptr_type->deref);
@ -2106,12 +2106,12 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
break;
}
struct vtn_variable *var = rzalloc(b, struct vtn_variable);
struct vtn_variable *var = vtn_zalloc(b, struct vtn_variable);
var->type = type;
var->mode = mode;
var->base_location = -1;
val->pointer = rzalloc(b, struct vtn_pointer);
val->pointer = vtn_zalloc(b, struct vtn_pointer);
val->pointer->mode = var->mode;
val->pointer->type = var->type;
val->pointer->ptr_type = ptr_type;
@ -2632,7 +2632,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
struct vtn_type *sampler_type = vtn_value(b, w[1], vtn_value_type_type)->type;
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
struct vtn_type *ptr_type = rzalloc(b, struct vtn_type);
struct vtn_type *ptr_type = vtn_zalloc(b, struct vtn_type);
ptr_type->base_type = vtn_base_type_pointer;
ptr_type->deref = sampler_type;
ptr_type->storage_class = SpvStorageClassUniform;