freedreno/a6xx: Use same LRZ state for draw and binning
Originally these were split as that is what the blob GL driver did (at least at the time). But that turns out to be unnecessary, so simplify things and combine them into a single LRZ state object like tu does. Signed-off-by: Rob Clark <robdclark@chromium.org> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19710>
This commit is contained in:
parent
2b186016a3
commit
d03d08668f
4 changed files with 12 additions and 26 deletions
|
|
@ -149,7 +149,7 @@ setup_state_map(struct fd_context *ctx)
|
|||
fd_context_add_map(ctx, FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER,
|
||||
BIT(FD6_GROUP_ZSA));
|
||||
fd_context_add_map(ctx, FD_DIRTY_ZSA | FD_DIRTY_BLEND | FD_DIRTY_PROG,
|
||||
BIT(FD6_GROUP_LRZ) | BIT(FD6_GROUP_LRZ_BINNING));
|
||||
BIT(FD6_GROUP_LRZ));
|
||||
fd_context_add_map(ctx, FD_DIRTY_PROG | FD_DIRTY_RASTERIZER_CLIP_PLANE_ENABLE,
|
||||
BIT(FD6_GROUP_PROG));
|
||||
fd_context_add_map(ctx, FD_DIRTY_RASTERIZER, BIT(FD6_GROUP_RASTERIZER));
|
||||
|
|
|
|||
|
|
@ -92,10 +92,10 @@ struct fd6_context {
|
|||
struct hash_table *tex_cache;
|
||||
|
||||
struct {
|
||||
/* previous binning/draw lrz state, which is a function of multiple
|
||||
* gallium stateobjs, but doesn't necessarily change as frequently:
|
||||
/* previous lrz state, which is a function of multiple gallium
|
||||
* stateobjs, but doesn't necessarily change as frequently:
|
||||
*/
|
||||
struct fd6_lrz_state lrz[2];
|
||||
struct fd6_lrz_state lrz;
|
||||
} last;
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -408,7 +408,7 @@ compute_ztest_mode(struct fd6_emit *emit, bool lrz_valid) assert_dt
|
|||
* to invalidate lrz.
|
||||
*/
|
||||
static struct fd6_lrz_state
|
||||
compute_lrz_state(struct fd6_emit *emit, bool binning_pass) assert_dt
|
||||
compute_lrz_state(struct fd6_emit *emit) assert_dt
|
||||
{
|
||||
struct fd_context *ctx = emit->ctx;
|
||||
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
|
||||
|
|
@ -417,9 +417,7 @@ compute_lrz_state(struct fd6_emit *emit, bool binning_pass) assert_dt
|
|||
|
||||
if (!pfb->zsbuf) {
|
||||
memset(&lrz, 0, sizeof(lrz));
|
||||
if (!binning_pass) {
|
||||
lrz.z_mode = compute_ztest_mode(emit, false);
|
||||
}
|
||||
lrz.z_mode = compute_ztest_mode(emit, false);
|
||||
return lrz;
|
||||
}
|
||||
|
||||
|
|
@ -433,8 +431,6 @@ compute_lrz_state(struct fd6_emit *emit, bool binning_pass) assert_dt
|
|||
if (blend->reads_dest || fs->writes_pos || fs->no_earlyz || fs->has_kill ||
|
||||
blend->base.alpha_to_coverage) {
|
||||
lrz.write = false;
|
||||
if (binning_pass)
|
||||
lrz.enable = false;
|
||||
}
|
||||
|
||||
/* if we change depthfunc direction, bail out on using LRZ. The
|
||||
|
|
@ -458,9 +454,7 @@ compute_lrz_state(struct fd6_emit *emit, bool binning_pass) assert_dt
|
|||
lrz.test = false;
|
||||
}
|
||||
|
||||
if (!binning_pass) {
|
||||
lrz.z_mode = compute_ztest_mode(emit, rsc->lrz_valid);
|
||||
}
|
||||
lrz.z_mode = compute_ztest_mode(emit, rsc->lrz_valid);
|
||||
|
||||
/* Once we start writing to the real depth buffer, we lock in the
|
||||
* direction for LRZ.. if we have to skip a LRZ write for any
|
||||
|
|
@ -480,18 +474,18 @@ compute_lrz_state(struct fd6_emit *emit, bool binning_pass) assert_dt
|
|||
}
|
||||
|
||||
static struct fd_ringbuffer *
|
||||
build_lrz(struct fd6_emit *emit, bool binning_pass) assert_dt
|
||||
build_lrz(struct fd6_emit *emit) assert_dt
|
||||
{
|
||||
struct fd_context *ctx = emit->ctx;
|
||||
struct fd6_context *fd6_ctx = fd6_context(ctx);
|
||||
struct fd6_lrz_state lrz = compute_lrz_state(emit, binning_pass);
|
||||
struct fd6_lrz_state lrz = compute_lrz_state(emit);
|
||||
|
||||
/* If the LRZ state has not changed, we can skip the emit: */
|
||||
if (!ctx->last.dirty &&
|
||||
!memcmp(&fd6_ctx->last.lrz[binning_pass], &lrz, sizeof(lrz)))
|
||||
!memcmp(&fd6_ctx->last.lrz, &lrz, sizeof(lrz)))
|
||||
return NULL;
|
||||
|
||||
fd6_ctx->last.lrz[binning_pass] = lrz;
|
||||
fd6_ctx->last.lrz = lrz;
|
||||
|
||||
struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
|
||||
ctx->batch->submit, 8 * 4, FD_RINGBUFFER_STREAMING);
|
||||
|
|
@ -840,16 +834,9 @@ fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit)
|
|||
fd_ringbuffer_ref(state);
|
||||
break;
|
||||
case FD6_GROUP_LRZ:
|
||||
state = build_lrz(emit, false);
|
||||
state = build_lrz(emit);
|
||||
if (!state)
|
||||
continue;
|
||||
enable_mask = ENABLE_DRAW;
|
||||
break;
|
||||
case FD6_GROUP_LRZ_BINNING:
|
||||
state = build_lrz(emit, true);
|
||||
if (!state)
|
||||
continue;
|
||||
enable_mask = CP_SET_DRAW_STATE__0_BINNING;
|
||||
break;
|
||||
case FD6_GROUP_SCISSOR:
|
||||
state = build_scissor(emit);
|
||||
|
|
|
|||
|
|
@ -49,7 +49,6 @@ enum fd6_state_id {
|
|||
FD6_GROUP_PROG_INTERP,
|
||||
FD6_GROUP_PROG_FB_RAST,
|
||||
FD6_GROUP_LRZ,
|
||||
FD6_GROUP_LRZ_BINNING,
|
||||
FD6_GROUP_VTXSTATE,
|
||||
FD6_GROUP_VBO,
|
||||
FD6_GROUP_CONST,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue