mono: generalize projection layers to support mono (single-view) rendering

Meng Jiao 2024-02-07 16:42:51 +08:00
parent 2d26946ffa
commit 95331593e2
41 changed files with 671 additions and 738 deletions

View file

@ -40,7 +40,7 @@ information.
is done with CPU work and ready to do GPU work.
* @ref xrt_comp_discard_frame - The frame is discarded.
* @ref xrt_comp_layer_begin - Called during transfers of layers.
* @ref xrt_comp_layer_stereo_projection - This and other layer functions are
* @ref xrt_comp_layer_projection - This and other layer functions are
called to list the layers the compositor should render.
* @ref xrt_comp_layer_commit - The compositor starts to render the frame,
trying to finish at the **present** time.
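For orientation before the per-file hunks: the stereo-specific entry point that took a left and a right swapchain is replaced by a single projection call that takes an array of per-view swapchains, with the live count carried in data->proj.view_count. A minimal sketch of the renamed interface, assuming the function-pointer member and inline helper follow the same pattern as the other layer calls shown in the hunks below:

// Sketch only; the real declarations live in the xrt_compositor interface header,
// which is not shown in this extract.
xrt_result_t (*layer_projection)(struct xrt_compositor *xc,
                                 struct xrt_device *xdev,
                                 struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
                                 const struct xrt_layer_data *data);

static inline xrt_result_t
xrt_comp_layer_projection(struct xrt_compositor *xc,
                          struct xrt_device *xdev,
                          struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
                          const struct xrt_layer_data *data)
{
	return xc->layer_projection(xc, xdev, xsc, data);
}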

View file

@ -270,6 +270,8 @@ u_device_allocate(enum u_device_alloc_flags flags, size_t size, size_t input_cou
if (alloc_hmd) {
xdev->hmd = (struct xrt_hmd_parts *)(ptr + offset_hmd);
// set default view count
xdev->hmd->view_count = 2;
}
if (alloc_tracking) {
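With the allocator defaulting hmd->view_count to 2, existing stereo drivers need no changes; a single-view device would override the field after allocation. A hedged sketch, where only u_device_allocate(), U_DEVICE_ALLOC_HMD and the view_count field come from the code above and the rest is illustrative:

// Illustrative mono-HMD setup, not taken from this commit.
enum u_device_alloc_flags flags = U_DEVICE_ALLOC_HMD;
struct xrt_device *xdev = u_device_allocate(flags, sizeof(struct xrt_device), 1, 0);

// u_device_allocate() defaulted this to 2 (stereo); a mono HMD renders one view.
xdev->hmd->view_count = 1;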

View file

@ -554,21 +554,22 @@ client_d3d11_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_
}
static xrt_result_t
client_d3d11_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
client_d3d11_compositor_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct client_d3d11_compositor *c = as_client_d3d11_compositor(xc);
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
assert(data->type == XRT_LAYER_PROJECTION);
struct xrt_swapchain *l_xscn = as_client_d3d11_swapchain(l_xsc)->xsc.get();
struct xrt_swapchain *r_xscn = as_client_d3d11_swapchain(r_xsc)->xsc.get();
struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
xscn[i] = as_client_d3d11_swapchain(xsc[i])->xsc.get();
}
// No flip required: D3D11 swapchain image convention matches Vulkan.
return xrt_comp_layer_stereo_projection(&c->xcn->base, xdev, l_xscn, r_xscn, data);
return xrt_comp_layer_projection(&c->xcn->base, xdev, xscn, data);
}
static xrt_result_t
@ -879,7 +880,7 @@ try {
c->base.base.begin_frame = client_d3d11_compositor_begin_frame;
c->base.base.discard_frame = client_d3d11_compositor_discard_frame;
c->base.base.layer_begin = client_d3d11_compositor_layer_begin;
c->base.base.layer_stereo_projection = client_d3d11_compositor_layer_stereo_projection;
c->base.base.layer_projection = client_d3d11_compositor_layer_projection;
c->base.base.layer_stereo_projection_depth = client_d3d11_compositor_layer_stereo_projection_depth;
c->base.base.layer_quad = client_d3d11_compositor_layer_quad;
c->base.base.layer_cube = client_d3d11_compositor_layer_cube;

View file

@ -782,25 +782,23 @@ client_d3d12_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_
}
static xrt_result_t
client_d3d12_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
client_d3d12_compositor_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct client_d3d12_compositor *c = as_client_d3d12_compositor(xc);
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
struct xrt_swapchain *l_xscn = as_client_d3d12_swapchain(l_xsc)->xsc.get();
struct xrt_swapchain *r_xscn = as_client_d3d12_swapchain(r_xsc)->xsc.get();
assert(data->type == XRT_LAYER_PROJECTION);
struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
xscn[i] = as_client_d3d12_swapchain(xsc[i])->xsc.get();
}
struct xrt_layer_data d = *data;
client_d3d12_swapchain_scale_rect(l_xsc, &d.stereo.l.sub.norm_rect);
client_d3d12_swapchain_scale_rect(r_xsc, &d.stereo.r.sub.norm_rect);
// No flip required: D3D12 swapchain image convention matches Vulkan.
return xrt_comp_layer_stereo_projection(&c->xcn->base, xdev, l_xscn, r_xscn, &d);
return xrt_comp_layer_projection(&c->xcn->base, xdev, xscn, &d);
}
static xrt_result_t
@ -1137,7 +1135,7 @@ try {
c->base.base.begin_frame = client_d3d12_compositor_begin_frame;
c->base.base.discard_frame = client_d3d12_compositor_discard_frame;
c->base.base.layer_begin = client_d3d12_compositor_layer_begin;
c->base.base.layer_stereo_projection = client_d3d12_compositor_layer_stereo_projection;
c->base.base.layer_projection = client_d3d12_compositor_layer_projection;
c->base.base.layer_stereo_projection_depth = client_d3d12_compositor_layer_stereo_projection_depth;
c->base.base.layer_quad = client_d3d12_compositor_layer_quad;
c->base.base.layer_cube = client_d3d12_compositor_layer_cube;

View file

@ -229,26 +229,23 @@ client_gl_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_lay
}
static xrt_result_t
client_gl_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
client_gl_compositor_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct xrt_compositor *xcn;
struct xrt_swapchain *l_xscn;
struct xrt_swapchain *r_xscn;
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
xcn = to_native_compositor(xc);
l_xscn = to_native_swapchain(l_xsc);
r_xscn = to_native_swapchain(r_xsc);
assert(data->type == XRT_LAYER_PROJECTION);
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
xscn[i] = &client_gl_swapchain(xsc[i])->xscn->base;
}
struct xrt_layer_data d = *data;
d.flip_y = !d.flip_y;
return xrt_comp_layer_stereo_projection(xcn, xdev, l_xscn, r_xscn, &d);
return xrt_comp_layer_projection(xcn, xdev, xscn, &d);
}
static xrt_result_t
@ -613,7 +610,7 @@ client_gl_compositor_init(struct client_gl_compositor *c,
c->base.base.begin_frame = client_gl_compositor_begin_frame;
c->base.base.discard_frame = client_gl_compositor_discard_frame;
c->base.base.layer_begin = client_gl_compositor_layer_begin;
c->base.base.layer_stereo_projection = client_gl_compositor_layer_stereo_projection;
c->base.base.layer_projection = client_gl_compositor_layer_projection;
c->base.base.layer_stereo_projection_depth = client_gl_compositor_layer_stereo_projection_depth;
c->base.base.layer_quad = client_gl_compositor_layer_quad;
c->base.base.layer_cube = client_gl_compositor_layer_cube;

View file

@ -474,23 +474,22 @@ client_vk_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_lay
}
static xrt_result_t
client_vk_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
client_vk_compositor_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct xrt_compositor *xcn;
struct xrt_swapchain *l_xscn;
struct xrt_swapchain *r_xscn;
struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
assert(data->type == XRT_LAYER_PROJECTION);
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
xscn[i] = &client_vk_swapchain(xsc[i])->xscn->base;
}
xcn = to_native_compositor(xc);
l_xscn = to_native_swapchain(l_xsc);
r_xscn = to_native_swapchain(r_xsc);
return xrt_comp_layer_stereo_projection(xcn, xdev, l_xscn, r_xscn, data);
return xrt_comp_layer_projection(xcn, xdev, xscn, data);
}
@ -842,7 +841,7 @@ client_vk_compositor_create(struct xrt_compositor_native *xcn,
c->base.base.begin_frame = client_vk_compositor_begin_frame;
c->base.base.discard_frame = client_vk_compositor_discard_frame;
c->base.base.layer_begin = client_vk_compositor_layer_begin;
c->base.base.layer_stereo_projection = client_vk_compositor_layer_stereo_projection;
c->base.base.layer_projection = client_vk_compositor_layer_projection;
c->base.base.layer_stereo_projection_depth = client_vk_compositor_layer_stereo_projection_depth;
c->base.base.layer_quad = client_vk_compositor_layer_quad;
c->base.base.layer_cube = client_vk_compositor_layer_cube;
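All four client compositors (D3D11, D3D12, GL and Vulkan) now follow the same shape: assert the new layer type, convert each per-view client swapchain to its native counterpart in a loop bounded by data->proj.view_count, then forward to the native compositor. A consolidated sketch of that shared pattern, with to_native_swapchain() standing in for the API-specific conversion used in each file:

// Shared shape of the wrappers above; to_native_swapchain() is a placeholder for
// as_client_d3d11_swapchain(...)->xsc.get(), &client_gl_swapchain(...)->xscn->base, etc.
static xrt_result_t
client_compositor_layer_projection(struct xrt_compositor *xc,
                                   struct xrt_device *xdev,
                                   struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
                                   const struct xrt_layer_data *data)
{
	assert(data->type == XRT_LAYER_PROJECTION);

	struct xrt_compositor *xcn = to_native_compositor(xc);

	struct xrt_swapchain *xscn[XRT_MAX_VIEWS];
	for (uint32_t i = 0; i < data->proj.view_count; ++i) {
		xscn[i] = to_native_swapchain(xsc[i]);
	}

	// The GL path additionally flips Y in a copy of the layer data before forwarding.
	return xrt_comp_layer_projection(xcn, xdev, xscn, data);
}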

View file

@ -272,7 +272,7 @@ can_do_one_projection_layer_fast_path(struct comp_compositor *c)
enum xrt_layer_type type = layer->data.type;
// Handled by the distortion shader.
if (type != XRT_LAYER_STEREO_PROJECTION && //
if (type != XRT_LAYER_PROJECTION && //
type != XRT_LAYER_STEREO_PROJECTION_DEPTH) {
return false;
}
@ -989,13 +989,6 @@ comp_main_create_system_compositor(struct xrt_device *xdev,
uint32_t w0 = (uint32_t)(xdev->hmd->views[0].display.w_pixels * scale);
uint32_t h0 = (uint32_t)(xdev->hmd->views[0].display.h_pixels * scale);
uint32_t w1 = (uint32_t)(xdev->hmd->views[1].display.w_pixels * scale);
uint32_t h1 = (uint32_t)(xdev->hmd->views[1].display.h_pixels * scale);
uint32_t w0_2 = xdev->hmd->views[0].display.w_pixels * 2;
uint32_t h0_2 = xdev->hmd->views[0].display.h_pixels * 2;
uint32_t w1_2 = xdev->hmd->views[1].display.w_pixels * 2;
uint32_t h1_2 = xdev->hmd->views[1].display.h_pixels * 2;
c->view_extents.width = w0;
c->view_extents.height = h0;
@ -1052,8 +1045,6 @@ comp_main_create_system_compositor(struct xrt_device *xdev,
/*
* Rest of info.
*/
// Hardcoded for now.
uint32_t view_count = 2;
struct xrt_system_compositor_info sys_info_storage = {0};
struct xrt_system_compositor_info *sys_info = &sys_info_storage;
@ -1066,19 +1057,20 @@ comp_main_create_system_compositor(struct xrt_device *xdev,
sys_info->client_d3d_deviceLUID_valid = c->settings.client_gpu_deviceLUID_valid;
// clang-format off
sys_info->views[0].recommended.width_pixels = w0;
sys_info->views[0].recommended.height_pixels = h0;
sys_info->views[0].recommended.sample_count = 1;
sys_info->views[0].max.width_pixels = w0_2;
sys_info->views[0].max.height_pixels = h0_2;
sys_info->views[0].max.sample_count = 1;
uint32_t view_count = xdev->hmd->view_count;
for (uint32_t i = 0; i < view_count; ++i) {
uint32_t w = (uint32_t)(xdev->hmd->views[i].display.w_pixels * scale);
uint32_t h = (uint32_t)(xdev->hmd->views[i].display.h_pixels * scale);
uint32_t w_2 = xdev->hmd->views[i].display.w_pixels * 2;
uint32_t h_2 = xdev->hmd->views[i].display.h_pixels * 2;
sys_info->views[1].recommended.width_pixels = w1;
sys_info->views[1].recommended.height_pixels = h1;
sys_info->views[1].recommended.sample_count = 1;
sys_info->views[1].max.width_pixels = w1_2;
sys_info->views[1].max.height_pixels = h1_2;
sys_info->views[1].max.sample_count = 1;
sys_info->views[i].recommended.width_pixels = w;
sys_info->views[i].recommended.height_pixels = h;
sys_info->views[i].recommended.sample_count = 1;
sys_info->views[i].max.width_pixels = w_2;
sys_info->views[i].max.height_pixels = h_2;
sys_info->views[i].max.sample_count = 1;
}
// clang-format on
// If we can add e.g. video pass-through capabilities, we may need to change (augment) this list.
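As a quick worked example of the per-view loop above, assuming a hypothetical panel of 1832x1920 pixels per view and scale = 1.0:

// Hypothetical numbers, per view:
//   recommended = w_pixels * scale x h_pixels * scale = 1832 x 1920
//   max         = w_pixels * 2     x h_pixels * 2     = 3664 x 3840
//   sample_count stays 1 in both cases.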

View file

@ -114,7 +114,7 @@ struct comp_renderer
{
//! Targets for rendering to the scratch buffer.
struct render_gfx_target_resources targets[COMP_SCRATCH_NUM_IMAGES];
} views[2];
} views[XRT_MAX_VIEWS];
} scratch;
//! @}
@ -217,8 +217,8 @@ renderer_wait_queue_idle(struct comp_renderer *r)
static void
calc_viewport_data(struct comp_renderer *r,
struct render_viewport_data *out_l_viewport_data,
struct render_viewport_data *out_r_viewport_data)
struct render_viewport_data out_viewport_data[XRT_MAX_VIEWS],
size_t view_count)
{
struct comp_compositor *c = r->c;
@ -235,46 +235,28 @@ calc_viewport_data(struct comp_renderer *r,
float scale_x = (float)r->c->target->width / (float)w_i32;
float scale_y = (float)r->c->target->height / (float)h_i32;
struct xrt_view *l_v = &r->c->xdev->hmd->views[0];
struct xrt_view *r_v = &r->c->xdev->hmd->views[1];
struct render_viewport_data l_viewport_data;
struct render_viewport_data r_viewport_data;
if (pre_rotate) {
l_viewport_data = (struct render_viewport_data){
.x = (uint32_t)(l_v->viewport.y_pixels * scale_x),
.y = (uint32_t)(l_v->viewport.x_pixels * scale_y),
.w = (uint32_t)(l_v->viewport.h_pixels * scale_x),
.h = (uint32_t)(l_v->viewport.w_pixels * scale_y),
};
r_viewport_data = (struct render_viewport_data){
.x = (uint32_t)(r_v->viewport.y_pixels * scale_x),
.y = (uint32_t)(r_v->viewport.x_pixels * scale_y),
.w = (uint32_t)(r_v->viewport.h_pixels * scale_x),
.h = (uint32_t)(r_v->viewport.w_pixels * scale_y),
};
} else {
l_viewport_data = (struct render_viewport_data){
.x = (uint32_t)(l_v->viewport.x_pixels * scale_x),
.y = (uint32_t)(l_v->viewport.y_pixels * scale_y),
.w = (uint32_t)(l_v->viewport.w_pixels * scale_x),
.h = (uint32_t)(l_v->viewport.h_pixels * scale_y),
};
r_viewport_data = (struct render_viewport_data){
.x = (uint32_t)(r_v->viewport.x_pixels * scale_x),
.y = (uint32_t)(r_v->viewport.y_pixels * scale_y),
.w = (uint32_t)(r_v->viewport.w_pixels * scale_x),
.h = (uint32_t)(r_v->viewport.h_pixels * scale_y),
};
for (uint32_t i = 0; i < view_count; ++i) {
struct xrt_view *v = &r->c->xdev->hmd->views[i];
if (pre_rotate) {
out_viewport_data[i] = (struct render_viewport_data){
.x = (uint32_t)(v->viewport.y_pixels * scale_x),
.y = (uint32_t)(v->viewport.x_pixels * scale_y),
.w = (uint32_t)(v->viewport.h_pixels * scale_x),
.h = (uint32_t)(v->viewport.w_pixels * scale_y),
};
} else {
out_viewport_data[i] = (struct render_viewport_data){
.x = (uint32_t)(v->viewport.x_pixels * scale_x),
.y = (uint32_t)(v->viewport.y_pixels * scale_y),
.w = (uint32_t)(v->viewport.w_pixels * scale_x),
.h = (uint32_t)(v->viewport.h_pixels * scale_y),
};
}
}
*out_l_viewport_data = l_viewport_data;
*out_r_viewport_data = r_viewport_data;
}
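A worked example of the pre-rotate branch above, using hypothetical numbers for the second view on a 90-degree rotated target:

// Per-view viewport in HMD orientation:  x=1832, y=0,    w=1832, h=1920
// Pre-rotated output (before scaling):   x=0,    y=1832, w=1920, h=1832
// i.e. x/y and w/h swap roles, and each axis is then scaled by scale_x / scale_y.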
static void
calc_vertex_rot_data(struct comp_renderer *r, struct xrt_matrix_2x2 out_vertex_rots[2])
calc_vertex_rot_data(struct comp_renderer *r, struct xrt_matrix_2x2 out_vertex_rots[XRT_MAX_VIEWS], size_t view_count)
{
bool pre_rotate = false;
if (r->c->target->surface_transform & VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR ||
@ -291,7 +273,7 @@ calc_vertex_rot_data(struct comp_renderer *r, struct xrt_matrix_2x2 out_vertex_r
},
}};
for (uint32_t i = 0; i < 2; i++) {
for (uint32_t i = 0; i < view_count; i++) {
// Get the view.
struct xrt_view *v = &r->c->xdev->hmd->views[i];
@ -310,9 +292,10 @@ calc_vertex_rot_data(struct comp_renderer *r, struct xrt_matrix_2x2 out_vertex_r
static void
calc_pose_data(struct comp_renderer *r,
enum comp_target_fov_source fov_source,
struct xrt_fov out_fovs[2],
struct xrt_pose out_world[2],
struct xrt_pose out_eye[2])
struct xrt_fov out_fovs[XRT_MAX_VIEWS],
struct xrt_pose out_world[XRT_MAX_VIEWS],
struct xrt_pose out_eye[XRT_MAX_VIEWS],
uint32_t view_count)
{
COMP_TRACE_MARKER();
@ -323,20 +306,20 @@ calc_pose_data(struct comp_renderer *r,
};
struct xrt_space_relation head_relation = XRT_SPACE_RELATION_ZERO;
struct xrt_fov xdev_fovs[2] = XRT_STRUCT_INIT;
struct xrt_pose xdev_poses[2] = XRT_STRUCT_INIT;
struct xrt_fov xdev_fovs[XRT_MAX_VIEWS] = XRT_STRUCT_INIT;
struct xrt_pose xdev_poses[XRT_MAX_VIEWS] = XRT_STRUCT_INIT;
xrt_device_get_view_poses( //
r->c->xdev, // xdev
&default_eye_relation, // default_eye_relation
r->c->frame.rendering.predicted_display_time_ns, // at_timestamp_ns
2, // view_count
view_count, // view_count
&head_relation, // out_head_relation
xdev_fovs, // out_fovs
xdev_poses); // out_poses
struct xrt_fov dist_fov[2] = XRT_STRUCT_INIT;
for (uint32_t i = 0; i < 2; i++) {
struct xrt_fov dist_fov[XRT_MAX_VIEWS] = XRT_STRUCT_INIT;
for (uint32_t i = 0; i < view_count; i++) {
dist_fov[i] = r->c->xdev->hmd->distortion.fov[i];
}
@ -347,7 +330,7 @@ calc_pose_data(struct comp_renderer *r,
case COMP_TARGET_FOV_SOURCE_DEVICE_VIEWS: use_xdev = true; break;
}
for (uint32_t i = 0; i < 2; i++) {
for (uint32_t i = 0; i < view_count; i++) {
const struct xrt_fov fov = use_xdev ? xdev_fovs[i] : dist_fov[i];
const struct xrt_pose eye_pose = xdev_poses[i];
@ -588,7 +571,7 @@ renderer_init(struct comp_renderer *r, struct comp_compositor *c, VkExtent2D scr
VK_ATTACHMENT_LOAD_OP_CLEAR, // load_op
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); // final_layout
for (uint32_t i = 0; i < ARRAY_SIZE(r->scratch.views); i++) {
for (uint32_t i = 0; i < c->nr.view_count; i++) {
bret = comp_scratch_single_images_ensure(&r->c->scratch.views[i], &r->c->base.vk, scratch_extent);
if (!bret) {
COMP_ERROR(c, "comp_scratch_single_images_ensure: false");
@ -838,7 +821,7 @@ renderer_fini(struct comp_renderer *r)
comp_mirror_fini(&r->mirror_to_debug_gui, vk);
// Do this after the layer renderer.
for (uint32_t i = 0; i < ARRAY_SIZE(r->scratch.views); i++) {
for (uint32_t i = 0; i < r->c->nr.view_count; i++) {
for (uint32_t k = 0; k < COMP_SCRATCH_NUM_IMAGES; k++) {
render_gfx_target_resources_close(&r->scratch.views[i].targets[k]);
}
@ -883,23 +866,24 @@ dispatch_graphics(struct comp_renderer *r,
assert(!fast_path || c->base.slot.layer_count >= 1);
// Viewport information.
struct render_viewport_data viewport_datas[2];
calc_viewport_data(r, &viewport_datas[0], &viewport_datas[1]);
struct render_viewport_data viewport_datas[XRT_MAX_VIEWS];
calc_viewport_data(r, viewport_datas, rr->r->view_count);
// Vertex rotation information.
struct xrt_matrix_2x2 vertex_rots[2];
calc_vertex_rot_data(r, vertex_rots);
struct xrt_matrix_2x2 vertex_rots[XRT_MAX_VIEWS];
calc_vertex_rot_data(r, vertex_rots, rr->r->view_count);
// Device view information.
struct xrt_fov fovs[2];
struct xrt_pose world_poses[2];
struct xrt_pose eye_poses[2];
calc_pose_data( //
r, // r
fov_source, // fov_source
fovs, // fovs[2]
world_poses, // world_poses[2]
eye_poses); // eye_poses[2]
struct xrt_fov fovs[XRT_MAX_VIEWS];
struct xrt_pose world_poses[XRT_MAX_VIEWS];
struct xrt_pose eye_poses[XRT_MAX_VIEWS];
calc_pose_data( //
r, // r
fov_source, // fov_source
fovs, // fovs
world_poses, // world_poses
eye_poses, // eye_poses
rr->r->view_count); // view_count
// The arguments for the dispatch function.
@ -909,8 +893,7 @@ dispatch_graphics(struct comp_renderer *r,
rtr, // rtr
fast_path, // fast_path
do_timewarp); // do_timewarp
for (uint32_t i = 0; i < 2; i++) {
for (uint32_t i = 0; i < rr->r->view_count; i++) {
// Which image of the scratch images for this view are we using.
uint32_t scratch_index = crss->views[i].index;
@ -1003,23 +986,24 @@ dispatch_compute(struct comp_renderer *r,
bool do_timewarp = !c->debug.atw_off;
// Device view information.
struct xrt_fov fovs[2];
struct xrt_pose world_poses[2];
struct xrt_pose eye_poses[2];
calc_pose_data( //
r, // r
fov_source, // fov_source
fovs, // fovs[2]
world_poses, // world_poses[2]
eye_poses); // eye_poses[2]
struct xrt_fov fovs[XRT_MAX_VIEWS];
struct xrt_pose world_poses[XRT_MAX_VIEWS];
struct xrt_pose eye_poses[XRT_MAX_VIEWS];
calc_pose_data( //
r, // r
fov_source, // fov_source
fovs, // fovs
world_poses, // world_poses
eye_poses, // eye_poses
crc->r->view_count); // view_count
// Target Vulkan resources..
VkImage target_image = r->c->target->images[r->acquired_buffer].handle;
VkImageView target_image_view = r->c->target->images[r->acquired_buffer].view;
// Target view information.
struct render_viewport_data views[2];
calc_viewport_data(r, &views[0], &views[1]);
struct render_viewport_data views[XRT_MAX_VIEWS];
calc_viewport_data(r, views, crc->r->view_count);
// The arguments for the dispatch function.
struct comp_render_dispatch_data data;
@ -1030,7 +1014,7 @@ dispatch_compute(struct comp_renderer *r,
fast_path, // fast_path
do_timewarp); // do_timewarp
for (uint32_t i = 0; i < 2; i++) {
for (uint32_t i = 0; i < crc->r->view_count; i++) {
// Which image of the scratch images for this view are we using.
uint32_t scratch_index = crss->views[i].index;
@ -1139,7 +1123,7 @@ comp_renderer_draw(struct comp_renderer *r)
comp_target_update_timings(ct);
// Hardcoded for now.
uint32_t view_count = 2;
const uint32_t view_count = c->nr.view_count;
enum comp_target_fov_source fov_source = COMP_TARGET_FOV_SOURCE_DISTORTION;
// For scratch image debugging.

View file

@ -188,7 +188,7 @@ mock_create_native_compositor()
// mc->base.base.begin_frame = mock_compositor_begin_frame;
// mc->base.base.discard_frame = mock_compositor_discard_frame;
// mc->base.base.layer_begin = mock_compositor_layer_begin;
// mc->base.base.layer_stereo_projection = mock_compositor_layer_stereo_projection;
// mc->base.base.layer_projection = mock_compositor_layer_projection;
// mc->base.base.layer_stereo_projection_depth = mock_compositor_layer_stereo_projection_depth;
// mc->base.base.layer_quad = mock_compositor_layer_quad;
// mc->base.base.layer_cube = mock_compositor_layer_cube;

View file

@ -154,13 +154,13 @@ struct mock_compositor
enum xrt_blend_mode env_blend_mode);
/*!
* Optional function pointer for mock compositor, called during @ref xrt_comp_layer_stereo_projection
* Optional function pointer for mock compositor, called during @ref xrt_comp_layer_projection
*/
xrt_result_t (*layer_stereo_projection)(struct mock_compositor *mc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data);
xrt_result_t (*layer_projection)(struct mock_compositor *mc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data);
/*!
* Optional function pointer for mock compositor, called during @ref

View file

@ -645,19 +645,19 @@ multi_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_f
}
static xrt_result_t
multi_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
multi_compositor_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct multi_compositor *mc = multi_compositor(xc);
(void)mc;
size_t index = mc->progress.layer_count++;
mc->progress.layers[index].xdev = xdev;
xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], l_xsc);
xrt_swapchain_reference(&mc->progress.layers[index].xscs[1], r_xsc);
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
xrt_swapchain_reference(&mc->progress.layers[index].xscs[i], xsc[i]);
}
mc->progress.layers[index].data = *data;
return XRT_SUCCESS;
@ -969,7 +969,7 @@ multi_compositor_create(struct multi_system_compositor *msc,
mc->base.base.begin_frame = multi_compositor_begin_frame;
mc->base.base.discard_frame = multi_compositor_discard_frame;
mc->base.base.layer_begin = multi_compositor_layer_begin;
mc->base.base.layer_stereo_projection = multi_compositor_layer_stereo_projection;
mc->base.base.layer_projection = multi_compositor_layer_projection;
mc->base.base.layer_stereo_projection_depth = multi_compositor_layer_stereo_projection_depth;
mc->base.base.layer_quad = multi_compositor_layer_quad;
mc->base.base.layer_cube = multi_compositor_layer_cube;

View file

@ -65,7 +65,7 @@ struct multi_layer_entry
*
* How many are actually used depends on the value of @p data.type
*/
struct xrt_swapchain *xscs[4];
struct xrt_swapchain *xscs[2 * XRT_MAX_VIEWS];
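The array grows from a hard-coded 4 to 2 * XRT_MAX_VIEWS. The reading of that sizing (not spelled out in this hunk) is that a plain projection layer needs one color swapchain per view, while a projection-plus-depth layer needs a color and a depth swapchain per view, so the worst case is two swapchains per possible view:

// Plain projection layer:      view_count slots used (one color swapchain per view).
// Projection layer with depth: 2 * view_count slots used (color + depth per view).
// The old size of 4 was the stereo special case: 2 views x 2 swapchains.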
/*!
* All basic (trivially-serializable) data associated with a layer,

View file

@ -52,12 +52,16 @@ static void
do_projection_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
{
struct xrt_device *xdev = layer->xdev;
struct xrt_swapchain *l_xcs = layer->xscs[0];
struct xrt_swapchain *r_xcs = layer->xscs[1];
if (l_xcs == NULL || r_xcs == NULL) {
U_LOG_E("Invalid swap chain for projection layer #%u!", i);
return;
// Cast away
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
// No need to take a reference here, but verify the pointers for consistency.
for (uint32_t j = 0; j < data->proj.view_count; j++) {
if (layer->xscs[j] == NULL) {
U_LOG_E("Invalid swap chain for projection layer #%u!", i);
return;
}
}
if (xdev == NULL) {
@ -65,10 +69,7 @@ do_projection_layer(struct xrt_compositor *xc, struct multi_compositor *mc, stru
return;
}
// Cast away
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
xrt_comp_layer_stereo_projection(xc, xdev, l_xcs, r_xcs, data);
xrt_comp_layer_projection(xc, xdev, layer->xscs, data);
}
static void
@ -282,7 +283,7 @@ transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_tim
struct multi_layer_entry *layer = &mc->delivered.layers[i];
switch (layer->data.type) {
case XRT_LAYER_STEREO_PROJECTION: do_projection_layer(xc, mc, layer, i); break;
case XRT_LAYER_PROJECTION: do_projection_layer(xc, mc, layer, i); break;
case XRT_LAYER_STEREO_PROJECTION_DEPTH: do_projection_layer_depth(xc, mc, layer, i); break;
case XRT_LAYER_QUAD: do_quad_layer(xc, mc, layer, i); break;
case XRT_LAYER_CUBE: do_cube_layer(xc, mc, layer, i); break;

View file

@ -270,19 +270,18 @@ compositor_init_sys_info(struct null_compositor *c, struct xrt_device *xdev)
(void)sys_info->client_vk_deviceUUID;
(void)sys_info->client_d3d_deviceLUID;
(void)sys_info->client_d3d_deviceLUID_valid;
uint32_t view_count = xdev->hmd->view_count;
// clang-format off
sys_info->views[0].recommended.width_pixels = RECOMMENDED_VIEW_WIDTH;
sys_info->views[0].recommended.height_pixels = RECOMMENDED_VIEW_HEIGHT;
sys_info->views[0].recommended.sample_count = 1;
sys_info->views[0].max.width_pixels = MAX_VIEW_WIDTH;
sys_info->views[0].max.height_pixels = MAX_VIEW_HEIGHT;
sys_info->views[0].max.sample_count = 1;
for (uint32_t i = 0; i < view_count; ++i) {
sys_info->views[i].recommended.width_pixels = RECOMMENDED_VIEW_WIDTH;
sys_info->views[i].recommended.height_pixels = RECOMMENDED_VIEW_HEIGHT;
sys_info->views[i].recommended.sample_count = 1;
sys_info->views[i].max.width_pixels = MAX_VIEW_WIDTH;
sys_info->views[i].max.height_pixels = MAX_VIEW_HEIGHT;
sys_info->views[i].max.sample_count = 1;
}
// clang-format on
// Assumes the two views (eyes) are similarly configured
sys_info->views[1] = sys_info->views[0];
// Copy the list directly.
assert(xdev->hmd->blend_mode_count <= XRT_MAX_DEVICE_BLEND_MODES);
assert(xdev->hmd->blend_mode_count != 0);

View file

@ -51,11 +51,18 @@ calc_dispatch_dims_1_view(const struct render_viewport_data views, uint32_t *out
* For dispatching compute to the view, calculate the number of groups.
*/
static void
calc_dispatch_dims_2_views(const struct render_viewport_data views[2], uint32_t *out_w, uint32_t *out_h)
calc_dispatch_dims_views(const struct render_viewport_data views[XRT_MAX_VIEWS],
uint32_t view_count,
uint32_t *out_w,
uint32_t *out_h)
{
#define IMAX(a, b) ((a) > (b) ? (a) : (b))
uint32_t w = IMAX(views[0].w, views[1].w);
uint32_t h = IMAX(views[0].h, views[1].h);
uint32_t w = 0;
uint32_t h = 0;
for (uint32_t i = 0; i < view_count; ++i) {
w = IMAX(w, views[i].w);
h = IMAX(h, views[i].h);
}
#undef IMAX
// Power of two divide and round up.
@ -76,8 +83,8 @@ calc_dispatch_dims_2_views(const struct render_viewport_data views[2], uint32_t
XRT_MAYBE_UNUSED static void
update_compute_layer_descriptor_set(struct vk_bundle *vk,
uint32_t src_binding,
VkSampler src_samplers[RENDER_MAX_IMAGES],
VkImageView src_image_views[RENDER_MAX_IMAGES],
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE],
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE],
uint32_t image_count,
uint32_t target_binding,
VkImageView target_image_view,
@ -86,9 +93,7 @@ update_compute_layer_descriptor_set(struct vk_bundle *vk,
VkDeviceSize ubo_size,
VkDescriptorSet descriptor_set)
{
assert(image_count <= RENDER_MAX_IMAGES);
VkDescriptorImageInfo src_image_info[RENDER_MAX_IMAGES];
VkDescriptorImageInfo src_image_info[RENDER_MAX_IMAGES_SIZE];
for (uint32_t i = 0; i < image_count; i++) {
src_image_info[i].sampler = src_samplers[i];
src_image_info[i].imageView = src_image_views[i];
@ -144,63 +149,32 @@ update_compute_layer_descriptor_set(struct vk_bundle *vk,
XRT_MAYBE_UNUSED static void
update_compute_shared_descriptor_set(struct vk_bundle *vk,
uint32_t src_binding,
VkSampler src_samplers[2],
VkImageView src_image_views[2],
VkSampler src_samplers[XRT_MAX_VIEWS],
VkImageView src_image_views[XRT_MAX_VIEWS],
uint32_t distortion_binding,
VkSampler distortion_samplers[6],
VkImageView distortion_image_views[6],
VkSampler distortion_samplers[3 * XRT_MAX_VIEWS],
VkImageView distortion_image_views[3 * XRT_MAX_VIEWS],
uint32_t target_binding,
VkImageView target_image_view,
uint32_t ubo_binding,
VkBuffer ubo_buffer,
VkDeviceSize ubo_size,
VkDescriptorSet descriptor_set)
VkDescriptorSet descriptor_set,
uint32_t view_count)
{
VkDescriptorImageInfo src_image_info[2] = {
{
.sampler = src_samplers[0],
.imageView = src_image_views[0],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
{
.sampler = src_samplers[1],
.imageView = src_image_views[1],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
};
VkDescriptorImageInfo src_image_info[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < view_count; ++i) {
src_image_info[i].sampler = src_samplers[i];
src_image_info[i].imageView = src_image_views[i];
src_image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
VkDescriptorImageInfo distortion_image_info[6] = {
{
.sampler = distortion_samplers[0],
.imageView = distortion_image_views[0],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
{
.sampler = distortion_samplers[1],
.imageView = distortion_image_views[1],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
{
.sampler = distortion_samplers[2],
.imageView = distortion_image_views[2],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
{
.sampler = distortion_samplers[3],
.imageView = distortion_image_views[3],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
{
.sampler = distortion_samplers[4],
.imageView = distortion_image_views[4],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
{
.sampler = distortion_samplers[5],
.imageView = distortion_image_views[5],
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
};
VkDescriptorImageInfo distortion_image_info[3 * XRT_MAX_VIEWS];
for (uint32_t i = 0; i < 3 * view_count; ++i) {
distortion_image_info[i].sampler = distortion_samplers[i];
distortion_image_info[i].imageView = distortion_image_views[i];
distortion_image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
VkDescriptorImageInfo target_image_info = {
.imageView = target_image_view,
@ -218,7 +192,7 @@ update_compute_shared_descriptor_set(struct vk_bundle *vk,
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = descriptor_set,
.dstBinding = src_binding,
.descriptorCount = ARRAY_SIZE(src_image_info),
.descriptorCount = view_count,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = src_image_info,
},
@ -226,7 +200,7 @@ update_compute_shared_descriptor_set(struct vk_bundle *vk,
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = descriptor_set,
.dstBinding = distortion_binding,
.descriptorCount = ARRAY_SIZE(distortion_image_info),
.descriptorCount = 3 * view_count,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = distortion_image_info,
},
@ -263,7 +237,8 @@ update_compute_descriptor_set_target(struct vk_bundle *vk,
uint32_t ubo_binding,
VkBuffer ubo_buffer,
VkDeviceSize ubo_size,
VkDescriptorSet descriptor_set)
VkDescriptorSet descriptor_set,
uint32_t view_count)
{
VkDescriptorImageInfo target_image_info = {
.imageView = target_image_view,
@ -320,7 +295,7 @@ render_compute_init(struct render_compute *crc, struct render_resources *r)
struct vk_bundle *vk = r->vk;
crc->r = r;
for (uint32_t i = 0; i < ARRAY_SIZE(crc->layer_descriptor_sets); i++) {
for (uint32_t i = 0; i < RENDER_MAX_LAYER_RUNS_COUNT; i++) {
ret = vk_create_descriptor_set( //
vk, // vk_bundle
r->compute.descriptor_pool, // descriptor_pool
@ -417,8 +392,8 @@ void
render_compute_layers(struct render_compute *crc,
VkDescriptorSet descriptor_set,
VkBuffer ubo,
VkSampler src_samplers[RENDER_MAX_IMAGES],
VkImageView src_image_views[RENDER_MAX_IMAGES],
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE],
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE],
uint32_t num_srcs,
VkImageView target_image_view,
const struct render_viewport_data *view,
@ -477,15 +452,15 @@ render_compute_layers(struct render_compute *crc,
void
render_compute_projection_timewarp(struct render_compute *crc,
VkSampler src_samplers[2],
VkImageView src_image_views[2],
const struct xrt_normalized_rect src_norm_rects[2],
const struct xrt_pose src_poses[2],
const struct xrt_fov src_fovs[2],
const struct xrt_pose new_poses[2],
VkSampler src_samplers[XRT_MAX_VIEWS],
VkImageView src_image_views[XRT_MAX_VIEWS],
const struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS],
const struct xrt_pose src_poses[XRT_MAX_VIEWS],
const struct xrt_fov src_fovs[XRT_MAX_VIEWS],
const struct xrt_pose new_poses[XRT_MAX_VIEWS],
VkImage target_image,
VkImageView target_image_view,
const struct render_viewport_data views[2])
const struct render_viewport_data views[XRT_MAX_VIEWS])
{
assert(crc->r != NULL);
@ -497,29 +472,23 @@ render_compute_projection_timewarp(struct render_compute *crc,
* UBO
*/
struct xrt_matrix_4x4 time_warp_matrix[2];
render_calc_time_warp_matrix( //
&src_poses[0], //
&src_fovs[0], //
&new_poses[0], //
&time_warp_matrix[0]); //
render_calc_time_warp_matrix( //
&src_poses[1], //
&src_fovs[1], //
&new_poses[1], //
&time_warp_matrix[1]); //
struct xrt_matrix_4x4 time_warp_matrix[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
render_calc_time_warp_matrix( //
&src_poses[i], //
&src_fovs[i], //
&new_poses[i], //
&time_warp_matrix[i]); //
}
struct render_compute_distortion_ubo_data *data =
(struct render_compute_distortion_ubo_data *)r->compute.distortion.ubo.mapped;
data->views[0] = views[0];
data->views[1] = views[1];
data->pre_transforms[0] = r->distortion.uv_to_tanangle[0];
data->pre_transforms[1] = r->distortion.uv_to_tanangle[1];
data->transforms[0] = time_warp_matrix[0];
data->transforms[1] = time_warp_matrix[1];
data->post_transforms[0] = src_norm_rects[0];
data->post_transforms[1] = src_norm_rects[1];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
data->views[i] = views[i];
data->pre_transforms[i] = r->distortion.uv_to_tanangle[i];
data->transforms[i] = time_warp_matrix[i];
data->post_transforms[i] = src_norm_rects[i];
}
/*
* Source, target and distortion images.
@ -544,9 +513,12 @@ render_compute_projection_timewarp(struct render_compute *crc,
subresource_range); //
VkSampler sampler = r->samplers.clamp_to_edge;
VkSampler distortion_samplers[6] = {
sampler, sampler, sampler, sampler, sampler, sampler,
};
VkSampler distortion_samplers[3 * XRT_MAX_VIEWS];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
distortion_samplers[3 * i + 0] = sampler;
distortion_samplers[3 * i + 1] = sampler;
distortion_samplers[3 * i + 2] = sampler;
}
update_compute_shared_descriptor_set( //
vk, //
@ -561,7 +533,8 @@ render_compute_projection_timewarp(struct render_compute *crc,
r->compute.ubo_binding, //
r->compute.distortion.ubo.buffer, //
VK_WHOLE_SIZE, //
crc->shared_descriptor_set); //
crc->shared_descriptor_set, //
crc->r->view_count); //
vk->vkCmdBindPipeline( //
r->cmd, // commandBuffer
@ -580,7 +553,7 @@ render_compute_projection_timewarp(struct render_compute *crc,
uint32_t w = 0, h = 0;
calc_dispatch_dims_2_views(views, &w, &h);
calc_dispatch_dims_views(views, crc->r->view_count, &w, &h);
assert(w != 0 && h != 0);
vk->vkCmdDispatch( //
@ -616,12 +589,12 @@ render_compute_projection_timewarp(struct render_compute *crc,
void
render_compute_projection(struct render_compute *crc,
VkSampler src_samplers[2],
VkImageView src_image_views[2],
const struct xrt_normalized_rect src_norm_rects[2],
VkSampler src_samplers[XRT_MAX_VIEWS],
VkImageView src_image_views[XRT_MAX_VIEWS],
const struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS],
VkImage target_image,
VkImageView target_image_view,
const struct render_viewport_data views[2])
const struct render_viewport_data views[XRT_MAX_VIEWS])
{
assert(crc->r != NULL);
@ -635,10 +608,10 @@ render_compute_projection(struct render_compute *crc,
struct render_compute_distortion_ubo_data *data =
(struct render_compute_distortion_ubo_data *)r->compute.distortion.ubo.mapped;
data->views[0] = views[0];
data->views[1] = views[1];
data->post_transforms[0] = src_norm_rects[0];
data->post_transforms[1] = src_norm_rects[1];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
data->views[i] = views[i];
data->post_transforms[i] = src_norm_rects[i];
}
/*
@ -664,9 +637,12 @@ render_compute_projection(struct render_compute *crc,
subresource_range); //
VkSampler sampler = r->samplers.clamp_to_edge;
VkSampler distortion_samplers[6] = {
sampler, sampler, sampler, sampler, sampler, sampler,
};
VkSampler distortion_samplers[3 * XRT_MAX_VIEWS];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
distortion_samplers[3 * i + 0] = sampler;
distortion_samplers[3 * i + 1] = sampler;
distortion_samplers[3 * i + 2] = sampler;
}
update_compute_shared_descriptor_set( //
vk, //
@ -681,7 +657,8 @@ render_compute_projection(struct render_compute *crc,
r->compute.ubo_binding, //
r->compute.distortion.ubo.buffer, //
VK_WHOLE_SIZE, //
crc->shared_descriptor_set); //
crc->shared_descriptor_set, //
crc->r->view_count); //
vk->vkCmdBindPipeline( //
r->cmd, // commandBuffer
@ -700,7 +677,7 @@ render_compute_projection(struct render_compute *crc,
uint32_t w = 0, h = 0;
calc_dispatch_dims_2_views(views, &w, &h);
calc_dispatch_dims_views(views, crc->r->view_count, &w, &h);
assert(w != 0 && h != 0);
vk->vkCmdDispatch( //
@ -735,10 +712,10 @@ render_compute_projection(struct render_compute *crc,
}
void
render_compute_clear(struct render_compute *crc, //
VkImage target_image, //
VkImageView target_image_view, //
const struct render_viewport_data views[2]) //
render_compute_clear(struct render_compute *crc, //
VkImage target_image, //
VkImageView target_image_view, //
const struct render_viewport_data views[XRT_MAX_VIEWS]) //
{
assert(crc->r != NULL);
@ -751,16 +728,16 @@ render_compute_clear(struct render_compute *crc, //
*/
// Calculate transforms.
struct xrt_matrix_4x4 transforms[2];
for (uint32_t i = 0; i < 2; i++) {
struct xrt_matrix_4x4 transforms[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < crc->r->view_count; i++) {
math_matrix_4x4_identity(&transforms[i]);
}
struct render_compute_distortion_ubo_data *data =
(struct render_compute_distortion_ubo_data *)r->compute.clear.ubo.mapped;
data->views[0] = views[0];
data->views[1] = views[1];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
data->views[i] = views[i];
}
/*
* Source, target and distortion images.
@ -785,9 +762,16 @@ render_compute_clear(struct render_compute *crc, //
subresource_range); //
VkSampler sampler = r->samplers.mock;
VkSampler src_samplers[2] = {sampler, sampler};
VkImageView src_image_views[2] = {r->mock.color.image_view, r->mock.color.image_view};
VkSampler distortion_samplers[6] = {sampler, sampler, sampler, sampler, sampler, sampler};
VkSampler src_samplers[XRT_MAX_VIEWS];
VkImageView src_image_views[XRT_MAX_VIEWS];
VkSampler distortion_samplers[3 * XRT_MAX_VIEWS];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
src_samplers[i] = sampler;
src_image_views[i] = r->mock.color.image_view;
distortion_samplers[3 * i + 0] = sampler;
distortion_samplers[3 * i + 1] = sampler;
distortion_samplers[3 * i + 2] = sampler;
}
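The same three-samplers-per-view fill now appears in the timewarp, projection and clear paths. A hypothetical helper that would factor it out (not part of this commit), shown here mainly to make the indexing explicit:

static void
fill_distortion_samplers(VkSampler sampler, uint32_t view_count, VkSampler out_samplers[3 * XRT_MAX_VIEWS])
{
	// Three distortion images (one per color channel) per view, all sharing one sampler.
	for (uint32_t i = 0; i < 3 * view_count; ++i) {
		out_samplers[i] = sampler;
	}
}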
update_compute_shared_descriptor_set( //
vk, // vk_bundle
@ -802,7 +786,8 @@ render_compute_clear(struct render_compute *crc, //
r->compute.ubo_binding, // ubo_binding
r->compute.clear.ubo.buffer, // ubo_buffer
VK_WHOLE_SIZE, // ubo_size
crc->shared_descriptor_set); // descriptor_set
crc->shared_descriptor_set, // descriptor_set
crc->r->view_count); // view_count
vk->vkCmdBindPipeline( //
r->cmd, // commandBuffer
@ -821,7 +806,7 @@ render_compute_clear(struct render_compute *crc, //
uint32_t w = 0, h = 0;
calc_dispatch_dims_2_views(views, &w, &h);
calc_dispatch_dims_views(views, crc->r->view_count, &w, &h);
assert(w != 0 && h != 0);
vk->vkCmdDispatch( //

View file

@ -285,10 +285,10 @@ render_distortion_buffer_init(struct render_resources *r,
struct xrt_device *xdev,
bool pre_rotate)
{
struct render_buffer bufs[RENDER_DISTORTION_NUM_IMAGES];
VkDeviceMemory device_memories[RENDER_DISTORTION_NUM_IMAGES];
VkImage images[RENDER_DISTORTION_NUM_IMAGES];
VkImageView image_views[RENDER_DISTORTION_NUM_IMAGES];
struct render_buffer bufs[RENDER_DISTORTION_IMAGES_SIZE];
VkDeviceMemory device_memories[RENDER_DISTORTION_IMAGES_SIZE];
VkImage images[RENDER_DISTORTION_IMAGES_SIZE];
VkImageView image_views[RENDER_DISTORTION_IMAGES_SIZE];
VkCommandBuffer upload_buffer = VK_NULL_HANDLE;
VkResult ret;
@ -297,22 +297,20 @@ render_distortion_buffer_init(struct render_resources *r,
* Basics
*/
static_assert(RENDER_DISTORTION_NUM_IMAGES == 6, "Wrong number of distortion images!");
render_calc_uv_to_tangent_lengths_rect(&xdev->hmd->distortion.fov[0], &r->distortion.uv_to_tanangle[0]);
render_calc_uv_to_tangent_lengths_rect(&xdev->hmd->distortion.fov[1], &r->distortion.uv_to_tanangle[1]);
for (uint32_t i = 0; i < r->view_count; ++i) {
render_calc_uv_to_tangent_lengths_rect(&xdev->hmd->distortion.fov[i], &r->distortion.uv_to_tanangle[i]);
}
/*
* Buffers with data to upload.
* Grouped by channel: all red planes, then green, then blue, one buffer per view
* within each group (view_count=2 -> RRGGBB, view_count=3 -> RRRGGGBBB).
*/
ret = create_and_fill_in_distortion_buffer_for_view(vk, xdev, &bufs[0], &bufs[2], &bufs[4], 0, pre_rotate);
VK_CHK_WITH_GOTO(ret, "create_and_fill_in_distortion_buffer_for_view", err_resources);
ret = create_and_fill_in_distortion_buffer_for_view(vk, xdev, &bufs[1], &bufs[3], &bufs[5], 1, pre_rotate);
VK_CHK_WITH_GOTO(ret, "create_and_fill_in_distortion_buffer_for_view", err_resources);
for (uint32_t i = 0; i < r->view_count; ++i) {
ret = create_and_fill_in_distortion_buffer_for_view(vk, xdev, &bufs[i], &bufs[r->view_count + i],
&bufs[2 * r->view_count + i], i, pre_rotate);
VK_CHK_WITH_GOTO(ret, "create_and_fill_in_distortion_buffer_for_view", err_resources);
}
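To make the index math in the loop above concrete, for view_count = 2 (the "RRGGBB" case):

// bufs[0], bufs[1] -> channel 0 (R) planes for views 0 and 1
// bufs[2], bufs[3] -> channel 1 (G) planes for views 0 and 1
// bufs[4], bufs[5] -> channel 2 (B) planes for views 0 and 1
// In general, channel c (0..2) of view v lands at bufs[c * view_count + v], which is
// exactly the triple handed to create_and_fill_in_distortion_buffer_for_view().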
/*
* Command submission.
@ -326,7 +324,7 @@ render_distortion_buffer_init(struct render_resources *r,
VK_CHK_WITH_GOTO(ret, "vk_cmd_pool_create_and_begin_cmd_buffer_locked", err_unlock);
VK_NAME_COMMAND_BUFFER(vk, upload_buffer, "render_resources distortion command buffer");
for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
ret = create_and_queue_upload_locked( //
vk, // vk_bundle
pool, // pool
@ -349,7 +347,7 @@ render_distortion_buffer_init(struct render_resources *r,
r->distortion.pre_rotated = pre_rotate;
for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
r->distortion.device_memories[i] = device_memories[i];
r->distortion.images[i] = images[i];
r->distortion.image_views[i] = image_views[i];
@ -360,7 +358,7 @@ render_distortion_buffer_init(struct render_resources *r,
* Tidy
*/
for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
render_buffer_close(vk, &bufs[i]);
}
@ -374,7 +372,7 @@ err_unlock:
vk_cmd_pool_unlock(pool);
err_resources:
for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
D(ImageView, image_views[i]);
D(Image, images[i]);
DF(Memory, device_memories[i]);
@ -396,12 +394,7 @@ render_distortion_images_close(struct render_resources *r)
{
struct vk_bundle *vk = r->vk;
static_assert(RENDER_DISTORTION_NUM_IMAGES == ARRAY_SIZE(r->distortion.image_views), "Array size is wrong!");
static_assert(RENDER_DISTORTION_NUM_IMAGES == ARRAY_SIZE(r->distortion.images), "Array size is wrong!");
static_assert(RENDER_DISTORTION_NUM_IMAGES == ARRAY_SIZE(r->distortion.device_memories),
"Array size is wrong!");
for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
D(ImageView, r->distortion.image_views[i]);
D(Image, r->distortion.images[i]);
DF(Memory, r->distortion.device_memories[i]);

View file

@ -61,7 +61,8 @@ extern "C" {
* Max number of images that can be given at a single time to the layer
* squasher in a single dispatch.
*/
#define RENDER_MAX_IMAGES (RENDER_MAX_LAYERS * 2)
#define RENDER_MAX_IMAGES_SIZE (RENDER_MAX_LAYERS * XRT_MAX_VIEWS)
#define RENDER_MAX_IMAGES_COUNT (RENDER_MAX_LAYERS * r->view_count)
/*!
* Maximum number of times that the layer squasher shader can run per
@ -70,13 +71,15 @@ extern "C" {
* two or more different compositions it's not the maximum number of views per
* composition (which is this number divided by number of composition).
*/
#define RENDER_MAX_LAYER_RUNS (2)
#define RENDER_MAX_LAYER_RUNS_SIZE (XRT_MAX_VIEWS)
#define RENDER_MAX_LAYER_RUNS_COUNT (r->view_count)
//! How large in pixels the distortion image is.
#define RENDER_DISTORTION_IMAGE_DIMENSIONS (128)
//! How many distortion images we have, one for each channel (3 rgb) and per view, total 6.
#define RENDER_DISTORTION_NUM_IMAGES (6)
//! How many distortion images we have, one for each channel (3 rgb) and per view.
#define RENDER_DISTORTION_IMAGES_SIZE (3 * XRT_MAX_VIEWS)
#define RENDER_DISTORTION_IMAGES_COUNT (3 * r->view_count)
//! Which binding the layer projection and quad shaders have their UBO on.
#define RENDER_BINDING_LAYER_SHARED_UBO 0
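A note on the new macro pairs above: the *_SIZE variants are compile-time bounds built on XRT_MAX_VIEWS and are used to size arrays, while the *_COUNT variants expand to expressions that read r->view_count, so they only compile where a struct render_resources pointer named r is in scope and serve as runtime loop bounds. A small usage sketch (the function itself is hypothetical; the pattern follows the hunks in this commit):

static void
example_reset_distortion_views(struct render_resources *r)
{
	// Storage sized for the compile-time worst case.
	VkImageView image_views[RENDER_DISTORTION_IMAGES_SIZE];

	// Runtime iteration over what is actually in use; needs `r` in scope.
	for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
		image_views[i] = VK_NULL_HANDLE;
	}

	(void)image_views; // silence unused warnings in this sketch
}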
@ -347,6 +350,9 @@ render_sub_alloc_ubo_alloc_and_write(struct vk_bundle *vk,
*/
struct render_resources
{
//! The count of views that we are rendering to.
uint32_t view_count;
//! Vulkan resources.
struct vk_bundle *vk;
@ -440,13 +446,13 @@ struct render_resources
struct render_buffer ibo;
uint32_t vertex_count;
uint32_t index_counts[2];
uint32_t index_counts[XRT_MAX_VIEWS];
uint32_t stride;
uint32_t index_offsets[2];
uint32_t index_offsets[XRT_MAX_VIEWS];
uint32_t index_count_total;
//! Info ubos, only supports two views currently.
struct render_buffer ubos[2];
struct render_buffer ubos[XRT_MAX_VIEWS];
} mesh;
/*!
@ -498,7 +504,7 @@ struct render_resources
uint32_t image_array_size;
//! Target info.
struct render_buffer ubos[RENDER_MAX_LAYER_RUNS];
struct render_buffer ubos[RENDER_MAX_LAYER_RUNS_SIZE];
} layer;
struct
@ -534,16 +540,16 @@ struct render_resources
struct
{
//! Transform to go from UV to tangent angles.
struct xrt_normalized_rect uv_to_tanangle[2];
struct xrt_normalized_rect uv_to_tanangle[XRT_MAX_VIEWS];
//! Backing memory to distortion images.
VkDeviceMemory device_memories[RENDER_DISTORTION_NUM_IMAGES];
VkDeviceMemory device_memories[RENDER_DISTORTION_IMAGES_SIZE];
//! Distortion images.
VkImage images[RENDER_DISTORTION_NUM_IMAGES];
VkImage images[RENDER_DISTORTION_IMAGES_SIZE];
//! The views into the distortion images.
VkImageView image_views[RENDER_DISTORTION_NUM_IMAGES];
VkImageView image_views[RENDER_DISTORTION_IMAGES_SIZE];
//! Whether distortion images have been pre-rotated 90 degrees.
bool pre_rotated;
@ -642,7 +648,7 @@ struct render_scratch_images
{
VkExtent2D extent;
struct render_scratch_color_image color[2];
struct render_scratch_color_image color[XRT_MAX_VIEWS];
};
/*!
@ -1096,7 +1102,7 @@ struct render_compute
struct render_resources *r;
//! Layer descriptor set.
VkDescriptorSet layer_descriptor_sets[RENDER_MAX_LAYER_RUNS];
VkDescriptorSet layer_descriptor_sets[RENDER_MAX_LAYER_RUNS_SIZE];
/*!
* Shared descriptor set, used for the clear and distortion shaders. It
@ -1138,15 +1144,15 @@ struct render_compute_layer_ubo_data
{
uint32_t val;
uint32_t unpremultiplied;
uint32_t padding[2];
uint32_t padding[XRT_MAX_VIEWS];
} layer_type[RENDER_MAX_LAYERS];
//! Which image/sampler(s) correspond to each layer.
struct
{
uint32_t images[2];
uint32_t images[XRT_MAX_VIEWS];
//! @todo Implement separated samplers and images (and change to samplers[2])
uint32_t padding[2];
uint32_t padding[XRT_MAX_VIEWS];
} images_samplers[RENDER_MAX_LAYERS];
//! Shared between cylinder and equirect2.
@ -1206,7 +1212,7 @@ struct render_compute_layer_ubo_data
struct
{
struct xrt_vec2 val;
float padding[2];
float padding[XRT_MAX_VIEWS];
} quad_extent[RENDER_MAX_LAYERS];
};
@ -1217,10 +1223,10 @@ struct render_compute_layer_ubo_data
*/
struct render_compute_distortion_ubo_data
{
struct render_viewport_data views[2];
struct xrt_normalized_rect pre_transforms[2];
struct xrt_normalized_rect post_transforms[2];
struct xrt_matrix_4x4 transforms[2];
struct render_viewport_data views[XRT_MAX_VIEWS];
struct xrt_normalized_rect pre_transforms[XRT_MAX_VIEWS];
struct xrt_normalized_rect post_transforms[XRT_MAX_VIEWS];
struct xrt_matrix_4x4 transforms[XRT_MAX_VIEWS];
};
/*!
@ -1270,51 +1276,51 @@ render_compute_end(struct render_compute *crc);
* @public @memberof render_compute
*/
void
render_compute_layers(struct render_compute *crc, //
VkDescriptorSet descriptor_set, //
VkBuffer ubo, //
VkSampler src_samplers[RENDER_MAX_IMAGES], //
VkImageView src_image_views[RENDER_MAX_IMAGES], //
uint32_t num_srcs, //
VkImageView target_image_view, //
const struct render_viewport_data *view, //
bool timewarp); //
render_compute_layers(struct render_compute *crc, //
VkDescriptorSet descriptor_set, //
VkBuffer ubo, //
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], //
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], //
uint32_t num_srcs, //
VkImageView target_image_view, //
const struct render_viewport_data *view, //
bool timewarp); //
/*!
* @public @memberof render_compute
*/
void
render_compute_projection_timewarp(struct render_compute *crc,
VkSampler src_samplers[2],
VkImageView src_image_views[2],
const struct xrt_normalized_rect src_rects[2],
const struct xrt_pose src_poses[2],
const struct xrt_fov src_fovs[2],
const struct xrt_pose new_poses[2],
VkSampler src_samplers[XRT_MAX_VIEWS],
VkImageView src_image_views[XRT_MAX_VIEWS],
const struct xrt_normalized_rect src_rects[XRT_MAX_VIEWS],
const struct xrt_pose src_poses[XRT_MAX_VIEWS],
const struct xrt_fov src_fovs[XRT_MAX_VIEWS],
const struct xrt_pose new_poses[XRT_MAX_VIEWS],
VkImage target_image,
VkImageView target_image_view,
const struct render_viewport_data views[2]);
const struct render_viewport_data views[XRT_MAX_VIEWS]);
/*!
* @public @memberof render_compute
*/
void
render_compute_projection(struct render_compute *crc, //
VkSampler src_samplers[2], //
VkImageView src_image_views[2], //
const struct xrt_normalized_rect src_rects[2], //
VkImage target_image, //
VkImageView target_image_view, //
const struct render_viewport_data views[2]); //
render_compute_projection(struct render_compute *crc, //
VkSampler src_samplers[XRT_MAX_VIEWS], //
VkImageView src_image_views[XRT_MAX_VIEWS], //
const struct xrt_normalized_rect src_rects[XRT_MAX_VIEWS], //
VkImage target_image, //
VkImageView target_image_view, //
const struct render_viewport_data views[XRT_MAX_VIEWS]); //
/*!
* @public @memberof render_compute
*/
void
render_compute_clear(struct render_compute *crc, //
VkImage target_image, //
VkImageView target_image_view, //
const struct render_viewport_data views[2]); //
render_compute_clear(struct render_compute *crc, //
VkImage target_image, //
VkImageView target_image_view, //
const struct render_viewport_data views[XRT_MAX_VIEWS]); //

View file

@ -19,6 +19,9 @@
#include "render/render_interface.h"
#include <stdio.h>
/*
*
* Gfx shared
@ -143,7 +146,7 @@ init_mesh_vertex_buffers(struct vk_bundle *vk,
}
XRT_CHECK_RESULT static bool
init_mesh_ubo_buffers(struct vk_bundle *vk, struct render_buffer *l_ubo, struct render_buffer *r_ubo)
init_mesh_ubo_buffers(struct vk_bundle *vk, struct render_buffer ubo[XRT_MAX_VIEWS], uint32_t view_count)
{
VkResult ret;
@ -154,29 +157,20 @@ init_mesh_ubo_buffers(struct vk_bundle *vk, struct render_buffer *l_ubo, struct
// Distortion ubo size.
VkDeviceSize ubo_size = sizeof(struct render_gfx_mesh_ubo_data);
for (uint32_t i = 0; i < view_count; ++i) {
ret = render_buffer_init(vk, //
&ubo[i], //
ubo_usage_flags, //
memory_property_flags, //
ubo_size); // size
VK_CHK_WITH_RET(ret, "render_buffer_init", false);
char name[20];
snprintf(name, sizeof(name), "mesh ubo %d", i);
VK_NAME_BUFFER(vk, ubo[i].buffer, name);
ret = render_buffer_init(vk, //
l_ubo, //
ubo_usage_flags, //
memory_property_flags, //
ubo_size); // size
VK_CHK_WITH_RET(ret, "render_buffer_init", false);
VK_NAME_BUFFER(vk, l_ubo->buffer, "mesh l_ubo");
ret = render_buffer_map(vk, l_ubo);
VK_CHK_WITH_RET(ret, "render_buffer_map", false);
ret = render_buffer_init(vk, //
r_ubo, //
ubo_usage_flags, //
memory_property_flags, //
ubo_size); // size
VK_CHK_WITH_RET(ret, "render_buffer_init", false);
VK_NAME_BUFFER(vk, r_ubo->buffer, "mesh r_ubo");
ret = render_buffer_map(vk, r_ubo);
VK_CHK_WITH_RET(ret, "render_buffer_map", false);
ret = render_buffer_map(vk, &ubo[i]);
VK_CHK_WITH_RET(ret, "render_buffer_map", false);
}
return true;
}
@ -532,25 +526,25 @@ render_resources_init(struct render_resources *r,
* Constants
*/
r->view_count = xdev->hmd->view_count;
r->mesh.src_binding = 0;
r->mesh.ubo_binding = 1;
struct xrt_hmd_parts *parts = xdev->hmd;
r->mesh.vertex_count = parts->distortion.mesh.vertex_count;
r->mesh.stride = parts->distortion.mesh.stride;
r->mesh.index_counts[0] = parts->distortion.mesh.index_counts[0];
r->mesh.index_counts[1] = parts->distortion.mesh.index_counts[1];
r->mesh.index_count_total = parts->distortion.mesh.index_count_total;
r->mesh.index_offsets[0] = parts->distortion.mesh.index_offsets[0];
r->mesh.index_offsets[1] = parts->distortion.mesh.index_offsets[1];
for (uint32_t i = 0; i < r->view_count; ++i) {
r->mesh.index_counts[i] = parts->distortion.mesh.index_counts[i];
r->mesh.index_offsets[i] = parts->distortion.mesh.index_offsets[i];
}
r->compute.src_binding = 0;
r->compute.distortion_binding = 1;
r->compute.target_binding = 2;
r->compute.ubo_binding = 3;
r->compute.layer.image_array_size = vk->features.max_per_stage_descriptor_sampled_images;
if (r->compute.layer.image_array_size > RENDER_MAX_IMAGES) {
r->compute.layer.image_array_size = RENDER_MAX_IMAGES;
if (r->compute.layer.image_array_size > RENDER_MAX_IMAGES_COUNT) {
r->compute.layer.image_array_size = RENDER_MAX_IMAGES_COUNT;
}
@ -703,10 +697,10 @@ render_resources_init(struct render_resources *r,
{
// Number of layer shader runs (views) times number of layers.
const uint32_t layer_shader_count = RENDER_MAX_LAYER_RUNS * RENDER_MAX_LAYERS;
const uint32_t layer_shader_count = RENDER_MAX_LAYER_RUNS_COUNT * RENDER_MAX_LAYERS;
// Two mesh distortion runs.
const uint32_t mesh_shader_count = 2;
const uint32_t mesh_shader_count = RENDER_MAX_LAYER_RUNS_COUNT;
struct vk_descriptor_pool_info mesh_pool_info = {
.uniform_per_descriptor_count = 1,
@ -737,7 +731,7 @@ render_resources_init(struct render_resources *r,
buffer_count += layer_shader_count;
// One UBO per mesh shader.
buffer_count += 2;
buffer_count += RENDER_MAX_LAYER_RUNS_COUNT;
// We currently use the alignment as the max UBO size.
static_assert(sizeof(struct render_gfx_mesh_ubo_data) <= RENDER_ALWAYS_SAFE_UBO_ALIGNMENT, "MAX");
@ -819,10 +813,9 @@ render_resources_init(struct render_resources *r,
return false;
}
bret = init_mesh_ubo_buffers( //
vk, //
&r->mesh.ubos[0], //
&r->mesh.ubos[1]); //
bret = init_mesh_ubo_buffers( //
vk, //
r->mesh.ubos, r->view_count); //
if (!bret) {
return false;
}
@ -838,12 +831,12 @@ render_resources_init(struct render_resources *r,
const uint32_t compute_descriptor_count = //
1 + // Shared/distortion run(s).
RENDER_MAX_LAYER_RUNS; // Layer shader run(s).
RENDER_MAX_LAYER_RUNS_COUNT; // Layer shader run(s).
struct vk_descriptor_pool_info compute_pool_info = {
.uniform_per_descriptor_count = 1,
// layer images
.sampler_per_descriptor_count = r->compute.layer.image_array_size + 6,
.sampler_per_descriptor_count = r->compute.layer.image_array_size + RENDER_DISTORTION_IMAGES_COUNT,
.storage_image_per_descriptor_count = 1,
.storage_buffer_per_descriptor_count = 0,
.descriptor_count = compute_descriptor_count,
@ -921,7 +914,7 @@ render_resources_init(struct render_resources *r,
size_t layer_ubo_size = sizeof(struct render_compute_layer_ubo_data);
for (uint32_t i = 0; i < ARRAY_SIZE(r->compute.layer.ubos); i++) {
for (uint32_t i = 0; i < r->view_count; i++) {
ret = render_buffer_init( //
vk, // vk_bundle
&r->compute.layer.ubos[i], // buffer
@ -1048,13 +1041,13 @@ render_resources_init(struct render_resources *r,
* Compute distortion textures, not created until later.
*/
for (uint32_t i = 0; i < ARRAY_SIZE(r->distortion.image_views); i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
r->distortion.image_views[i] = VK_NULL_HANDLE;
}
for (uint32_t i = 0; i < ARRAY_SIZE(r->distortion.images); i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
r->distortion.images[i] = VK_NULL_HANDLE;
}
for (uint32_t i = 0; i < ARRAY_SIZE(r->distortion.device_memories); i++) {
for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) {
r->distortion.device_memories[i] = VK_NULL_HANDLE;
}
@ -1120,8 +1113,9 @@ render_resources_close(struct render_resources *r)
D(QueryPool, r->query_pool);
render_buffer_close(vk, &r->mesh.vbo);
render_buffer_close(vk, &r->mesh.ibo);
render_buffer_close(vk, &r->mesh.ubos[0]);
render_buffer_close(vk, &r->mesh.ubos[1]);
for (uint32_t i = 0; i < r->view_count; ++i) {
render_buffer_close(vk, &r->mesh.ubos[i]);
}
D(DescriptorPool, r->compute.descriptor_pool);
@ -1139,7 +1133,7 @@ render_resources_close(struct render_resources *r)
render_distortion_images_close(r);
render_buffer_close(vk, &r->compute.clear.ubo);
for (uint32_t i = 0; i < ARRAY_SIZE(r->compute.layer.ubos); i++) {
for (uint32_t i = 0; i < r->view_count; i++) {
render_buffer_close(vk, &r->compute.layer.ubos[i]);
}
render_buffer_close(vk, &r->compute.distortion.ubo);
@ -1263,7 +1257,7 @@ render_scratch_images_ensure(struct render_resources *r, struct render_scratch_i
render_scratch_images_close(r, rsi);
for (uint32_t i = 0; i < ARRAY_SIZE(rsi->color); i++) {
for (uint32_t i = 0; i < r->view_count; i++) {
bret = create_scratch_image_and_view( //
r->vk, //
extent, //
@ -1288,7 +1282,7 @@ render_scratch_images_close(struct render_resources *r, struct render_scratch_im
{
struct vk_bundle *vk = r->vk;
for (uint32_t i = 0; i < ARRAY_SIZE(rsi->color); i++) {
for (uint32_t i = 0; i < r->view_count; i++) {
teardown_scratch_color_image(vk, &rsi->color[i]);
}

View file

@ -9,7 +9,7 @@
#include "srgb.inc.glsl"
//! @todo should this be a specialization const?
#define XRT_LAYER_STEREO_PROJECTION 0
#define XRT_LAYER_PROJECTION 0
#define XRT_LAYER_STEREO_PROJECTION_DEPTH 1
#define XRT_LAYER_QUAD 2
#define XRT_LAYER_CUBE 3
@ -464,7 +464,7 @@ vec4 do_layers(vec2 view_uv)
case XRT_LAYER_EQUIRECT2:
rgba = do_equirect2(view_uv, layer);
break;
case XRT_LAYER_STEREO_PROJECTION:
case XRT_LAYER_PROJECTION:
case XRT_LAYER_STEREO_PROJECTION_DEPTH:
rgba = do_projection(view_uv, layer);
break;

View file

@ -115,19 +115,20 @@ base_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_frame_data *d
}
static xrt_result_t
base_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
base_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct comp_base *cb = comp_base(xc);
uint32_t layer_id = cb->slot.layer_count;
struct comp_layer *layer = &cb->slot.layers[layer_id];
layer->sc_array[0] = comp_swapchain(l_xsc);
layer->sc_array[1] = comp_swapchain(r_xsc);
assert(ARRAY_SIZE(layer->sc_array) >= data->proj.view_count);
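// Wrap and store one swapchain per view; the count comes from the submitted layer data.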
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
layer->sc_array[i] = comp_swapchain(xsc[i]);
}
layer->data = *data;
cb->slot.layer_count++;
@ -256,7 +257,7 @@ comp_base_init(struct comp_base *cb)
cb->base.base.create_semaphore = base_create_semaphore;
cb->base.base.import_fence = base_import_fence;
cb->base.base.layer_begin = base_layer_begin;
cb->base.base.layer_stereo_projection = base_layer_stereo_projection;
cb->base.base.layer_projection = base_layer_projection;
cb->base.base.layer_stereo_projection_depth = base_layer_stereo_projection_depth;
cb->base.base.layer_quad = base_layer_quad;
cb->base.base.layer_cube = base_layer_cube;

View file

@ -33,7 +33,7 @@ struct comp_layer
*
* Unused elements should be set to null.
*/
struct comp_swapchain *sc_array[4];
struct comp_swapchain *sc_array[XRT_MAX_VIEWS * 2];
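// Sized for one color plus one optional depth swapchain per view.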
/*!
* All basic (trivially-serializable) data associated with a layer.
@ -62,9 +62,9 @@ struct comp_layer_slot
bool one_projection_layer_fast_path;
//! fov as reported by device for the current submit.
struct xrt_fov fovs[2];
struct xrt_fov fovs[XRT_MAX_VIEWS];
//! absolute pose as reported by device for the current submit.
struct xrt_pose poses[2];
struct xrt_pose poses[XRT_MAX_VIEWS];
};
/*!

View file

@ -102,7 +102,7 @@ struct comp_render_view_data
*/
struct comp_render_dispatch_data
{
struct comp_render_view_data views[2];
struct comp_render_view_data views[XRT_MAX_VIEWS];
//! The number of views currently in this dispatch data.
uint32_t view_count;
@ -324,7 +324,6 @@ comp_render_cs_layers(struct render_compute *crc,
* layers should it not be possible to do a fast_path. Will insert barriers to
* change the scratch images and target images to the needed layout.
*
* Currently limited to exactly two views.
*
* Expected layouts:
* * Layer images: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL

View file

@ -44,8 +44,8 @@ do_cs_equirect2_layer(const struct xrt_layer_data *data,
uint32_t cur_image,
VkSampler clamp_to_edge,
VkSampler clamp_to_border_black,
VkSampler src_samplers[RENDER_MAX_IMAGES],
VkImageView src_image_views[RENDER_MAX_IMAGES],
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE],
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE],
struct render_compute_layer_ubo_data *ubo_data,
uint32_t *out_cur_image)
{
@ -106,8 +106,8 @@ do_cs_projection_layer(const struct xrt_layer_data *data,
uint32_t cur_image,
VkSampler clamp_to_edge,
VkSampler clamp_to_border_black,
VkSampler src_samplers[RENDER_MAX_IMAGES],
VkImageView src_image_views[RENDER_MAX_IMAGES],
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE],
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE],
struct render_compute_layer_ubo_data *ubo_data,
bool do_timewarp,
uint32_t *out_cur_image)
@ -115,7 +115,7 @@ do_cs_projection_layer(const struct xrt_layer_data *data,
const struct xrt_layer_projection_view_data *vd = NULL;
const struct xrt_layer_depth_data *dvd = NULL;
if (data->type == XRT_LAYER_STEREO_PROJECTION) {
if (data->type == XRT_LAYER_PROJECTION) {
view_index_to_projection_data(view_index, data, &vd);
} else {
view_index_to_depth_data(view_index, data, &vd, &dvd);
@ -169,8 +169,8 @@ do_cs_quad_layer(const struct xrt_layer_data *data,
uint32_t cur_image,
VkSampler clamp_to_edge,
VkSampler clamp_to_border_black,
VkSampler src_samplers[RENDER_MAX_IMAGES],
VkImageView src_image_views[RENDER_MAX_IMAGES],
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE],
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE],
struct render_compute_layer_ubo_data *ubo_data,
uint32_t *out_cur_image)
{
@ -252,8 +252,8 @@ do_cs_cylinder_layer(const struct xrt_layer_data *data,
uint32_t cur_image,
VkSampler clamp_to_edge,
VkSampler clamp_to_border_black,
VkSampler src_samplers[RENDER_MAX_IMAGES],
VkImageView src_image_views[RENDER_MAX_IMAGES],
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE],
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE],
struct render_compute_layer_ubo_data *ubo_data,
uint32_t *out_cur_image)
{
@ -317,17 +317,17 @@ do_cs_cylinder_layer(const struct xrt_layer_data *data,
static void
do_cs_clear(struct render_compute *crc, const struct comp_render_dispatch_data *d)
{
// Hardcoded to two views.
if (d->view_count != 2) {
U_LOG_E("Only supports exactly 2 views!");
assert(d->view_count == 2);
if (d->view_count > XRT_MAX_VIEWS) {
U_LOG_E("Only supports max %d views!", XRT_MAX_VIEWS);
assert(d->view_count <= XRT_MAX_VIEWS);
return;
}
const struct render_viewport_data target_viewport_datas[2] = {
d->views[0].target_viewport_data,
d->views[1].target_viewport_data,
};
struct render_viewport_data target_viewport_datas[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < crc->r->view_count; ++i) {
target_viewport_datas[i] = d->views[i].target_viewport_data;
}
render_compute_clear( //
crc, // crc
@ -339,19 +339,17 @@ do_cs_clear(struct render_compute *crc, const struct comp_render_dispatch_data *
static void
do_cs_distortion_from_scratch(struct render_compute *crc, const struct comp_render_dispatch_data *d)
{
// Hardcoded to two views.
if (d->view_count != 2) {
U_LOG_E("Only supports exactly 2 views!");
assert(d->view_count == 2);
if (d->view_count > XRT_MAX_VIEWS) {
U_LOG_E("Only supports max %d views!", XRT_MAX_VIEWS);
assert(d->view_count <= XRT_MAX_VIEWS);
return;
}
VkSampler clamp_to_border_black = crc->r->samplers.clamp_to_border_black;
struct render_viewport_data target_viewport_datas[2];
VkImageView src_image_views[2];
VkSampler src_samplers[2];
struct xrt_normalized_rect src_norm_rects[2];
struct render_viewport_data target_viewport_datas[XRT_MAX_VIEWS];
VkImageView src_image_views[XRT_MAX_VIEWS];
VkSampler src_samplers[XRT_MAX_VIEWS];
struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < d->view_count; i++) {
// Data to be filled in.
@ -382,63 +380,49 @@ do_cs_distortion_from_scratch(struct render_compute *crc, const struct comp_rend
}
static void
do_cs_distortion_from_stereo_layer(struct render_compute *crc,
const struct comp_layer *layer,
const struct xrt_layer_projection_view_data *lvd,
const struct xrt_layer_projection_view_data *rvd,
const struct comp_render_dispatch_data *d)
do_cs_distortion_for_layer(struct render_compute *crc,
const struct comp_layer *layer,
const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS],
const struct comp_render_dispatch_data *d)
{
// Hardcoded to two views.
if (d->view_count != 2) {
U_LOG_E("Only supports exactly 2 views!");
assert(d->view_count == 2);
if (d->view_count > XRT_MAX_VIEWS) {
U_LOG_E("Only supports max %d views!", XRT_MAX_VIEWS);
assert(d->view_count <= XRT_MAX_VIEWS);
return;
}
// Fetch from this data.
const struct xrt_layer_data *data = &layer->data;
uint32_t left_array_index = lvd->sub.array_index;
uint32_t right_array_index = rvd->sub.array_index;
const struct comp_swapchain_image *left = &layer->sc_array[0]->images[lvd->sub.image_index];
const struct comp_swapchain_image *right = &layer->sc_array[1]->images[rvd->sub.image_index];
VkSampler clamp_to_border_black = crc->r->samplers.clamp_to_border_black;
// Data to fill in.
struct xrt_pose world_poses[2];
struct render_viewport_data target_viewport_datas[2];
struct xrt_normalized_rect src_norm_rects[2];
struct xrt_pose src_poses[2];
struct xrt_fov src_fovs[2];
VkSampler src_samplers[2];
VkImageView src_image_views[2];
struct xrt_pose world_poses[XRT_MAX_VIEWS];
struct render_viewport_data target_viewport_datas[XRT_MAX_VIEWS];
struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS];
struct xrt_pose src_poses[XRT_MAX_VIEWS];
struct xrt_fov src_fovs[XRT_MAX_VIEWS];
VkSampler src_samplers[XRT_MAX_VIEWS];
VkImageView src_image_views[XRT_MAX_VIEWS];
for (uint32_t i = 0; i < d->view_count; i++) {
struct xrt_pose world_pose;
struct render_viewport_data viewport_data;
struct xrt_pose src_pose;
struct xrt_fov src_fov;
struct xrt_normalized_rect src_norm_rect;
VkImageView src_image_view;
uint32_t array_index = vds[i]->sub.array_index;
const struct comp_swapchain_image *image = &layer->sc_array[i]->images[vds[i]->sub.image_index];
// Gather data.
world_pose = d->views[i].world_pose;
viewport_data = d->views[i].target_viewport_data;
if (!is_view_index_right(i)) {
// Left, aka not right.
src_pose = lvd->pose;
src_fov = lvd->fov;
src_norm_rect = lvd->sub.norm_rect;
src_image_view = get_image_view(left, data->flags, left_array_index);
} else {
// Right
src_pose = rvd->pose;
src_fov = rvd->fov;
src_norm_rect = rvd->sub.norm_rect;
src_image_view = get_image_view(right, data->flags, right_array_index);
}
src_pose = vds[i]->pose;
src_fov = vds[i]->fov;
src_norm_rect = vds[i]->sub.norm_rect;
src_image_view = get_image_view(image, data->flags, array_index);
if (data->flip_y) {
src_norm_rect.h = -src_norm_rect.h;
@ -515,8 +499,8 @@ comp_render_cs_layer(struct render_compute *crc,
// Tightly pack color and optional depth images.
uint32_t cur_image = 0;
VkSampler src_samplers[RENDER_MAX_IMAGES];
VkImageView src_image_views[RENDER_MAX_IMAGES];
VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE];
VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE];
ubo_data->view = *target_view;
ubo_data->pre_transform = *pre_transform;
@ -540,7 +524,7 @@ comp_render_cs_layer(struct render_compute *crc,
switch (data->type) {
case XRT_LAYER_CYLINDER: required_image_samplers = 1; break;
case XRT_LAYER_EQUIRECT2: required_image_samplers = 1; break;
case XRT_LAYER_STEREO_PROJECTION: required_image_samplers = 1; break;
case XRT_LAYER_PROJECTION: required_image_samplers = 1; break;
case XRT_LAYER_STEREO_PROJECTION_DEPTH: required_image_samplers = 2; break;
case XRT_LAYER_QUAD: required_image_samplers = 1; break;
default:
@ -587,7 +571,7 @@ comp_render_cs_layer(struct render_compute *crc,
&cur_image); // out_cur_image
break;
case XRT_LAYER_STEREO_PROJECTION_DEPTH:
case XRT_LAYER_STEREO_PROJECTION: {
case XRT_LAYER_PROJECTION: {
do_cs_projection_layer( //
data, // data
layer, // layer
@ -722,32 +706,31 @@ comp_render_cs_dispatch(struct render_compute *crc,
// We want to read from the images afterwards.
VkImageLayout transition_to = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
if (fast_path && layers[0].data.type == XRT_LAYER_STEREO_PROJECTION) {
if (fast_path && layers[0].data.type == XRT_LAYER_PROJECTION) {
int i = 0;
const struct comp_layer *layer = &layers[i];
const struct xrt_layer_stereo_projection_data *stereo = &layer->data.stereo;
const struct xrt_layer_projection_view_data *lvd = &stereo->l;
const struct xrt_layer_projection_view_data *rvd = &stereo->r;
do_cs_distortion_from_stereo_layer( //
crc, // crc
layer, // layer
lvd, // lvd
rvd, // rvd
d); // d
const struct xrt_layer_projection_data *proj = &layer->data.proj;
const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS];
for (uint32_t view = 0; view < crc->r->view_count; ++view) {
vds[view] = &proj->v[view];
}
do_cs_distortion_for_layer( //
crc, // crc
layer, // layer
vds, // vds
d); // d
} else if (fast_path && layers[0].data.type == XRT_LAYER_STEREO_PROJECTION_DEPTH) {
int i = 0;
const struct comp_layer *layer = &layers[i];
const struct xrt_layer_stereo_projection_depth_data *stereo = &layer->data.stereo_depth;
const struct xrt_layer_projection_view_data *lvd = &stereo->l;
const struct xrt_layer_projection_view_data *rvd = &stereo->r;
do_cs_distortion_from_stereo_layer( //
crc, // crc
layer, // layer
lvd, // lvd
rvd, // rvd
d); // d
const struct xrt_layer_projection_view_data *vds[2];
vds[0] = &stereo->l;
vds[1] = &stereo->r;
do_cs_distortion_for_layer( //
crc, // crc
layer, // layer
vds, // vds
d); // d
} else if (layer_count > 0) {
comp_render_cs_layers( //
crc, //

View file

@ -69,7 +69,7 @@ struct gfx_layer_view_state
*/
struct gfx_layer_state
{
struct gfx_layer_view_state views[2];
struct gfx_layer_view_state views[XRT_MAX_VIEWS];
};
/*
@ -77,7 +77,7 @@ struct gfx_layer_state
*/
struct gfx_mesh_state
{
VkDescriptorSet descriptor_sets[2];
VkDescriptorSet descriptor_sets[XRT_MAX_VIEWS];
};
/*
@ -98,7 +98,7 @@ struct gfx_mesh_view_data
*/
struct gfx_mesh_data
{
struct gfx_mesh_view_data views[2];
struct gfx_mesh_view_data views[XRT_MAX_VIEWS];
};
@ -366,7 +366,7 @@ do_projection_layer(struct render_gfx *rr,
struct vk_bundle *vk = rr->r->vk;
VkResult ret;
if (layer_data->type == XRT_LAYER_STEREO_PROJECTION) {
if (layer_data->type == XRT_LAYER_PROJECTION) {
view_index_to_projection_data(view_index, layer_data, &vd);
} else {
view_index_to_depth_data(view_index, layer_data, &vd, &dvd);
@ -561,7 +561,7 @@ do_layers(struct render_gfx *rr,
state); // state
VK_CHK_WITH_GOTO(ret, "do_equirect2_layer", err_layer);
break;
case XRT_LAYER_STEREO_PROJECTION:
case XRT_LAYER_PROJECTION:
case XRT_LAYER_STEREO_PROJECTION_DEPTH:
ret = do_projection_layer( //
rr, // rr
@ -626,7 +626,7 @@ do_layers(struct render_gfx *rr,
state->premultiplied_alphas[i], //
state->descriptor_sets[i]); //
break;
case XRT_LAYER_STEREO_PROJECTION:
case XRT_LAYER_PROJECTION:
case XRT_LAYER_STEREO_PROJECTION_DEPTH:
render_gfx_layer_projection( //
rr, //
@ -750,38 +750,25 @@ static void
do_mesh_from_proj(struct render_gfx *rr,
const struct comp_render_dispatch_data *d,
const struct comp_layer *layer,
const struct xrt_layer_projection_view_data *lvd,
const struct xrt_layer_projection_view_data *rvd)
const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS])
{
const struct xrt_layer_data *data = &layer->data;
const uint32_t left_array_index = lvd->sub.array_index;
const uint32_t right_array_index = rvd->sub.array_index;
const struct comp_swapchain_image *left = &layer->sc_array[0]->images[lvd->sub.image_index];
const struct comp_swapchain_image *right = &layer->sc_array[1]->images[rvd->sub.image_index];
VkSampler clamp_to_border_black = rr->r->samplers.clamp_to_border_black;
const VkSampler clamp_to_border_black = rr->r->samplers.clamp_to_border_black;
struct gfx_mesh_data md = XRT_STRUCT_INIT;
for (uint32_t i = 0; i < d->view_count; i++) {
const uint32_t array_index = vds[i]->sub.array_index;
const struct comp_swapchain_image *image = &layer->sc_array[i]->images[vds[i]->sub.image_index];
struct xrt_pose src_pose;
struct xrt_fov src_fov;
struct xrt_normalized_rect src_norm_rect;
VkImageView src_image_view;
if (!is_view_index_right(i)) {
// Left, aka not right.
src_pose = lvd->pose;
src_fov = lvd->fov;
src_norm_rect = lvd->sub.norm_rect;
src_image_view = get_image_view(left, data->flags, left_array_index);
} else {
// Right
src_pose = rvd->pose;
src_fov = rvd->fov;
src_norm_rect = rvd->sub.norm_rect;
src_image_view = get_image_view(right, data->flags, right_array_index);
}
src_pose = vds[i]->pose;
src_fov = vds[i]->fov;
src_norm_rect = vds[i]->sub.norm_rect;
const VkImageView src_image_view = get_image_view(image, data->flags, array_index);
if (data->flip_y) {
src_norm_rect.h = -src_norm_rect.h;
@ -827,31 +814,30 @@ comp_render_gfx_dispatch(struct render_gfx *rr,
// Sanity check.
assert(!fast_path || layer_count >= 1);
if (fast_path && layer->data.type == XRT_LAYER_STEREO_PROJECTION) {
if (fast_path && layer->data.type == XRT_LAYER_PROJECTION) {
// Fast path.
const struct xrt_layer_stereo_projection_data *stereo = &layer->data.stereo;
const struct xrt_layer_projection_view_data *lvd = &stereo->l;
const struct xrt_layer_projection_view_data *rvd = &stereo->r;
const struct xrt_layer_projection_data *proj = &layer->data.proj;
const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS];
for (uint32_t j = 0; j < d->view_count; ++j) {
vds[j] = &proj->v[j];
}
do_mesh_from_proj( //
rr, //
d, //
layer, //
lvd, //
rvd); //
vds); //
} else if (fast_path && layer->data.type == XRT_LAYER_STEREO_PROJECTION_DEPTH) {
// Fast path.
const struct xrt_layer_stereo_projection_depth_data *stereo = &layer->data.stereo_depth;
const struct xrt_layer_projection_view_data *lvd = &stereo->l;
const struct xrt_layer_projection_view_data *rvd = &stereo->r;
const struct xrt_layer_projection_view_data *vds[2];
vds[0] = &stereo->l;
vds[1] = &stereo->r;
do_mesh_from_proj( //
rr, //
d, //
layer, //
lvd, //
rvd); //
vds); //
} else {
if (fast_path) {

View file

@ -51,12 +51,12 @@ view_index_to_projection_data(uint32_t view_index,
const struct xrt_layer_data *data,
const struct xrt_layer_projection_view_data **out_vd)
{
const struct xrt_layer_stereo_projection_data *stereo = &data->stereo;
const struct xrt_layer_projection_data *proj = &data->proj;
if (is_view_index_right(view_index)) {
*out_vd = &stereo->r;
*out_vd = &proj->v[view_index];
} else {
*out_vd = &stereo->l;
*out_vd = &proj->v[view_index];
}
}
@ -94,7 +94,7 @@ is_layer_view_visible(const struct xrt_layer_data *data, uint32_t view_index)
case XRT_LAYER_EQUIRECT1: visibility = data->equirect1.visibility; break;
case XRT_LAYER_EQUIRECT2: visibility = data->equirect2.visibility; break;
case XRT_LAYER_QUAD: visibility = data->quad.visibility; break;
case XRT_LAYER_STEREO_PROJECTION:
case XRT_LAYER_PROJECTION:
case XRT_LAYER_STEREO_PROJECTION_DEPTH: return true;
default: return false;
};

View file

@ -75,7 +75,7 @@ typedef uint64_t VkDeviceMemory;
*/
enum xrt_layer_type
{
XRT_LAYER_STEREO_PROJECTION,
XRT_LAYER_PROJECTION,
XRT_LAYER_STEREO_PROJECTION_DEPTH,
XRT_LAYER_QUAD,
XRT_LAYER_CUBE,
@ -228,9 +228,10 @@ struct xrt_layer_projection_view_data
* The @ref xrt_swapchain references and @ref xrt_device are provided outside of
* this struct.
*/
struct xrt_layer_stereo_projection_data
struct xrt_layer_projection_data
{
struct xrt_layer_projection_view_data l, r;
uint32_t view_count;
struct xrt_layer_projection_view_data v[XRT_MAX_VIEWS];
};
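/*
 * A minimal sketch of filling this struct for a single-view (mono) layer,
 * the case this change enables. example_fill_mono_proj is a hypothetical
 * helper, not part of the interface; pose, fov and image_index come from
 * the caller and the struct is assumed to be zero-initialized.
 */
static inline void
example_fill_mono_proj(struct xrt_layer_projection_data *proj,
                       const struct xrt_pose *pose,
                       const struct xrt_fov *fov,
                       uint32_t image_index)
{
	proj->view_count = 1; // Mono: only v[0] is used.
	proj->v[0].pose = *pose;
	proj->v[0].fov = *fov;
	proj->v[0].sub.image_index = image_index;
	proj->v[0].sub.array_index = 0;
}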
/*!
@ -458,7 +459,7 @@ struct xrt_layer_data
* xrt_compositor::layer_commit where this data was passed.
*/
union {
struct xrt_layer_stereo_projection_data stereo;
struct xrt_layer_projection_data proj;
struct xrt_layer_stereo_projection_depth_data stereo_depth;
struct xrt_layer_quad_data quad;
struct xrt_layer_cube_data cube;
@ -1204,17 +1205,15 @@ struct xrt_compositor
*
* @param xc Self pointer
* @param xdev The device the layer is relative to.
* @param l_xsc Swapchain object containing left eye RGB data.
* @param r_xsc Swapchain object containing right eye RGB data.
* @param xsc Array of swapchains, one per view, containing eye RGB data.
* @param data All of the pure data bits (not pointers/handles),
* including what parts of the supplied swapchain
* objects to use for each view.
*/
xrt_result_t (*layer_stereo_projection)(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data);
xrt_result_t (*layer_projection)(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data);
/*!
* @brief Adds a stereo projection layer for submission, has depth information.
@ -1680,20 +1679,19 @@ xrt_comp_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_frame_dat
}
/*!
* @copydoc xrt_compositor::layer_stereo_projection
* @copydoc xrt_compositor::layer_projection
*
* Helper for calling through the function pointer.
*
* @public @memberof xrt_compositor
*/
static inline xrt_result_t
xrt_comp_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
xrt_comp_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
return xc->layer_stereo_projection(xc, xdev, l_xsc, r_xsc, data);
return xc->layer_projection(xc, xdev, xsc, data);
}
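/*
 * A minimal calling sketch, assuming a stereo HMD. example_submit_stereo,
 * xc, head and the two swapchains are placeholders for objects obtained
 * elsewhere; data is assumed to already describe both views (pose, fov,
 * sub-images).
 */
static inline xrt_result_t
example_submit_stereo(struct xrt_compositor *xc,
                      struct xrt_device *head,
                      struct xrt_swapchain *left,
                      struct xrt_swapchain *right,
                      struct xrt_layer_data *data)
{
	struct xrt_swapchain *xsc[XRT_MAX_VIEWS] = {left, right};

	data->type = XRT_LAYER_PROJECTION;
	data->proj.view_count = 2; // One swapchain in xsc per view.

	return xrt_comp_layer_projection(xc, head, xsc, data);
}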
/*!
@ -2285,8 +2283,8 @@ struct xrt_system_compositor_info
uint32_t width_pixels;
uint32_t height_pixels;
uint32_t sample_count;
} max; //!< Maximums for this view.
} views[2]; //!< View configuration information.
} max; //!< Maximums for this view.
} views[XRT_MAX_VIEWS]; //!< View configuration information.
//! Maximum number of composition layers supported, never changes.
uint32_t max_layers;

View file

@ -13,7 +13,7 @@
#include "xrt/xrt_defines.h"
#include "xrt/xrt_visibility_mask.h"
#include "xrt/xrt_limits.h"
#ifdef __cplusplus
extern "C" {
@ -107,8 +107,9 @@ struct xrt_hmd_parts
*
* For now hardcoded display to two.
*/
struct xrt_view views[2];
struct xrt_view views[XRT_MAX_VIEWS];
size_t view_count;
/*!
* Array of supported blend modes.
*/
@ -139,15 +140,15 @@ struct xrt_hmd_parts
//! Indices, for triangle strip.
int *indices;
//! Number of indices for the triangle strips (one per view).
uint32_t index_counts[2];
uint32_t index_counts[XRT_MAX_VIEWS];
//! Offsets for the indices (one offset per view).
uint32_t index_offsets[2];
uint32_t index_offsets[XRT_MAX_VIEWS];
//! Total number of elements in mesh::indices array.
uint32_t index_count_total;
} mesh;
//! distortion is subject to the field of view
struct xrt_fov fov[2];
struct xrt_fov fov[XRT_MAX_VIEWS];
} distortion;
};
@ -407,6 +408,7 @@ struct xrt_device
struct xrt_space_relation *out_head_relation,
struct xrt_fov *out_fovs,
struct xrt_pose *out_poses);
/**
* Compute the distortion at a single point.
*

View file

@ -16,6 +16,10 @@
* @addtogroup xrt_iface
* @{
*/
/*!
* Max number of views supported by a compositor, artificial limit.
*/
#define XRT_MAX_VIEWS 2
/*!
* Maximum number of handles sent in one call.

View file

@ -581,29 +581,24 @@ ipc_compositor_layer_begin(struct xrt_compositor *xc, const struct xrt_layer_fra
}
static xrt_result_t
ipc_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
const struct xrt_layer_data *data)
ipc_compositor_layer_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
struct xrt_swapchain *xsc[XRT_MAX_VIEWS],
const struct xrt_layer_data *data)
{
struct ipc_client_compositor *icc = ipc_client_compositor(xc);
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
assert(data->type == XRT_LAYER_PROJECTION);
struct ipc_shared_memory *ism = icc->ipc_c->ism;
struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count];
struct ipc_client_swapchain *l = ipc_client_swapchain(l_xsc);
struct ipc_client_swapchain *r = ipc_client_swapchain(r_xsc);
layer->xdev_id = 0; //! @todo Real id.
layer->swapchain_ids[0] = l->id;
layer->swapchain_ids[1] = r->id;
layer->swapchain_ids[2] = -1;
layer->swapchain_ids[3] = -1;
layer->data = *data;
for (uint32_t i = 0; i < data->proj.view_count; ++i) {
struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc[i]);
layer->swapchain_ids[i] = ics->id;
}
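// Only the first view_count ids are written; the server reads the same count from the HMD's view_count.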
// Increment the number of layers.
icc->layers.layer_count++;
@ -889,7 +884,7 @@ ipc_compositor_init(struct ipc_client_compositor *icc, struct xrt_compositor_nat
icc->base.base.begin_frame = ipc_compositor_begin_frame;
icc->base.base.discard_frame = ipc_compositor_discard_frame;
icc->base.base.layer_begin = ipc_compositor_layer_begin;
icc->base.base.layer_stereo_projection = ipc_compositor_layer_stereo_projection;
icc->base.base.layer_projection = ipc_compositor_layer_projection;
icc->base.base.layer_stereo_projection_depth = ipc_compositor_layer_stereo_projection_depth;
icc->base.base.layer_quad = ipc_compositor_layer_quad;
icc->base.base.layer_cube = ipc_compositor_layer_cube;

View file

@ -188,6 +188,7 @@ ipc_client_hmd_get_view_poses(struct xrt_device *xdev,
ich->device_id, //
default_eye_relation, //
at_timestamp_ns, //
view_count, //
&info); //
IPC_CHK_ONLY_PRINT(ich->ipc_c, xret, "ipc_call_device_get_view_poses_2");
@ -352,12 +353,12 @@ ipc_client_hmd_create(struct ipc_connection *ipc_c, struct xrt_tracking_origin *
for (int i = 0; i < XRT_MAX_DEVICE_BLEND_MODES; i++) {
ich->base.hmd->blend_modes[i] = ipc_c->ism->hmd.blend_modes[i];
}
ich->base.hmd->view_count = ism->hmd.view_count;
ich->base.hmd->blend_mode_count = ipc_c->ism->hmd.blend_mode_count;
ich->base.hmd->views[0].display.w_pixels = ipc_c->ism->hmd.views[0].display.w_pixels;
ich->base.hmd->views[0].display.h_pixels = ipc_c->ism->hmd.views[0].display.h_pixels;
ich->base.hmd->views[1].display.w_pixels = ipc_c->ism->hmd.views[1].display.w_pixels;
ich->base.hmd->views[1].display.h_pixels = ipc_c->ism->hmd.views[1].display.h_pixels;
for (uint32_t i = 0; i < ich->base.hmd->view_count; ++i) {
ich->base.hmd->views[i].display.w_pixels = ipc_c->ism->hmd.views[i].display.w_pixels;
ich->base.hmd->views[i].display.h_pixels = ipc_c->ism->hmd.views[i].display.h_pixels;
}
// Distortion information, fills in xdev->compute_distortion().
u_distortion_mesh_set_none(&ich->base);

View file

@ -739,29 +739,30 @@ _update_projection_layer(struct xrt_compositor *xc,
{
// xdev
uint32_t device_id = layer->xdev_id;
// left
uint32_t lxsci = layer->swapchain_ids[0];
// right
uint32_t rxsci = layer->swapchain_ids[1];
struct xrt_device *xdev = get_xdev(ics, device_id);
struct xrt_swapchain *lxcs = ics->xscs[lxsci];
struct xrt_swapchain *rxcs = ics->xscs[rxsci];
if (lxcs == NULL || rxcs == NULL) {
U_LOG_E("Invalid swap chain for projection layer!");
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for projection layer!");
return false;
}
uint32_t view_count = xdev->hmd->view_count;
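// The HMD's view count decides how many swapchain ids to read; the client is expected to have written the same number.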
struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
for (uint32_t k = 0; k < view_count; k++) {
const uint32_t xsci = layer->swapchain_ids[k];
xcs[k] = ics->xscs[xsci];
if (xcs[k] == NULL) {
U_LOG_E("Invalid swap chain for projection layer!");
return false;
}
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
xrt_comp_layer_stereo_projection(xc, xdev, lxcs, rxcs, data);
xrt_comp_layer_projection(xc, xdev, xcs, data);
return true;
}
@ -971,7 +972,7 @@ _update_layers(volatile struct ipc_client_state *ics, struct xrt_compositor *xc,
volatile struct ipc_layer_entry *layer = &slot->layers[i];
switch (layer->data.type) {
case XRT_LAYER_STEREO_PROJECTION:
case XRT_LAYER_PROJECTION:
if (!_update_projection_layer(xc, ics, layer, i)) {
return false;
}
@ -1744,17 +1745,17 @@ ipc_handle_device_get_view_poses_2(volatile struct ipc_client_state *ics,
uint32_t id,
const struct xrt_vec3 *default_eye_relation,
uint64_t at_timestamp_ns,
uint32_t view_count,
struct ipc_info_get_view_poses_2 *out_info)
{
// To make the code a bit more readable.
uint32_t device_id = id;
struct xrt_device *xdev = get_xdev(ics, device_id);
xrt_device_get_view_poses( //
xdev, //
default_eye_relation, //
at_timestamp_ns, //
2, //
view_count, //
&out_info->head_relation, //
out_info->fovs, //
out_info->poses); //

View file

@ -358,10 +358,12 @@ init_shm(struct ipc_server *s)
// Is this a HMD?
if (xdev->hmd != NULL) {
ism->hmd.views[0].display.w_pixels = xdev->hmd->views[0].display.w_pixels;
ism->hmd.views[0].display.h_pixels = xdev->hmd->views[0].display.h_pixels;
ism->hmd.views[1].display.w_pixels = xdev->hmd->views[1].display.w_pixels;
ism->hmd.views[1].display.h_pixels = xdev->hmd->views[1].display.h_pixels;
// set view count
ism->hmd.view_count = xdev->hmd->view_count;
for (uint32_t view = 0; view < xdev->hmd->view_count; ++view) {
ism->hmd.views[view].display.w_pixels = xdev->hmd->views[view].display.w_pixels;
ism->hmd.views[view].display.h_pixels = xdev->hmd->views[view].display.h_pixels;
}
for (size_t i = 0; i < xdev->hmd->blend_mode_count; i++) {
// Not super necessary, we also do this assert in oxr_system.c

View file

@ -155,7 +155,7 @@ struct ipc_layer_entry
*
* How many are actually used depends on the value of @p data.type
*/
uint32_t swapchain_ids[4];
uint32_t swapchain_ids[XRT_MAX_VIEWS * 2];
/*!
* All basic (trivially-serializable) data associated with a layer,
@ -260,7 +260,8 @@ struct ipc_shared_memory
uint32_t h_pixels;
} display;
} views[2];
// view count
uint32_t view_count;
enum xrt_blend_mode blend_modes[XRT_MAX_DEVICE_BLEND_MODES];
uint32_t blend_mode_count;
} hmd;
@ -328,7 +329,7 @@ struct ipc_arg_swapchain_from_native
*/
struct ipc_info_get_view_poses_2
{
struct xrt_fov fovs[2];
struct xrt_pose poses[2];
struct xrt_fov fovs[XRT_MAX_VIEWS];
struct xrt_pose poses[XRT_MAX_VIEWS];
struct xrt_space_relation head_relation;
};

View file

@ -381,7 +381,8 @@
"in": [
{"name": "id", "type": "uint32_t"},
{"name": "fallback_eye_relation", "type": "struct xrt_vec3"},
{"name": "at_timestamp_ns", "type": "uint64_t"}
{"name": "at_timestamp_ns", "type": "uint64_t"},
{"name": "view_count", "type": "uint32_t"}
],
"out": [
{"name": "info", "type": "struct ipc_info_get_view_poses_2"}

View file

@ -352,8 +352,8 @@ oxr_instance_create(struct oxr_logger *log,
oxr_instance_destroy(log, &inst->handle);
return ret;
}
ret = oxr_system_fill_in(log, inst, XRT_SYSTEM_ID, &inst->system);
uint32_t view_count = dev->hmd->view_count;
ret = oxr_system_fill_in(log, inst, XRT_SYSTEM_ID, view_count, &inst->system);
if (ret != XR_SUCCESS) {
oxr_instance_destroy(log, &inst->handle);
return ret;

View file

@ -913,7 +913,11 @@ oxr_system_select(struct oxr_logger *log,
struct oxr_system **out_selected);
XrResult
oxr_system_fill_in(struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, struct oxr_system *sys);
oxr_system_fill_in(struct oxr_logger *log,
struct oxr_instance *inst,
XrSystemId systemId,
uint32_t view_count,
struct oxr_system *sys);
XrResult
oxr_system_verify_id(struct oxr_logger *log, const struct oxr_instance *inst, XrSystemId systemId);

View file

@ -455,7 +455,7 @@ oxr_session_locate_views(struct oxr_logger *log,
bool print = sess->sys->inst->debug_views;
struct xrt_device *xdev = GET_XDEV_BY_ROLE(sess->sys, head);
struct oxr_space *baseSpc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_space *, viewLocateInfo->space);
uint32_t view_count = 2;
uint32_t view_count = xdev->hmd->view_count;
// Start two call handling.
if (viewCountOutput != NULL) {
@ -491,14 +491,14 @@ oxr_session_locate_views(struct oxr_logger *log,
// The head pose as in the xdev's space, aka XRT_INPUT_GENERIC_HEAD_POSE.
struct xrt_space_relation T_xdev_head = XRT_SPACE_RELATION_ZERO;
struct xrt_fov fovs[2] = {0};
struct xrt_pose poses[2] = {0};
struct xrt_fov fovs[XRT_MAX_VIEWS] = {0};
struct xrt_pose poses[XRT_MAX_VIEWS] = {0};
xrt_device_get_view_poses( //
xdev, //
&default_eye_relation, //
xdisplay_time, //
2, //
view_count, //
&T_xdev_head, //
fovs, //
poses);

View file

@ -591,11 +591,11 @@ verify_projection_layer(struct oxr_session *sess,
return ret;
}
if (proj->viewCount != 2) {
if (proj->viewCount < 1 || proj->viewCount > XRT_MAX_VIEWS) {
return oxr_error(log, XR_ERROR_VALIDATION_FAILURE,
"(frameEndInfo->layers[%u]->viewCount == %u) must be 2 for projection layers and the "
"current view configuration",
layer_index, proj->viewCount);
"(frameEndInfo->layers[%u]->viewCount == %u) must be between 1 and %d for projection "
"layers and the current view configuration",
layer_index, proj->viewCount, XRT_MAX_VIEWS);
}
// number of depth layers must be 0 or proj->viewCount
@ -694,7 +694,7 @@ verify_projection_layer(struct oxr_session *sess,
}
#ifdef OXR_HAVE_KHR_composition_layer_depth
if (depth_layer_count > 0 && depth_layer_count != proj->viewCount) {
if (depth_layer_count > 0 && depth_layer_count != proj->viewCount && proj->viewCount != 2) {
return oxr_error(
log, XR_ERROR_VALIDATION_FAILURE,
"(frameEndInfo->layers[%u] projection layer must have %u depth layers or none, but has: %u)",
@ -1259,14 +1259,14 @@ submit_projection_layer(struct oxr_session *sess,
{
struct oxr_space *spc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_space *, proj->space);
struct oxr_swapchain *d_scs[2] = {NULL, NULL};
struct oxr_swapchain *scs[2];
struct oxr_swapchain *scs[XRT_MAX_VIEWS];
struct xrt_pose *pose_ptr;
struct xrt_pose pose[2];
struct xrt_pose pose[XRT_MAX_VIEWS];
struct xrt_swapchain *swapchains[XRT_MAX_VIEWS];
enum xrt_layer_composition_flags flags = convert_layer_flags(proj->layerFlags);
uint32_t swapchain_count = ARRAY_SIZE(scs);
for (uint32_t i = 0; i < swapchain_count; i++) {
for (uint32_t i = 0; i < proj->viewCount; i++) {
scs[i] = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, proj->views[i].subImage.swapchain);
pose_ptr = (struct xrt_pose *)&proj->views[i].pose;
@ -1279,61 +1279,65 @@ submit_projection_layer(struct oxr_session *sess,
flags |= XRT_LAYER_COMPOSITION_VIEW_SPACE_BIT;
}
struct xrt_fov *l_fov = (struct xrt_fov *)&proj->views[0].fov;
struct xrt_fov *r_fov = (struct xrt_fov *)&proj->views[1].fov;
struct xrt_layer_data data;
U_ZERO(&data);
data.type = XRT_LAYER_STEREO_PROJECTION;
data.type = XRT_LAYER_PROJECTION;
data.name = XRT_INPUT_GENERIC_HEAD_POSE;
data.timestamp = xrt_timestamp;
data.flags = flags;
data.stereo.l.fov = *l_fov;
data.stereo.l.pose = pose[0];
data.stereo.r.fov = *r_fov;
data.stereo.r.pose = pose[1];
fill_in_sub_image(scs[0], &proj->views[0].subImage, &data.stereo.l.sub);
fill_in_sub_image(scs[1], &proj->views[1].subImage, &data.stereo.r.sub);
data.proj.view_count = proj->viewCount;
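// Copy fov, pose and sub-image for each view the app submitted.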
for (size_t i = 0; i < proj->viewCount; ++i) {
struct xrt_fov *fov = (struct xrt_fov *)&proj->views[i].fov;
data.proj.v[i].fov = *fov;
data.proj.v[i].pose = pose[i];
fill_in_sub_image(scs[i], &proj->views[i].subImage, &data.proj.v[i].sub);
swapchains[i] = scs[i]->swapchain;
}
fill_in_color_scale_bias(sess, (XrCompositionLayerBaseHeader *)proj, &data);
fill_in_y_flip(sess, (XrCompositionLayerBaseHeader *)proj, &data);
fill_in_blend_factors(sess, (XrCompositionLayerBaseHeader *)proj, &data);
fill_in_layer_settings(sess, (XrCompositionLayerBaseHeader *)proj, &data);
#ifdef OXR_HAVE_KHR_composition_layer_depth
const XrCompositionLayerDepthInfoKHR *d_l = OXR_GET_INPUT_FROM_CHAIN(
&proj->views[0], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR);
if (d_l) {
data.stereo_depth.l_d.far_z = d_l->farZ;
data.stereo_depth.l_d.near_z = d_l->nearZ;
data.stereo_depth.l_d.max_depth = d_l->maxDepth;
data.stereo_depth.l_d.min_depth = d_l->minDepth;
if (proj->viewCount == 2) {
const XrCompositionLayerDepthInfoKHR *d_l = OXR_GET_INPUT_FROM_CHAIN(
&proj->views[0], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR);
if (d_l) {
data.stereo_depth.l_d.far_z = d_l->farZ;
data.stereo_depth.l_d.near_z = d_l->nearZ;
data.stereo_depth.l_d.max_depth = d_l->maxDepth;
data.stereo_depth.l_d.min_depth = d_l->minDepth;
struct oxr_swapchain *sc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_l->subImage.swapchain);
struct oxr_swapchain *sc =
XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_l->subImage.swapchain);
fill_in_sub_image(sc, &d_l->subImage, &data.stereo_depth.l_d.sub);
fill_in_sub_image(sc, &d_l->subImage, &data.stereo_depth.l_d.sub);
// Need to pass this in.
d_scs[0] = sc;
}
// Need to pass this in.
d_scs[0] = sc;
}
const XrCompositionLayerDepthInfoKHR *d_r = OXR_GET_INPUT_FROM_CHAIN(
&proj->views[1], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR);
const XrCompositionLayerDepthInfoKHR *d_r = OXR_GET_INPUT_FROM_CHAIN(
&proj->views[1], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR);
if (d_r) {
data.stereo_depth.r_d.far_z = d_r->farZ;
data.stereo_depth.r_d.near_z = d_r->nearZ;
data.stereo_depth.r_d.max_depth = d_r->maxDepth;
data.stereo_depth.r_d.min_depth = d_r->minDepth;
if (d_r) {
data.stereo_depth.r_d.far_z = d_r->farZ;
data.stereo_depth.r_d.near_z = d_r->nearZ;
data.stereo_depth.r_d.max_depth = d_r->maxDepth;
data.stereo_depth.r_d.min_depth = d_r->minDepth;
struct oxr_swapchain *sc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_r->subImage.swapchain);
struct oxr_swapchain *sc =
XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_r->subImage.swapchain);
fill_in_sub_image(sc, &d_r->subImage, &data.stereo_depth.r_d.sub);
fill_in_sub_image(sc, &d_r->subImage, &data.stereo_depth.r_d.sub);
// Need to pass this in.
d_scs[1] = sc;
// Need to pass this in.
d_scs[1] = sc;
}
}
#endif // OXR_HAVE_KHR_composition_layer_depth
if (d_scs[0] != NULL && d_scs[1] != NULL) {
#ifdef OXR_HAVE_KHR_composition_layer_depth
fill_in_depth_test(sess, (XrCompositionLayerBaseHeader *)proj, &data);
@ -1351,13 +1355,12 @@ submit_projection_layer(struct oxr_session *sess,
assert(false && "Should not get here");
#endif // OXR_HAVE_KHR_composition_layer_depth
} else {
xrt_result_t xret = xrt_comp_layer_stereo_projection( //
xc, // compositor
head, // xdev
scs[0]->swapchain, // left
scs[1]->swapchain, // right
&data); // data
OXR_CHECK_XRET(log, sess, xret, xrt_comp_layer_stereo_projection);
xrt_result_t xret = xrt_comp_layer_projection( //
xc, // compositor
head, // xdev
swapchains, // swapchains
&data); // data
OXR_CHECK_XRET(log, sess, xret, xrt_comp_layer_projection);
}
return XR_SUCCESS;

View file

@ -107,14 +107,22 @@ oxr_system_get_by_id(struct oxr_logger *log, struct oxr_instance *inst, XrSystem
XrResult
oxr_system_fill_in(struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, struct oxr_system *sys)
oxr_system_fill_in(
struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, uint32_t view_count, struct oxr_system *sys)
{
//! @todo handle other subaction paths?
sys->inst = inst;
sys->systemId = systemId;
sys->form_factor = XR_FORM_FACTOR_HEAD_MOUNTED_DISPLAY;
sys->view_config_type = XR_VIEW_CONFIGURATION_TYPE_PRIMARY_STEREO;
if (view_count == 1) {
sys->view_config_type = XR_VIEW_CONFIGURATION_TYPE_PRIMARY_MONO;
} else if (view_count == 2) {
sys->view_config_type = XR_VIEW_CONFIGURATION_TYPE_PRIMARY_STEREO;
} else {
assert(false && "view_count must be 1 or 2");
}
U_LOG_D("sys->view_config_type = %d", sys->view_config_type);
sys->dynamic_roles_cache = (struct xrt_system_roles)XRT_SYSTEM_ROLES_INIT;
#ifdef XR_USE_GRAPHICS_API_VULKAN
@ -141,41 +149,26 @@ oxr_system_fill_in(struct oxr_logger *log, struct oxr_instance *inst, XrSystemId
struct xrt_system_compositor_info *info = &sys->xsysc->info;
uint32_t w0 = (uint32_t)(info->views[0].recommended.width_pixels * scale);
uint32_t h0 = (uint32_t)(info->views[0].recommended.height_pixels * scale);
uint32_t w1 = (uint32_t)(info->views[1].recommended.width_pixels * scale);
uint32_t h1 = (uint32_t)(info->views[1].recommended.height_pixels * scale);
uint32_t w0_2 = info->views[0].max.width_pixels;
uint32_t h0_2 = info->views[0].max.height_pixels;
uint32_t w1_2 = info->views[1].max.width_pixels;
uint32_t h1_2 = info->views[1].max.height_pixels;
#define imin(a, b) (a < b ? a : b)
for (uint32_t i = 0; i < view_count; ++i) {
uint32_t w = (uint32_t)(info->views[i].recommended.width_pixels * scale);
uint32_t h = (uint32_t)(info->views[i].recommended.height_pixels * scale);
uint32_t w_2 = info->views[i].max.width_pixels;
uint32_t h_2 = info->views[i].max.height_pixels;
w0 = imin(w0, w0_2);
h0 = imin(h0, h0_2);
w1 = imin(w1, w1_2);
h1 = imin(h1, h1_2);
w = imin(w, w_2);
h = imin(h, h_2);
sys->views[i].recommendedImageRectWidth = w;
sys->views[i].maxImageRectWidth = w_2;
sys->views[i].recommendedImageRectHeight = h;
sys->views[i].maxImageRectHeight = h_2;
sys->views[i].recommendedSwapchainSampleCount = info->views[i].recommended.sample_count;
sys->views[i].maxSwapchainSampleCount = info->views[i].max.sample_count;
}
#undef imin
// clang-format off
sys->views[0].recommendedImageRectWidth = w0;
sys->views[0].maxImageRectWidth = w0_2;
sys->views[0].recommendedImageRectHeight = h0;
sys->views[0].maxImageRectHeight = h0_2;
sys->views[0].recommendedSwapchainSampleCount = info->views[0].recommended.sample_count;
sys->views[0].maxSwapchainSampleCount = info->views[0].max.sample_count;
sys->views[1].recommendedImageRectWidth = w1;
sys->views[1].maxImageRectWidth = w1_2;
sys->views[1].recommendedImageRectHeight = h1;
sys->views[1].maxImageRectHeight = h1_2;
sys->views[1].recommendedSwapchainSampleCount = info->views[1].recommended.sample_count;
sys->views[1].maxSwapchainSampleCount = info->views[1].max.sample_count;
// clang-format on
/*
* Blend mode support.
@ -459,7 +452,11 @@ oxr_system_enumerate_view_conf_views(struct oxr_logger *log,
if (viewConfigurationType != sys->view_config_type) {
return oxr_error(log, XR_ERROR_VIEW_CONFIGURATION_TYPE_UNSUPPORTED, "Invalid view configuration type");
}
OXR_TWO_CALL_FILL_IN_HELPER(log, viewCapacityInput, viewCountOutput, views, 2, view_configuration_view_fill_in,
sys->views, XR_SUCCESS);
if (sys->view_config_type == XR_VIEW_CONFIGURATION_TYPE_PRIMARY_MONO) {
OXR_TWO_CALL_FILL_IN_HELPER(log, viewCapacityInput, viewCountOutput, views, 1,
view_configuration_view_fill_in, sys->views, XR_SUCCESS);
} else {
OXR_TWO_CALL_FILL_IN_HELPER(log, viewCapacityInput, viewCountOutput, views, 2,
view_configuration_view_fill_in, sys->views, XR_SUCCESS);
}
}

View file

@ -852,10 +852,14 @@ print_system_devices(u_pp_delegate_t dg, struct xrt_system_devices *xsysd)
u_pp(dg, "\n\tIn roles:");
#define PH(IDENT) \
u_pp(dg, "\n\t\t%s: %s, view count: %d", #IDENT, \
xsysd->static_roles.IDENT ? xsysd->static_roles.IDENT->str : "<none>", \
xsysd->static_roles.IDENT ? (int)xsysd->static_roles.IDENT->hmd->view_count : 0)
#define P(IDENT) u_pp(dg, "\n\t\t%s: %s", #IDENT, xsysd->static_roles.IDENT ? xsysd->static_roles.IDENT->str : "<none>")
#define PD(IDENT) u_pp(dg, "\n\t\t%s: %s", #IDENT, roles.IDENT >= 0 ? xsysd->xdevs[roles.IDENT]->str : "<none>")
P(head);
PH(head);
P(eyes);
P(face);
PD(left);

View file

@ -115,12 +115,12 @@ sdl_program_plus_render(struct sdl_program_plus *spp_ptr)
if (spp.c.base.slot.layer_count == 0) {
glClearColor(0.2f, 0.2f, 0.2f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
} else if (spp.c.base.slot.layers[0].data.type == XRT_LAYER_STEREO_PROJECTION ||
} else if (spp.c.base.slot.layers[0].data.type == XRT_LAYER_PROJECTION ||
spp.c.base.slot.layers[0].data.type == XRT_LAYER_STEREO_PROJECTION_DEPTH) {
auto &l = spp.c.base.slot.layers[0];
auto &ssc = *(sdl_swapchain *)l.sc_array[0];
GLuint tex = ssc.textures[l.data.stereo.l.sub.image_index];
GLuint tex = ssc.textures[l.data.proj.v[0].sub.image_index];
glClearColor(0.2f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);