c/main: Implement XR_KHR_composition_layer_equirect2.

This commit is contained in:
Lubosz Sarnecki 2020-10-09 16:35:16 +02:00 committed by Jakob Bornecrantz
parent 9738b4fe93
commit 3195ba8351
9 changed files with 260 additions and 36 deletions

View file

@ -427,11 +427,7 @@ compositor_layer_equirect(struct xrt_compositor *xc,
struct xrt_swapchain *xsc, struct xrt_swapchain *xsc,
const struct xrt_layer_data *data) const struct xrt_layer_data *data)
{ {
#if 0
return do_single(xc, xdev, xsc, data); return do_single(xc, xdev, xsc, data);
#else
return XRT_SUCCESS; //! @todo Implement
#endif
} }
static xrt_result_t static xrt_result_t
@ -493,8 +489,14 @@ compositor_layer_commit(struct xrt_compositor *xc, int64_t frame_id)
image = &layer->scs[0]->images[cyl->sub.image_index]; image = &layer->scs[0]->images[cyl->sub.image_index];
comp_renderer_set_cylinder_layer(c->r, i, image, data); comp_renderer_set_cylinder_layer(c->r, i, image, data);
} break; } break;
case XRT_LAYER_EQUIRECT: {
struct xrt_layer_equirect_data *eq =
&layer->data.equirect;
struct comp_swapchain_image *image;
image = &layer->scs[0]->images[eq->sub.image_index];
comp_renderer_set_equirect_layer(c->r, i, image, data);
} break;
case XRT_LAYER_CUBE: case XRT_LAYER_CUBE:
case XRT_LAYER_EQUIRECT:
// Should never end up here. // Should never end up here.
assert(false); assert(false);
} }

View file

@ -83,6 +83,31 @@ _init_ubos(struct comp_render_layer *self)
return true; return true;
} }
/*
 * Create and persistently map the uniform buffer backing the equirect
 * fragment-shader parameters (shader set = 1, binding = 0).
 *
 * @param self  Layer whose equirect UBO is initialized.
 * @return true on success, false if buffer creation or mapping fails.
 */
static bool
_init_equirect_ubo(struct comp_render_layer *self)
{
	VkBufferUsageFlags usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
	VkMemoryPropertyFlags properties =
	    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
	    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
	    VK_MEMORY_PROPERTY_HOST_CACHED_BIT;

	/* Size the buffer for the data it actually carries. The original
	 * used sizeof(struct layer_transformation) here — a copy-paste slip
	 * from _init_ubos (harmless over-allocation, but misleading). */
	if (!vk_buffer_init(self->vk, sizeof(struct layer_equirect_data),
	                    usage, properties, &self->equirect_ubo.handle,
	                    &self->equirect_ubo.memory))
		return false;

	/* Map once for the layer's lifetime; later updates are plain
	 * memcpy into the coherent mapping. */
	VkResult res = self->vk->vkMapMemory(
	    self->vk->device, self->equirect_ubo.memory, 0, VK_WHOLE_SIZE, 0,
	    &self->equirect_ubo.data);
	vk_check_error("vkMapMemory", res, false);

	memcpy(self->equirect_ubo.data, &self->equirect_data,
	       sizeof(struct layer_equirect_data));

	return true;
}
static void static void
_update_descriptor(struct comp_render_layer *self, _update_descriptor(struct comp_render_layer *self,
struct vk_bundle *vk, struct vk_bundle *vk,
@ -126,6 +151,31 @@ _update_descriptor(struct comp_render_layer *self,
vk->vkUpdateDescriptorSets(vk->device, 2, sets, 0, NULL); vk->vkUpdateDescriptorSets(vk->device, 2, sets, 0, NULL);
} }
/*
 * Point binding 0 (the equirect parameter UBO) of the given descriptor
 * set at @buffer, covering the whole buffer range.
 */
static void
_update_descriptor_equirect(struct comp_render_layer *self,
                            VkDescriptorSet set,
                            VkBuffer buffer)
{
	VkDescriptorBufferInfo buffer_info = {
	    .buffer = buffer,
	    .offset = 0,
	    .range = VK_WHOLE_SIZE,
	};

	VkWriteDescriptorSet write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .dstSet = set,
	    .dstBinding = 0,
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	    .pBufferInfo = &buffer_info,
	    .pTexelBufferView = NULL,
	};

	self->vk->vkUpdateDescriptorSets(self->vk->device, 1, &write, 0, NULL);
}
void void
comp_layer_update_descriptors(struct comp_render_layer *self, comp_layer_update_descriptors(struct comp_render_layer *self,
VkSampler sampler, VkSampler sampler,
@ -137,6 +187,23 @@ comp_layer_update_descriptors(struct comp_render_layer *self,
sampler, image_view); sampler, image_view);
} }
/*
 * Copy the equirect parameters out of the layer submission data, upload
 * them into the persistently mapped equirect UBO, and (re)write the
 * layer's equirect descriptor set to reference that UBO.
 */
void
comp_layer_update_equirect_descriptor(struct comp_render_layer *self,
                                      struct xrt_layer_equirect_data *data)
{
	self->equirect_data.radius = data->radius;
	self->equirect_data.central_horizontal_angle =
	    data->central_horizontal_angle;
	self->equirect_data.upper_vertical_angle = data->upper_vertical_angle;
	self->equirect_data.lower_vertical_angle = data->lower_vertical_angle;

	/* The mapping is host-coherent, so a plain copy suffices. */
	memcpy(self->equirect_ubo.data, &self->equirect_data,
	       sizeof(struct layer_equirect_data));

	_update_descriptor_equirect(self, self->descriptor_equirect,
	                            self->equirect_ubo.handle);
}
void void
comp_layer_update_stereo_descriptors(struct comp_render_layer *self, comp_layer_update_stereo_descriptors(struct comp_render_layer *self,
VkSampler left_sampler, VkSampler left_sampler,
@ -156,7 +223,8 @@ comp_layer_update_stereo_descriptors(struct comp_render_layer *self,
static bool static bool
_init(struct comp_render_layer *self, _init(struct comp_render_layer *self,
struct vk_bundle *vk, struct vk_bundle *vk,
VkDescriptorSetLayout *layout) VkDescriptorSetLayout *layout,
VkDescriptorSetLayout *layout_equirect)
{ {
self->vk = vk; self->vk = vk;
@ -168,30 +236,36 @@ _init(struct comp_render_layer *self,
if (!_init_ubos(self)) if (!_init_ubos(self))
return false; return false;
uint32_t set_count = 2; if (!_init_equirect_ubo(self))
return false;
VkDescriptorPoolSize pool_sizes[] = { VkDescriptorPoolSize pool_sizes[] = {
{ {
.descriptorCount = set_count, .descriptorCount = 3,
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
}, },
{ {
.descriptorCount = set_count, .descriptorCount = 2,
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
}, },
}; };
if (!vk_init_descriptor_pool(self->vk, pool_sizes, if (!vk_init_descriptor_pool(self->vk, pool_sizes,
ARRAY_SIZE(pool_sizes), set_count, ARRAY_SIZE(pool_sizes), 3,
&self->descriptor_pool)) &self->descriptor_pool))
return false; return false;
for (uint32_t eye = 0; eye < set_count; eye++) for (uint32_t eye = 0; eye < 2; eye++)
if (!vk_allocate_descriptor_sets( if (!vk_allocate_descriptor_sets(
self->vk, self->descriptor_pool, 1, layout, self->vk, self->descriptor_pool, 1, layout,
&self->descriptor_sets[eye])) &self->descriptor_sets[eye]))
return false; return false;
if (!vk_allocate_descriptor_sets(self->vk, self->descriptor_pool, 1,
layout_equirect,
&self->descriptor_equirect))
return false;
return true; return true;
} }
@ -225,18 +299,32 @@ comp_layer_draw(struct comp_render_layer *self,
case XRT_LAYER_STEREO_PROJECTION: case XRT_LAYER_STEREO_PROJECTION:
_update_mvp_matrix(self, eye, &proj_scale); _update_mvp_matrix(self, eye, &proj_scale);
break; break;
case XRT_LAYER_QUAD: _update_mvp_matrix(self, eye, vp); break; case XRT_LAYER_QUAD:
case XRT_LAYER_CYLINDER: _update_mvp_matrix(self, eye, vp); break; case XRT_LAYER_CYLINDER:
case XRT_LAYER_EQUIRECT: _update_mvp_matrix(self, eye, vp); break;
case XRT_LAYER_STEREO_PROJECTION_DEPTH: case XRT_LAYER_STEREO_PROJECTION_DEPTH:
case XRT_LAYER_CUBE: case XRT_LAYER_CUBE:
case XRT_LAYER_EQUIRECT:
// Should never end up here. // Should never end up here.
assert(false); assert(false);
} }
self->vk->vkCmdBindDescriptorSets(
cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, if (self->type == XRT_LAYER_EQUIRECT) {
&self->descriptor_sets[eye], 0, NULL); const VkDescriptorSet sets[2] = {
self->descriptor_sets[eye],
self->descriptor_equirect,
};
self->vk->vkCmdBindDescriptorSets(
cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_layout, 0, 2, sets, 0, NULL);
} else {
self->vk->vkCmdBindDescriptorSets(
cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_layout, 0, 1, &self->descriptor_sets[eye], 0,
NULL);
}
VkDeviceSize offsets[1] = {0}; VkDeviceSize offsets[1] = {0};
self->vk->vkCmdBindVertexBuffers(cmd_buffer, 0, 1, self->vk->vkCmdBindVertexBuffers(cmd_buffer, 0, 1,
@ -369,11 +457,13 @@ comp_layer_get_cylinder_vertex_buffer(struct comp_render_layer *self)
} }
struct comp_render_layer * struct comp_render_layer *
comp_layer_create(struct vk_bundle *vk, VkDescriptorSetLayout *layout) comp_layer_create(struct vk_bundle *vk,
VkDescriptorSetLayout *layout,
VkDescriptorSetLayout *layout_equirect)
{ {
struct comp_render_layer *q = U_TYPED_CALLOC(struct comp_render_layer); struct comp_render_layer *q = U_TYPED_CALLOC(struct comp_render_layer);
_init(q, vk, layout); _init(q, vk, layout, layout_equirect);
if (!_init_cylinder_vertex_buffer(q)) if (!_init_cylinder_vertex_buffer(q))
return NULL; return NULL;
@ -387,6 +477,8 @@ comp_layer_destroy(struct comp_render_layer *self)
for (uint32_t eye = 0; eye < 2; eye++) for (uint32_t eye = 0; eye < 2; eye++)
vk_buffer_destroy(&self->transformation_ubos[eye], self->vk); vk_buffer_destroy(&self->transformation_ubos[eye], self->vk);
vk_buffer_destroy(&self->equirect_ubo, self->vk);
self->vk->vkDestroyDescriptorPool(self->vk->device, self->vk->vkDestroyDescriptorPool(self->vk->device,
self->descriptor_pool, NULL); self->descriptor_pool, NULL);

View file

@ -20,6 +20,14 @@ struct layer_transformation
bool flip_y; bool flip_y;
}; };
/*
 * CPU-side mirror of the "Equirect" uniform block (set = 1, binding = 0)
 * in the layer fragment shader.  Field order and types must match the
 * shader's std140 block exactly; four consecutive floats pack identically
 * in std140 and C, so no explicit padding is needed here.
 */
struct layer_equirect_data
{
	float radius;
	float central_horizontal_angle;
	float upper_vertical_angle;
	float lower_vertical_angle;
};
struct comp_render_layer struct comp_render_layer
{ {
struct vk_bundle *vk; struct vk_bundle *vk;
@ -33,8 +41,12 @@ struct comp_render_layer
struct layer_transformation transformation[2]; struct layer_transformation transformation[2];
struct vk_buffer transformation_ubos[2]; struct vk_buffer transformation_ubos[2];
struct layer_equirect_data equirect_data;
struct vk_buffer equirect_ubo;
VkDescriptorPool descriptor_pool; VkDescriptorPool descriptor_pool;
VkDescriptorSet descriptor_sets[2]; VkDescriptorSet descriptor_sets[2];
VkDescriptorSet descriptor_equirect;
struct xrt_matrix_4x4 model_matrix; struct xrt_matrix_4x4 model_matrix;
@ -49,7 +61,9 @@ struct comp_render_layer
}; };
struct comp_render_layer * struct comp_render_layer *
comp_layer_create(struct vk_bundle *vk, VkDescriptorSetLayout *layout); comp_layer_create(struct vk_bundle *vk,
VkDescriptorSetLayout *layout,
VkDescriptorSetLayout *layout_equirect);
void void
comp_layer_draw(struct comp_render_layer *self, comp_layer_draw(struct comp_render_layer *self,
@ -89,3 +103,7 @@ comp_layer_get_cylinder_vertex_buffer(struct comp_render_layer *self);
bool bool
comp_layer_update_cylinder_vertex_buffer(struct comp_render_layer *self, comp_layer_update_cylinder_vertex_buffer(struct comp_render_layer *self,
float central_angle); float central_angle);
void
comp_layer_update_equirect_descriptor(struct comp_render_layer *self,
struct xrt_layer_equirect_data *data);

View file

@ -111,17 +111,45 @@ _init_descriptor_layout(struct comp_layer_renderer *self)
return true; return true;
} }
static bool
_init_descriptor_layout_equirect(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
VkDescriptorSetLayoutCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = 1,
.pBindings =
(VkDescriptorSetLayoutBinding[]){
{
.binding = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
},
},
};
VkResult res = vk->vkCreateDescriptorSetLayout(
vk->device, &info, NULL, &self->descriptor_set_layout_equirect);
vk_check_error("vkCreateDescriptorSetLayout", res, false);
return true;
}
static bool static bool
_init_pipeline_layout(struct comp_layer_renderer *self) _init_pipeline_layout(struct comp_layer_renderer *self)
{ {
struct vk_bundle *vk = self->vk; struct vk_bundle *vk = self->vk;
const VkDescriptorSetLayout set_layouts[2] = {
self->descriptor_set_layout, self->descriptor_set_layout_equirect};
VkPipelineLayoutCreateInfo info = { VkPipelineLayoutCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.setLayoutCount = 1, .setLayoutCount = 2,
.pSetLayouts = &self->descriptor_set_layout, .pSetLayouts = set_layouts,
.pushConstantRangeCount = 0,
.pPushConstantRanges = NULL,
}; };
VkResult res = vk->vkCreatePipelineLayout(vk->device, &info, NULL, VkResult res = vk->vkCreatePipelineLayout(vk->device, &info, NULL,
@ -162,7 +190,8 @@ struct __attribute__((__packed__)) comp_pipeline_config
static bool static bool
_init_graphics_pipeline(struct comp_layer_renderer *self, _init_graphics_pipeline(struct comp_layer_renderer *self,
struct comp_shaders *s, VkShaderModule shader_vert,
VkShaderModule shader_frag,
bool premultiplied_alpha, bool premultiplied_alpha,
VkPipeline *pipeline) VkPipeline *pipeline)
{ {
@ -218,13 +247,13 @@ _init_graphics_pipeline(struct comp_layer_renderer *self,
{ {
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = VK_SHADER_STAGE_VERTEX_BIT, .stage = VK_SHADER_STAGE_VERTEX_BIT,
.module = s->layer_vert, .module = shader_vert,
.pName = "main", .pName = "main",
}, },
{ {
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = VK_SHADER_STAGE_FRAGMENT_BIT, .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
.module = s->layer_frag, .module = shader_frag,
.pName = "main", .pName = "main",
}, },
}; };
@ -347,11 +376,14 @@ _render_eye(struct comp_layer_renderer *self,
{ {
struct xrt_matrix_4x4 vp_world; struct xrt_matrix_4x4 vp_world;
struct xrt_matrix_4x4 vp_eye; struct xrt_matrix_4x4 vp_eye;
struct xrt_matrix_4x4 vp_inv;
math_matrix_4x4_multiply(&self->mat_projection[eye], math_matrix_4x4_multiply(&self->mat_projection[eye],
&self->mat_world_view[eye], &vp_world); &self->mat_world_view[eye], &vp_world);
math_matrix_4x4_multiply(&self->mat_projection[eye], math_matrix_4x4_multiply(&self->mat_projection[eye],
&self->mat_eye_view[eye], &vp_eye); &self->mat_eye_view[eye], &vp_eye);
math_matrix_4x4_inverse_view_projection(
&self->mat_world_view[eye], &self->mat_projection[eye], &vp_inv);
for (uint32_t i = 0; i < self->num_layers; i++) { for (uint32_t i = 0; i < self->num_layers; i++) {
bool unpremultiplied_alpha = bool unpremultiplied_alpha =
@ -370,8 +402,17 @@ _render_eye(struct comp_layer_renderer *self,
unpremultiplied_alpha unpremultiplied_alpha
? self->pipeline_premultiplied_alpha ? self->pipeline_premultiplied_alpha
: self->pipeline_unpremultiplied_alpha; : self->pipeline_unpremultiplied_alpha;
comp_layer_draw(self->layers[i], eye, pipeline, pipeline_layout,
cmd_buffer, vertex_buffer, &vp_world, &vp_eye); if (self->layers[i]->type == XRT_LAYER_EQUIRECT) {
pipeline = self->pipeline_equirect;
comp_layer_draw(self->layers[i], eye, pipeline,
pipeline_layout, cmd_buffer,
vertex_buffer, &vp_inv, &vp_inv);
} else {
comp_layer_draw(self->layers[i], eye, pipeline,
pipeline_layout, cmd_buffer,
vertex_buffer, &vp_world, &vp_eye);
}
} }
} }
@ -436,7 +477,8 @@ comp_layer_renderer_allocate_layers(struct comp_layer_renderer *self,
for (uint32_t i = 0; i < self->num_layers; i++) { for (uint32_t i = 0; i < self->num_layers; i++) {
self->layers[i] = self->layers[i] =
comp_layer_create(vk, &self->descriptor_set_layout); comp_layer_create(vk, &self->descriptor_set_layout,
&self->descriptor_set_layout_equirect);
} }
} }
@ -489,16 +531,29 @@ _init(struct comp_layer_renderer *self,
if (!_init_descriptor_layout(self)) if (!_init_descriptor_layout(self))
return false; return false;
if (!_init_descriptor_layout_equirect(self))
return false;
if (!_init_pipeline_layout(self)) if (!_init_pipeline_layout(self))
return false; return false;
if (!_init_pipeline_cache(self)) if (!_init_pipeline_cache(self))
return false; return false;
if (!_init_graphics_pipeline(self, s, false,
&self->pipeline_premultiplied_alpha))
if (!_init_graphics_pipeline(self, s->layer_vert, s->layer_frag, false,
&self->pipeline_premultiplied_alpha)) {
return false; return false;
if (!_init_graphics_pipeline(self, s, true, }
&self->pipeline_unpremultiplied_alpha))
if (!_init_graphics_pipeline(self, s->layer_vert, s->layer_frag, true,
&self->pipeline_unpremultiplied_alpha)) {
return false; return false;
}
if (!_init_graphics_pipeline(self, s->equirect_vert, s->equirect_frag,
true, &self->pipeline_equirect)) {
return false;
}
if (!_init_vertex_buffer(self)) if (!_init_vertex_buffer(self))
return false; return false;
@ -631,10 +686,13 @@ comp_layer_renderer_destroy(struct comp_layer_renderer *self)
vk->vkDestroyPipelineLayout(vk->device, self->pipeline_layout, NULL); vk->vkDestroyPipelineLayout(vk->device, self->pipeline_layout, NULL);
vk->vkDestroyDescriptorSetLayout(vk->device, vk->vkDestroyDescriptorSetLayout(vk->device,
self->descriptor_set_layout, NULL); self->descriptor_set_layout, NULL);
vk->vkDestroyDescriptorSetLayout(
vk->device, self->descriptor_set_layout_equirect, NULL);
vk->vkDestroyPipeline(vk->device, self->pipeline_premultiplied_alpha, vk->vkDestroyPipeline(vk->device, self->pipeline_premultiplied_alpha,
NULL); NULL);
vk->vkDestroyPipeline(vk->device, self->pipeline_unpremultiplied_alpha, vk->vkDestroyPipeline(vk->device, self->pipeline_unpremultiplied_alpha,
NULL); NULL);
vk->vkDestroyPipeline(vk->device, self->pipeline_equirect, NULL);
for (uint32_t i = 0; i < ARRAY_SIZE(self->shader_modules); i++) for (uint32_t i = 0; i < ARRAY_SIZE(self->shader_modules); i++)
vk->vkDestroyShaderModule(vk->device, self->shader_modules[i], vk->vkDestroyShaderModule(vk->device, self->shader_modules[i],

View file

@ -39,7 +39,10 @@ struct comp_layer_renderer
VkShaderModule shader_modules[2]; VkShaderModule shader_modules[2];
VkPipeline pipeline_premultiplied_alpha; VkPipeline pipeline_premultiplied_alpha;
VkPipeline pipeline_unpremultiplied_alpha; VkPipeline pipeline_unpremultiplied_alpha;
VkPipeline pipeline_equirect;
VkDescriptorSetLayout descriptor_set_layout; VkDescriptorSetLayout descriptor_set_layout;
VkDescriptorSetLayout descriptor_set_layout_equirect;
VkPipelineLayout pipeline_layout; VkPipelineLayout pipeline_layout;
VkPipelineCache pipeline_cache; VkPipelineCache pipeline_cache;

View file

@ -501,6 +501,40 @@ comp_renderer_set_projection_layer(struct comp_renderer *r,
l->transformation[1].extent = data->stereo.r.sub.rect.extent; l->transformation[1].extent = data->stereo.r.sub.rect.extent;
} }
/*
 * Configure layer slot @layer as an equirect layer from the submitted
 * layer data: store the model matrix derived from the layer pose, set
 * type/visibility/flags, bind the swapchain image and upload the
 * equirect shader parameters.
 */
void
comp_renderer_set_equirect_layer(struct comp_renderer *r,
                                 uint32_t layer,
                                 struct comp_swapchain_image *image,
                                 struct xrt_layer_data *data)
{
	struct xrt_vec3 s = {1.0f, 1.0f, 1.0f};
	struct xrt_matrix_4x4 model_matrix;
	math_matrix_4x4_model(&data->equirect.pose, &s, &model_matrix);

	comp_layer_set_flip_y(r->lr->layers[layer], data->flip_y);

	struct comp_render_layer *l = r->lr->layers[layer];

	/* Fix: the original computed model_matrix but never stored it on
	 * the layer, silently dropping the submitted pose. */
	l->model_matrix = model_matrix;

	l->type = XRT_LAYER_EQUIRECT;
	l->visibility = data->equirect.visibility;
	l->flags = data->flags;
	l->view_space =
	    (data->flags & XRT_LAYER_COMPOSITION_VIEW_SPACE_BIT) != 0;

	l->transformation_ubo_binding = r->lr->transformation_ubo_binding;
	l->texture_binding = r->lr->texture_binding;

	comp_layer_update_descriptors(
	    l, image->repeat_sampler,
	    get_image_view(image, data->flags, data->equirect.sub.array_index));

	comp_layer_update_equirect_descriptor(l, &data->equirect);

	/* Same sub-image rect for both eyes; equirect is a mono layer. */
	for (uint32_t i = 0; i < 2; i++) {
		l->transformation[i].offset = data->equirect.sub.rect.offset;
		l->transformation[i].extent = data->equirect.sub.rect.extent;
	}
}
void void
comp_renderer_draw(struct comp_renderer *r) comp_renderer_draw(struct comp_renderer *r)
{ {

View file

@ -73,6 +73,12 @@ comp_renderer_set_cylinder_layer(struct comp_renderer *r,
struct comp_swapchain_image *image, struct comp_swapchain_image *image,
struct xrt_layer_data *data); struct xrt_layer_data *data);
void
comp_renderer_set_equirect_layer(struct comp_renderer *r,
uint32_t layer,
struct comp_swapchain_image *image,
struct xrt_layer_data *data);
void void
comp_renderer_allocate_layers(struct comp_renderer *self, uint32_t num_layers); comp_renderer_allocate_layers(struct comp_renderer *self, uint32_t num_layers);

View file

@ -6,13 +6,21 @@
layout (location = 0) in vec2 uv; layout (location = 0) in vec2 uv;
layout (binding = 0, std140) uniform Transformation { layout (set = 0, binding = 0, std140) uniform Transformation {
mat4 mvp; mat4 mvp;
ivec2 offset; ivec2 offset;
ivec2 extent; ivec2 extent;
bool flip_y; bool flip_y;
} ubo; } ubo;
layout (binding = 1) uniform sampler2D image;
layout (set = 0, binding = 1) uniform sampler2D image;
layout (set = 1, binding = 0, std140) uniform Equirect {
float radius;
float central_horizontal_angle;
float upper_vertical_angle;
float lower_vertical_angle;
} equirect;
layout (location = 0) out vec4 out_color; layout (location = 0) out vec4 out_color;

View file

@ -263,6 +263,9 @@ struct xrt_layer_equirect_data
struct xrt_pose pose; struct xrt_pose pose;
float radius; float radius;
float central_horizontal_angle;
float upper_vertical_angle;
float lower_vertical_angle;
}; };
/*! /*!