 main.c | 27
 1 file changed, 14 insertions, 13 deletions
diff --git a/main.c b/main.c
index d8b9b1f..b122b18 100755
--- a/main.c
+++ b/main.c
@@ -191,7 +191,7 @@ struct SwapchainVulkan {
 };

 struct SemaphoreVulkan {
-    VkSemaphore imageAvailable;
+    VkSemaphore imageAcquired;
     VkSemaphore renderFinished;
 };
@@ -1518,7 +1518,7 @@ int rvkCreateSyncObjects(struct RenderVulkan *vk)
 {
     VkSemaphoreCreateInfo semaphoreInfo = { 0 };
     semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-    vk->dev->vkCreateSemaphore(vk->device, &semaphoreInfo, ALLOC_VK, &vk->sync.semaphore.imageAvailable);
+    vk->dev->vkCreateSemaphore(vk->device, &semaphoreInfo, ALLOC_VK, &vk->sync.semaphore.imageAcquired);
     vk->dev->vkCreateSemaphore(vk->device, &semaphoreInfo, ALLOC_VK, &vk->sync.semaphore.renderFinished);
     VkFenceCreateInfo fenceInfo = { 0 };
     fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
@@ -1551,9 +1551,9 @@ void rvkDestroySyncObjects(struct RenderVulkan *vk)
         vk->dev->vkDestroySemaphore(vk->device, vk->sync.semaphore.renderFinished, ALLOC_VK);
         vk->sync.semaphore.renderFinished = VK_NULL_HANDLE;
     }
-    if (vk->sync.semaphore.imageAvailable) {
-        vk->dev->vkDestroySemaphore(vk->device, vk->sync.semaphore.imageAvailable, ALLOC_VK);
-        vk->sync.semaphore.imageAvailable = VK_NULL_HANDLE;
+    if (vk->sync.semaphore.imageAcquired) {
+        vk->dev->vkDestroySemaphore(vk->device, vk->sync.semaphore.imageAcquired, ALLOC_VK);
+        vk->sync.semaphore.imageAcquired = VK_NULL_HANDLE;
     }
 }
@@ -1665,30 +1665,31 @@ PuglStatus onDisplay(PuglView *view)
             vk->device,
             vk->swapchain.rawSwapchain,
             UINT64_MAX,
-            vk->sync.semaphore.imageAvailable,
+            vk->sync.semaphore.imageAcquired,
             VK_NULL_HANDLE,
             &imageIndex))) {
         rvkSetErrMsg(vk, "Could not acquire swapchain image: %d", result);
         return PUGL_FAILURE;
     }
-    /* If running continuously, Vulkan can blast the queue with rendering
-     * work faster than the GPU can execute it, causing RAM usage to grow
-     * indefinitely. We use fences to limit the number of submitted frames
-     * to the number of swapchain images. These fences will be required
-     * later anyway when flushing persistently mapped uniform buffer ranges.
+    /* If running continuously and with an asynchronous presentation engine,
+     * Vulkan can blast the queue with rendering work faster than the GPU
+     * can execute it, causing RAM usage to grow indefinitely. We use fences
+     * to limit the number of submitted frames to the number of swapchain
+     * images. These fences will be required later anyway when flushing
+     * persistently mapped uniform buffer ranges.
      */
     vk->dev->vkWaitForFences(vk->device,
             1, &vk->sync.fence.swapchain[imageIndex],
             VK_TRUE, UINT64_MAX);
     vk->dev->vkResetFences(vk->device, 1, &vk->sync.fence.swapchain[imageIndex]);
-    VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+    const VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
     VkSubmitInfo submitInfo = { 0 };
     submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
     submitInfo.waitSemaphoreCount = 1;
-    submitInfo.pWaitSemaphores = &vk->sync.semaphore.imageAvailable;
+    submitInfo.pWaitSemaphores = &vk->sync.semaphore.imageAcquired;
     submitInfo.pWaitDstStageMask = &waitStage;
     submitInfo.commandBufferCount = 1;
     submitInfo.pCommandBuffers = &vk->commandBuffers[imageIndex];
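
A note on the synchronization pattern this hunk touches (commentary, not part of the patch): the new comment describes throttling submissions with one fence per swapchain image, while the renamed imageAcquired semaphore orders the acquired image against the submitted work. The condensed C sketch below illustrates that acquire / wait fence / reset fence / submit / present sequence. It is a sketch only: the function drawFrame, its parameter list, and the FrameSemaphores struct are invented for illustration, the core vkQueueSubmit/vkQueuePresentKHR entry points stand in for the loader-table calls (vk->dev->...) that main.c uses, and it assumes the per-image fences were created with VK_FENCE_CREATE_SIGNALED_BIT so that the very first wait returns immediately (whether main.c guarantees that is outside the hunks shown here).

#include <vulkan/vulkan.h>

/* Hypothetical names for illustration; not the structs used by main.c. */
typedef struct {
    VkSemaphore imageAcquired;  /* signaled when the acquired image is ready for device access */
    VkSemaphore renderFinished; /* signaled when the submitted work for that image completes */
} FrameSemaphores;

static VkResult
drawFrame(VkDevice device,
          VkSwapchainKHR swapchain,
          VkQueue queue,
          const VkCommandBuffer *commandBuffers, /* one per swapchain image */
          const VkFence *imageFences,            /* one per swapchain image, created signaled */
          FrameSemaphores sync)
{
    /* Ask the presentation engine for the next image.  The semaphore is only
     * queued to be signaled; the image is not necessarily ready when the call
     * returns, which is why the submission below waits on it. */
    uint32_t imageIndex = 0;
    VkResult r = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
                                       sync.imageAcquired, VK_NULL_HANDLE,
                                       &imageIndex);
    if (r != VK_SUCCESS && r != VK_SUBOPTIMAL_KHR) {
        return r;
    }

    /* Throttle: block until the previous submission that used this swapchain
     * image has finished, so at most one frame per image is in flight and
     * queued work cannot grow without bound. */
    vkWaitForFences(device, 1, &imageFences[imageIndex], VK_TRUE, UINT64_MAX);
    vkResetFences(device, 1, &imageFences[imageIndex]);

    /* Submit the pre-recorded command buffer for this image.  Execution waits
     * on imageAcquired (at the transfer stage, as in the patch) and signals
     * renderFinished plus the per-image fence when it completes. */
    const VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkSubmitInfo submitInfo = { 0 };
    submitInfo.sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount   = 1;
    submitInfo.pWaitSemaphores      = &sync.imageAcquired;
    submitInfo.pWaitDstStageMask    = &waitStage;
    submitInfo.commandBufferCount   = 1;
    submitInfo.pCommandBuffers      = &commandBuffers[imageIndex];
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores    = &sync.renderFinished;
    r = vkQueueSubmit(queue, 1, &submitInfo, imageFences[imageIndex]);
    if (r != VK_SUCCESS) {
        return r;
    }

    /* Present: the presentation engine waits on renderFinished before it
     * shows the image. */
    VkPresentInfoKHR presentInfo = { 0 };
    presentInfo.sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    presentInfo.waitSemaphoreCount = 1;
    presentInfo.pWaitSemaphores    = &sync.renderFinished;
    presentInfo.swapchainCount     = 1;
    presentInfo.pSwapchains        = &swapchain;
    presentInfo.pImageIndices      = &imageIndex;
    return vkQueuePresentKHR(queue, &presentInfo);
}

The rename itself is behavior-neutral; presumably it reflects that the semaphore tracks completion of the acquire operation rather than general availability: vkAcquireNextImageKHR may return before the image is actually ready, and only the semaphore, waited on by the queue submission, guarantees that ordering.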