/* Copyright (c) 2019, Jordan Halase Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Note: Due to the length of many function and variable names in the * Vulkan API, the line width of any Vulkan-related code here may be * extended from 80 columns to 120 colums to improve readability. */ /* The goal of this project is to create an entirely self-contained, embeddable * Vulkan renderer. While dynamic linking directly against the system Vulkan * loader is safe, it will crash if attempting to run on a system without it * installed. By loading the Vulkan library dynamically, the application may * gracefully close or even fall back to a different rendering API. * Furthermore, Vulkan specifies that using functions retrieved via * `vkGetDeviceProcAddr` may actually run faster than if the application were * using the Vulkan loader directly due to bypassing the loader terminator. 
* * https://vulkan.lunarg.com/doc/sdk/1.0.61.1/windows/LoaderAndLayerInterface.html */ #include #include #include #include #if defined(_WIN32) #include #include #else #include #include #include "pugl/detail/x11.h" #endif #include "pugl/pugl.h" #include "pugl/pugl_stub_backend.h" #define STB_SPRINTF_IMPLEMENTATION #include "stb_sprintf.h" #if defined(__GNUC__) #define APP_THREAD_LOCAL __thread #else #define APP_THREAD_LOCAL __declspec(thread) #endif /** Vulkan allocation callbacks if we ever decide to use them when debugging. * * This is put in a macro so we don't have to look for every Vulkan function * that uses it. */ #define ALLOC_VK NULL struct VulkanAPI { void *handle; PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; PFN_vkCreateInstance vkCreateInstance; PFN_vkDestroyInstance vkDestroyInstance; PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; }; struct RenderVulkan { struct VulkanAPI *api; char *errMsg; VkInstance instance; VkDebugReportCallbackEXT debugCallback; VkSurfaceKHR surface; VkPhysicalDeviceProperties deviceProperties; VkPhysicalDevice physicalDevice; uint32_t graphicsIndex; VkDevice device; }; #define RVK_ERRMSG_LEN 4096 void rvkSetErrMsg(struct RenderVulkan *vk, const char *fmt, ...) 
{ vk->errMsg = realloc(vk->errMsg, RVK_ERRMSG_LEN); va_list args; va_start(args, fmt); stbsp_vsnprintf(vk->errMsg, RVK_ERRMSG_LEN, fmt, args); va_end(args); } void rvkClearErrMsg(struct RenderVulkan *vk) { if (vk->errMsg) { free(vk->errMsg); vk->errMsg = NULL; } } const char *rvkGetErrMsg(struct RenderVulkan *vk) { return vk->errMsg; } #if defined(_WIN32) #define VULKAN_SONAME_LATEST "vulkan-1.dll" void *appDlopen(const char *soname) { return LoadLibraryA(soname); } char *appDlerror() { DWORD errCode = GetLastError(); static APP_THREAD_LOCAL char errStr[64]; stbsp_sprintf(errStr, "Dynamic Library Error: %d", errCode); return errStr; } void *appDlsym(void *handle, const char *symbol) { const uintptr_t ulAddr = (uintptr_t)GetProcAddress(handle, symbol); return (void*)ulAddr; } int appDlclose(void *handle) { return FreeLibrary(handle); } void getRequiredInstanceExtensions(void *windowCtx, unsigned *nRequired, const char **const extensions) { (void)windowCtx; static const char *const required[] = { VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_WIN32_SURFACE_EXTENSION_NAME }; static const unsigned num = sizeof(required) / sizeof(required[0]); if (extensions) { for (int i = 0; i < num; ++i) { extensions[i] = required[i]; } } else { *nRequired = num; } } VkResult createVulkanSurface(PuglView *view, VkInstance instance, PFN_vkGetInstanceProcAddr getInstanceProcAddrFunc, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { PuglWorld *world = ((struct PuglViewImpl*)view)->world; VkWin32SurfaceCreateInfoKHR createInfo = { 0 }; createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; createInfo.hinstance = GetModuleHandle(0); // FIXME createInfo.hwnd = puglGetNativeWindow(view); PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; uintptr_t *const ulCreateWin32SurfaceKHR = (uintptr_t*)&vkCreateWin32SurfaceKHR; *ulCreateWin32SurfaceKHR = (uintptr_t)getInstanceProcAddrFunc(instance, "vkCreateWin32SurfaceKHR"); return vkCreateWin32SurfaceKHR(instance, 
&createInfo, pAllocator, pSurface); } #else #define VULKAN_SONAME_LATEST "libvulkan.so.1" #include // XXX: puglDlopen() void *appDlopen(const char *soname) { return dlopen(soname, RTLD_NOW); } char *appDlerror() { return dlerror(); } void *appDlsym(void *handle, const char *symbol) { return dlsym(handle, symbol); } int appDlclose(void *handle) { return dlclose(handle); } /* XXX: puglGetRequiredInstanceExtensions() * TODO: Linux actually has three possible surfaces: Xlib, XCB, and Wayland. * This should be figured out at runtime (without using #ifdefs). * In which case, `windowCtx` will be used to determine which to use. * As of now, Pugl (and LV2!) only supports Xlib. */ void getRequiredInstanceExtensions(void *windowCtx, unsigned *nRequired, const char **const extensions) { (void)windowCtx; static const char *const required[] = { VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_XLIB_SURFACE_EXTENSION_NAME }; static const unsigned num = sizeof(required) / sizeof(required[0]); if (extensions) { for (int i = 0; i < num; ++i) { extensions[i] = required[i]; } } else { *nRequired = num; } } /* XXX: puglCreateVulkanSurface() * No need to wrap VkFreeSurfaceKHR() */ VkResult createVulkanSurface(PuglView *view, VkInstance instance, PFN_vkGetInstanceProcAddr getInstanceProcAddrFunc, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { PuglWorld *world = ((struct PuglViewImpl*)view)->world; VkXlibSurfaceCreateInfoKHR createInfo = { 0 }; createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR; createInfo.dpy = ((struct PuglWorldImpl*)world)->impl->display; createInfo.window = puglGetNativeWindow(view); PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; uintptr_t *const ulCreateXlibSurfaceKHR = (uintptr_t*)&vkCreateXlibSurfaceKHR; *ulCreateXlibSurfaceKHR = (uintptr_t)getInstanceProcAddrFunc(instance, "vkCreateXlibSurfaceKHR"); return vkCreateXlibSurfaceKHR(instance, &createInfo, pAllocator, pSurface); } #endif void *loadVulkanLibrary(const char *prefix) { 
(void)prefix; // TODO void *handle = appDlopen(VULKAN_SONAME_LATEST); return handle; } void loadVulkanGetInstanceProcAddrFunc(void *handle, PFN_vkGetInstanceProcAddr *getInstanceProcAddrFunc) { uintptr_t *ulGetInstanceProcAddrFunc = (uintptr_t*)getInstanceProcAddrFunc; *ulGetInstanceProcAddrFunc = (uintptr_t)appDlsym(handle, "vkGetInstanceProcAddr"); } int unloadVulkanLibrary(void *handle) { if (handle) { return appDlclose(handle); } return 0; } static VKAPI_ATTR VkBool32 VKAPI_CALL debugCallback( VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t obj, size_t location, int32_t code, const char *layerPrefix, const char *msg, void *userData ) { fprintf(stderr, "\nValidation layer:\n%s\n\n", msg); return VK_FALSE; } static VkResult createInstance(struct RenderVulkan *vk, const uint32_t nLayers, const char *const *const layers, const uint32_t nAdditional, const char *const *const additionalExtensions) { VkApplicationInfo appInfo = { 0 }; appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; appInfo.pApplicationName = "Pugl Vulkan Test"; appInfo.applicationVersion = VK_MAKE_VERSION(0, 1, 0); appInfo.pEngineName = "Pugl Vulkan Test Engine"; appInfo.engineVersion = VK_MAKE_VERSION(0, 1, 0); /* MoltenVK for macOS currently only supports Vulkan 1.0 */ appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0); unsigned i, j, nRequired; getRequiredInstanceExtensions(NULL, &nRequired, NULL); const uint32_t nExtensions = nRequired + nAdditional; const char **const extensions = malloc(sizeof(const char*) * nExtensions); getRequiredInstanceExtensions(NULL, NULL, extensions); for (i = nRequired, j = 0; i < nExtensions; ++i, ++j) { extensions[i] = additionalExtensions[j]; } for (i = 0; i < nExtensions; ++i) { printf("Using instance extension:\t%s\n", extensions[i]); } for (i = 0; i < nLayers; ++i) { printf("Using instance layer:\t\t%s\n", layers[i]); } VkInstanceCreateInfo createInfo = { 0 }; createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; 
createInfo.pApplicationInfo = &appInfo; createInfo.enabledLayerCount = nLayers; createInfo.ppEnabledLayerNames = layers; createInfo.enabledExtensionCount = nExtensions; createInfo.ppEnabledExtensionNames = extensions; VkResult result = VK_SUCCESS; if ((result = vk->api->vkCreateInstance(&createInfo, ALLOC_VK, &vk->instance))) { rvkSetErrMsg(vk, "Could not create Vulkan Instance: %d\n", result); } free(extensions); return result; } /** Must not be called until all derivative objects are destroyed first */ static void destroyInstance(struct RenderVulkan *vk) { vk->api->vkDestroyInstance(vk->instance, ALLOC_VK); vk->instance = VK_NULL_HANDLE; } /** This must work no matter the current state of `vk` */ void rvkDestroy(struct RenderVulkan *vk) { if (vk) { if (vk->surface) vk->api->vkDestroySurfaceKHR(vk->instance, vk->surface, ALLOC_VK); if (vk->debugCallback) { /* `vk->debugCallback` implies `vk->api` and instance functions loaded */ vk->api->vkDestroyDebugReportCallbackEXT(vk->instance, vk->debugCallback, ALLOC_VK); } if (vk->instance) destroyInstance(vk); if (vk->api) { if (vk->api->handle) unloadVulkanLibrary(vk->api->handle); free(vk->api); } if (vk->errMsg) free(vk->errMsg); if (vk) free(vk); } } /** Create a self-contained Vulkan instance and set up a debug reporter * * If errors occurred, the struct will be returned in an unusable state, * and MUST be checked via `rvkGetErrMsg`. It MUST then be destroyed via * `rvkDestroy`. 
 *
 */
struct RenderVulkan *rvkCreate()
{
	/* Validation layer and debug-report extension enabled unconditionally;
	 * instance creation simply fails if they are unavailable */
	static const char *const instanceLayers[] = { "VK_LAYER_LUNARG_standard_validation" };
	const uint32_t nInstanceLayers = sizeof(instanceLayers) / sizeof(instanceLayers[0]);
	static const char *const instanceExtensions[] = { VK_EXT_DEBUG_REPORT_EXTENSION_NAME };
	const uint32_t nInstanceExtensions = sizeof(instanceExtensions) / sizeof(instanceExtensions[0]);
	/* calloc() zeroes both structs so rvkDestroy() is safe at any point */
	struct RenderVulkan *vk = calloc(1, sizeof(*vk));
	vk->api = calloc(1, sizeof(*vk->api));
	vk->api->handle = loadVulkanLibrary(NULL);
	if (!vk->api->handle) {
		rvkSetErrMsg(vk, "Error loading Vulkan shared library:\n%s\n", appDlerror());
		return vk;
	}
	loadVulkanGetInstanceProcAddrFunc(vk->api->handle, &vk->api->vkGetInstanceProcAddr);
	if (!vk->api->vkGetInstanceProcAddr) {
		rvkSetErrMsg(vk, "Error loading `vkGetInstanceProcAddr`:\n%s", appDlerror());
		return vk;
	}
	/* All function pointers are filled by writing through uintptr_t*, since
	 * ISO C has no sanctioned void* -> function-pointer conversion */
	uintptr_t *const ulCreateInstance = (uintptr_t*)&vk->api->vkCreateInstance;
	*ulCreateInstance = (uintptr_t)vk->api->vkGetInstanceProcAddr(NULL, "vkCreateInstance");
	if (!vk->api->vkCreateInstance) {
		rvkSetErrMsg(vk, "Error loading `vkCreateInstance`");
		return vk;
	}
	VkResult result;
	//if ((result = createInstance(vk, 0, NULL, 0, NULL))) {
	if ((result = createInstance(vk, nInstanceLayers, instanceLayers,
		nInstanceExtensions, instanceExtensions))) {
		/* createInstance() already stored the error message */
		return vk;
	}
	/* Resolve instance-level entry points one by one; any failure leaves an
	 * error on `vk` for the caller to check via rvkGetErrMsg(). */
	/* TODO: This could perhaps be generated */
	static const char *const strErrLd = "Error loading function %s";
	static const char *const strDestroyInstance = "vkDestroyInstance";
	uintptr_t *const ulDestroyInstance = (uintptr_t*)&vk->api->vkDestroyInstance;
	*ulDestroyInstance = (uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strDestroyInstance);
	if (!vk->api->vkDestroyInstance) {
		rvkSetErrMsg(vk, strErrLd, strDestroyInstance);
		return vk;
	}
	/* It is okay if debug reporter functions are not resolved */
	uintptr_t *ulCreateDebugReportCallbackEXT = (uintptr_t*)&vk->api->vkCreateDebugReportCallbackEXT;
	*ulCreateDebugReportCallbackEXT =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, "vkCreateDebugReportCallbackEXT");
	uintptr_t *ulDestroyDebugReportCallbackEXT = (uintptr_t*)&vk->api->vkDestroyDebugReportCallbackEXT;
	*ulDestroyDebugReportCallbackEXT =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, "vkDestroyDebugReportCallbackEXT");
	/* But not if we are unable to destroy a created debug reporter */
	if (vk->api->vkCreateDebugReportCallbackEXT && !vk->api->vkDestroyDebugReportCallbackEXT) {
		rvkSetErrMsg(vk, "No debug reporter destroy function loaded for corresponding create function\n");
		return vk;
	}
	static const char *const strDestroySurfaceKHR = "vkDestroySurfaceKHR";
	uintptr_t *ulDestroySurfaceKHR = (uintptr_t*)&vk->api->vkDestroySurfaceKHR;
	*ulDestroySurfaceKHR = (uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strDestroySurfaceKHR);
	if (!vk->api->vkDestroySurfaceKHR) {
		rvkSetErrMsg(vk, strErrLd, strDestroySurfaceKHR);
		return vk;
	}
	static const char *const strEnumeratePhysicalDevices = "vkEnumeratePhysicalDevices";
	uintptr_t *ulEnumeratePhysicalDevices = (uintptr_t*)&vk->api->vkEnumeratePhysicalDevices;
	*ulEnumeratePhysicalDevices =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strEnumeratePhysicalDevices);
	if (!vk->api->vkEnumeratePhysicalDevices) {
		rvkSetErrMsg(vk, strErrLd, strEnumeratePhysicalDevices);
		return vk;
	}
	static const char *const strGetPhysicalDeviceProperties = "vkGetPhysicalDeviceProperties";
	uintptr_t *ulGetPhysicalDeviceProperties = (uintptr_t*)&vk->api->vkGetPhysicalDeviceProperties;
	*ulGetPhysicalDeviceProperties =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strGetPhysicalDeviceProperties);
	if (!vk->api->vkGetPhysicalDeviceProperties) {
		rvkSetErrMsg(vk, strErrLd, strGetPhysicalDeviceProperties);
		return vk;
	}
	static const char *const strGetPhysicalDeviceQueueFamilyProperties = "vkGetPhysicalDeviceQueueFamilyProperties";
	uintptr_t *ulGetPhysicalDeviceQueueFamilyProperties =
		(uintptr_t*)&vk->api->vkGetPhysicalDeviceQueueFamilyProperties;
	*ulGetPhysicalDeviceQueueFamilyProperties =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strGetPhysicalDeviceQueueFamilyProperties);
	if (!vk->api->vkGetPhysicalDeviceQueueFamilyProperties) {
		rvkSetErrMsg(vk, strErrLd, strGetPhysicalDeviceQueueFamilyProperties);
		return vk;
	}
	static const char *const strGetPhysicalDeviceSurfaceSupportKHR = "vkGetPhysicalDeviceSurfaceSupportKHR";
	uintptr_t *ulGetPhysicalDeviceSurfaceSupportKHR = (uintptr_t*)&vk->api->vkGetPhysicalDeviceSurfaceSupportKHR;
	*ulGetPhysicalDeviceSurfaceSupportKHR =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strGetPhysicalDeviceSurfaceSupportKHR);
	if (!vk->api->vkGetPhysicalDeviceSurfaceSupportKHR) {
		rvkSetErrMsg(vk, strErrLd, strGetPhysicalDeviceSurfaceSupportKHR);
		return vk;
	}
	static const char *const strGetPhysicalDeviceMemoryProperties = "vkGetPhysicalDeviceMemoryProperties";
	uintptr_t *ulGetPhysicalDeviceMemoryProperties = (uintptr_t*)&vk->api->vkGetPhysicalDeviceMemoryProperties;
	*ulGetPhysicalDeviceMemoryProperties =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strGetPhysicalDeviceMemoryProperties);
	if (!vk->api->vkGetPhysicalDeviceMemoryProperties) {
		rvkSetErrMsg(vk, strErrLd, strGetPhysicalDeviceMemoryProperties);
		return vk;
	}
	static const char *const strEnumerateDeviceExtensionProperties = "vkEnumerateDeviceExtensionProperties";
	uintptr_t *ulEnumerateDeviceExtensionProperties = (uintptr_t*)&vk->api->vkEnumerateDeviceExtensionProperties;
	*ulEnumerateDeviceExtensionProperties =
		(uintptr_t)vk->api->vkGetInstanceProcAddr(vk->instance, strEnumerateDeviceExtensionProperties);
	if (!vk->api->vkEnumerateDeviceExtensionProperties) {
		rvkSetErrMsg(vk, strErrLd, strEnumerateDeviceExtensionProperties);
		return vk;
	}
	/* Register the debug reporter for errors and warnings, if available */
	if (vk->api->vkCreateDebugReportCallbackEXT) {
		VkDebugReportCallbackCreateInfoEXT debugInfo = { 0 };
		debugInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
		debugInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
		debugInfo.pfnCallback = debugCallback;
		if ((result = vk->api->vkCreateDebugReportCallbackEXT(vk->instance, &debugInfo, ALLOC_VK, &vk->debugCallback))) {
			rvkSetErrMsg(vk, "Could not create debug reporter: %d", result);
			return vk;
		}
	}
	return vk;
}

/** Create the window surface for `view`; on failure stores an error on `vk`. */
static void rvkCreateSurface(struct RenderVulkan *vk, PuglView *view)
{
	VkResult result;
	if ((result = createVulkanSurface(view, vk->instance, vk->api->vkGetInstanceProcAddr, ALLOC_VK, &vk->surface))) {
		rvkSetErrMsg(vk, "Could not create window surface: %d\n", result);
	}
}

/** Checks if a particular physical device is suitable for this application.
 *
 * Choosing a physical device is an *extremely* application-specific procedure.
 *
 * All rendering in Vulkan is done off-screen by default. To get rendered
 * results on-screen they must be "presented" to a surface.
 * The Vulkan spec allows devices to have presentation done on a separate queue
 * family than GRAPHICS, or no present support at all. However, every graphics
 * card today that is capable of connecting to a display has at least one queue
 * family capable of both GRAPHICS and present.
 *
 * This single-threaded application will use only one queue for all operations.
 * More specifically, it will make use of GRAPHICS and TRANSFER operations on
 * a single queue retrieved from a GRAPHICS queue family that supports present.
 *
 * In my experience, most cards follow this format:
 *
 * INTEGRATED: Typically have one queue family for all operations.
 *             May retrieve only one queue from this single queue family.
 * DEDICATED:  Older cards typically have one queue family for GRAPHICS and one
 *             for TRANSFER. Newer cards typically have a separate queue family
 *             for COMPUTE. Remember that all GRAPHICS queue families
 *             implicitly allow both COMPUTE and TRANSFER operations on it as
 *             well. Programmers making use of separate queue families may allow
 *             devices to work more efficiently.
* * May typically retrieve up to a few queues from either queue * family. The maximum number of available queues is specific to * that device's queue families. Programmers making use of separate * queues for highly independent workloads may allow devices to work * more efficiently. Queues must be used from a single thread, so * programmers wanting to make use of multithreaded queue submission * must retrieve queues per-thread. * * GOTCHA: Some devices may have multiple GRAPHICS queue families * with only one of them able to present. Do not assume a device is * unsuitable until ALL queue families have been checked. * * Because this application will load all resources before any rendering begins, * and the amount of resources are small enough to be loaded almost instantly, * it will not make use of a separate TRANSFER queue family. This application * will also not attempt to find the "most powerful" device on the system, and * will choose the first suitable device it finds. * * Information for many devices is available online at * https://vulkan.gpuinfo.org/ */ static int isDeviceSuitable(const struct RenderVulkan *const vk, const VkPhysicalDevice pd, uint32_t *const graphicsIndex) { uint32_t nQueueFamilies; vk->api->vkGetPhysicalDeviceQueueFamilyProperties(pd, &nQueueFamilies, NULL); VkQueueFamilyProperties *queueProperties = malloc(nQueueFamilies * sizeof(*queueProperties)); vk->api->vkGetPhysicalDeviceQueueFamilyProperties(pd, &nQueueFamilies, queueProperties); for (uint32_t i = 0; i < nQueueFamilies; ++i) { printf("Queue Family %d queueCount:\t%d\n", i, queueProperties[i].queueCount); } uint32_t g; for (g = 0; g < nQueueFamilies; ++g) { if (queueProperties[g].queueFlags & VK_QUEUE_GRAPHICS_BIT) { VkBool32 canSurface; vk->api->vkGetPhysicalDeviceSurfaceSupportKHR(pd, g, vk->surface, &canSurface); if (canSurface) break; } } if (g >= nQueueFamilies) { /* We only support graphics and present on the same queue family. 
*/ return 0; } *graphicsIndex = g; return 1; } void rvkSelectPhysicalDevice(struct RenderVulkan *vk) { if (!vk->surface) { rvkSetErrMsg(vk, "Cannot select a physical device without a surface"); return; } static const char *const strErrEnum = "Could not enumerate physical devices: %s"; static const char *const strWarnEnum = "Warn: Incomplete enumeration of physical devices"; uint32_t nDevices; VkResult result; if ((result = vk->api->vkEnumeratePhysicalDevices(vk->instance, &nDevices, NULL))) { if (result != VK_INCOMPLETE) { rvkSetErrMsg(vk, strErrEnum, result); return; } else { fprintf(stderr, "%s\n", strWarnEnum); } } if (!nDevices) { rvkSetErrMsg(vk, "No physical devices found"); return; } VkPhysicalDevice *devices = malloc(nDevices * sizeof(*devices)); VkPhysicalDeviceProperties *deviceProperties = malloc(nDevices * sizeof(*deviceProperties)); if ((result = vk->api->vkEnumeratePhysicalDevices(vk->instance, &nDevices, devices))) { if (result != VK_INCOMPLETE) { rvkSetErrMsg(vk, strErrEnum, result); goto done; } else { fprintf(stderr, "%s\n", strWarnEnum); } } uint32_t i; for (i = 0; i < nDevices; ++i) { vk->api->vkGetPhysicalDeviceProperties(devices[i], &deviceProperties[i]); printf("Found physical device:\t\t`%s`\n", deviceProperties[i].deviceName); } for (i = 0; i < nDevices; ++i) { printf("Checking suitability for\t`%s`...\n", deviceProperties[i].deviceName); if (isDeviceSuitable(vk, devices[i], &vk->graphicsIndex)) { printf("Using physical device:\t\t`%s`\n", deviceProperties[i].deviceName); vk->deviceProperties = deviceProperties[i]; vk->physicalDevice = devices[i]; printf("Graphics Index:\t\t\t%d\n", vk->graphicsIndex); goto done; } printf("Device `%s` not suitable\n", deviceProperties[i].deviceName); } if (i >= nDevices) { rvkSetErrMsg(vk, "No suitable devices found"); } done: free(deviceProperties); free(devices); } void rvkCheckFatal(struct RenderVulkan *vk) { const char *errMsg = NULL; if ((errMsg = rvkGetErrMsg(vk))) { fprintf(stderr, "%s\n", errMsg); 
rvkDestroy(vk); exit(1); } } int running = 1; PuglStatus onEvent(PuglView *view, const PuglEvent *e) { switch (e->type) { case PUGL_CLOSE: running = 0; break; default: break; } return PUGL_SUCCESS; } int main() { #if defined(__linux__) XInitThreads(); #endif const char *errMsg = NULL; struct RenderVulkan *vk = rvkCreate(); rvkCheckFatal(vk); printf("Created Vulkan Instance Successfully\n"); PuglWorld *world = puglNewWorld(); PuglView *view = puglNewView(world); const PuglRect frame = { 0, 0, 800, 600 }; puglSetBackend(view, puglStubBackend()); PuglStatus status; if ((status = puglCreateWindow(view, "Pugl Vulkan Test"))) { fprintf(stderr, "Could not create window: %d\n", status); rvkDestroy(vk); puglFreeWorld(world); exit(1); } puglSetEventFunc(view, onEvent); rvkCreateSurface(vk, view); rvkCheckFatal(vk); rvkSelectPhysicalDevice(vk); rvkCheckFatal(vk); puglShowWindow(view); while (running) { puglPollEvents(world, -1); puglDispatchEvents(world); } puglFreeView(view); puglFreeWorld(world); rvkDestroy(vk); return 0; } //#if defined(_WIN32) #if 0 int WINAPI WinMain( HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow ) { (void)hInstance; (void)hPrevInstance; (void)lpCmdLine; (void)nCmdShow; return main(); } #endif