pax_global_header00006660000000000000000000000064146730740270014524gustar00rootroot0000000000000052 comment=b24587d4871a630d05e9e26da94c95e6ce4324f2 egl-gbm-1.1.2.1/000077500000000000000000000000001467307402700131765ustar00rootroot00000000000000egl-gbm-1.1.2.1/.gitignore000066400000000000000000000000071467307402700151630ustar00rootroot00000000000000/build egl-gbm-1.1.2.1/COPYING000066400000000000000000000026371467307402700142410ustar00rootroot00000000000000Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------- Note: Individual files contain the following tag instead of the full license text. SPDX-License-Identifier: MIT This enables machine processing of the license information based on the SPDX License Identifiers that are here available: http://spdx.org/licenses/ egl-gbm-1.1.2.1/external/000077500000000000000000000000001467307402700150205ustar00rootroot00000000000000egl-gbm-1.1.2.1/external/gbm_backend_abi.h000066400000000000000000000256371467307402700202350ustar00rootroot00000000000000/* * Copyright © 2011 Intel Corporation * Copyright © 2021 NVIDIA Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Benjamin Franzke * James Jones */ #ifndef GBM_BACKEND_ABI_H_ #define GBM_BACKEND_ABI_H_ #include "gbm.h" /** * \file gbm_backend_abi.h * \brief ABI between the GBM loader and its backends */ struct gbm_backend_desc; /** * The GBM backend interface version defined by this file. * * The GBM device interface version must be incremented whenever the structures * defined in this file are modified. To preserve ABI compatibility with * backends that support only older versions, modifications to this file must * consist only of appending new fields to the end of the structures defined in * it, defining new structures, or declaring new exported functions or global * variables. * * Note this version applies to ALL structures in this file, not just the core, * backend, and device structures which contain it explicitly. Buffer objects, * surfaces, and any other new structures introduced to this file are also part * of the backend ABI. The ABI version of an instance of any object in this file * is defined as the minimum of the version of the backend associated with the * object instance and the loader's core object version. Hence, any new objects * added to this file should contain either a reference to an existing object * defined here, or an explicit version field. * * A few examples of object versions: * * Backend ABI version: 0 * Core ABI version: 3 * ABI version of a device created by the backend: 0 * * Backend ABI version: 2 * Core ABI version: 1 * ABI version of a surface created by a device from the backend: 1 * * Backend ABI version: 4 * Core ABI version: 4 * ABI version of a buffer object created by a device from the backend: 4 */ #define GBM_BACKEND_ABI_VERSION 0 /** * GBM device interface corresponding to GBM_BACKEND_ABI_VERSION = 0 * * DO NOT MODIFY THIS STRUCT. Instead, introduce a gbm_bo_v1, increment * GBM_BACKEND_ABI_VERSION, and append gbm_bo_v1 to gbm_bo. */ struct gbm_device_v0 { const struct gbm_backend_desc *backend_desc; /** * The version of the GBM backend interface supported by this device and its * child objects. This may be less than the maximum version supported by the * GBM loader if the device was created by an older backend, or less than the * maximum version supported by the backend if the device was created by an * older loader. In other words, this will be: * * MIN(backend GBM interface version, loader GBM interface version) * * It is the backend's responsibility to assign this field the value passed * in by the GBM loader to the backend's create_device function. The GBM * loader will pre-clamp the value based on the loader version and the * version reported by the backend in its gbm_backend_v0::backend_version * field. It is the loader's responsibility to respect this version when * directly accessing a device instance or any child objects instantiated by * a device instance. 
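 *
 * For example, if the loader's core version is 3 and a backend reports
 * version 0 in gbm_backend_v0::backend_version, the loader clamps the value
 * it passes to create_device to MIN(3, 0) = 0, and that clamped value is
 * what the backend stores here.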
*/ uint32_t backend_version; int fd; const char *name; void (*destroy)(struct gbm_device *gbm); int (*is_format_supported)(struct gbm_device *gbm, uint32_t format, uint32_t usage); int (*get_format_modifier_plane_count)(struct gbm_device *device, uint32_t format, uint64_t modifier); struct gbm_bo *(*bo_create)(struct gbm_device *gbm, uint32_t width, uint32_t height, uint32_t format, uint32_t usage, const uint64_t *modifiers, const unsigned int count); struct gbm_bo *(*bo_import)(struct gbm_device *gbm, uint32_t type, void *buffer, uint32_t usage); void *(*bo_map)(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint32_t flags, uint32_t *stride, void **map_data); void (*bo_unmap)(struct gbm_bo *bo, void *map_data); int (*bo_write)(struct gbm_bo *bo, const void *buf, size_t data); int (*bo_get_fd)(struct gbm_bo *bo); int (*bo_get_planes)(struct gbm_bo *bo); union gbm_bo_handle (*bo_get_handle)(struct gbm_bo *bo, int plane); int (*bo_get_plane_fd)(struct gbm_bo *bo, int plane); uint32_t (*bo_get_stride)(struct gbm_bo *bo, int plane); uint32_t (*bo_get_offset)(struct gbm_bo *bo, int plane); uint64_t (*bo_get_modifier)(struct gbm_bo *bo); void (*bo_destroy)(struct gbm_bo *bo); struct gbm_surface *(*surface_create)(struct gbm_device *gbm, uint32_t width, uint32_t height, uint32_t format, uint32_t flags, const uint64_t *modifiers, const unsigned count); struct gbm_bo *(*surface_lock_front_buffer)(struct gbm_surface *surface); void (*surface_release_buffer)(struct gbm_surface *surface, struct gbm_bo *bo); int (*surface_has_free_buffers)(struct gbm_surface *surface); void (*surface_destroy)(struct gbm_surface *surface); }; /** * The device used for the memory allocation. * * The members of this structure should be not accessed directly * * To modify this structure, introduce a new gbm_device_v structure, add it * to the end of this structure, and increment GBM_BACKEND_ABI_VERSION. */ struct gbm_device { /* Hack to make a gbm_device detectable by its first element. */ struct gbm_device *(*dummy)(int); struct gbm_device_v0 v0; }; /** * GBM buffer object interface corresponding to GBM_BACKEND_ABI_VERSION = 0 * * DO NOT MODIFY THIS STRUCT. Instead, introduce a gbm_bo_v1, increment * GBM_BACKEND_ABI_VERSION, and append gbm_bo_v1 to gbm_bo. */ struct gbm_bo_v0 { uint32_t width; uint32_t height; uint32_t stride; uint32_t format; union gbm_bo_handle handle; void *user_data; void (*destroy_user_data)(struct gbm_bo *, void *); }; /** * The allocated buffer object. * * The members in this structure should not be accessed directly. * * To modify this structure, introduce a new gbm_bo_v structure, add it to * the end of this structure, and increment GBM_BACKEND_ABI_VERSION. */ struct gbm_bo { struct gbm_device *gbm; struct gbm_bo_v0 v0; }; /** * GBM surface interface corresponding to GBM_BACKEND_ABI_VERSION = 0 * * DO NOT MODIFY THIS STRUCT. Instead, introduce a gbm_surface_v1, increment * GBM_BACKEND_ABI_VERSION, and append gbm_surface_v1 to gbm_surface. */ struct gbm_surface_v0 { uint32_t width; uint32_t height; uint32_t format; uint32_t flags; struct { uint64_t *modifiers; unsigned count; }; }; /** * An allocated GBM surface. * * To modify this structure, introduce a new gbm_surface_v structure, add it * to the end of this structure, and increment GBM_BACKEND_ABI_VERSION. */ struct gbm_surface { struct gbm_device *gbm; struct gbm_surface_v0 v0; }; /** * GBM backend interfaces corresponding to GBM_BACKEND_ABI_VERSION = 0 * * DO NOT MODIFY THIS STRUCT. 
Instead, introduce a gbm_backend_v1, increment * GBM_BACKEND_ABI_VERSION, append gbm_backend_v1 to gbm_backend. */ struct gbm_backend_v0 { /** * The version of the GBM backend interface supported by this backend. This * is set by the backend itself, and may be greater or less than the version * supported by the loader. It is the responsibility of the GBM loader to * respect this version when accessing fields in this structure. */ uint32_t backend_version; const char *backend_name; struct gbm_device *(*create_device)(int fd, uint32_t gbm_backend_version); }; /** * The interface exposed by an external GBM backend. * * To modify this structure, introduce a new gbm_backend_v structure, add it * to the end of this structure, and increment GBM_BACKEND_ABI_VERSION. */ struct gbm_backend { struct gbm_backend_v0 v0; }; /** * GBM interfaces exposed to GBM backends at GBM_BACKEND_ABI_VERSION >= 0 * * DO NOT MODIFY THIS STRUCT. Instead, introduce a gbm_core_v1, increment * GBM_BACKEND_ABI_VERSION, and append gbm_core_v1 to gbm_backend. */ struct gbm_core_v0 { /** * The version of the GBM backend interface supported by the GBM loader. This * is set by the loader, and may be greater or less than the version * supported by a given backend. It is the responsibility of the backend to * respect this version when accessing fields in this structure and other * structures allocated or modified by the loader. */ uint32_t core_version; uint32_t (*format_canonicalize)(uint32_t gbm_format); }; /** * The interface exposed by the GBM core/loader code to GBM backends. * * To modify this structure, introduce a new gbm_core_v structure, add it * to the end of this structure, and increment GBM_BACKEND_ABI_VERSION. */ struct gbm_core { struct gbm_core_v0 v0; }; /** * The entrypoint an external GBM backend exports. * * Prior to creating any devices using the backend, GBM will look up and call * this function to request the backend's interface and convey the loader's * version and exported interface to the backend. * * DO NOT MODIFY THIS FUNCTION NAME OR PROTOTYPE. It must remain unchanged to * preserve backwards compatibility with existing GBM backends. */ #define GBM_GET_BACKEND_PROC gbmint_get_backend #define _GBM_MKSTRX(s) _GBM_MKSTR(s) #define _GBM_MKSTR(s) #s #define GBM_GET_BACKEND_PROC_NAME _GBM_MKSTRX(GBM_GET_BACKEND_PROC) typedef const struct gbm_backend *(*GBM_GET_BACKEND_PROC_PTR)(const struct gbm_core *gbm_core); #endif egl-gbm-1.1.2.1/external/gbmint.h000066400000000000000000000031411467307402700164500ustar00rootroot00000000000000/* * Copyright © 2011 Intel Corporation * Copyright © 2021 NVIDIA Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Benjamin Franzke * James Jones */ #ifndef INTERNAL_H_ #define INTERNAL_H_ #include "gbm_backend_abi.h" /* GCC visibility */ #if defined(__GNUC__) #define GBM_EXPORT __attribute__ ((visibility("default"))) #else #define GBM_EXPORT #endif /** * \file gbmint.h * \brief Internal implementation details of gbm */ extern struct gbm_core gbm_core; #endif egl-gbm-1.1.2.1/meson.build000066400000000000000000000027111467307402700153410ustar00rootroot00000000000000project('egl-gbm', 'c', version : '1.1.2', default_options : [ 'buildtype=debugoptimized', 'c_std=gnu99', 'warning_level=1', ], license : 'MIT', meson_version : '>= 0.50' ) cc = meson.get_compiler('c') egl_gbm_version = meson.project_version() var_arr = egl_gbm_version.split('.') egl_gbm_major_version = var_arr[0] egl_gbm_minor_version = var_arr[1] egl_gbm_micro_version = var_arr[2] dep_libdrm = dependency('libdrm', version: '>= 2.4.75') eglexternalplatform = dependency('eglexternalplatform', version : ['>=1.1', '<2']) ext_includes = include_directories('external') gbm = dependency('gbm', version : ['>=21.2']) threads = dependency('threads') pkgconf = configuration_data() pkgconf.set('prefix', get_option('prefix')) pkgconf.set('exec_prefix', '${prefix}') pkgconf.set('libdir', '${exec_prefix}/@0@'.format(get_option('libdir'))) pkgconf.set('includedir', '${prefix}/@0@'.format(get_option('includedir'))) pkgconf.set('datadir', '${datarootdir}') pkgconf.set('datarootdir', '${prefix}/@0@'.format(get_option('datadir'))) pkgconf.set('PACKAGE', meson.project_name()) pkgconf.set('EGL_GBM_EXTERNAL_VERSION', meson.project_version()) pkgconf.set('EGL_EXTERNAL_PLATFORM_MIN_VERSION', '@0@.@1@'.format(egl_gbm_major_version, egl_gbm_minor_version)) pkgconf.set('EGL_EXTERNAL_PLATFORM_MAX_VERSION', egl_gbm_major_version.to_int() + 1) subdir('src') egl-gbm-1.1.2.1/src/000077500000000000000000000000001467307402700137655ustar00rootroot00000000000000egl-gbm-1.1.2.1/src/15_nvidia_gbm.json000066400000000000000000000001571467307402700172670ustar00rootroot00000000000000{ "file_format_version" : "1.0.0", "ICD" : { "library_path" : "libnvidia-egl-gbm.so.1" } } egl-gbm-1.1.2.1/src/gbm-display.c000066400000000000000000000421511467307402700163440ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #include "gbm-display.h" #include "gbm-utils.h" #include "gbm-surface.h" #include #include #include #include #include #include #include #include #include #include #include #include #if !defined(O_CLOEXEC) #if ((defined(__sun__) && defined(__svr4__)) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)) /* * Allow building against old SunOS headers with the assumption this flag will * be available at runtime. 
*/ #define O_CLOEXEC 0x00800000 #else #error "No definition of O_CLOEXEC available" #endif #endif static bool CheckDevicePath(const GbmPlatformData* data, EGLDeviceEXT dev, EGLenum pathEnum, dev_t gbmDev) { struct stat statbuf; const char *devPath; devPath = data->egl.QueryDeviceStringEXT(dev, pathEnum); if (!devPath) return false; memset(&statbuf, 0, sizeof(statbuf)); if (stat(devPath, &statbuf)) return false; if (memcmp(&statbuf.st_rdev, &gbmDev, sizeof(gbmDev))) return false; return true; } static EGLDeviceEXT FindGbmDevice(GbmPlatformData* data, struct gbm_device* gbm) { EGLDeviceEXT* devs = NULL; const char* devExts; struct stat statbuf; EGLDeviceEXT dev = EGL_NO_DEVICE_EXT; EGLint maxDevs, numDevs; int gbmFd = gbm_device_get_fd(gbm); int i; if (gbmFd < 0) { /* * No need to set an error here or various other cases that boil down * to an invalid native display. From the EGL 1.5 spec: * * "If platform is valid but no display matching is * available, then EGL_NO_DISPLAY is returned; no error condition is * raised in this case." */ goto done; } memset(&statbuf, 0, sizeof(statbuf)); if (fstat(gbmFd, &statbuf)) goto done; if (data->egl.QueryDevicesEXT(0, NULL, &maxDevs) != EGL_TRUE) goto done; if (maxDevs <= 0) goto done; devs = malloc(maxDevs * sizeof(*devs)); if (!devs) { eGbmSetError(data, EGL_BAD_ALLOC); goto done; } if (data->egl.QueryDevicesEXT(maxDevs, devs, &numDevs) != EGL_TRUE) goto done; for (i = 0; i < numDevs; i++) { devExts = data->egl.QueryDeviceStringEXT(devs[i], EGL_EXTENSIONS); if (!eGbmFindExtension("EGL_EXT_device_drm", devExts)) continue; if (CheckDevicePath(data, devs[i], EGL_DRM_DEVICE_FILE_EXT, statbuf.st_rdev)) { dev = devs[i]; break; } if (!eGbmFindExtension("EGL_EXT_device_drm_render_node", devExts)) continue; if (CheckDevicePath(data, devs[i], EGL_DRM_RENDER_NODE_FILE_EXT, statbuf.st_rdev)) { dev = devs[i]; break; } } done: free(devs); return dev; } static int OpenDefaultDrmDevice(void) { drmDevicePtr devices[1]; int numDevices = drmGetDevices2(0, devices, 1); int fd = -1; if (numDevices == 0) return -1; if (devices[0]->nodes[DRM_NODE_RENDER]) fd = open(devices[0]->nodes[DRM_NODE_RENDER], O_RDWR | O_CLOEXEC); if ((fd < 0) && devices[0]->nodes[DRM_NODE_PRIMARY]) fd = open(devices[0]->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC); drmFreeDevices(devices, 1); return fd; } static void FreeDisplay(GbmObject* obj) { if (obj) { GbmDisplay* display = (GbmDisplay*)obj; /* * The device file is only opened when the display is * EGL_DEFAULT_DISPLAY, and is the first resource created by that code * path. */ if (display->fd >= 0) { if (display->gbm) gbm_device_destroy(display->gbm); close(display->fd); } free(obj); } } EGLDisplay eGbmGetPlatformDisplayExport(void *dataVoid, EGLenum platform, void *nativeDpy, const EGLAttrib *attribs) { static const EGLAttrib refAttrs[] = { EGL_TRACK_REFERENCES_KHR, EGL_TRUE, EGL_NONE }; GbmPlatformData* data = dataVoid; GbmDisplay* display = NULL; const EGLAttrib *attrs = data->supportsDisplayReference ? 
refAttrs : NULL; (void)attribs; if (platform != EGL_PLATFORM_GBM_KHR) { eGbmSetError(data, EGL_BAD_PARAMETER); return EGL_NO_DISPLAY; } display = calloc(1, sizeof(*display)); if (!display) { eGbmSetError(data, EGL_BAD_ALLOC); return EGL_NO_DISPLAY; } display->base.dpy = display; display->base.type = EGL_OBJECT_DISPLAY_KHR; display->base.refCount = 1; display->base.free = FreeDisplay; display->data = data; display->fd = -1; display->gbm = nativeDpy; if (nativeDpy == EGL_DEFAULT_DISPLAY) { if ((display->fd = OpenDefaultDrmDevice()) < 0) goto fail; if (!(display->gbm = gbm_create_device(display->fd))) goto fail; } if (data->ptr_gbm_device_get_backend_name != NULL) { const char *name = data->ptr_gbm_device_get_backend_name(display->gbm); if (name == NULL || strcmp(name, "nvidia") != 0) { /* * This is not an NVIDIA device. Return failure, so that libglvnd can * move on to the next driver. */ goto fail; } } display->dev = FindGbmDevice(data, display->gbm); if (display->dev == EGL_NO_DEVICE_EXT) { /* FindGbmDevice() sets an appropriate EGL error on failure */ goto fail; } display->devDpy = display->data->egl.GetPlatformDisplay(EGL_PLATFORM_DEVICE_EXT, display->dev, attrs); if (display->devDpy == EGL_NO_DISPLAY) { /* GetPlatformDisplay will set an appropriate error */ goto fail; } if (!eGbmAddObject(&display->base)) { eGbmSetError(data, EGL_BAD_ALLOC); goto fail; } return (EGLDisplay)display; fail: FreeDisplay(&display->base); return EGL_NO_DISPLAY; } EGLBoolean eGbmInitializeHook(EGLDisplay dpy, EGLint* major, EGLint* minor) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); GbmPlatformData* data; const char* exts; EGLBoolean res; if (!display) { /* No platform data. Can't set error EGL_NO_DISPLAY */ return EGL_FALSE; } data = display->data; res = data->egl.Initialize(display->devDpy, major, minor); if (!res) goto done; exts = data->egl.QueryString(display->devDpy, EGL_EXTENSIONS); if (!exts || !eGbmFindExtension("EGL_KHR_stream", exts) || !eGbmFindExtension("EGL_KHR_stream_producer_eglsurface", exts) || !eGbmFindExtension("EGL_KHR_image_base", exts) || !eGbmFindExtension("EGL_NV_stream_consumer_eglimage", exts) || !eGbmFindExtension("EGL_MESA_image_dma_buf_export", exts) || !eGbmFindExtension("EGL_EXT_sync_reuse", exts)) { data->egl.Terminate(display->devDpy); eGbmSetError(data, EGL_NOT_INITIALIZED); res = EGL_FALSE; } display->gbm->v0.surface_lock_front_buffer = eGbmSurfaceLockFrontBuffer; display->gbm->v0.surface_release_buffer = eGbmSurfaceReleaseBuffer; display->gbm->v0.surface_has_free_buffers = eGbmSurfaceHasFreeBuffers; done: eGbmUnrefObject(&display->base); return res; } EGLBoolean eGbmTerminateHook(EGLDisplay dpy) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); EGLBoolean res; if (!display) { /* No platform data. Can't set error EGL_NO_DISPLAY */ return EGL_FALSE; } res = display->data->egl.Terminate(display->devDpy); eGbmUnrefObject(&display->base); return res; } const char* eGbmQueryStringExport(void *data, EGLDisplay dpy, EGLExtPlatformString name) { (void)data; (void)dpy; switch (name) { case EGL_EXT_PLATFORM_PLATFORM_CLIENT_EXTENSIONS: return "EGL_KHR_platform_gbm EGL_MESA_platform_gbm"; default: break; } return NULL; } EGLBoolean eGbmIsValidNativeDisplayExport(void *data, void *nativeDpy) { /* Is a GBM device? */ char *envPlatform = getenv("EGL_PLATFORM"); (void)data; /* Yes, because the environment said so. */ if (envPlatform && !strcasecmp(envPlatform, "gbm")) return EGL_TRUE; /* GBM devices are pointers to instances of "struct gbm_device". 
*/ if (!eGbmPointerIsDereferenceable(nativeDpy)) return EGL_FALSE; /* * The first member of struct gbm_device is "dummy", a pointer to the * function gbm_create_device() that is there precisely for this purpose: */ return (*(void**)nativeDpy == gbm_create_device) ? EGL_TRUE : EGL_FALSE; } void* eGbmGetInternalHandleExport(EGLDisplay dpy, EGLenum type, void *handle) { GbmObject* obj = handle ? eGbmRefHandle(handle) : NULL; void* res = handle; if (!obj) return res; if (obj->type != type || obj->dpy != dpy) goto done; switch (type) { case EGL_OBJECT_DISPLAY_KHR: res = ((GbmDisplay*)obj)->devDpy; break; case EGL_OBJECT_SURFACE_KHR: res = eGbmSurfaceUnwrap(obj); break; default: break; } done: eGbmUnrefObject(obj); return res; } static uint32_t ConfigToDrmFourCC(GbmDisplay* display, EGLConfig config) { EGLDisplay dpy = display->devDpy; EGLint r, g, b, a; EGLBoolean ret = EGL_TRUE; ret &= display->data->egl.GetConfigAttrib(dpy, config, EGL_RED_SIZE, &r); ret &= display->data->egl.GetConfigAttrib(dpy, config, EGL_GREEN_SIZE, &g); ret &= display->data->egl.GetConfigAttrib(dpy, config, EGL_BLUE_SIZE, &b); ret &= display->data->egl.GetConfigAttrib(dpy, config, EGL_ALPHA_SIZE, &a); if (!ret) { /* * The only reason this could fail is some internal error in the * platform library code or if the application terminated the display * in another thread while this code was running. In either case, * behave as if there is no DRM fourcc format associated with this * config. */ return 0; /* DRM_FORMAT_INVALID */ } /* Handles configs with up to 255 bits per component */ assert(a < 256 && g < 256 && b < 256 && a < 256); #define PACK_CONFIG(r_, g_, b_, a_) \ (((r_) << 24ULL) | ((g_) << 16ULL) | ((b_) << 8ULL) | (a_)) switch (PACK_CONFIG(r, g, b, a)) { case PACK_CONFIG(8, 8, 8, 0): return DRM_FORMAT_XRGB8888; case PACK_CONFIG(8, 8, 8, 8): return DRM_FORMAT_ARGB8888; case PACK_CONFIG(5, 6, 5, 0): return DRM_FORMAT_RGB565; case PACK_CONFIG(10, 10, 10, 0): return DRM_FORMAT_XRGB2101010; case PACK_CONFIG(10, 10, 10, 2): return DRM_FORMAT_ARGB2101010; default: return 0; /* DRM_FORMAT_INVALID */ } } EGLBoolean eGbmChooseConfigHook(EGLDisplay dpy, EGLint const* attribs, EGLConfig* configs, EGLint configSize, EGLint *numConfig) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); GbmPlatformData* data; EGLint *newAttribs = NULL; EGLConfig *newConfigs = NULL; EGLint nAttribs = 0; EGLint nNewAttribs = 0; EGLint nNewConfigs = 0; EGLint cfg; EGLint nativeVisual = EGL_DONT_CARE; EGLint err = EGL_SUCCESS; EGLBoolean ret = EGL_FALSE; bool surfType = false; bool nativeVisualID = false; if (!display) { /* No platform data. Can't set error EGL_NO_DISPLAY */ return EGL_FALSE; } data = display->data; if (attribs) { for (; attribs[nAttribs] != EGL_NONE; nAttribs += 2) { surfType = surfType || (attribs[nAttribs] == EGL_SURFACE_TYPE); if (attribs[nAttribs] == EGL_NATIVE_VISUAL_ID) { nativeVisual = attribs[nAttribs + 1]; nativeVisualID = true; } } } /* * Add room for EGL_SURFACE_TYPE attrib if not present, remove the * EGL_NATIVE_VISUAL_ID attrib if present */ nNewAttribs = (surfType ? nAttribs : nAttribs + 2); nNewAttribs = (nativeVisualID ? 
nNewAttribs - 2 : nNewAttribs); newAttribs = malloc((nNewAttribs + 1) * sizeof(*newAttribs)); if (!newAttribs) { err = EGL_BAD_ALLOC; goto done; } for (nAttribs = 0, nNewAttribs = 0; attribs[nAttribs] != EGL_NONE; nAttribs += 2) { /* * Convert all instances of EGL_WINDOW_BIT in an EGL_SURFACE_TYPE * attribute's value to EGL_STREAM_BIT_KHR */ if ((attribs[nAttribs] == EGL_SURFACE_TYPE) && (attribs[nAttribs + 1] != EGL_DONT_CARE) && (attribs[nAttribs + 1] & EGL_WINDOW_BIT)) { newAttribs[nNewAttribs++] = attribs[nAttribs]; newAttribs[nNewAttribs++] = (attribs[nAttribs + 1] & ~EGL_WINDOW_BIT) | EGL_STREAM_BIT_KHR; /* Remove all instances of the EGL_nATIVE_VISUAL_ID attribute */ } else if (attribs[nAttribs] != EGL_NATIVE_VISUAL_ID) { newAttribs[nNewAttribs++] = attribs[nAttribs]; newAttribs[nNewAttribs++] = attribs[nAttribs + 1]; } } if (!surfType) { /* * If EGL_SURFACE_TYPE was not specified, convert the default * EGL_WINDOW_BIT to EGL_STREAM_BIT_KHR */ newAttribs[nNewAttribs++] = EGL_SURFACE_TYPE; newAttribs[nNewAttribs++] = EGL_STREAM_BIT_KHR; } newAttribs[nNewAttribs] = EGL_NONE; if (nativeVisual != EGL_DONT_CARE) { /* * Need to query *all* configs that match everything but the specified * native visual ID, then filter them down based on visual ID before * clamping to the number of configs requested. */ ret = data->egl.ChooseConfig(display->devDpy, newAttribs, NULL, 0, &nNewConfigs); if (!ret || !*numConfig) goto done; newConfigs = malloc(sizeof(EGLConfig) * *numConfig); if (!newConfigs) { err = EGL_BAD_ALLOC; goto done; } ret = data->egl.ChooseConfig(display->devDpy, newAttribs, newConfigs, nNewConfigs, &nNewConfigs); if (!ret) goto done; for (cfg = 0, *numConfig = 0; cfg < nNewConfigs && (!configs || *numConfig < configSize); cfg++) { if (ConfigToDrmFourCC(display, newConfigs[cfg]) != (uint32_t)nativeVisual) { continue; } if (!configs) { (*numConfig)++; continue; } configs[(*numConfig)++] = newConfigs[cfg]; } } else { ret = data->egl.ChooseConfig(display->devDpy, newAttribs, configs, configSize, numConfig); } done: free(newAttribs); free(newConfigs); if (err != EGL_SUCCESS) eGbmSetError(data, err); eGbmUnrefObject(&display->base); return ret; } EGLBoolean eGbmGetConfigAttribHook(EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); EGLBoolean ret; if (!display) { /* No platform data. Can't set error EGL_NO_DISPLAY */ return EGL_FALSE; } ret = display->data->egl.GetConfigAttrib(display->devDpy, config, attribute, value); if (ret) { switch (attribute) { case EGL_SURFACE_TYPE: if (*value & EGL_STREAM_BIT_KHR) { *value |= EGL_WINDOW_BIT; } else { *value &= ~EGL_WINDOW_BIT; } break; case EGL_NATIVE_VISUAL_ID: *value = ConfigToDrmFourCC(display, config); break; default: break; } } eGbmUnrefObject(&display->base); return ret; } egl-gbm-1.1.2.1/src/gbm-display.h000066400000000000000000000032511467307402700163470ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: MIT */ #ifndef GBM_DISPLAY_H #define GBM_DISPLAY_H #include "gbm-platform.h" #include "gbm-handle.h" typedef struct GbmDisplayRec { GbmObject base; GbmPlatformData* data; EGLDeviceEXT dev; EGLDisplay devDpy; struct gbm_device* gbm; int fd; } GbmDisplay; EGLDisplay eGbmGetPlatformDisplayExport(void *data, EGLenum platform, void *nativeDpy, const EGLAttrib *attribs); const char* eGbmQueryStringExport(void *data, EGLDisplay dpy, EGLExtPlatformString name); EGLBoolean eGbmIsValidNativeDisplayExport(void *data, void *nativeDpy); void* eGbmGetInternalHandleExport(EGLDisplay dpy, EGLenum type, void *handle); EGLBoolean eGbmInitializeHook(EGLDisplay dpy, EGLint* major, EGLint* minor); EGLBoolean eGbmQueryDisplayAttribHook(EGLDisplay dpy, EGLint name, EGLAttrib *value); EGLBoolean eGbmTerminateHook(EGLDisplay dpy); EGLBoolean eGbmChooseConfigHook(EGLDisplay dpy, EGLint const* attribs, EGLConfig* configs, EGLint configSize, EGLint *numConfig); EGLBoolean eGbmGetConfigAttribHook(EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value); #endif /* GBM_DISPLAY_H */ egl-gbm-1.1.2.1/src/gbm-egl-imports.h000066400000000000000000000031131467307402700171410ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ DO_EGL_FUNC(PFNEGLCHOOSECONFIGPROC, ChooseConfig) DO_EGL_FUNC(PFNEGLCLIENTWAITSYNCKHRPROC, ClientWaitSyncKHR) DO_EGL_FUNC(PFNEGLCREATEIMAGEKHRPROC, CreateImageKHR) DO_EGL_FUNC(PFNEGLCREATEPBUFFERSURFACEPROC, CreatePbufferSurface) DO_EGL_FUNC(PFNEGLCREATESTREAMKHRPROC, CreateStreamKHR) DO_EGL_FUNC(PFNEGLCREATESYNCKHRPROC, CreateSyncKHR) DO_EGL_FUNC(PFNEGLCREATESTREAMPRODUCERSURFACEKHRPROC, CreateStreamProducerSurfaceKHR) DO_EGL_FUNC(PFNEGLDESTROYIMAGEKHRPROC, DestroyImageKHR) DO_EGL_FUNC(PFNEGLDESTROYSTREAMKHRPROC, DestroyStreamKHR) DO_EGL_FUNC(PFNEGLDESTROYSURFACEPROC, DestroySurface) DO_EGL_FUNC(PFNEGLDESTROYSYNCKHRPROC, DestroySyncKHR) DO_EGL_FUNC(PFNEGLEXPORTDMABUFIMAGEMESAPROC, ExportDMABUFImageMESA) DO_EGL_FUNC(PFNEGLEXPORTDMABUFIMAGEQUERYMESAPROC, ExportDMABUFImageQueryMESA) DO_EGL_FUNC(PFNEGLGETCONFIGATTRIBPROC, GetConfigAttrib) DO_EGL_FUNC(PFNEGLGETERRORPROC, GetError) DO_EGL_FUNC(PFNEGLGETPLATFORMDISPLAYPROC, GetPlatformDisplay) DO_EGL_FUNC(PFNEGLINITIALIZEPROC, Initialize) DO_EGL_FUNC(PFNEGLQUERYDEVICESEXTPROC, QueryDevicesEXT) DO_EGL_FUNC(PFNEGLQUERYDEVICESTRINGEXTPROC, QueryDeviceStringEXT) DO_EGL_FUNC(PFNEGLQUERYSTREAMCONSUMEREVENTNVPROC, QueryStreamConsumerEventNV) DO_EGL_FUNC(PFNEGLQUERYSTRINGPROC, QueryString) DO_EGL_FUNC(PFNEGLSTREAMIMAGECONSUMERCONNECTNVPROC, StreamImageConsumerConnectNV) DO_EGL_FUNC(PFNEGLSTREAMACQUIREIMAGENVPROC, StreamAcquireImageNV) DO_EGL_FUNC(PFNEGLSTREAMRELEASEIMAGENVPROC, StreamReleaseImageNV) DO_EGL_FUNC(PFNEGLTERMINATEPROC, Terminate) egl-gbm-1.1.2.1/src/gbm-handle.c000066400000000000000000000043701467307402700161330ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: MIT */ #include "gbm-handle.h" #include "gbm-mutex.h" #include #include #include static int HandleCompar(const void *a, const void *b) { const GbmObject* objA = a; const GbmObject* objB = b; if (objA->type != objB->type) return objA->type - objB->type; if (objA->dpy != objB->dpy) return (const uint8_t *)objA->dpy - (const uint8_t *)objB->dpy; return objA - objB; } void *handleTreeRoot = NULL; GbmHandle eGbmAddObject(GbmObject* obj) { GbmObject** res = NULL; if (!eGbmHandlesLock()) return NULL; assert(obj->refCount == 1); if (tfind(obj, &handleTreeRoot, HandleCompar)) goto fail; res = tsearch(obj, &handleTreeRoot, HandleCompar); fail: eGbmHandlesUnlock(); return res ? *res : NULL; } GbmObject* eGbmRefHandle(GbmHandle handle) { GbmObject **res = NULL; if (!eGbmHandlesLock()) return NULL; res = tfind(handle, &handleTreeRoot, HandleCompar); if (!res) goto fail; assert((*res)->refCount >= 1); (*res)->refCount++; fail: eGbmHandlesUnlock(); return res ? *res : NULL; } static void UnrefObjectLocked(GbmObject* obj) { assert(obj->refCount >= 1); if (--obj->refCount == 0) { if (!tdelete(obj, &handleTreeRoot, HandleCompar)) assert(!"Failed to find handle in tree for deletion"); eGbmHandlesUnlock(); obj->free(obj); return; } eGbmHandlesUnlock(); } void eGbmUnrefObject(GbmObject* obj) { if (!eGbmHandlesLock()) { assert(!"Failed to lock handle list to unref object"); return; } /* UnrefObjectLocked releases the lock */ UnrefObjectLocked(obj); } bool eGbmDestroyHandle(GbmHandle handle) { GbmObject **res = NULL; if (!eGbmHandlesLock()) { assert(!"Failed to lock handle list to unref object"); return false; } res = tfind(handle, &handleTreeRoot, HandleCompar); if (!res || (*res)->destroyed) { eGbmHandlesUnlock(); return false; } (*res)->destroyed = true; /* UnrefObjectLocked releases the lock */ UnrefObjectLocked(*res); return true; } egl-gbm-1.1.2.1/src/gbm-handle.h000066400000000000000000000011251467307402700161330ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #ifndef GBM_HANDLE_H #define GBM_HANDLE_H #include #include typedef struct GbmObjectRec { void (*free)(struct GbmObjectRec *obj); struct GbmDisplayRec* dpy; EGLenum type; int refCount; bool destroyed; } GbmObject; typedef const GbmObject* GbmHandle; GbmHandle eGbmAddObject(GbmObject* obj); GbmObject* eGbmRefHandle(GbmHandle handle); void eGbmUnrefObject(GbmObject* obj); bool eGbmDestroyHandle(GbmHandle handle); #endif /* GBM_HANDLE_H */ egl-gbm-1.1.2.1/src/gbm-mutex.c000066400000000000000000000027371467307402700160470ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: MIT */ /* For PTHREAD_MUTEX_ERRORCHECK */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "gbm-mutex.h" #include #include static pthread_mutex_t handlesMutex; static pthread_once_t onceControl = PTHREAD_ONCE_INIT; static bool mutexInitialized = false; static void InitMutex(void) { pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) { assert(!"Failed to initialize pthread mutex attributes"); return; } if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)) { assert(!"Failed to set PTHREAD_MUTEX_ERRORCHECK attribute"); goto fail; } if (pthread_mutex_init(&handlesMutex, &attr)) { assert(!"Failed to initialize handles mutex"); goto fail; } mutexInitialized = true; fail: if (pthread_mutexattr_destroy(&attr)) { assert(!"Failed to destroy pthread mutex attributes"); } } bool eGbmHandlesLock(void) { if (pthread_once(&onceControl, InitMutex)) { assert(!"pthread_once() failed"); return false; } if (!mutexInitialized || pthread_mutex_lock(&handlesMutex)) { assert(!"Failed to lock handles mutex"); return false; } return true; } void eGbmHandlesUnlock(void) { assert(mutexInitialized); if (pthread_mutex_unlock(&handlesMutex)) assert(!"Failed to unlock pthread mutex"); } egl-gbm-1.1.2.1/src/gbm-mutex.h000066400000000000000000000003761467307402700160510ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #ifndef GBM_MUTEX_H #define GBM_MUTEX_H #include bool eGbmHandlesLock(void); void eGbmHandlesUnlock(void); #endif /* GBM_MUTEX_H */ egl-gbm-1.1.2.1/src/gbm-platform.c000066400000000000000000000127311467307402700165240ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #include "gbm-utils.h" #include "gbm-display.h" #include "gbm-platform.h" #include "gbm-surface.h" #include #include #include static void DestroyPlatformData(GbmPlatformData* data) { free(data); } static GbmPlatformData* CreatePlatformData(const EGLExtDriver *driver) { const char* clExts; GbmPlatformData *res = calloc(1, sizeof(*res)); if (!res) return NULL; #if defined(RTLD_DEFAULT) res->ptr_gbm_device_get_backend_name = dlsym(RTLD_DEFAULT, "gbm_device_get_backend_name"); if (res->ptr_gbm_device_get_backend_name == NULL) { /* * We're running with an old version of libgbm that doesn't support * different backends. 
*/ DestroyPlatformData(res); return NULL; } #endif // defined(RTLD_DEFAULT) #define DO_EGL_FUNC(_PROTO, _FUNC) \ res->egl._FUNC = (_PROTO)driver->getProcAddress("egl" #_FUNC); #include "gbm-egl-imports.h" #undef DO_EGL_FUNC res->driver.setError = driver->setError; clExts = res->egl.QueryString(EGL_NO_DISPLAY, EGL_EXTENSIONS); if (!eGbmFindExtension("EGL_EXT_platform_device", clExts) || (!eGbmFindExtension("EGL_EXT_device_query", clExts) && !eGbmFindExtension("EGL_EXT_device_base", clExts))) { DestroyPlatformData(res); return NULL; } if (eGbmFindExtension("EGL_KHR_display_reference", clExts)) res->supportsDisplayReference = true; else res->supportsDisplayReference = false; return res; } static EGLSurface CreatePlatformPixmapSurfaceHook(EGLDisplay dpy, EGLConfig config, void *nativePixmap, const EGLAttrib *attribs) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); (void)config; (void)nativePixmap; (void)attribs; if (!display) return EGL_NO_SURFACE; /* * From the EGL 1.5 spec: * * "If config does not support rendering to pixmaps (the EGL_SURFACE_TYPE * attribute does not contain EGL_PIXMAP_BIT), an EGL_BAD_MATCH error is * generated." * * GBM does not have a native pixmap type. See EGL_KHR_platform_gbm, and * none of the currently advertised EGLConfigs, which are passed through * unmodified from the EGLDevice, would support rendering to pixmaps even * if GBM did. */ eGbmSetError(display->data, EGL_BAD_MATCH); eGbmUnrefObject(&display->base); return EGL_NO_SURFACE; } static EGLSurface CreatePbufferSurfaceHook(EGLDisplay dpy, EGLConfig config, const EGLint *attribs) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); GbmPlatformData* data = display->data; if (!display) { /* No platform data. Can't set error EGL_NO_DISPLAY */ return EGL_NO_SURFACE; } return data->egl.CreatePbufferSurface(display->devDpy, config, attribs); } typedef struct GbmEglHookRec { const char *name; void *func; } GbmEglHook; static const GbmEglHook EglHooksMap[] = { /* Keep names in ascending order */ { "eglChooseConfig", eGbmChooseConfigHook }, { "eglCreatePbufferSurface", CreatePbufferSurfaceHook }, { "eglCreatePlatformPixmapSurface", CreatePlatformPixmapSurfaceHook }, { "eglCreatePlatformWindowSurface", eGbmCreatePlatformWindowSurfaceHook }, { "eglDestroySurface", eGbmDestroySurfaceHook }, { "eglGetConfigAttrib", eGbmGetConfigAttribHook }, { "eglInitialize", eGbmInitializeHook }, { "eglTerminate", eGbmTerminateHook }, }; static int HookCmp(const void* elemA, const void* elemB) { const char* key = (const char*)elemA; const GbmEglHook* hook = (const GbmEglHook*)elemB; return strcmp(key, hook->name); } static void* GetHookAddressExport(void *data, const char *name) { GbmEglHook *hook; (void)data; hook = (GbmEglHook*)bsearch((const void*)name, (const void*)EglHooksMap, sizeof(EglHooksMap)/sizeof(GbmEglHook), sizeof(GbmEglHook), HookCmp); if (hook) return hook->func; return NULL; } static EGLBoolean UnloadPlatformExport(void *data) { DestroyPlatformData(data); return EGL_TRUE; } EGLBoolean loadEGLExternalPlatform(int major, int minor, const EGLExtDriver *driver, EGLExtPlatform *platform) { if (!platform || !EGL_EXTERNAL_PLATFORM_VERSION_CMP(major, minor, GBM_EXTERNAL_VERSION_MAJOR, GBM_EXTERNAL_VERSION_MINOR)) { return EGL_FALSE; } platform->version.major = GBM_EXTERNAL_VERSION_MAJOR; platform->version.minor = GBM_EXTERNAL_VERSION_MINOR; platform->version.micro = GBM_EXTERNAL_VERSION_MICRO; platform->platform = EGL_PLATFORM_GBM_KHR; platform->data = (void *)CreatePlatformData(driver); if (platform->data == NULL) 
{ return EGL_FALSE; } platform->exports.unloadEGLExternalPlatform = UnloadPlatformExport; platform->exports.getHookAddress = GetHookAddressExport; platform->exports.isValidNativeDisplay = eGbmIsValidNativeDisplayExport; platform->exports.getPlatformDisplay = eGbmGetPlatformDisplayExport; platform->exports.queryString = eGbmQueryStringExport; platform->exports.getInternalHandle = eGbmGetInternalHandleExport; return EGL_TRUE; } egl-gbm-1.1.2.1/src/gbm-platform.h000066400000000000000000000042511467307402700165270ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #ifndef GBM_PLATFORM_H #define GBM_PLATFORM_H #include #include #include #include /* * .. * defines the EGL external Wayland * implementation version. * * The includer of this file can override either GBM_EXTERNAL_VERSION_MAJOR * or GBM_EXTERNAL_VERSION_MINOR in order to build against a certain EGL * external API version. * * * How to update this version numbers: * * - GBM_EXTERNAL_VERSION_MAJOR must match the EGL external API major * number this platform implements * * - GBM_EXTERNAL_VERSION_MINOR must match the EGL external API minor * number this platform implements * * - If the platform implementation is changed in any way, increase * GBM_EXTERNAL_VERSION_MICRO by 1 */ #if !defined(GBM_EXTERNAL_VERSION_MAJOR) #define GBM_EXTERNAL_VERSION_MAJOR 1 #if !defined(GBM_EXTERNAL_VERSION_MINOR) #define GBM_EXTERNAL_VERSION_MINOR 1 #endif #elif !defined(GBM_EXTERNAL_VERSION_MINOR) #define GBM_EXTERNAL_VERSION_MINOR 1 #endif #define GBM_EXTERNAL_VERSION_MICRO 0 #define EGL_EXTERNAL_PLATFORM_VERSION_MAJOR GBM_EXTERNAL_VERSION_MAJOR #define EGL_EXTERNAL_PLATFORM_VERSION_MINOR GBM_EXTERNAL_VERSION_MINOR #include #define EGBM_EXPORT __attribute__ ((visibility ("default"))) typedef struct GbmPlatformDataRec { struct { #define DO_EGL_FUNC(_PROTO, _FUNC) \ _PROTO _FUNC; #include "gbm-egl-imports.h" #undef DO_EGL_FUNC } egl; struct { PEGLEXTFNSETERROR setError; } driver; bool supportsDisplayReference; const char * (* ptr_gbm_device_get_backend_name) (struct gbm_device *gbm); } GbmPlatformData; EGBM_EXPORT EGLBoolean loadEGLExternalPlatform(int major, int minor, const EGLExtDriver *driver, EGLExtPlatform *platform); #endif /* GBM_PLATFORM_H */ egl-gbm-1.1.2.1/src/gbm-surface.c000066400000000000000000000410171467307402700163270ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #include "gbm-surface.h" #include "gbm-display.h" #include "gbm-utils.h" #include #include #include #include #include #include #define ARRAY_LEN(a) (sizeof(a) / sizeof(a[0])) #define MAX_STREAM_IMAGES 10 // One front, one back. #define WINDOW_STREAM_FIFO_LENGTH 2 typedef struct GbmSurfaceImageRec { EGLImage image; struct gbm_bo* bo; struct GbmSurfaceImageRec* nextAcquired; bool locked; } GbmSurfaceImage; typedef struct GbmSurfaceRec { GbmObject base; EGLStreamKHR stream; EGLSurface egl; EGLSyncKHR sync; GbmSurfaceImage images[MAX_STREAM_IMAGES]; struct { GbmSurfaceImage *first; GbmSurfaceImage *last; } acquiredImages; /* * The number of free color buffers. This is initially set to the stream's * FIFO length, and updated whenever we acquire or release an EGLImage * to/from the stream. 
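     *
     * (WINDOW_STREAM_FIFO_LENGTH is 2 for surfaces created by this library,
     * i.e. one front buffer and one back buffer, so numFreeImages starts
     * at 2.)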
* * FIXME: Our EGLImage handling is wrong: If the application calls * eglSwapBuffers more than once without calling * gbm_surface_lock_front_buffer, then gbm_surface_lock_front_buffer will * return the buffer from the oldest swap, but it should return the newest * swap. * * Also, if an application calls eglSwapBuffers more times than the FIFO * depth without calling gbm_surface_lock_front_buffer, then it will fill * up the FIFO and hang. * * To get something closer to correct behavior, in eglSwapBuffers we'd need * to call eglStreamReleaseImageNV on all unlocked buffers, then call into * the driver's eglSwapBuffers, and then call eglStreamAcquireImageNV to * fetch the EGLImage for that frame. That would require adding an * eglSwapBuffers hook and rewriting eGbmSurfaceLockFrontBuffer and * eGbmSurfaceReleaseBuffer. */ int numFreeImages; } GbmSurface; /* * Returns a pointer to a pointer in the NV-private structure that wraps the * gbm_surface structure. This pointer is reserved for use by this library. */ static inline GbmSurface** GetPrivPtr(struct gbm_surface* s) { uint8_t *ptr = (uint8_t *)s; return (GbmSurface **)(ptr - sizeof(void*)); } static inline GbmSurface* GetSurf(struct gbm_surface* s) { return s ? *GetPrivPtr(s) : NULL; } static inline void SetSurf(struct gbm_surface* s, GbmSurface *surf) { GbmSurface **priv = GetPrivPtr(s); *priv = surf; } static bool AddSurfImage(GbmDisplay* display, GbmSurface* surf) { GbmPlatformData* data = display->data; unsigned int i; for (i = 0; i < ARRAY_LEN(surf->images); i++) { if (surf->images[i].image == EGL_NO_IMAGE_KHR && surf->images[i].bo == NULL) { surf->images[i].image = data->egl.CreateImageKHR(display->devDpy, EGL_NO_CONTEXT, EGL_STREAM_CONSUMER_IMAGE_NV, (EGLClientBuffer)surf->stream, NULL); if (surf->images[i].image == EGL_NO_IMAGE_KHR) break; return true; } } return false; } static void RemoveSurfImage(GbmDisplay* display, GbmSurface* surf, EGLImage img) { GbmPlatformData* data = display->data; GbmSurfaceImage* acqImg; GbmSurfaceImage* prev = NULL; unsigned int i; for (i = 0; i < ARRAY_LEN(surf->images); i++) { if (surf->images[i].image == img) { /* * The EGL_NV_stream_consumer_eglimage spec is unclear if removed * images that are currently acquired still need to be released, but * it does say this: * * If an acquired EGLImage has not yet released when * eglDestroyImage is called, then, then an implicit * eglStreamReleaseImageNV will be called. * * so this should be sufficient either way. */ data->egl.DestroyImageKHR(display->devDpy, img); surf->images[i].image = EGL_NO_IMAGE_KHR; if (!surf->images[i].locked && surf->images[i].bo) { gbm_bo_destroy(surf->images[i].bo); surf->images[i].bo = NULL; } else { /* * If the image is currently acquired from the stream and * available for locking, remove it from the acquired images list. 
*/ for (acqImg = surf->acquiredImages.first; acqImg; prev = acqImg, acqImg = acqImg->nextAcquired) { if (acqImg == &surf->images[i]) { if (prev) prev->nextAcquired = acqImg->nextAcquired; else surf->acquiredImages.first = acqImg->nextAcquired; if (surf->acquiredImages.last == acqImg) surf->acquiredImages.last = prev; assert(surf->numFreeImages < WINDOW_STREAM_FIFO_LENGTH); surf->numFreeImages++; break; } } } break; } } } static bool AcquireSurfImage(GbmDisplay* display, GbmSurface* surf) { GbmPlatformData* data = display->data; EGLDisplay dpy = display->devDpy; GbmSurfaceImage* image = NULL; EGLImage img; unsigned int i; EGLBoolean res; res = data->egl.StreamAcquireImageNV(dpy, surf->stream, &img, surf->sync); if (!res) { /* * Match Mesa EGL dri2 platform behavior when no buffer is available * even though this function is not called from an EGL entry point */ eGbmSetError(data, EGL_BAD_SURFACE); return false; } if (data->egl.ClientWaitSyncKHR(dpy, surf->sync, 0, EGL_FOREVER_KHR) != EGL_CONDITION_SATISFIED_KHR) { /* Release the image back to the stream */ data->egl.StreamReleaseImageNV(dpy, surf->stream, img, surf->sync); /* Not clear what error to use. Pretend no buffer was available. */ eGbmSetError(data, EGL_BAD_SURFACE); return false; } for (i = 0; i < ARRAY_LEN(surf->images); i++) { if (surf->images[i].image == img) { image = &surf->images[i]; break; } } if (surf->acquiredImages.last) surf->acquiredImages.last->nextAcquired = image; else surf->acquiredImages.first = image; surf->acquiredImages.last = image; surf->numFreeImages--; return true; } static bool PumpSurfEvents(GbmDisplay* display, GbmSurface* surf) { GbmPlatformData* data = display->data; EGLenum event; EGLAttrib aux; EGLint evStatus; while (true) { evStatus = data->egl.QueryStreamConsumerEventNV(display->devDpy, surf->stream, 0, &event, &aux); if (evStatus != EGL_TRUE) break; switch (event) { case EGL_STREAM_IMAGE_AVAILABLE_NV: /* * The image must be acquired to clear the IMAGE_AVAILABLE event, * so acquire it here rather than in eGbmSurfaceLockFrontBuffer(). 
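             * The acquired image is queued on surf->acquiredImages and is
             * handed out to the application later, when it calls
             * gbm_surface_lock_front_buffer().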
*/ if (!AcquireSurfImage(display, surf)) return false; break; case EGL_STREAM_IMAGE_ADD_NV: if (!AddSurfImage(display, surf)) return false; break; case EGL_STREAM_IMAGE_REMOVE_NV: RemoveSurfImage(display, surf, (EGLImage)aux); break; default: assert(!"Unhandled EGLImage stream consumer event"); } } return evStatus != EGL_FALSE; } int eGbmSurfaceHasFreeBuffers(struct gbm_surface* s) { GbmSurface* surf = GetSurf(s); if (!surf) return 0; if (!PumpSurfEvents(surf->base.dpy, surf)) return 0; return (surf->numFreeImages > 0); } struct gbm_bo* eGbmSurfaceLockFrontBuffer(struct gbm_surface* s) { GbmSurface* surf = GetSurf(s); GbmSurfaceImage* image; GbmPlatformData* data; EGLDisplay dpy; uint32_t i; if (!surf) return NULL; data = surf->base.dpy->data; dpy = surf->base.dpy->devDpy; /* Must pump events to ensure images are created before acquiring them */ if (!PumpSurfEvents(surf->base.dpy, surf)) return NULL; if (!surf->acquiredImages.first) return NULL; image = surf->acquiredImages.first; assert(image->image); if (!image->bo) { struct gbm_import_fd_modifier_data buf; uint64_t modifier; EGLint stride; /* XXX support planar formats */ EGLint offset; /* XXX support planar formats */ int format; int planes; int fd; /* XXX support planar separate memory objects */ if (!data->egl.ExportDMABUFImageQueryMESA(dpy, image->image, &format, &planes, &modifier)) goto fail; assert(planes == 1); /* XXX support planar formats */ if (!data->egl.ExportDMABUFImageMESA(dpy, image->image, &fd, &stride, &offset)) { goto fail; } memset(&buf, 0, sizeof(buf)); buf.width = s->v0.width; buf.height = s->v0.height; buf.format = s->v0.format; buf.num_fds = 1; /* XXX support planar separate memory objects */ buf.fds[0] = fd; buf.strides[0] = stride; buf.offsets[0] = offset; buf.modifier = modifier; image->bo = gbm_bo_import(surf->base.dpy->gbm, GBM_BO_IMPORT_FD_MODIFIER, &buf, 0); for (i = 0; i < buf.num_fds; i++) { close(buf.fds[i]); } if (!image->bo) goto fail; } surf->acquiredImages.first = image->nextAcquired; if (!surf->acquiredImages.first) surf->acquiredImages.last = NULL; image->locked = true; return image->bo; fail: /* XXX Can this be called from outside an EGL entry point? */ eGbmSetError(data, EGL_BAD_ALLOC); return NULL; } void eGbmSurfaceReleaseBuffer(struct gbm_surface* s, struct gbm_bo *bo) { GbmSurface* surf = GetSurf(s); GbmDisplay* display; EGLImage img = EGL_NO_IMAGE_KHR; unsigned int i; if (!surf || !bo) return; display = surf->base.dpy; for (i = 0; i < ARRAY_LEN(surf->images); i++) { if (surf->images[i].bo == bo) { surf->images[i].locked = false; img = surf->images[i].image; if (!img) { /* * The stream removed this image while it was locked. Free the * buffer object associated with it as well. 
*/ gbm_bo_destroy(surf->images[i].bo); } break; } } assert(img != EGL_NO_IMAGE_KHR); if (img != EGL_NO_IMAGE_KHR) { display->data->egl.StreamReleaseImageNV(display->devDpy, surf->stream, img, EGL_NO_SYNC_KHR); assert(surf->numFreeImages < WINDOW_STREAM_FIFO_LENGTH); surf->numFreeImages++; } } static void FreeSurface(GbmObject* obj) { if (obj) { GbmSurface* surf = (GbmSurface*)obj; GbmPlatformData* data = obj->dpy->data; EGLDisplay dpy = obj->dpy->devDpy; unsigned int i; for (i = 0; i < ARRAY_LEN(surf->images); i++) { if (surf->images[i].image != EGL_NO_IMAGE_KHR) data->egl.DestroyImageKHR(dpy, surf->images[i].image); if (surf->images[i].bo != NULL) gbm_bo_destroy(surf->images[i].bo); } if (surf->egl != EGL_NO_SURFACE) data->egl.DestroySurface(dpy, surf->egl); if (surf->stream != EGL_NO_STREAM_KHR) data->egl.DestroyStreamKHR(dpy, surf->stream); if (surf->sync != EGL_NO_SYNC_KHR) data->egl.DestroySyncKHR(dpy, surf->sync); /* Drop reference to the display acquired at creation time */ eGbmUnrefObject(&obj->dpy->base); free(obj); } } EGLSurface eGbmCreatePlatformWindowSurfaceHook(EGLDisplay dpy, EGLConfig config, void* nativeWin, const EGLAttrib* attribs) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); GbmPlatformData* data; struct gbm_surface* s = nativeWin; GbmSurface* surf = NULL; EGLint surfType; EGLint err = EGL_BAD_ALLOC; EGLBoolean res; const EGLint surfAttrs[] = { /* XXX Merge in relevant here as well */ EGL_WIDTH, s->v0.width, EGL_HEIGHT, s->v0.height, EGL_NONE }; static const EGLint streamAttrs[] = { EGL_STREAM_FIFO_LENGTH_KHR, WINDOW_STREAM_FIFO_LENGTH, EGL_NONE }; static const EGLint syncAttrs[] = { EGL_SYNC_STATUS_KHR, EGL_SIGNALED_KHR, EGL_NONE }; (void)attribs; if (!display) { /* No platform data. Can't set error EGL_NO_DISPLAY */ return EGL_NO_SURFACE; } data = display->data; dpy = display->devDpy; if (!s) { err = EGL_BAD_NATIVE_WINDOW; goto fail; } if (s->gbm != display->gbm) { err = EGL_BAD_NATIVE_WINDOW; goto fail; } res = data->egl.GetConfigAttrib(dpy, config, EGL_SURFACE_TYPE, &surfType); if (!res || !(surfType & EGL_STREAM_BIT_KHR)) { err = EGL_BAD_CONFIG; goto fail; } surf = calloc(1, sizeof(*surf)); if (!surf) { err = EGL_BAD_ALLOC; goto fail; } surf->base.dpy = display; surf->base.type = EGL_OBJECT_SURFACE_KHR; surf->base.refCount = 1; surf->base.free = FreeSurface; surf->stream = data->egl.CreateStreamKHR(dpy, streamAttrs); surf->numFreeImages = WINDOW_STREAM_FIFO_LENGTH; if (!surf->stream) { err = EGL_BAD_ALLOC; goto fail; } if (!data->egl.StreamImageConsumerConnectNV(dpy, surf->stream, s->v0.count, s->v0.modifiers, NULL)) { err = EGL_BAD_ALLOC; goto fail; } surf->egl = data->egl.CreateStreamProducerSurfaceKHR(dpy, config, surf->stream, surfAttrs); if (!surf->egl) { err = data->egl.GetError(); // Pass EGL_BAD_MATCH through, since that's an allowed error for // eglCreateWindowSurface, and it would still make sense to the // application. Otherwise, send back EGL_BAD_ALLOC. 
if (err != EGL_BAD_MATCH) { err = EGL_BAD_ALLOC; } goto fail; } surf->sync = data->egl.CreateSyncKHR(dpy, EGL_SYNC_FENCE_KHR, syncAttrs); if (!surf->sync) { err = EGL_BAD_ALLOC; goto fail; } if (!PumpSurfEvents(display, surf)) { err = EGL_BAD_ALLOC; goto fail; } /* The reference to the display object is retained by surf */ if (!eGbmAddObject(&surf->base)) { err = EGL_BAD_ALLOC; goto fail; } SetSurf(s, surf); return (EGLSurface)surf; fail: FreeSurface(&surf->base); eGbmSetError(display->data, err); return EGL_NO_SURFACE; } void* eGbmSurfaceUnwrap(GbmObject* obj) { return ((GbmSurface*)obj)->egl; } EGLBoolean eGbmDestroySurfaceHook(EGLDisplay dpy, EGLSurface eglSurf) { GbmDisplay* display = (GbmDisplay*)eGbmRefHandle(dpy); EGLBoolean ret = EGL_FALSE; if (!display) return ret; if (eGbmDestroyHandle(eglSurf)) ret = EGL_TRUE; eGbmUnrefObject(&display->base); return ret; } egl-gbm-1.1.2.1/src/gbm-surface.h000066400000000000000000000014571467307402700163400ustar00rootroot00000000000000/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #ifndef GBM_SURFACE_H #define GBM_SURFACE_H #include "gbm-handle.h" #include #include int eGbmSurfaceHasFreeBuffers(struct gbm_surface* s); struct gbm_bo* eGbmSurfaceLockFrontBuffer(struct gbm_surface* s); void eGbmSurfaceReleaseBuffer(struct gbm_surface* s, struct gbm_bo *bo); EGLSurface eGbmCreatePlatformWindowSurfaceHook(EGLDisplay dpy, EGLConfig config, void* nativeWin, const EGLAttrib* attribs); void* eGbmSurfaceUnwrap(GbmObject* obj); EGLBoolean eGbmDestroySurfaceHook(EGLDisplay dpy, EGLSurface eglSurf); #endif /* GBM_SURFACE_H */ egl-gbm-1.1.2.1/src/gbm-utils.c000066400000000000000000000100011467307402700160240ustar00rootroot00000000000000/* * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: MIT */ #include "gbm-utils.h" #include #include #if HAS_MINCORE #include #include #endif EGLBoolean eGbmFindExtension(const char* extension, const char* extensions) { const char* start; const char* where; const char* terminator; start = extensions; for (;;) { where = strstr(start, extension); if (!where) { break; } terminator = where + strlen(extension); if (where == start || *(where - 1) == ' ') { if (*terminator == ' ' || *terminator == '\0') { return EGL_TRUE; } } start = terminator; } return EGL_FALSE; } void eGbmSetErrorInternal(GbmPlatformData *data, EGLint error, const char *file, int line) { static const char *defaultMsg = "GBM external platform error"; char msg[256]; if (!data || !data->driver.setError) return; if (!file || (snprintf(msg, sizeof(msg), "%s:%d: %s", file, line, defaultMsg) <= 0)) { data->driver.setError(error, EGL_DEBUG_MSG_ERROR_KHR, defaultMsg); return; } data->driver.setError(error, EGL_DEBUG_MSG_ERROR_KHR, msg); } #if HAS_MINCORE && defined(RTLD_DEFAULT) EGLBoolean eGbmPointerIsDereferenceable(void* p) { /* * BSD and Solaris have slightly different prototypes for mincore, but * they should be compatible with this. BSD uses: * * (const void*, size_t, char*) * * And Solaris uses: * * (caddr_t, size_t, char*) * * Which I believe are all ABI compatible with the Linux prototype used * below for MINCOREPROC. 
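     *
     * For reference, the glibc declaration on Linux is:
     *
     *     int mincore(void *addr, size_t length, unsigned char *vec);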
*/ typedef int (*MINCOREPROC)(void*, size_t, unsigned char*); static MINCOREPROC pMinCore = NULL; static EGLBoolean minCoreLoadAttempted = EGL_FALSE; uintptr_t addr = (uintptr_t)p; unsigned char unused; const long page_size = getpagesize(); if (minCoreLoadAttempted == EGL_FALSE) { minCoreLoadAttempted = EGL_TRUE; /* * According to its manpage, mincore was introduced in Linux 2.3.99pre1 * and glibc 2.2. The minimum glibc our driver supports is 2.0, so this * mincore can not be linked in directly. It does however seem * reasonable to assume that Wayland will not be run on glibc < 2.2. * * Attempt to load mincore from the currently available libraries. * mincore comes from libc, which the EGL driver depends on, so it * should always be loaded if our driver is running. */ pMinCore = (MINCOREPROC)dlsym(RTLD_DEFAULT, "mincore"); } /* * If the pointer can't be tested for safety, or is obviously unsafe, * assume it can't be dereferenced. */ if (p == NULL || !pMinCore) { dlerror(); return EGL_FALSE; } /* align addr to page_size */ addr &= ~(page_size - 1); /* * mincore() returns 0 on success, and -1 on failure. The last parameter * is a vector of bytes with one entry for each page queried. mincore * returns page residency information in the first bit of each byte in the * vector. * * Residency doesn't actually matter when determining whether a pointer is * dereferenceable, so the output vector can be ignored. What matters is * whether mincore succeeds. It will fail with ENOMEM if the range * [addr, addr + length) is not mapped into the process, so all that needs * to be checked there is whether the mincore call succeeds or not, as it * can only succeed on dereferenceable memory ranges. */ return (pMinCore((void*)addr, page_size, &unused) >= 0); } #else /* HAS_MINCORE */ EGLBoolean eGbmPointerIsDereferenceable(void* p) { /* * We don't have mincore available, or we can't load it, so just assume * that the pointer is not readable. */ return EGL_FALSE; } #endif /* HAS_MINCORE */ egl-gbm-1.1.2.1/src/gbm-utils.h000066400000000000000000000013601467307402700160410ustar00rootroot00000000000000/* * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: MIT */ #ifndef GBM_UTILS_H #define GBM_UTILS_H #include "gbm-platform.h" #include #if defined(__QNX__) #define HAS_MINCORE 0 #else #define HAS_MINCORE 1 #endif #ifdef NDEBUG #define eGbmSetError(data, err) \ eGbmSetErrorInternal(data, err, NULL, 0); #else #define eGbmSetError(data, err) \ eGbmSetErrorInternal(data, err, __FILE__, __LINE__); #endif EGLBoolean eGbmFindExtension(const char* extension, const char* extensions); void eGbmSetErrorInternal(GbmPlatformData *data, EGLint error, const char *file, int line); EGLBoolean eGbmPointerIsDereferenceable(void* p); #endif /* GBM_UTILS_H */ egl-gbm-1.1.2.1/src/meson.build000066400000000000000000000016321467307402700161310ustar00rootroot00000000000000if not cc.has_function('dlsym') libdl = cc.find_library('dl') else libdl = [] endif add_project_arguments('-Wall', language : 'c') add_project_arguments('-Werror', language : 'c') add_project_arguments('-fvisibility=hidden', language : 'c') add_project_arguments('-D_GNU_SOURCE', language : 'c') if cc.has_argument('-Wpedantic') add_project_arguments('-Wno-pedantic', language : 'c') endif src = [ 'gbm-platform.c', 'gbm-display.c', 'gbm-utils.c', 'gbm-mutex.c', 'gbm-handle.c', 'gbm-surface.c', ] egl_gbm = library('nvidia-egl-gbm', src, dependencies : [ eglexternalplatform, gbm, dep_libdrm, threads, libdl, ], include_directories : ext_includes, version : meson.project_version(), install : true, ) install_data('15_nvidia_gbm.json', install_dir: '@0@/egl/egl_external_platform.d'.format(get_option('datadir')))
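
The following is an illustrative client-side sketch, not part of this source tree, showing how the pieces above fit together when an application targets EGL_KHR_platform_gbm with this platform library loaded. The device path, surface size, pixel format, and build command are assumptions chosen for the example, and error handling is reduced to early returns; it uses only public libgbm and EGL 1.5 entry points.

/*
 * gbm-egl-example.c: hypothetical standalone client, not part of this tree.
 * Build (illustrative): cc gbm-egl-example.c -lgbm -lEGL
 */
#include <fcntl.h>
#include <unistd.h>

#include <gbm.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>

int main(void)
{
    /* The device path is an assumption for the example; real clients should
     * discover the node, e.g. with drmGetDevices2() as OpenDefaultDrmDevice()
     * does above. */
    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return 1;

    struct gbm_device *gbm = gbm_create_device(fd);
    if (!gbm)
        return 1;

    /* Dispatched to eGbmGetPlatformDisplayExport through the EGL external
     * platform interface registered by 15_nvidia_gbm.json. */
    EGLDisplay dpy = eglGetPlatformDisplay(EGL_PLATFORM_GBM_KHR, gbm, NULL);
    if (dpy == EGL_NO_DISPLAY || !eglInitialize(dpy, NULL, NULL))
        return 1;

    /* eGbmChooseConfigHook maps EGL_WINDOW_BIT to EGL_STREAM_BIT_KHR and
     * matches EGL_NATIVE_VISUAL_ID against the config's DRM fourcc
     * (ConfigToDrmFourCC), so a GBM format can be requested directly. */
    const EGLint cfg_attribs[] = {
        EGL_SURFACE_TYPE,     EGL_WINDOW_BIT,
        EGL_RENDERABLE_TYPE,  EGL_OPENGL_ES2_BIT,
        EGL_NATIVE_VISUAL_ID, GBM_FORMAT_XRGB8888,
        EGL_NONE
    };
    EGLConfig cfg;
    EGLint num = 0;
    if (!eglChooseConfig(dpy, cfg_attribs, &cfg, 1, &num) || num < 1)
        return 1;

    struct gbm_surface *win =
        gbm_surface_create(gbm, 640, 480, GBM_FORMAT_XRGB8888,
                           GBM_BO_USE_RENDERING | GBM_BO_USE_SCANOUT);
    if (!win)
        return 1;

    /* Dispatched to eGbmCreatePlatformWindowSurfaceHook, which backs the
     * window with an EGLStream and an EGLImage consumer. */
    EGLSurface surf = eglCreatePlatformWindowSurface(dpy, cfg, win, NULL);

    eglBindAPI(EGL_OPENGL_ES_API);
    static const EGLint ctx_attribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
    EGLContext ctx = eglCreateContext(dpy, cfg, EGL_NO_CONTEXT, ctx_attribs);
    eglMakeCurrent(dpy, surf, surf, ctx);

    /* ... render with GLES ... */
    eglSwapBuffers(dpy, surf);

    /* gbm_surface_lock_front_buffer() reaches eGbmSurfaceLockFrontBuffer via
     * the v0.surface_lock_front_buffer pointer patched in by
     * eGbmInitializeHook; the returned bo would normally be scanned out with
     * KMS before being released. */
    struct gbm_bo *bo = gbm_surface_lock_front_buffer(win);
    if (bo)
        gbm_surface_release_buffer(win, bo);

    eglMakeCurrent(dpy, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
    eglDestroySurface(dpy, surf);
    eglDestroyContext(dpy, ctx);
    gbm_surface_destroy(win);
    eglTerminate(dpy);
    gbm_device_destroy(gbm);
    close(fd);
    return 0;
}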