| /* |
| * Copyright © 2008-2011 Kristian Høgsberg |
| * Copyright © 2011 Intel Corporation |
| * Copyright © 2017, 2018 Collabora, Ltd. |
| * Copyright © 2017, 2018 General Electric Company |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining |
| * a copy of this software and associated documentation files (the |
| * "Software"), to deal in the Software without restriction, including |
| * without limitation the rights to use, copy, modify, merge, publish, |
| * distribute, sublicense, and/or sell copies of the Software, and to |
| * permit persons to whom the Software is furnished to do so, subject to |
| * the following conditions: |
| * |
| * The above copyright notice and this permission notice (including the |
| * next paragraph) shall be included in all copies or substantial |
| * portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
| #include "config.h" |
| |
| #include <errno.h> |
| #include <stdint.h> |
| #include <stdlib.h> |
| #include <ctype.h> |
| #include <string.h> |
| #include <fcntl.h> |
| #include <unistd.h> |
| #include <linux/input.h> |
| #include <linux/vt.h> |
| #include <assert.h> |
| #include <sys/mman.h> |
| #include <dlfcn.h> |
| #include <time.h> |
| |
| #include <xf86drm.h> |
| #include <xf86drmMode.h> |
| #include <drm_fourcc.h> |
| |
| #include <gbm.h> |
| #include <libudev.h> |
| |
| #include "compositor.h" |
| #include "compositor-drm.h" |
| #include "shared/helpers.h" |
| #include "shared/timespec-util.h" |
| #include "gl-renderer.h" |
| #include "weston-egl-ext.h" |
| #include "pixman-renderer.h" |
| #include "pixel-formats.h" |
| #include "libbacklight.h" |
| #include "libinput-seat.h" |
| #include "launcher-util.h" |
| #include "vaapi-recorder.h" |
| #include "presentation-time-server-protocol.h" |
| #include "linux-dmabuf.h" |
| #include "linux-dmabuf-unstable-v1-server-protocol.h" |
| |
| #ifndef DRM_CAP_TIMESTAMP_MONOTONIC |
| #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 |
| #endif |
| |
| #ifndef DRM_CLIENT_CAP_UNIVERSAL_PLANES |
| #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 |
| #endif |
| |
| #ifndef DRM_CLIENT_CAP_ASPECT_RATIO |
| #define DRM_CLIENT_CAP_ASPECT_RATIO 4 |
| #endif |
| |
| #ifndef DRM_CAP_CURSOR_WIDTH |
| #define DRM_CAP_CURSOR_WIDTH 0x8 |
| #endif |
| |
| #ifndef DRM_CAP_CURSOR_HEIGHT |
| #define DRM_CAP_CURSOR_HEIGHT 0x9 |
| #endif |
| |
| #ifndef GBM_BO_USE_CURSOR |
| #define GBM_BO_USE_CURSOR GBM_BO_USE_CURSOR_64X64 |
| #endif |
| |
| #define MAX_CLONED_CONNECTORS 4 |
| |
| /** |
| * aspect ratio info taken from the drmModeModeInfo flag bits 19-22, |
| * which should be used to fill the aspect ratio field in weston_mode. |
| */ |
| #define DRM_MODE_FLAG_PIC_AR_BITS_POS 19 |
| #ifndef DRM_MODE_FLAG_PIC_AR_MASK |
| #define DRM_MODE_FLAG_PIC_AR_MASK (0xF << DRM_MODE_FLAG_PIC_AR_BITS_POS) |
| #endif |
| |
| /** |
| * Represents the values of an enum-type KMS property |
| */ |
struct drm_property_enum_info {
	/* Arrays of these are indexed by the backend's internal enum value
	 * (e.g. enum wdrm_plane_type), not by the kernel's raw value. */
	const char *name; /**< name as string (static, not freed) */
	bool valid; /**< true if value is supported; ignore if false */
	uint64_t value; /**< raw value */
};
| |
| /** |
| * Holds information on a DRM property, including its ID and the enum |
| * values it holds. |
| * |
| * DRM properties are allocated dynamically, and maintained as DRM objects |
| * within the normal object ID space; they thus do not have a stable ID |
| * to refer to. This includes enum values, which must be referred to by |
| * integer values, but these are not stable. |
| * |
| * drm_property_info allows a cache to be maintained where Weston can use |
| * enum values internally to refer to properties, with the mapping to DRM |
| * ID values being maintained internally. |
| */ |
struct drm_property_info {
	const char *name; /**< name as string (static, not freed) */
	uint32_t prop_id; /**< KMS property object ID; 0 if not found */
	unsigned int num_enum_values; /**< number of enum values */
	struct drm_property_enum_info *enum_values; /**< array of enum values */
};
| |
| /** |
| * List of properties attached to DRM planes |
| */ |
enum wdrm_plane_property {
	/* Each value indexes the same-positioned entry in plane_props[],
	 * which carries the KMS property name to look up. */
	WDRM_PLANE_TYPE = 0,
	WDRM_PLANE_SRC_X,
	WDRM_PLANE_SRC_Y,
	WDRM_PLANE_SRC_W,
	WDRM_PLANE_SRC_H,
	WDRM_PLANE_CRTC_X,
	WDRM_PLANE_CRTC_Y,
	WDRM_PLANE_CRTC_W,
	WDRM_PLANE_CRTC_H,
	WDRM_PLANE_FB_ID,
	WDRM_PLANE_CRTC_ID,
	WDRM_PLANE_IN_FORMATS,
	WDRM_PLANE__COUNT /* must remain last: number of entries */
};
| |
| /** |
| * Possible values for the WDRM_PLANE_TYPE property. |
| */ |
enum wdrm_plane_type {
	/* Internal values; the kernel's raw values are discovered at runtime
	 * and mapped via plane_type_enums[]. */
	WDRM_PLANE_TYPE_PRIMARY = 0,
	WDRM_PLANE_TYPE_CURSOR,
	WDRM_PLANE_TYPE_OVERLAY,
	WDRM_PLANE_TYPE__COUNT /* must remain last */
};
| |
| static struct drm_property_enum_info plane_type_enums[] = { |
| [WDRM_PLANE_TYPE_PRIMARY] = { |
| .name = "Primary", |
| }, |
| [WDRM_PLANE_TYPE_OVERLAY] = { |
| .name = "Overlay", |
| }, |
| [WDRM_PLANE_TYPE_CURSOR] = { |
| .name = "Cursor", |
| }, |
| }; |
| |
/* Template property table for planes; copied per-plane and filled in with
 * runtime IDs by drm_property_info_populate(). */
static const struct drm_property_info plane_props[] = {
	[WDRM_PLANE_TYPE] = {
		.name = "type",
		.enum_values = plane_type_enums,
		.num_enum_values = WDRM_PLANE_TYPE__COUNT,
	},
	[WDRM_PLANE_SRC_X] = { .name = "SRC_X", },
	[WDRM_PLANE_SRC_Y] = { .name = "SRC_Y", },
	[WDRM_PLANE_SRC_W] = { .name = "SRC_W", },
	[WDRM_PLANE_SRC_H] = { .name = "SRC_H", },
	[WDRM_PLANE_CRTC_X] = { .name = "CRTC_X", },
	[WDRM_PLANE_CRTC_Y] = { .name = "CRTC_Y", },
	[WDRM_PLANE_CRTC_W] = { .name = "CRTC_W", },
	[WDRM_PLANE_CRTC_H] = { .name = "CRTC_H", },
	[WDRM_PLANE_FB_ID] = { .name = "FB_ID", },
	[WDRM_PLANE_CRTC_ID] = { .name = "CRTC_ID", },
	[WDRM_PLANE_IN_FORMATS] = { .name = "IN_FORMATS" },
};
| |
| /** |
| * List of properties attached to a DRM connector |
| */ |
enum wdrm_connector_property {
	/* Indexes into connector_props[]. */
	WDRM_CONNECTOR_EDID = 0,
	WDRM_CONNECTOR_DPMS,
	WDRM_CONNECTOR_CRTC_ID,
	WDRM_CONNECTOR__COUNT /* must remain last */
};
| |
enum wdrm_dpms_state {
	/* Internal values for the connector "DPMS" enum property; raw
	 * kernel values are mapped via dpms_state_enums[]. */
	WDRM_DPMS_STATE_OFF = 0,
	WDRM_DPMS_STATE_ON,
	WDRM_DPMS_STATE_STANDBY, /* unused */
	WDRM_DPMS_STATE_SUSPEND, /* unused */
	WDRM_DPMS_STATE__COUNT /* must remain last */
};
| |
| static struct drm_property_enum_info dpms_state_enums[] = { |
| [WDRM_DPMS_STATE_OFF] = { |
| .name = "Off", |
| }, |
| [WDRM_DPMS_STATE_ON] = { |
| .name = "On", |
| }, |
| [WDRM_DPMS_STATE_STANDBY] = { |
| .name = "Standby", |
| }, |
| [WDRM_DPMS_STATE_SUSPEND] = { |
| .name = "Suspend", |
| }, |
| }; |
| |
/* Template property table for connectors; copied per-head and filled in
 * with runtime IDs by drm_property_info_populate(). */
static const struct drm_property_info connector_props[] = {
	[WDRM_CONNECTOR_EDID] = { .name = "EDID" },
	[WDRM_CONNECTOR_DPMS] = {
		.name = "DPMS",
		.enum_values = dpms_state_enums,
		.num_enum_values = WDRM_DPMS_STATE__COUNT,
	},
	[WDRM_CONNECTOR_CRTC_ID] = { .name = "CRTC_ID", },
};
| |
| /** |
| * List of properties attached to DRM CRTCs |
| */ |
enum wdrm_crtc_property {
	/* Indexes into crtc_props[]. */
	WDRM_CRTC_MODE_ID = 0,
	WDRM_CRTC_ACTIVE,
	WDRM_CRTC__COUNT /* must remain last */
};
| |
/* Template property table for CRTCs; copied per-output and filled in
 * with runtime IDs by drm_property_info_populate(). */
static const struct drm_property_info crtc_props[] = {
	[WDRM_CRTC_MODE_ID] = { .name = "MODE_ID", },
	[WDRM_CRTC_ACTIVE] = { .name = "ACTIVE", },
};
| |
| /** |
| * Mode for drm_output_state_duplicate. |
| */ |
enum drm_output_state_duplicate_mode {
	/* Controls how plane state is carried over when copying output state. */
	DRM_OUTPUT_STATE_CLEAR_PLANES, /**< reset all planes to off */
	DRM_OUTPUT_STATE_PRESERVE_PLANES, /**< preserve plane state */
};
| |
| /** |
| * Mode for drm_pending_state_apply and co. |
| */ |
enum drm_state_apply_mode {
	DRM_STATE_APPLY_SYNC, /**< state fully processed */
	DRM_STATE_APPLY_ASYNC, /**< state pending event delivery */
	DRM_STATE_TEST_ONLY, /**< test if the state can be applied */
};
| |
struct drm_backend {
	struct weston_base base;
	struct weston_compositor *compositor;

	struct udev *udev;
	struct wl_event_source *drm_source;

	struct udev_monitor *udev_monitor;
	struct wl_event_source *udev_drm_source;

	struct {
		int id;
		int fd;		/* DRM device fd; passed to all drm*() calls */
		char *filename;	/* DRM device node path */
	} drm;
	struct gbm_device *gbm;
	struct wl_listener session_listener;
	uint32_t gbm_format;	/* default pixel format for outputs -- TODO confirm */

	/* we need these parameters in order to not fail drmModeAddFB2()
	 * due to out of bounds dimensions, and then mistakenly set
	 * sprites_are_broken:
	 */
	int min_width, max_width;
	int min_height, max_height;

	struct wl_list plane_list;	/* drm_plane::link */
	int sprites_are_broken;		/* non-zero: avoid overlay planes */
	int sprites_hidden;

	void *repaint_data;

	bool state_invalid;	/* presumably forces a full modeset on next
				 * repaint -- NOTE(review): confirm */

	/* CRTC IDs not used by any enabled output. */
	struct wl_array unused_crtcs;

	int cursors_are_broken;	/* non-zero: avoid hardware cursors */

	bool universal_planes;	/* DRM_CLIENT_CAP_UNIVERSAL_PLANES enabled */
	bool atomic_modeset;	/* atomic KMS API in use */

	int use_pixman;		/* non-zero: Pixman renderer instead of GL */
	bool use_pixman_shadow;

	struct udev_input input;

	/* Cursor plane dimensions, per DRM_CAP_CURSOR_WIDTH/HEIGHT --
	 * NOTE(review): confirm they are filled from those caps. */
	int32_t cursor_width;
	int32_t cursor_height;

	uint32_t pageflip_timeout;	/* pageflip watchdog; 0 presumably
					 * disables it -- TODO confirm */

	bool shutting_down;

	bool aspect_ratio_supported;	/* DRM_CLIENT_CAP_ASPECT_RATIO */
};
| |
struct drm_mode {
	struct weston_mode base;
	drmModeModeInfo mode_info;	/* raw KMS mode descriptor */
	uint32_t blob_id;	/* KMS blob holding mode_info, presumably for
				 * the atomic MODE_ID property -- TODO confirm */
};
| |
enum drm_fb_type {
	/* Determines which destroy path a drm_fb takes (see the
	 * drm_fb_destroy_* functions). */
	BUFFER_INVALID = 0, /**< never used */
	BUFFER_CLIENT, /**< directly sourced from client */
	BUFFER_DMABUF, /**< imported from linux_dmabuf client */
	BUFFER_PIXMAN_DUMB, /**< internal Pixman rendering */
	BUFFER_GBM_SURFACE, /**< internal EGL rendering */
	BUFFER_CURSOR, /**< internal cursor buffer */
};
| |
struct drm_fb {
	enum drm_fb_type type;

	int refcnt;	/* manual refcount; taken with drm_fb_ref() */

	uint32_t fb_id, size;	/* KMS FB object ID; dumb-buffer size in
				 * bytes (0 for dmabuf imports) */
	uint32_t handles[4];	/* per-plane GEM handles, up to 4 planes
				 * as accepted by drmModeAddFB2() */
	uint32_t strides[4];	/* per-plane pitch in bytes */
	uint32_t offsets[4];	/* per-plane offset in bytes */
	const struct pixel_format_info *format;
	uint64_t modifier;	/* DRM_FORMAT_MOD_INVALID when unset */
	int width, height;
	int fd;			/* DRM fd that owns fb_id/handles */
	struct weston_buffer_reference buffer_ref;

	/* Used by gbm fbs */
	struct gbm_bo *bo;
	struct gbm_surface *gbm_surface;

	/* Used by dumb fbs */
	void *map;	/* CPU mapping of the dumb buffer */
};
| |
/* Identification strings parsed out of a connector's EDID blob.
 * Fixed-size buffers; presumably NUL-terminated by the parser --
 * NOTE(review): confirm against the EDID parsing code. */
struct drm_edid {
	char eisa_id[13];
	char monitor_name[13];
	char pnp_id[5];
	char serial_number[13];
};
| |
| /** |
| * Pending state holds one or more drm_output_state structures, collected from |
| * performing repaint. This pending state is transient, and only lives between |
| * beginning a repaint group and flushing the results: after flush, each |
| * output state will complete and be retired separately. |
| */ |
struct drm_pending_state {
	struct drm_backend *backend;
	struct wl_list output_list;	/* drm_output_state::link */
};
| |
| /* |
| * Output state holds the dynamic state for one Weston output, i.e. a KMS CRTC, |
| * plus >= 1 each of encoder/connector/plane. Since everything but the planes |
| * is currently statically assigned per-output, we mainly use this to track |
| * plane state. |
| * |
| * pending_state is set when the output state is owned by a pending_state, |
| * i.e. when it is being constructed and has not yet been applied. When the |
| * output state has been applied, the owning pending_state is freed. |
| */ |
struct drm_output_state {
	struct drm_pending_state *pending_state; /* owner while pending; NULL
						  * once applied */
	struct drm_output *output;
	struct wl_list link;		/* drm_pending_state::output_list */
	enum dpms_enum dpms;		/* power state requested by this state */
	struct wl_list plane_list;	/* drm_plane_state::link */
};
| |
| /** |
| * Plane state holds the dynamic state for a plane: where it is positioned, |
| * and which buffer it is currently displaying. |
| * |
| * The plane state is owned by an output state, except when setting an initial |
| * state. See drm_output_state for notes on state object lifetime. |
| */ |
struct drm_plane_state {
	struct drm_plane *plane;
	struct drm_output *output;
	struct drm_output_state *output_state;	/* owning output state */

	struct drm_fb *fb;	/* buffer to display; NULL for plane off */

	struct weston_view *ev; /**< maintained for drm_assign_planes only */

	/* Source (buffer) and destination (CRTC) rectangles; src is
	 * presumably in KMS 16.16 fixed point -- NOTE(review): confirm
	 * at the point these are filled in. */
	int32_t src_x, src_y;
	uint32_t src_w, src_h;
	int32_t dest_x, dest_y;
	uint32_t dest_w, dest_h;

	bool complete;	/* kernel has acknowledged this state */

	struct wl_list link; /* drm_output_state::plane_list */
};
| |
| /** |
| * A plane represents one buffer, positioned within a CRTC, and stacked |
| * relative to other planes on the same CRTC. |
| * |
 * Each CRTC has a 'primary plane', which is used to display the classic
| * framebuffer contents, as accessed through the legacy drmModeSetCrtc |
| * call (which combines setting the CRTC's actual physical mode, and the |
| * properties of the primary plane). |
| * |
| * The cursor plane also has its own alternate legacy API. |
| * |
| * Other planes are used opportunistically to display content we do not |
| * wish to blit into the primary plane. These non-primary/cursor planes |
| * are referred to as 'sprites'. |
| */ |
struct drm_plane {
	struct weston_plane base;

	struct drm_backend *backend;

	enum wdrm_plane_type type;

	uint32_t possible_crtcs;	/* bitmask of CRTC indices (pipes),
					 * not CRTC object IDs */
	uint32_t plane_id;		/* KMS plane object ID */
	uint32_t count_formats;		/* presumably the number of entries
					 * in formats[] -- TODO confirm */

	struct drm_property_info props[WDRM_PLANE__COUNT];

	/* The last state submitted to the kernel for this plane. */
	struct drm_plane_state *state_cur;

	struct wl_list link;	/* drm_backend::plane_list */

	/* Flexible array: supported formats and, per format, the
	 * supported modifiers. */
	struct {
		uint32_t format;
		uint32_t count_modifiers;
		uint64_t *modifiers;
	} formats[];
};
| |
struct drm_head {
	struct weston_head base;
	struct drm_backend *backend;

	drmModeConnector *connector;
	uint32_t connector_id;	/* KMS connector object ID */
	struct drm_edid edid;

	/* Holds the properties for the connector */
	struct drm_property_info props_conn[WDRM_CONNECTOR__COUNT];

	struct backlight *backlight;	/* may be NULL -- NOTE(review):
					 * confirm callers check */

	drmModeModeInfo inherited_mode;	/**< Original mode on the connector */
	uint32_t inherited_crtc_id;	/**< Original CRTC assignment */
};
| |
struct drm_output {
	struct weston_output base;

	uint32_t crtc_id; /* object ID to pass to DRM functions */
	int pipe; /* index of CRTC in resource array / bitmasks */

	/* Holds the properties for the CRTC */
	struct drm_property_info props_crtc[WDRM_CRTC__COUNT];

	/* Non-zero while the corresponding event/action is outstanding --
	 * NOTE(review): used as flags or counters; confirm at use sites. */
	int vblank_pending;
	int page_flip_pending;
	int atomic_complete_pending;
	int destroy_pending;
	int disable_pending;
	int dpms_off_pending;

	/* Double-buffered cursor FBs, selected by current_cursor --
	 * NOTE(review): confirm flip logic. */
	struct drm_fb *gbm_cursor_fb[2];
	struct drm_plane *cursor_plane;
	struct weston_view *cursor_view;
	int current_cursor;

	struct gbm_surface *gbm_surface;
	uint32_t gbm_format;

	/* Plane being displayed directly on the CRTC */
	struct drm_plane *scanout_plane;

	/* The last state submitted to the kernel for this CRTC. */
	struct drm_output_state *state_cur;
	/* The previously-submitted state, where the hardware has not
	 * yet acknowledged completion of state_cur. */
	struct drm_output_state *state_last;

	/* Double-buffered dumb buffers for the Pixman renderer. */
	struct drm_fb *dumb[2];
	pixman_image_t *image[2];
	int current_image;
	pixman_region32_t previous_damage;

	struct vaapi_recorder *recorder;
	struct wl_listener recorder_frame_listener;

	struct wl_event_source *pageflip_timer;	/* watchdog; see
						 * pageflip_timeout() */
};
| |
/* Suffixes appended to mode names when logging, indexed by the
 * weston_mode aspect-ratio value. */
static const char *const aspect_ratio_as_string[] = {
	[WESTON_MODE_PIC_AR_NONE] = "",
	[WESTON_MODE_PIC_AR_4_3] = " 4:3",
	[WESTON_MODE_PIC_AR_16_9] = " 16:9",
	[WESTON_MODE_PIC_AR_64_27] = " 64:27",
	[WESTON_MODE_PIC_AR_256_135] = " 256:135",
};
| |
/* Interface of the dynamically-loaded GL renderer module; NULL until the
 * module is loaded -- NOTE(review): confirm load site. */
static struct gl_renderer_interface *gl_renderer;

/* Seat name used when the configuration does not specify one. */
static const char default_seat[] = "seat0";
| |
| static void |
| wl_array_remove_uint32(struct wl_array *array, uint32_t elm) |
| { |
| uint32_t *pos, *end; |
| |
| end = (uint32_t *) ((char *) array->data + array->size); |
| |
| wl_array_for_each(pos, array) { |
| if (*pos != elm) |
| continue; |
| |
| array->size -= sizeof(*pos); |
| if (pos + 1 == end) |
| break; |
| |
| memmove(pos, pos + 1, (char *) end - (char *) (pos + 1)); |
| break; |
| } |
| } |
| |
/* Downcast a weston_head embedded in a drm_head back to the drm_head. */
static inline struct drm_head *
to_drm_head(struct weston_head *base)
{
	return container_of(base, struct drm_head, base);
}
| |
/* Downcast a weston_output embedded in a drm_output back to the drm_output. */
static inline struct drm_output *
to_drm_output(struct weston_output *base)
{
	return container_of(base, struct drm_output, base);
}
| |
/* Fetch the drm_backend from a compositor whose backend is the DRM one. */
static inline struct drm_backend *
to_drm_backend(struct weston_compositor *base)
{
	return container_of(base->backend, struct drm_backend, base);
}
| |
| static int |
| pageflip_timeout(void *data) { |
| /* |
| * Our timer just went off, that means we're not receiving drm |
| * page flip events anymore for that output. Let's gracefully exit |
| * weston with a return value so devs can debug what's going on. |
| */ |
| struct drm_output *output = data; |
| struct weston_compositor *compositor = output->base.compositor; |
| |
| weston_log("Pageflip timeout reached on output %s, your " |
| "driver is probably buggy! Exiting.\n", |
| output->base.name); |
| weston_compositor_exit_with_code(compositor, EXIT_FAILURE); |
| |
| return 0; |
| } |
| |
| /* Creates the pageflip timer. Note that it isn't armed by default */ |
| static int |
| drm_output_pageflip_timer_create(struct drm_output *output) |
| { |
| struct wl_event_loop *loop = NULL; |
| struct weston_compositor *ec = output->base.compositor; |
| |
| loop = wl_display_get_event_loop(ec->wl_display); |
| assert(loop); |
| output->pageflip_timer = wl_event_loop_add_timer(loop, |
| pageflip_timeout, |
| output); |
| |
| if (output->pageflip_timer == NULL) { |
| weston_log("creating drm pageflip timer failed: %m\n"); |
| return -1; |
| } |
| |
| return 0; |
| } |
| |
/* Downcast a weston_mode embedded in a drm_mode back to the drm_mode. */
static inline struct drm_mode *
to_drm_mode(struct weston_mode *base)
{
	return container_of(base, struct drm_mode, base);
}
| |
| /** |
| * Get the current value of a KMS property |
| * |
| * Given a drmModeObjectGetProperties return, as well as the drm_property_info |
| * for the target property, return the current value of that property, |
| * with an optional default. If the property is a KMS enum type, the return |
| * value will be translated into the appropriate internal enum. |
| * |
| * If the property is not present, the default value will be returned. |
| * |
| * @param info Internal structure for property to look up |
| * @param props Raw KMS properties for the target object |
| * @param def Value to return if property is not found |
| */ |
| static uint64_t |
| drm_property_get_value(struct drm_property_info *info, |
| const drmModeObjectProperties *props, |
| uint64_t def) |
| { |
| unsigned int i; |
| |
| if (info->prop_id == 0) |
| return def; |
| |
| for (i = 0; i < props->count_props; i++) { |
| unsigned int j; |
| |
| if (props->props[i] != info->prop_id) |
| continue; |
| |
| /* Simple (non-enum) types can return the value directly */ |
| if (info->num_enum_values == 0) |
| return props->prop_values[i]; |
| |
| /* Map from raw value to enum value */ |
| for (j = 0; j < info->num_enum_values; j++) { |
| if (!info->enum_values[j].valid) |
| continue; |
| if (info->enum_values[j].value != props->prop_values[i]) |
| continue; |
| |
| return j; |
| } |
| |
| /* We don't have a mapping for this enum; return default. */ |
| break; |
| } |
| |
| return def; |
| } |
| |
| /** |
| * Cache DRM property values |
| * |
| * Update a per-object array of drm_property_info structures, given the |
| * DRM properties of the object. |
| * |
| * Call this every time an object newly appears (note that only connectors |
| * can be hotplugged), the first time it is seen, or when its status changes |
| * in a way which invalidates the potential property values (currently, the |
| * only case for this is connector hotplug). |
| * |
| * This updates the property IDs and enum values within the drm_property_info |
| * array. |
| * |
| * DRM property enum values are dynamic at runtime; the user must query the |
| * property to find out the desired runtime value for a requested string |
| * name. Using the 'type' field on planes as an example, there is no single |
| * hardcoded constant for primary plane types; instead, the property must be |
| * queried at runtime to find the value associated with the string "Primary". |
| * |
| * This helper queries and caches the enum values, to allow us to use a set |
| * of compile-time-constant enums portably across various implementations. |
| * The values given in enum_names are searched for, and stored in the |
| * same-indexed field of the map array. |
| * |
| * @param b DRM backend object |
| * @param src DRM property info array to source from |
| * @param info DRM property info array to copy into |
| * @param num_infos Number of entries in the source array |
| * @param props DRM object properties for the object |
| */ |
| static void |
| drm_property_info_populate(struct drm_backend *b, |
| const struct drm_property_info *src, |
| struct drm_property_info *info, |
| unsigned int num_infos, |
| drmModeObjectProperties *props) |
| { |
| drmModePropertyRes *prop; |
| unsigned i, j; |
| |
| for (i = 0; i < num_infos; i++) { |
| unsigned int j; |
| |
| info[i].name = src[i].name; |
| info[i].prop_id = 0; |
| info[i].num_enum_values = src[i].num_enum_values; |
| |
| if (src[i].num_enum_values == 0) |
| continue; |
| |
| info[i].enum_values = |
| malloc(src[i].num_enum_values * |
| sizeof(*info[i].enum_values)); |
| assert(info[i].enum_values); |
| for (j = 0; j < info[i].num_enum_values; j++) { |
| info[i].enum_values[j].name = src[i].enum_values[j].name; |
| info[i].enum_values[j].valid = false; |
| } |
| } |
| |
| for (i = 0; i < props->count_props; i++) { |
| unsigned int k; |
| |
| prop = drmModeGetProperty(b->drm.fd, props->props[i]); |
| if (!prop) |
| continue; |
| |
| for (j = 0; j < num_infos; j++) { |
| if (!strcmp(prop->name, info[j].name)) |
| break; |
| } |
| |
| /* We don't know/care about this property. */ |
| if (j == num_infos) { |
| #ifdef DEBUG |
| weston_log("DRM debug: unrecognized property %u '%s'\n", |
| prop->prop_id, prop->name); |
| #endif |
| drmModeFreeProperty(prop); |
| continue; |
| } |
| |
| if (info[j].num_enum_values == 0 && |
| (prop->flags & DRM_MODE_PROP_ENUM)) { |
| weston_log("DRM: expected property %s to not be an" |
| " enum, but it is; ignoring\n", prop->name); |
| drmModeFreeProperty(prop); |
| continue; |
| } |
| |
| info[j].prop_id = props->props[i]; |
| |
| if (info[j].num_enum_values == 0) { |
| drmModeFreeProperty(prop); |
| continue; |
| } |
| |
| if (!(prop->flags & DRM_MODE_PROP_ENUM)) { |
| weston_log("DRM: expected property %s to be an enum," |
| " but it is not; ignoring\n", prop->name); |
| drmModeFreeProperty(prop); |
| info[j].prop_id = 0; |
| continue; |
| } |
| |
| for (k = 0; k < info[j].num_enum_values; k++) { |
| int l; |
| |
| for (l = 0; l < prop->count_enums; l++) { |
| if (!strcmp(prop->enums[l].name, |
| info[j].enum_values[k].name)) |
| break; |
| } |
| |
| if (l == prop->count_enums) |
| continue; |
| |
| info[j].enum_values[k].valid = true; |
| info[j].enum_values[k].value = prop->enums[l].value; |
| } |
| |
| drmModeFreeProperty(prop); |
| } |
| |
| #ifdef DEBUG |
| for (i = 0; i < num_infos; i++) { |
| if (info[i].prop_id == 0) |
| weston_log("DRM warning: property '%s' missing\n", |
| info[i].name); |
| } |
| #endif |
| } |
| |
| /** |
| * Free DRM property information |
| * |
| * Frees all memory associated with a DRM property info array and zeroes |
| * it out, leaving it usable for a further drm_property_info_update() or |
| * drm_property_info_free(). |
| * |
| * @param info DRM property info array |
| * @param num_props Number of entries in array to free |
| */ |
| static void |
| drm_property_info_free(struct drm_property_info *info, int num_props) |
| { |
| int i; |
| |
| for (i = 0; i < num_props; i++) |
| free(info[i].enum_values); |
| |
| memset(info, 0, sizeof(*info) * num_props); |
| } |
| |
/* Forward declarations; the definitions appear later in this file. */
static void
drm_output_set_cursor(struct drm_output_state *output_state);

static void
drm_output_update_msc(struct drm_output *output, unsigned int seq);

static void
drm_output_destroy(struct weston_output *output_base);
| |
| /** |
| * Returns true if the plane can be used on the given output for its current |
| * repaint cycle. |
| */ |
| static bool |
| drm_plane_is_available(struct drm_plane *plane, struct drm_output *output) |
| { |
| assert(plane->state_cur); |
| |
| /* The plane still has a request not yet completed by the kernel. */ |
| if (!plane->state_cur->complete) |
| return false; |
| |
| /* The plane is still active on another output. */ |
| if (plane->state_cur->output && plane->state_cur->output != output) |
| return false; |
| |
| /* Check whether the plane can be used with this CRTC; possible_crtcs |
| * is a bitmask of CRTC indices (pipe), rather than CRTC object ID. */ |
| return !!(plane->possible_crtcs & (1 << output->pipe)); |
| } |
| |
| static struct drm_output * |
| drm_output_find_by_crtc(struct drm_backend *b, uint32_t crtc_id) |
| { |
| struct drm_output *output; |
| |
| wl_list_for_each(output, &b->compositor->output_list, base.link) { |
| if (output->crtc_id == crtc_id) |
| return output; |
| } |
| |
| return NULL; |
| } |
| |
| static struct drm_head * |
| drm_head_find_by_connector(struct drm_backend *backend, uint32_t connector_id) |
| { |
| struct weston_head *base; |
| struct drm_head *head; |
| |
| wl_list_for_each(base, |
| &backend->compositor->head_list, compositor_link) { |
| head = to_drm_head(base); |
| if (head->connector_id == connector_id) |
| return head; |
| } |
| |
| return NULL; |
| } |
| |
| static void |
| drm_fb_destroy(struct drm_fb *fb) |
| { |
| if (fb->fb_id != 0) |
| drmModeRmFB(fb->fd, fb->fb_id); |
| weston_buffer_reference(&fb->buffer_ref, NULL); |
| free(fb); |
| } |
| |
| static void |
| drm_fb_destroy_dumb(struct drm_fb *fb) |
| { |
| struct drm_mode_destroy_dumb destroy_arg; |
| |
| assert(fb->type == BUFFER_PIXMAN_DUMB); |
| |
| if (fb->map && fb->size > 0) |
| munmap(fb->map, fb->size); |
| |
| memset(&destroy_arg, 0, sizeof(destroy_arg)); |
| destroy_arg.handle = fb->handles[0]; |
| drmIoctl(fb->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg); |
| |
| drm_fb_destroy(fb); |
| } |
| |
| static void |
| drm_fb_destroy_gbm(struct gbm_bo *bo, void *data) |
| { |
| struct drm_fb *fb = data; |
| |
| assert(fb->type == BUFFER_GBM_SURFACE || fb->type == BUFFER_CLIENT || |
| fb->type == BUFFER_CURSOR); |
| drm_fb_destroy(fb); |
| } |
| |
/* Register the buffer with KMS as a framebuffer object (fills fb->fb_id).
 * Prefers drmModeAddFB2WithModifiers when a modifier is set, then plain
 * AddFB2, then the legacy AddFB as a last resort for simple formats.
 * Returns 0 on success, a negative value on failure. */
static int
drm_fb_addfb(struct drm_fb *fb)
{
	int ret = -EINVAL;
#ifdef HAVE_DRM_ADDFB2_MODIFIERS
	uint64_t mods[4] = { };
	size_t i;
#endif

	/* If we have a modifier set, we must only use the WithModifiers
	 * entrypoint; we cannot import it through legacy ioctls. */
	if (fb->modifier != DRM_FORMAT_MOD_INVALID) {
		/* KMS demands that if a modifier is set, it must be the same
		 * for all planes. */
#ifdef HAVE_DRM_ADDFB2_MODIFIERS
		for (i = 0; i < ARRAY_LENGTH(mods) && fb->handles[i]; i++)
			mods[i] = fb->modifier;
		ret = drmModeAddFB2WithModifiers(fb->fd, fb->width, fb->height,
						 fb->format->format,
						 fb->handles, fb->strides,
						 fb->offsets, mods, &fb->fb_id,
						 DRM_MODE_FB_MODIFIERS);
#endif
		/* Without HAVE_DRM_ADDFB2_MODIFIERS this stays -EINVAL. */
		return ret;
	}

	ret = drmModeAddFB2(fb->fd, fb->width, fb->height, fb->format->format,
			    fb->handles, fb->strides, fb->offsets, &fb->fb_id,
			    0);
	if (ret == 0)
		return 0;

	/* Legacy AddFB can't always infer the format from depth/bpp alone, so
	 * check if our format is one of the lucky ones. */
	if (!fb->format->depth || !fb->format->bpp)
		return ret;

	/* Cannot fall back to AddFB for multi-planar formats either. */
	if (fb->handles[1] || fb->handles[2] || fb->handles[3])
		return ret;

	ret = drmModeAddFB(fb->fd, fb->width, fb->height,
			   fb->format->depth, fb->format->bpp,
			   fb->strides[0], fb->handles[0], &fb->fb_id);
	return ret;
}
| |
/* Allocate a kernel "dumb" (CPU-accessible) buffer of the given size and
 * format, register it as a KMS framebuffer and mmap it for writing.
 * Only formats with legacy depth/bpp values are accepted. Returns a
 * refcounted drm_fb (release with drm_fb_destroy_dumb()), or NULL on
 * failure; on failure all intermediate resources are unwound. */
static struct drm_fb *
drm_fb_create_dumb(struct drm_backend *b, int width, int height,
		   uint32_t format)
{
	struct drm_fb *fb;
	int ret;

	struct drm_mode_create_dumb create_arg;
	struct drm_mode_destroy_dumb destroy_arg;
	struct drm_mode_map_dumb map_arg;

	fb = zalloc(sizeof *fb);
	if (!fb)
		return NULL;
	fb->refcnt = 1;

	fb->format = pixel_format_get_info(format);
	if (!fb->format) {
		weston_log("failed to look up format 0x%lx\n",
			   (unsigned long) format);
		goto err_fb;
	}

	/* Dumb buffers go through the legacy bpp-based allocation path. */
	if (!fb->format->depth || !fb->format->bpp) {
		weston_log("format 0x%lx is not compatible with dumb buffers\n",
			   (unsigned long) format);
		goto err_fb;
	}

	memset(&create_arg, 0, sizeof create_arg);
	create_arg.bpp = fb->format->bpp;
	create_arg.width = width;
	create_arg.height = height;

	ret = drmIoctl(b->drm.fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_arg);
	if (ret)
		goto err_fb;

	fb->type = BUFFER_PIXMAN_DUMB;
	fb->modifier = DRM_FORMAT_MOD_INVALID;
	fb->handles[0] = create_arg.handle;
	fb->strides[0] = create_arg.pitch;
	fb->size = create_arg.size;
	fb->width = width;
	fb->height = height;
	fb->fd = b->drm.fd;

	if (drm_fb_addfb(fb) != 0) {
		weston_log("failed to create kms fb: %m\n");
		goto err_bo;
	}

	/* Ask the kernel for an mmap offset, then map the buffer. */
	memset(&map_arg, 0, sizeof map_arg);
	map_arg.handle = fb->handles[0];
	ret = drmIoctl(fb->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_arg);
	if (ret)
		goto err_add_fb;

	fb->map = mmap(NULL, fb->size, PROT_WRITE,
		       MAP_SHARED, b->drm.fd, map_arg.offset);
	if (fb->map == MAP_FAILED)
		goto err_add_fb;

	return fb;

	/* Error unwinding, in reverse order of acquisition. */
err_add_fb:
	drmModeRmFB(b->drm.fd, fb->fb_id);
err_bo:
	memset(&destroy_arg, 0, sizeof(destroy_arg));
	destroy_arg.handle = create_arg.handle;
	drmIoctl(b->drm.fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg);
err_fb:
	free(fb);
	return NULL;
}
| |
/* Take an additional reference on a drm_fb; paired with the
 * type-specific destroy/unref path. */
static struct drm_fb *
drm_fb_ref(struct drm_fb *fb)
{
	fb->refcnt++;
	return fb;
}
| |
| static void |
| drm_fb_destroy_dmabuf(struct drm_fb *fb) |
| { |
| /* We deliberately do not close the GEM handles here; GBM manages |
| * their lifetime through the BO. */ |
| if (fb->bo) |
| gbm_bo_destroy(fb->bo); |
| drm_fb_destroy(fb); |
| } |
| |
/**
 * Import a linux_dmabuf buffer as a scanout-capable DRM framebuffer.
 *
 * Wraps the client's dmabuf in a GBM BO — using the multi-plane/modifier
 * import path when a modifier, extra planes, or a non-zero offset is
 * present, otherwise the legacy single-FD path — validates it against the
 * device limits, and registers it with KMS via drm_fb_addfb().
 *
 * Returns a new drm_fb with refcount 1 on success; NULL on failure, or
 * always NULL when built without HAVE_GBM_FD_IMPORT.
 *
 * If is_opaque is true, the FB's format is replaced with its opaque
 * (X-channel) substitute before KMS registration.
 */
static struct drm_fb *
drm_fb_get_from_dmabuf(struct linux_dmabuf_buffer *dmabuf,
		       struct drm_backend *backend, bool is_opaque)
{
#ifdef HAVE_GBM_FD_IMPORT
	struct drm_fb *fb;
	/* Legacy import: single FD/stride, no offsets, no modifier. */
	struct gbm_import_fd_data import_legacy = {
		.width = dmabuf->attributes.width,
		.height = dmabuf->attributes.height,
		.format = dmabuf->attributes.format,
		.stride = dmabuf->attributes.stride[0],
		.fd = dmabuf->attributes.fd[0],
	};
	/* Modifier-aware import; per-plane fds/strides/offsets are copied
	 * in below once the array shapes have been checked. */
	struct gbm_import_fd_modifier_data import_mod = {
		.width = dmabuf->attributes.width,
		.height = dmabuf->attributes.height,
		.format = dmabuf->attributes.format,
		.num_fds = dmabuf->attributes.n_planes,
		.modifier = dmabuf->attributes.modifier[0],
	};
	int i;

	/* XXX: TODO:
	 *
	 * Currently the buffer is rejected if any dmabuf attribute
	 * flag is set. This keeps us from passing an inverted /
	 * interlaced / bottom-first buffer (or any other type that may
	 * be added in the future) through to an overlay. Ultimately,
	 * these types of buffers should be handled through buffer
	 * transforms and not as spot-checks requiring specific
	 * knowledge. */
	if (dmabuf->attributes.flags)
		return NULL;

	fb = zalloc(sizeof *fb);
	if (fb == NULL)
		return NULL;

	fb->refcnt = 1;
	fb->type = BUFFER_DMABUF;

	/* Mirror the per-plane FD/stride/offset arrays into the GBM import
	 * struct; the static_asserts keep the two ABIs' array shapes in
	 * sync at compile time. */
	static_assert(ARRAY_LENGTH(import_mod.fds) ==
		      ARRAY_LENGTH(dmabuf->attributes.fd),
		      "GBM and linux_dmabuf FD size must match");
	static_assert(sizeof(import_mod.fds) == sizeof(dmabuf->attributes.fd),
		      "GBM and linux_dmabuf FD size must match");
	memcpy(import_mod.fds, dmabuf->attributes.fd, sizeof(import_mod.fds));

	static_assert(ARRAY_LENGTH(import_mod.strides) ==
		      ARRAY_LENGTH(dmabuf->attributes.stride),
		      "GBM and linux_dmabuf stride size must match");
	static_assert(sizeof(import_mod.strides) ==
		      sizeof(dmabuf->attributes.stride),
		      "GBM and linux_dmabuf stride size must match");
	memcpy(import_mod.strides, dmabuf->attributes.stride,
	       sizeof(import_mod.strides));

	static_assert(ARRAY_LENGTH(import_mod.offsets) ==
		      ARRAY_LENGTH(dmabuf->attributes.offset),
		      "GBM and linux_dmabuf offset size must match");
	static_assert(sizeof(import_mod.offsets) ==
		      sizeof(dmabuf->attributes.offset),
		      "GBM and linux_dmabuf offset size must match");
	memcpy(import_mod.offsets, dmabuf->attributes.offset,
	       sizeof(import_mod.offsets));

	/* The legacy FD-import path does not allow us to supply modifiers,
	 * multiple planes, or buffer offsets. */
	if (dmabuf->attributes.modifier[0] != DRM_FORMAT_MOD_INVALID ||
	    import_mod.num_fds > 1 ||
	    import_mod.offsets[0] > 0) {
		fb->bo = gbm_bo_import(backend->gbm, GBM_BO_IMPORT_FD_MODIFIER,
				       &import_mod,
				       GBM_BO_USE_SCANOUT);
	} else {
		fb->bo = gbm_bo_import(backend->gbm, GBM_BO_IMPORT_FD,
				       &import_legacy,
				       GBM_BO_USE_SCANOUT);
	}

	if (!fb->bo)
		goto err_free;

	fb->width = dmabuf->attributes.width;
	fb->height = dmabuf->attributes.height;
	fb->modifier = dmabuf->attributes.modifier[0];
	/* size is not tracked for imported buffers */
	fb->size = 0;
	fb->fd = backend->drm.fd;

	static_assert(ARRAY_LENGTH(fb->strides) ==
		      ARRAY_LENGTH(dmabuf->attributes.stride),
		      "drm_fb and dmabuf stride size must match");
	static_assert(sizeof(fb->strides) == sizeof(dmabuf->attributes.stride),
		      "drm_fb and dmabuf stride size must match");
	memcpy(fb->strides, dmabuf->attributes.stride, sizeof(fb->strides));
	static_assert(ARRAY_LENGTH(fb->offsets) ==
		      ARRAY_LENGTH(dmabuf->attributes.offset),
		      "drm_fb and dmabuf offset size must match");
	static_assert(sizeof(fb->offsets) == sizeof(dmabuf->attributes.offset),
		      "drm_fb and dmabuf offset size must match");
	memcpy(fb->offsets, dmabuf->attributes.offset, sizeof(fb->offsets));

	fb->format = pixel_format_get_info(dmabuf->attributes.format);
	if (!fb->format) {
		weston_log("couldn't look up format info for 0x%lx\n",
			   (unsigned long) dmabuf->attributes.format);
		goto err_free;
	}

	/* Scanout of an alpha format is expressed to KMS as the matching
	 * X-channel format when the content is known to be opaque. */
	if (is_opaque)
		fb->format = pixel_format_get_opaque_substitute(fb->format);

	if (backend->min_width > fb->width ||
	    fb->width > backend->max_width ||
	    backend->min_height > fb->height ||
	    fb->height > backend->max_height) {
		weston_log("bo geometry out of bounds\n");
		goto err_free;
	}

	for (i = 0; i < dmabuf->attributes.n_planes; i++) {
		fb->handles[i] = gbm_bo_get_handle_for_plane(fb->bo, i).u32;
		if (!fb->handles[i])
			goto err_free;
	}

	if (drm_fb_addfb(fb) != 0) {
		weston_log("failed to create kms fb: %m\n");
		goto err_free;
	}

	return fb;

err_free:
	/* Destroys the BO too (if imported); GEM handles stay with GBM. */
	drm_fb_destroy_dmabuf(fb);
#endif
	return NULL;
}
| |
/**
 * Wrap a GBM BO in a DRM framebuffer, registering it with KMS.
 *
 * If the BO already carries a drm_fb as user data, that FB is returned
 * with an extra reference (its type must match). Otherwise a new drm_fb
 * is created, validated against the device limits, added to KMS via
 * drm_fb_addfb(), and attached to the BO as user data so subsequent
 * lookups hit the cache.
 *
 * On failure only the drm_fb wrapper is freed; the BO itself remains
 * owned by the caller (see the err_free path).
 */
static struct drm_fb *
drm_fb_get_from_bo(struct gbm_bo *bo, struct drm_backend *backend,
		   bool is_opaque, enum drm_fb_type type)
{
	struct drm_fb *fb = gbm_bo_get_user_data(bo);
#ifdef HAVE_GBM_MODIFIERS
	int i;
#endif

	/* Cache hit: the BO was wrapped before. */
	if (fb) {
		assert(fb->type == type);
		return drm_fb_ref(fb);
	}

	fb = zalloc(sizeof *fb);
	if (fb == NULL)
		return NULL;

	fb->type = type;
	fb->refcnt = 1;
	fb->bo = bo;
	fb->fd = backend->drm.fd;

	fb->width = gbm_bo_get_width(bo);
	fb->height = gbm_bo_get_height(bo);
	fb->format = pixel_format_get_info(gbm_bo_get_format(bo));
	/* size is not tracked for GBM-backed buffers */
	fb->size = 0;

#ifdef HAVE_GBM_MODIFIERS
	/* Modifier-aware GBM: collect per-plane strides/handles/offsets. */
	fb->modifier = gbm_bo_get_modifier(bo);
	for (i = 0; i < gbm_bo_get_plane_count(bo); i++) {
		fb->strides[i] = gbm_bo_get_stride_for_plane(bo, i);
		fb->handles[i] = gbm_bo_get_handle_for_plane(bo, i).u32;
		fb->offsets[i] = gbm_bo_get_offset(bo, i);
	}
#else
	/* Older GBM: single plane only, no modifier information. */
	fb->strides[0] = gbm_bo_get_stride(bo);
	fb->handles[0] = gbm_bo_get_handle(bo).u32;
	fb->modifier = DRM_FORMAT_MOD_INVALID;
#endif

	if (!fb->format) {
		weston_log("couldn't look up format 0x%lx\n",
			   (unsigned long) gbm_bo_get_format(bo));
		goto err_free;
	}

	/* We can scanout an ARGB buffer if the surface's opaque region covers
	 * the whole output, but we have to use XRGB as the KMS format code. */
	if (is_opaque)
		fb->format = pixel_format_get_opaque_substitute(fb->format);

	if (backend->min_width > fb->width ||
	    fb->width > backend->max_width ||
	    backend->min_height > fb->height ||
	    fb->height > backend->max_height) {
		weston_log("bo geometry out of bounds\n");
		goto err_free;
	}

	if (drm_fb_addfb(fb) != 0) {
		weston_log("failed to create kms fb: %m\n");
		goto err_free;
	}

	/* Cache the FB on the BO; destroyed via drm_fb_destroy_gbm when
	 * the BO goes away. */
	gbm_bo_set_user_data(bo, fb, drm_fb_destroy_gbm);

	return fb;

err_free:
	/* Free only the wrapper; the caller still owns the BO. */
	free(fb);
	return NULL;
}
| |
| static void |
| drm_fb_set_buffer(struct drm_fb *fb, struct weston_buffer *buffer) |
| { |
| assert(fb->buffer_ref.buffer == NULL); |
| assert(fb->type == BUFFER_CLIENT || fb->type == BUFFER_DMABUF); |
| weston_buffer_reference(&fb->buffer_ref, buffer); |
| } |
| |
| static void |
| drm_fb_unref(struct drm_fb *fb) |
| { |
| if (!fb) |
| return; |
| |
| assert(fb->refcnt > 0); |
| if (--fb->refcnt > 0) |
| return; |
| |
| switch (fb->type) { |
| case BUFFER_PIXMAN_DUMB: |
| drm_fb_destroy_dumb(fb); |
| break; |
| case BUFFER_CURSOR: |
| case BUFFER_CLIENT: |
| gbm_bo_destroy(fb->bo); |
| break; |
| case BUFFER_GBM_SURFACE: |
| gbm_surface_release_buffer(fb->gbm_surface, fb->bo); |
| break; |
| case BUFFER_DMABUF: |
| drm_fb_destroy_dmabuf(fb); |
| break; |
| default: |
| assert(NULL); |
| break; |
| } |
| } |
| |
| /** |
| * Allocate a new, empty, plane state. |
| */ |
| static struct drm_plane_state * |
| drm_plane_state_alloc(struct drm_output_state *state_output, |
| struct drm_plane *plane) |
| { |
| struct drm_plane_state *state = zalloc(sizeof(*state)); |
| |
| assert(state); |
| state->output_state = state_output; |
| state->plane = plane; |
| |
| /* Here we only add the plane state to the desired link, and not |
| * set the member. Having an output pointer set means that the |
| * plane will be displayed on the output; this won't be the case |
| * when we go to disable a plane. In this case, it must be part of |
| * the commit (and thus the output state), but the member must be |
| * NULL, as it will not be on any output when the state takes |
| * effect. |
| */ |
| if (state_output) |
| wl_list_insert(&state_output->plane_list, &state->link); |
| else |
| wl_list_init(&state->link); |
| |
| return state; |
| } |
| |
| /** |
| * Free an existing plane state. As a special case, the state will not |
| * normally be freed if it is the current state; see drm_plane_set_state. |
| */ |
| static void |
| drm_plane_state_free(struct drm_plane_state *state, bool force) |
| { |
| if (!state) |
| return; |
| |
| wl_list_remove(&state->link); |
| wl_list_init(&state->link); |
| state->output_state = NULL; |
| |
| if (force || state != state->plane->state_cur) { |
| drm_fb_unref(state->fb); |
| free(state); |
| } |
| } |
| |
| /** |
| * Duplicate an existing plane state into a new plane state, storing it within |
| * the given output state. If the output state already contains a plane state |
| * for the drm_plane referenced by 'src', that plane state is freed first. |
| */ |
| static struct drm_plane_state * |
| drm_plane_state_duplicate(struct drm_output_state *state_output, |
| struct drm_plane_state *src) |
| { |
| struct drm_plane_state *dst = malloc(sizeof(*dst)); |
| struct drm_plane_state *old, *tmp; |
| |
| assert(src); |
| assert(dst); |
| *dst = *src; |
| wl_list_init(&dst->link); |
| |
| wl_list_for_each_safe(old, tmp, &state_output->plane_list, link) { |
| /* Duplicating a plane state into the same output state, so |
| * it can replace itself with an identical copy of itself, |
| * makes no sense. */ |
| assert(old != src); |
| if (old->plane == dst->plane) |
| drm_plane_state_free(old, false); |
| } |
| |
| wl_list_insert(&state_output->plane_list, &dst->link); |
| if (src->fb) |
| dst->fb = drm_fb_ref(src->fb); |
| dst->output_state = state_output; |
| dst->complete = false; |
| |
| return dst; |
| } |
| |
| /** |
| * Remove a plane state from an output state; if the plane was previously |
| * enabled, then replace it with a disabling state. This ensures that the |
| * output state was untouched from it was before the plane state was |
| * modified by the caller of this function. |
| * |
| * This is required as drm_output_state_get_plane may either allocate a |
| * new plane state, in which case this function will just perform a matching |
| * drm_plane_state_free, or it may instead repurpose an existing disabling |
| * state (if the plane was previously active), in which case this function |
| * will reset it. |
| */ |
| static void |
| drm_plane_state_put_back(struct drm_plane_state *state) |
| { |
| struct drm_output_state *state_output; |
| struct drm_plane *plane; |
| |
| if (!state) |
| return; |
| |
| state_output = state->output_state; |
| plane = state->plane; |
| drm_plane_state_free(state, false); |
| |
| /* Plane was previously disabled; no need to keep this temporary |
| * state around. */ |
| if (!plane->state_cur->fb) |
| return; |
| |
| (void) drm_plane_state_alloc(state_output, plane); |
| } |
| |
| static bool |
| drm_view_transform_supported(struct weston_view *ev, struct weston_output *output) |
| { |
| struct weston_buffer_viewport *viewport = &ev->surface->buffer_viewport; |
| |
| /* This will incorrectly disallow cases where the combination of |
| * buffer and view transformations match the output transform. |
| * Fixing this requires a full analysis of the transformation |
| * chain. */ |
| if (ev->transform.enabled && |
| ev->transform.matrix.type >= WESTON_MATRIX_TRANSFORM_ROTATE) |
| return false; |
| |
| if (viewport->buffer.transform != output->transform) |
| return false; |
| |
| return true; |
| } |
| |
| /** |
| * Given a weston_view, fill the drm_plane_state's co-ordinates to display on |
| * a given plane. |
| */ |
| static bool |
| drm_plane_state_coords_for_view(struct drm_plane_state *state, |
| struct weston_view *ev) |
| { |
| struct drm_output *output = state->output; |
| struct weston_buffer *buffer = ev->surface->buffer_ref.buffer; |
| pixman_region32_t dest_rect, src_rect; |
| pixman_box32_t *box, tbox; |
| float sxf1, syf1, sxf2, syf2; |
| |
| if (!drm_view_transform_supported(ev, &output->base)) |
| return false; |
| |
| /* Update the base weston_plane co-ordinates. */ |
| box = pixman_region32_extents(&ev->transform.boundingbox); |
| state->plane->base.x = box->x1; |
| state->plane->base.y = box->y1; |
| |
| /* First calculate the destination co-ordinates by taking the |
| * area of the view which is visible on this output, performing any |
| * transforms to account for output rotation and scale as necessary. */ |
| pixman_region32_init(&dest_rect); |
| pixman_region32_intersect(&dest_rect, &ev->transform.boundingbox, |
| &output->base.region); |
| pixman_region32_translate(&dest_rect, -output->base.x, -output->base.y); |
| box = pixman_region32_extents(&dest_rect); |
| tbox = weston_transformed_rect(output->base.width, |
| output->base.height, |
| output->base.transform, |
| output->base.current_scale, |
| *box); |
| state->dest_x = tbox.x1; |
| state->dest_y = tbox.y1; |
| state->dest_w = tbox.x2 - tbox.x1; |
| state->dest_h = tbox.y2 - tbox.y1; |
| pixman_region32_fini(&dest_rect); |
| |
| /* Now calculate the source rectangle, by finding the extents of the |
| * view, and working backwards to source co-ordinates. */ |
| pixman_region32_init(&src_rect); |
| pixman_region32_intersect(&src_rect, &ev->transform.boundingbox, |
| &output->base.region); |
| box = pixman_region32_extents(&src_rect); |
| weston_view_from_global_float(ev, box->x1, box->y1, &sxf1, &syf1); |
| weston_surface_to_buffer_float(ev->surface, sxf1, syf1, &sxf1, &syf1); |
| weston_view_from_global_float(ev, box->x2, box->y2, &sxf2, &syf2); |
| weston_surface_to_buffer_float(ev->surface, sxf2, syf2, &sxf2, &syf2); |
| pixman_region32_fini(&src_rect); |
| |
| /* Buffer transforms may mean that x2 is to the left of x1, and/or that |
| * y2 is above y1. */ |
| if (sxf2 < sxf1) { |
| double tmp = sxf1; |
| sxf1 = sxf2; |
| sxf2 = tmp; |
| } |
| if (syf2 < syf1) { |
| double tmp = syf1; |
| syf1 = syf2; |
| syf2 = tmp; |
| } |
| |
| /* Shift from S23.8 wl_fixed to U16.16 KMS fixed-point encoding. */ |
| state->src_x = wl_fixed_from_double(sxf1) << 8; |
| state->src_y = wl_fixed_from_double(syf1) << 8; |
| state->src_w = wl_fixed_from_double(sxf2 - sxf1) << 8; |
| state->src_h = wl_fixed_from_double(syf2 - syf1) << 8; |
| |
| /* Clamp our source co-ordinates to surface bounds; it's possible |
| * for intermediate translations to give us slightly incorrect |
| * co-ordinates if we have, for example, multiple zooming |
| * transformations. View bounding boxes are also explicitly rounded |
| * greedily. */ |
| if (state->src_x < 0) |
| state->src_x = 0; |
| if (state->src_y < 0) |
| state->src_y = 0; |
| if (state->src_w > (uint32_t) ((buffer->width << 16) - state->src_x)) |
| state->src_w = (buffer->width << 16) - state->src_x; |
| if (state->src_h > (uint32_t) ((buffer->height << 16) - state->src_y)) |
| state->src_h = (buffer->height << 16) - state->src_y; |
| |
| return true; |
| } |
| |
| static bool |
| drm_view_is_opaque(struct weston_view *ev) |
| { |
| pixman_region32_t r; |
| bool ret = false; |
| |
| pixman_region32_init_rect(&r, 0, 0, |
| ev->surface->width, |
| ev->surface->height); |
| pixman_region32_subtract(&r, &r, &ev->surface->opaque); |
| |
| if (!pixman_region32_not_empty(&r)) |
| ret = true; |
| |
| pixman_region32_fini(&r); |
| |
| return ret; |
| } |
| |
| static struct drm_fb * |
| drm_fb_get_from_view(struct drm_output_state *state, struct weston_view *ev) |
| { |
| struct drm_output *output = state->output; |
| struct drm_backend *b = to_drm_backend(output->base.compositor); |
| struct weston_buffer *buffer = ev->surface->buffer_ref.buffer; |
| bool is_opaque = drm_view_is_opaque(ev); |
| struct linux_dmabuf_buffer *dmabuf; |
| struct drm_fb *fb; |
| |
| if (ev->alpha != 1.0f) |
| return NULL; |
| |
| if (!drm_view_transform_supported(ev, &output->base)) |
| return NULL; |
| |
| if (!buffer) |
| return NULL; |
| |
| if (wl_shm_buffer_get(buffer->resource)) |
| return NULL; |
| |
| /* GBM is used for dmabuf import as well as from client wl_buffer. */ |
| if (!b->gbm) |
| return NULL; |
| |
| dmabuf = linux_dmabuf_buffer_get(buffer->resource); |
| if (dmabuf) { |
| fb = drm_fb_get_from_dmabuf(dmabuf, b, is_opaque); |
| if (!fb) |
| return NULL; |
| } else { |
| struct gbm_bo *bo; |
| |
| bo = gbm_bo_import(b->gbm, GBM_BO_IMPORT_WL_BUFFER, |
| buffer->resource, GBM_BO_USE_SCANOUT); |
| if (!bo) |
| return NULL; |
| |
| fb = drm_fb_get_from_bo(bo, b, is_opaque, BUFFER_CLIENT); |
| if (!fb) { |
| gbm_bo_destroy(bo); |
| return NULL; |
| } |
| } |
| |
| drm_fb_set_buffer(fb, buffer); |
| return fb; |
| } |
| |
| /** |
| * Return a plane state from a drm_output_state. |
| */ |
| static struct drm_plane_state * |
| drm_output_state_get_existing_plane(struct drm_output_state *state_output, |
| struct drm_plane *plane) |
| { |
| struct drm_plane_state *ps; |
| |
| wl_list_for_each(ps, &state_output->plane_list, link) { |
| if (ps->plane == plane) |
| return ps; |
| } |
| |
| return NULL; |
| } |
| |
| /** |
| * Return a plane state from a drm_output_state, either existing or |
| * freshly allocated. |
| */ |
| static struct drm_plane_state * |
| drm_output_state_get_plane(struct drm_output_state *state_output, |
| struct drm_plane *plane) |
| { |
| struct drm_plane_state *ps; |
| |
| ps = drm_output_state_get_existing_plane(state_output, plane); |
| if (ps) |
| return ps; |
| |
| return drm_plane_state_alloc(state_output, plane); |
| } |
| |
| /** |
| * Allocate a new, empty drm_output_state. This should not generally be used |
| * in the repaint cycle; see drm_output_state_duplicate. |
| */ |
| static struct drm_output_state * |
| drm_output_state_alloc(struct drm_output *output, |
| struct drm_pending_state *pending_state) |
| { |
| struct drm_output_state *state = zalloc(sizeof(*state)); |
| |
| assert(state); |
| state->output = output; |
| state->dpms = WESTON_DPMS_OFF; |
| state->pending_state = pending_state; |
| if (pending_state) |
| wl_list_insert(&pending_state->output_list, &state->link); |
| else |
| wl_list_init(&state->link); |
| |
| wl_list_init(&state->plane_list); |
| |
| return state; |
| } |
| |
| /** |
| * Duplicate an existing drm_output_state into a new one. This is generally |
| * used during the repaint cycle, to capture the existing state of an output |
| * and modify it to create a new state to be used. |
| * |
| * The mode determines whether the output will be reset to an a blank state, |
| * or an exact mirror of the current state. |
| */ |
| static struct drm_output_state * |
| drm_output_state_duplicate(struct drm_output_state *src, |
| struct drm_pending_state *pending_state, |
| enum drm_output_state_duplicate_mode plane_mode) |
| { |
| struct drm_output_state *dst = malloc(sizeof(*dst)); |
| struct drm_plane_state *ps; |
| |
| assert(dst); |
| |
| /* Copy the whole structure, then individually modify the |
| * pending_state, as well as the list link into our pending |
| * state. */ |
| *dst = *src; |
| |
| dst->pending_state = pending_state; |
| if (pending_state) |
| wl_list_insert(&pending_state->output_list, &dst->link); |
| else |
| wl_list_init(&dst->link); |
| |
| wl_list_init(&dst->plane_list); |
| |
| wl_list_for_each(ps, &src->plane_list, link) { |
| /* Don't carry planes which are now disabled; these should be |
| * free for other outputs to reuse. */ |
| if (!ps->output) |
| continue; |
| |
| if (plane_mode == DRM_OUTPUT_STATE_CLEAR_PLANES) |
| (void) drm_plane_state_alloc(dst, ps->plane); |
| else |
| (void) drm_plane_state_duplicate(dst, ps); |
| } |
| |
| return dst; |
| } |
| |
| /** |
| * Free an unused drm_output_state. |
| */ |
| static void |
| drm_output_state_free(struct drm_output_state *state) |
| { |
| struct drm_plane_state *ps, *next; |
| |
| if (!state) |
| return; |
| |
| wl_list_for_each_safe(ps, next, &state->plane_list, link) |
| drm_plane_state_free(ps, false); |
| |
| wl_list_remove(&state->link); |
| |
| free(state); |
| } |
| |
| /** |
| * Get output state to disable output |
| * |
| * Returns a pointer to an output_state object which can be used to disable |
| * an output (e.g. DPMS off). |
| * |
| * @param pending_state The pending state object owning this update |
| * @param output The output to disable |
| * @returns A drm_output_state to disable the output |
| */ |
| static struct drm_output_state * |
| drm_output_get_disable_state(struct drm_pending_state *pending_state, |
| struct drm_output *output) |
| { |
| struct drm_output_state *output_state; |
| |
| output_state = drm_output_state_duplicate(output->state_cur, |
| pending_state, |
| DRM_OUTPUT_STATE_CLEAR_PLANES); |
| output_state->dpms = WESTON_DPMS_OFF; |
| |
| return output_state; |
| } |
| |
| /** |
| * Allocate a new drm_pending_state |
| * |
| * Allocate a new, empty, 'pending state' structure to be used across a |
| * repaint cycle or similar. |
| * |
| * @param backend DRM backend |
| * @returns Newly-allocated pending state structure |
| */ |
| static struct drm_pending_state * |
| drm_pending_state_alloc(struct drm_backend *backend) |
| { |
| struct drm_pending_state *ret; |
| |
| ret = calloc(1, sizeof(*ret)); |
| if (!ret) |
| return NULL; |
| |
| ret->backend = backend; |
| wl_list_init(&ret->output_list); |
| |
| return ret; |
| } |
| |
| /** |
| * Free a drm_pending_state structure |
| * |
| * Frees a pending_state structure, as well as any output_states connected |
| * to this pending state. |
| * |
| * @param pending_state Pending state structure to free |
| */ |
| static void |
| drm_pending_state_free(struct drm_pending_state *pending_state) |
| { |
| struct drm_output_state *output_state, *tmp; |
| |
| if (!pending_state) |
| return; |
| |
| wl_list_for_each_safe(output_state, tmp, &pending_state->output_list, |
| link) { |
| drm_output_state_free(output_state); |
| } |
| |
| free(pending_state); |
| } |
| |
| /** |
| * Find an output state in a pending state |
| * |
| * Given a pending_state structure, find the output_state for a particular |
| * output. |
| * |
| * @param pending_state Pending state structure to search |
| * @param output Output to find state for |
| * @returns Output state if present, or NULL if not |
| */ |
| static struct drm_output_state * |
| drm_pending_state_get_output(struct drm_pending_state *pending_state, |
| struct drm_output *output) |
| { |
| struct drm_output_state *output_state; |
| |
| wl_list_for_each(output_state, &pending_state->output_list, link) { |
| if (output_state->output == output) |
| return output_state; |
| } |
| |
| return NULL; |
| } |
| |
| static int drm_pending_state_apply_sync(struct drm_pending_state *state); |
| static int drm_pending_state_test(struct drm_pending_state *state); |
| |
| /** |
| * Mark a drm_output_state (the output's last state) as complete. This handles |
| * any post-completion actions such as updating the repaint timer, disabling the |
| * output, and finally freeing the state. |
| */ |
| static void |
| drm_output_update_complete(struct drm_output *output, uint32_t flags, |
| unsigned int sec, unsigned int usec) |
| { |
| struct drm_backend *b = to_drm_backend(output->base.compositor); |
| struct drm_plane_state *ps; |
| struct timespec ts; |
| |
| /* Stop the pageflip timer instead of rearming it here */ |
| if (output->pageflip_timer) |
| wl_event_source_timer_update(output->pageflip_timer, 0); |
| |
| wl_list_for_each(ps, &output->state_cur->plane_list, link) |
| ps->complete = true; |
| |
| drm_output_state_free(output->state_last); |
| output->state_last = NULL; |
| |
| if (output->destroy_pending) { |
| output->destroy_pending = 0; |
| output->disable_pending = 0; |
| output->dpms_off_pending = 0; |
| drm_output_destroy(&output->base); |
| return; |
| } else if (output->disable_pending) { |
| output->disable_pending = 0; |
| output->dpms_off_pending = 0; |
| weston_output_disable(&output->base); |
| return; |
| } else if (output->dpms_off_pending) { |
| struct drm_pending_state *pending = drm_pending_state_alloc(b); |
| output->dpms_off_pending = 0; |
| drm_output_get_disable_state(pending, output); |
| drm_pending_state_apply_sync(pending); |
| return; |
| } else if (output->state_cur->dpms == WESTON_DPMS_OFF && |
| output->base.repaint_status != REPAINT_AWAITING_COMPLETION) { |
| /* DPMS can happen to us either in the middle of a repaint |
| * cycle (when we have painted fresh content, only to throw it |
| * away for DPMS off), or at any other random point. If the |
| * latter is true, then we cannot go through finish_frame, |
| * because the repaint machinery does not expect this. */ |
| return; |
| } |
| |
| ts.tv_sec = sec; |
| ts.tv_nsec = usec * 1000; |
| weston_output_finish_frame(&output->base, &ts, flags); |
| |
| /* We can't call this from frame_notify, because the output's |
| * repaint needed flag is cleared just after that */ |
| if (output->recorder) |
| weston_output_schedule_repaint(&output->base); |
| } |
| |
| /** |
| * Mark an output state as current on the output, i.e. it has been |
| * submitted to the kernel. The mode argument determines whether this |
| * update will be applied synchronously (e.g. when calling drmModeSetCrtc), |
| * or asynchronously (in which case we wait for events to complete). |
| */ |
| static void |
| drm_output_assign_state(struct drm_output_state *state, |
| enum drm_state_apply_mode mode) |
| { |
| struct drm_output *output = state->output; |
| struct drm_backend *b = to_drm_backend(output->base.compositor); |
| struct drm_plane_state *plane_state; |
| |
| assert(!output->state_last); |
| |
| if (mode == DRM_STATE_APPLY_ASYNC) |
| output->state_last = output->state_cur; |
| else |
| drm_output_state_free(output->state_cur); |
| |
| wl_list_remove(&state->link); |
| wl_list_init(&state->link); |
| state->pending_state = NULL; |
| |
| output->state_cur = state; |
| |
| if (b->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) |
| output->atomic_complete_pending = 1; |
| |
| /* Replace state_cur on each affected plane with the new state, being |
| * careful to dispose of orphaned (but only orphaned) previous state. |
| * If the previous state is not orphaned (still has an output_state |
| * attached), it will be disposed of by freeing the output_state. */ |
| wl_list_for_each(plane_state, &state->plane_list, link) { |
| struct drm_plane *plane = plane_state->plane; |
| |
| if (plane->state_cur && !plane->state_cur->output_state) |
| drm_plane_state_free(plane->state_cur, true); |
| plane->state_cur = plane_state; |
| |
| if (mode != DRM_STATE_APPLY_ASYNC) { |
| plane_state->complete = true; |
| continue; |
| } |
| |
| if (b->atomic_modeset) |
| continue; |
| |
| if (plane->type == WDRM_PLANE_TYPE_OVERLAY) |
| output->vblank_pending++; |
| else if (plane->type == WDRM_PLANE_TYPE_PRIMARY) |
| output->page_flip_pending = 1; |
| } |
| } |
| |
/** Strategies for assigning views when building a proposed output state. */
enum drm_output_propose_state_mode {
	DRM_OUTPUT_PROPOSE_STATE_MIXED, /**< mix renderer & planes */
	DRM_OUTPUT_PROPOSE_STATE_RENDERER_ONLY, /**< only assign to renderer & cursor */
	DRM_OUTPUT_PROPOSE_STATE_PLANES_ONLY, /**< no renderer use, only planes */
};
| |
/**
 * Try to place a view directly onto the output's scanout (primary) plane.
 *
 * Only used in planes-only mode. The view must exactly cover the output in
 * logical co-ordinates, have no blended alpha, provide an FB in the
 * output's configured format, and need no cropping or scaling (the legacy
 * page-flip API supports neither). Returns the filled-in plane state on
 * success, or NULL if the view cannot be scanned out directly.
 */
static struct drm_plane_state *
drm_output_prepare_scanout_view(struct drm_output_state *output_state,
				struct weston_view *ev,
				enum drm_output_propose_state_mode mode)
{
	struct drm_output *output = output_state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane *scanout_plane = output->scanout_plane;
	struct drm_plane_state *state;
	struct drm_fb *fb;
	pixman_box32_t *extents;

	assert(!b->sprites_are_broken);
	assert(mode == DRM_OUTPUT_PROPOSE_STATE_PLANES_ONLY);

	/* Check the view spans exactly the output size, calculated in the
	 * logical co-ordinate space. */
	extents = pixman_region32_extents(&ev->transform.boundingbox);
	if (extents->x1 != output->base.x ||
	    extents->y1 != output->base.y ||
	    extents->x2 != output->base.x + output->base.width ||
	    extents->y2 != output->base.y + output->base.height)
		return NULL;

	/* Blended views cannot be scanned out. */
	if (ev->alpha != 1.0f)
		return NULL;

	fb = drm_fb_get_from_view(output_state, ev);
	if (!fb)
		return NULL;

	/* Can't change formats with just a pageflip */
	if (fb->format->format != output->gbm_format) {
		drm_fb_unref(fb);
		return NULL;
	}

	state = drm_output_state_get_plane(output_state, scanout_plane);

	/* The only way we can already have a buffer in the scanout plane is
	 * if we are in mixed mode, or if a client buffer has already been
	 * placed into scanout. The former case will never call into here,
	 * and in the latter case, the view must have been marked as occluded,
	 * meaning we should never have ended up here. */
	assert(!state->fb);
	state->fb = fb;
	state->ev = ev;
	state->output = output;
	if (!drm_plane_state_coords_for_view(state, ev))
		goto err;

	/* The legacy API does not let us perform cropping or scaling. */
	if (state->src_x != 0 || state->src_y != 0 ||
	    state->src_w != state->dest_w << 16 ||
	    state->src_h != state->dest_h << 16 ||
	    state->dest_x != 0 || state->dest_y != 0 ||
	    state->dest_w != (unsigned) output->base.current_mode->width ||
	    state->dest_h != (unsigned) output->base.current_mode->height)
		goto err;

	/* In plane-only mode, we don't need to test the state now, as we
	 * will only test it once at the end. */
	return state;

err:
	/* Undo our changes to the output state (drops the fb ref too). */
	drm_plane_state_put_back(state);
	return NULL;
}
| |
| static struct drm_fb * |
| drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage) |
| { |
| struct drm_output *output = state->output; |
| struct drm_backend *b = to_drm_backend(output->base.compositor); |
| struct gbm_bo *bo; |
| struct drm_fb *ret; |
| |
| output->base.compositor->renderer->repaint_output(&output->base, |
| damage); |
| |
| bo = gbm_surface_lock_front_buffer(output->gbm_surface); |
| if (!bo) { |
| weston_log("failed to lock front buffer: %m\n"); |
| return NULL; |
| } |
| |
| /* The renderer always produces an opaque image. */ |
| ret = drm_fb_get_from_bo(bo, b, true, BUFFER_GBM_SURFACE); |
| if (!ret) { |
| weston_log("failed to get drm_fb for bo\n"); |
| gbm_surface_release_buffer(output->gbm_surface, bo); |
| return NULL; |
| } |
| ret->gbm_surface = output->gbm_surface; |
| |
| return ret; |
| } |
| |
| static struct drm_fb * |
| drm_output_render_pixman(struct drm_output_state *state, |
| pixman_region32_t *damage) |
| { |
| struct drm_output *output = state->output; |
| struct weston_compositor *ec = output->base.compositor; |
| |
| output->current_image ^= 1; |
| |
| pixman_renderer_output_set_buffer(&output->base, |
| output->image[output->current_image]); |
| pixman_renderer_output_set_hw_extra_damage(&output->base, |
| &output->previous_damage); |
| |
| ec->renderer->repaint_output(&output->base, damage); |
| |
| pixman_region32_copy(&output->previous_damage, damage); |
| |
| return drm_fb_ref(output->dumb[output->current_image]); |
| } |
| |
/**
 * Render the damaged parts of the output through the renderer (pixman or
 * GL) and attach the resulting FB to the scanout plane's state.
 *
 * Skips rendering entirely when a client buffer was already promoted to
 * scanout, and re-uses the previous renderer FB (with a fresh reference)
 * when there is no damage and the existing FB still matches the current
 * mode.
 */
static void
drm_output_render(struct drm_output_state *state, pixman_region32_t *damage)
{
	struct drm_output *output = state->output;
	struct weston_compositor *c = output->base.compositor;
	struct drm_plane_state *scanout_state;
	struct drm_plane *scanout_plane = output->scanout_plane;
	struct drm_backend *b = to_drm_backend(c);
	struct drm_fb *fb;

	/* If we already have a client buffer promoted to scanout, then we don't
	 * want to render. */
	scanout_state = drm_output_state_get_plane(state,
						   output->scanout_plane);
	if (scanout_state->fb)
		return;

	/* No damage, and the current FB is a renderer buffer of the right
	 * size: keep showing it rather than re-rendering. */
	if (!pixman_region32_not_empty(damage) &&
	    scanout_plane->state_cur->fb &&
	    (scanout_plane->state_cur->fb->type == BUFFER_GBM_SURFACE ||
	     scanout_plane->state_cur->fb->type == BUFFER_PIXMAN_DUMB) &&
	    scanout_plane->state_cur->fb->width ==
		output->base.current_mode->width &&
	    scanout_plane->state_cur->fb->height ==
		output->base.current_mode->height) {
		fb = drm_fb_ref(scanout_plane->state_cur->fb);
	} else if (b->use_pixman) {
		fb = drm_output_render_pixman(state, damage);
	} else {
		fb = drm_output_render_gl(state, damage);
	}

	if (!fb) {
		/* Rendering failed: restore the plane state we took. */
		drm_plane_state_put_back(scanout_state);
		return;
	}

	scanout_state->fb = fb;
	scanout_state->output = output;

	/* The renderer FB always covers the full mode, unscaled:
	 * src_* are in U16.16 fixed point, dest_* in integer pixels. */
	scanout_state->src_x = 0;
	scanout_state->src_y = 0;
	scanout_state->src_w = output->base.current_mode->width << 16;
	scanout_state->src_h = output->base.current_mode->height << 16;

	scanout_state->dest_x = 0;
	scanout_state->dest_y = 0;
	scanout_state->dest_w = scanout_state->src_w >> 16;
	scanout_state->dest_h = scanout_state->src_h >> 16;

	pixman_region32_subtract(&c->primary_plane.damage,
				 &c->primary_plane.damage, damage);
}
| |
| static void |
| drm_output_set_gamma(struct weston_output *output_base, |
| uint16_t size, uint16_t *r, uint16_t *g, uint16_t *b) |
| { |
| int rc; |
| struct drm_output *output = to_drm_output(output_base); |
| struct drm_backend *backend = |
| to_drm_backend(output->base.compositor); |
| |
| /* check */ |
| if (output_base->gamma_size != size) |
| return; |
| |
| rc = drmModeCrtcSetGamma(backend->drm.fd, |
| output->crtc_id, |
| size, r, g, b); |
| if (rc) |
| weston_log("set gamma failed: %m\n"); |
| } |
| |
| /* Determine the type of vblank synchronization to use for the output. |
| * |
| * The pipe parameter indicates which CRTC is in use. Knowing this, we |
| * can determine which vblank sequence type to use for it. Traditional |
| * cards had only two CRTCs, with CRTC 0 using no special flags, and |
| * CRTC 1 using DRM_VBLANK_SECONDARY. The first bit of the pipe |
| * parameter indicates this. |
| * |
| * Bits 1-5 of the pipe parameter are 5 bit wide pipe number between |
| * 0-31. If this is non-zero it indicates we're dealing with a |
| * multi-gpu situation and we need to calculate the vblank sync |
 * using DRM_VBLANK_HIGH_CRTC_MASK.
| */ |
| static unsigned int |
| drm_waitvblank_pipe(struct drm_output *output) |
| { |
| if (output->pipe > 1) |
| return (output->pipe << DRM_VBLANK_HIGH_CRTC_SHIFT) & |
| DRM_VBLANK_HIGH_CRTC_MASK; |
| else if (output->pipe > 0) |
| return DRM_VBLANK_SECONDARY; |
| else |
| return 0; |
| } |
| |
/* Apply an output state using the legacy (non-atomic) KMS API.
 *
 * For a power-down request, this disables overlay planes, the cursor
 * and the CRTC synchronously and completes the update immediately.
 * Otherwise it performs a modeset (drmModeSetCrtc) when needed, queues
 * a page flip for the scanout framebuffer, programs overlay planes via
 * drmModeSetPlane (queueing a vblank event for each), and updates the
 * connector DPMS property when the power state changed.
 *
 * Returns 0 on success. On failure, frees the state and returns -1.
 */
static int
drm_output_apply_state_legacy(struct drm_output_state *state)
{
	struct drm_output *output = state->output;
	struct drm_backend *backend = to_drm_backend(output->base.compositor);
	struct drm_plane *scanout_plane = output->scanout_plane;
	struct drm_property_info *dpms_prop;
	struct drm_plane_state *scanout_state;
	struct drm_plane_state *ps;
	struct drm_mode *mode;
	struct drm_head *head;
	uint32_t connectors[MAX_CLONED_CONNECTORS];
	int n_conn = 0;
	struct timespec now;
	int ret = 0;

	/* Collect connector IDs for every head cloned onto this output;
	 * legacy SetCrtc takes the whole list at once. */
	wl_list_for_each(head, &output->base.head_list, base.output_link) {
		assert(n_conn < MAX_CLONED_CONNECTORS);
		connectors[n_conn++] = head->connector_id;
	}

	/* If disable_planes is set then assign_planes() wasn't
	 * called for this render, so we could still have a stale
	 * cursor plane set up.
	 */
	if (output->base.disable_planes) {
		output->cursor_view = NULL;
		if (output->cursor_plane) {
			output->cursor_plane->base.x = INT32_MIN;
			output->cursor_plane->base.y = INT32_MIN;
		}
	}

	if (state->dpms != WESTON_DPMS_ON) {
		/* Powering down: plane states must be empty by now. */
		wl_list_for_each(ps, &state->plane_list, link) {
			struct drm_plane *p = ps->plane;
			assert(ps->fb == NULL);
			assert(ps->output == NULL);

			if (p->type != WDRM_PLANE_TYPE_OVERLAY)
				continue;

			/* All-zero arguments disable the plane. */
			ret = drmModeSetPlane(backend->drm.fd, p->plane_id,
					      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
			if (ret)
				weston_log("drmModeSetPlane failed disable: %m\n");
		}

		if (output->cursor_plane) {
			ret = drmModeSetCursor(backend->drm.fd, output->crtc_id,
					       0, 0, 0);
			if (ret)
				weston_log("drmModeSetCursor failed disable: %m\n");
		}

		ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id, 0, 0, 0,
				     NULL, 0, NULL);
		if (ret)
			weston_log("drmModeSetCrtc failed disabling: %m\n");

		/* No event will arrive for a disable, so complete the
		 * update right here using the presentation clock. */
		drm_output_assign_state(state, DRM_STATE_APPLY_SYNC);
		weston_compositor_read_presentation_clock(output->base.compositor, &now);
		drm_output_update_complete(output,
		                           WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION,
					   now.tv_sec, now.tv_nsec / 1000);

		return 0;
	}

	scanout_state =
		drm_output_state_get_existing_plane(state, scanout_plane);

	/* The legacy SetCrtc API doesn't allow us to do scaling, and the
	 * legacy PageFlip API doesn't allow us to do clipping either. */
	assert(scanout_state->src_x == 0);
	assert(scanout_state->src_y == 0);
	assert(scanout_state->src_w ==
		(unsigned) (output->base.current_mode->width << 16));
	assert(scanout_state->src_h ==
		(unsigned) (output->base.current_mode->height << 16));
	assert(scanout_state->dest_x == 0);
	assert(scanout_state->dest_y == 0);
	assert(scanout_state->dest_w == scanout_state->src_w >> 16);
	assert(scanout_state->dest_h == scanout_state->src_h >> 16);

	/* A full modeset is only needed when the previous state is
	 * unknown, there was no framebuffer, or the stride changed
	 * (page flip cannot change the stride). */
	mode = to_drm_mode(output->base.current_mode);
	if (backend->state_invalid ||
	    !scanout_plane->state_cur->fb ||
	    scanout_plane->state_cur->fb->strides[0] !=
	    scanout_state->fb->strides[0]) {
		ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id,
				     scanout_state->fb->fb_id,
				     0, 0,
				     connectors, n_conn,
				     &mode->mode_info);
		if (ret) {
			weston_log("set mode failed: %m\n");
			goto err;
		}
	}

	if (drmModePageFlip(backend->drm.fd, output->crtc_id,
			    scanout_state->fb->fb_id,
			    DRM_MODE_PAGE_FLIP_EVENT, output) < 0) {
		weston_log("queueing pageflip failed: %m\n");
		goto err;
	}

	assert(!output->page_flip_pending);

	/* Arm the watchdog that catches flips the kernel never
	 * delivers an event for. */
	if (output->pageflip_timer)
		wl_event_source_timer_update(output->pageflip_timer,
		                             backend->pageflip_timeout);

	drm_output_set_cursor(state);

	/*
	 * Now, update all the sprite surfaces
	 */
	wl_list_for_each(ps, &state->plane_list, link) {
		uint32_t flags = 0, fb_id = 0;
		drmVBlank vbl = {
			.request.type = DRM_VBLANK_RELATIVE | DRM_VBLANK_EVENT,
			.request.sequence = 1,
		};
		struct drm_plane *p = ps->plane;

		if (p->type != WDRM_PLANE_TYPE_OVERLAY)
			continue;

		assert(p->state_cur->complete);
		assert(!!p->state_cur->output == !!p->state_cur->fb);
		assert(!p->state_cur->output || p->state_cur->output == output);
		assert(!ps->complete);
		assert(!ps->output || ps->output == output);
		assert(!!ps->output == !!ps->fb);

		/* fb_id == 0 disables the plane. */
		if (ps->fb && !backend->sprites_hidden)
			fb_id = ps->fb->fb_id;

		ret = drmModeSetPlane(backend->drm.fd, p->plane_id,
				      output->crtc_id, fb_id, flags,
				      ps->dest_x, ps->dest_y,
				      ps->dest_w, ps->dest_h,
				      ps->src_x, ps->src_y,
				      ps->src_w, ps->src_h);
		if (ret)
			weston_log("setplane failed: %d: %s\n",
				ret, strerror(errno));

		vbl.request.type |= drm_waitvblank_pipe(output);

		/*
		 * Queue a vblank signal so we know when the surface
		 * becomes active on the display or has been replaced.
		 */
		vbl.request.signal = (unsigned long) ps;
		ret = drmWaitVBlank(backend->drm.fd, &vbl);
		if (ret) {
			weston_log("vblank event request failed: %d: %s\n",
				ret, strerror(errno));
		}
	}

	/* Legacy DPMS is a connector property; only touch it when the
	 * requested power state actually changed. */
	if (state->dpms != output->state_cur->dpms) {
		wl_list_for_each(head, &output->base.head_list, base.output_link) {
			dpms_prop = &head->props_conn[WDRM_CONNECTOR_DPMS];
			if (dpms_prop->prop_id == 0)
				continue;

			ret = drmModeConnectorSetProperty(backend->drm.fd,
							  head->connector_id,
							  dpms_prop->prop_id,
							  state->dpms);
			if (ret) {
				weston_log("DRM: DPMS: failed property set for %s\n",
					   head->base.name);
			}
		}
	}

	drm_output_assign_state(state, DRM_STATE_APPLY_ASYNC);

	return 0;

err:
	output->cursor_view = NULL;
	drm_output_state_free(state);
	return -1;
}
| |
| #ifdef HAVE_DRM_ATOMIC |
| static int |
| crtc_add_prop(drmModeAtomicReq *req, struct drm_output *output, |
| enum wdrm_crtc_property prop, uint64_t val) |
| { |
| struct drm_property_info *info = &output->props_crtc[prop]; |
| int ret; |
| |
| if (info->prop_id == 0) |
| return -1; |
| |
| ret = drmModeAtomicAddProperty(req, output->crtc_id, info->prop_id, |
| val); |
| return (ret <= 0) ? -1 : 0; |
| } |
| |
| static int |
| connector_add_prop(drmModeAtomicReq *req, struct drm_head *head, |
| enum wdrm_connector_property prop, uint64_t val) |
| { |
| struct drm_property_info *info = &head->props_conn[prop]; |
| int ret; |
| |
| if (info->prop_id == 0) |
| return -1; |
| |
| ret = drmModeAtomicAddProperty(req, head->connector_id, |
| info->prop_id, val); |
| return (ret <= 0) ? -1 : 0; |
| } |
| |
| static int |
| plane_add_prop(drmModeAtomicReq *req, struct drm_plane *plane, |
| enum wdrm_plane_property prop, uint64_t val) |
| { |
| struct drm_property_info *info = &plane->props[prop]; |
| int ret; |
| |
| if (info->prop_id == 0) |
| return -1; |
| |
| ret = drmModeAtomicAddProperty(req, plane->plane_id, info->prop_id, |
| val); |
| return (ret <= 0) ? -1 : 0; |
| } |
| |
| static int |
| drm_mode_ensure_blob(struct drm_backend *backend, struct drm_mode *mode) |
| { |
| int ret; |
| |
| if (mode->blob_id) |
| return 0; |
| |
| ret = drmModeCreatePropertyBlob(backend->drm.fd, |
| &mode->mode_info, |
| sizeof(mode->mode_info), |
| &mode->blob_id); |
| if (ret != 0) |
| weston_log("failed to create mode property blob: %m\n"); |
| |
| return ret; |
| } |
| |
/* Translate a single output's state into atomic-request properties.
 *
 * Adds CRTC mode/active and connector routing properties according to
 * the requested DPMS state, then every plane's framebuffer, source and
 * destination rectangles. May set DRM_MODE_ATOMIC_ALLOW_MODESET in
 * *flags when the power state changes.
 *
 * Returns 0 on success, non-zero if any property could not be added.
 */
static int
drm_output_apply_state_atomic(struct drm_output_state *state,
			      drmModeAtomicReq *req,
			      uint32_t *flags)
{
	struct drm_output *output = state->output;
	struct drm_backend *backend = to_drm_backend(output->base.compositor);
	struct drm_plane_state *plane_state;
	struct drm_mode *current_mode = to_drm_mode(output->base.current_mode);
	struct drm_head *head;
	int ret = 0;

	if (state->dpms != output->state_cur->dpms)
		*flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;

	if (state->dpms == WESTON_DPMS_ON) {
		/* The mode must exist as a kernel blob before it can be
		 * referenced by the MODE_ID property. */
		ret = drm_mode_ensure_blob(backend, current_mode);
		if (ret != 0)
			return ret;

		ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID,
				     current_mode->blob_id);
		ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 1);

		/* No need for the DPMS property, since it is implicit in
		 * routing and CRTC activity. */
		wl_list_for_each(head, &output->base.head_list, base.output_link) {
			ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID,
						  output->crtc_id);
		}
	} else {
		/* Powering down: clear the mode and deactivate the CRTC,
		 * and detach every connector from it. */
		ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID, 0);
		ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 0);

		/* No need for the DPMS property, since it is implicit in
		 * routing and CRTC activity. */
		wl_list_for_each(head, &output->base.head_list, base.output_link)
			ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID, 0);
	}

	if (ret != 0) {
		weston_log("couldn't set atomic CRTC/connector state\n");
		return ret;
	}

	/* Program every plane in this output state; a NULL fb detaches
	 * the plane from the CRTC. */
	wl_list_for_each(plane_state, &state->plane_list, link) {
		struct drm_plane *plane = plane_state->plane;

		ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_ID,
				      plane_state->fb ? plane_state->fb->fb_id : 0);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID,
				      plane_state->fb ? output->crtc_id : 0);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_X,
				      plane_state->src_x);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_Y,
				      plane_state->src_y);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_W,
				      plane_state->src_w);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_H,
				      plane_state->src_h);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_X,
				      plane_state->dest_x);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_Y,
				      plane_state->dest_y);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_W,
				      plane_state->dest_w);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_H,
				      plane_state->dest_h);

		if (ret != 0) {
			weston_log("couldn't set plane state\n");
			return ret;
		}
	}

	return 0;
}
| |
/**
 * Helper function used only by drm_pending_state_apply, with the same
 * guarantees and constraints as that function.
 *
 * Builds a single atomic request covering every output state in
 * pending_state (plus a full reset of unused connectors/CRTCs/planes
 * when state_invalid is set), then commits it in the requested mode.
 * Except for DRM_STATE_TEST_ONLY, takes ownership of pending_state.
 */
static int
drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
			       enum drm_state_apply_mode mode)
{
	struct drm_backend *b = pending_state->backend;
	struct drm_output_state *output_state, *tmp;
	struct drm_plane *plane;
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	uint32_t flags = 0;
	int ret = 0;

	if (!req)
		return -1;

	if (b->state_invalid) {
		struct weston_head *head_base;
		struct drm_head *head;
		uint32_t *unused;
		int err;

		/* If we need to reset all our state (e.g. because we've
		 * just started, or just been VT-switched in), explicitly
		 * disable all the CRTCs and connectors we aren't using. */
		wl_list_for_each(head_base,
				 &b->compositor->head_list, compositor_link) {
			struct drm_property_info *info;

			if (weston_head_is_enabled(head_base))
				continue;

			head = to_drm_head(head_base);

			/* Detach the disabled connector from any CRTC. */
			info = &head->props_conn[WDRM_CONNECTOR_CRTC_ID];
			err = drmModeAtomicAddProperty(req, head->connector_id,
						       info->prop_id, 0);
			if (err <= 0)
				ret = -1;
		}

		wl_array_for_each(unused, &b->unused_crtcs) {
			struct drm_property_info infos[WDRM_CRTC__COUNT];
			struct drm_property_info *info;
			drmModeObjectProperties *props;
			uint64_t active;

			memset(infos, 0, sizeof(infos));

			/* We can't emit a disable on a CRTC that's already
			 * off, as the kernel will refuse to generate an event
			 * for an off->off state and fail the commit.
			 */
			props = drmModeObjectGetProperties(b->drm.fd,
							   *unused,
							   DRM_MODE_OBJECT_CRTC);
			if (!props) {
				ret = -1;
				continue;
			}

			drm_property_info_populate(b, crtc_props, infos,
						   WDRM_CRTC__COUNT,
						   props);

			info = &infos[WDRM_CRTC_ACTIVE];
			active = drm_property_get_value(info, props, 0);
			drmModeFreeObjectProperties(props);
			if (active == 0) {
				drm_property_info_free(infos, WDRM_CRTC__COUNT);
				continue;
			}

			/* CRTC is on: queue ACTIVE=0 and MODE_ID=0 to
			 * switch it off in this commit. */
			err = drmModeAtomicAddProperty(req, *unused,
						       info->prop_id, 0);
			if (err <= 0)
				ret = -1;

			info = &infos[WDRM_CRTC_MODE_ID];
			err = drmModeAtomicAddProperty(req, *unused,
						       info->prop_id, 0);
			if (err <= 0)
				ret = -1;

			drm_property_info_free(infos, WDRM_CRTC__COUNT);
		}

		/* Disable all the planes; planes which are being used will
		 * override this state in the output-state application. */
		wl_list_for_each(plane, &b->plane_list, link) {
			plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
			plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
		}

		flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
	}

	/* Fold every output's state into the same request. */
	wl_list_for_each(output_state, &pending_state->output_list, link) {
		if (mode == DRM_STATE_APPLY_SYNC)
			assert(output_state->dpms == WESTON_DPMS_OFF);
		ret |= drm_output_apply_state_atomic(output_state, req, &flags);
	}

	if (ret != 0) {
		weston_log("atomic: couldn't compile atomic state\n");
		goto out;
	}

	switch (mode) {
	case DRM_STATE_APPLY_SYNC:
		break;
	case DRM_STATE_APPLY_ASYNC:
		flags |= DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
		break;
	case DRM_STATE_TEST_ONLY:
		flags |= DRM_MODE_ATOMIC_TEST_ONLY;
		break;
	}

	ret = drmModeAtomicCommit(b->drm.fd, req, flags, b);

	/* Test commits do not take ownership of the state; return
	 * without freeing here. */
	if (mode == DRM_STATE_TEST_ONLY) {
		drmModeAtomicFree(req);
		return ret;
	}

	if (ret != 0) {
		weston_log("atomic: couldn't commit new state: %m\n");
		goto out;
	}

	/* The commit succeeded: hand each output state over to the
	 * in-flight update (drm_output_assign_state unlinks it). */
	wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
			      link)
		drm_output_assign_state(output_state, mode);

	b->state_invalid = false;

	assert(wl_list_empty(&pending_state->output_list));

out:
	drmModeAtomicFree(req);
	drm_pending_state_free(pending_state);
	return ret;
}
| #endif |
| |
| /** |
| * Tests a pending state, to see if the kernel will accept the update as |
| * constructed. |
| * |
| * Using atomic modesetting, the kernel performs the same checks as it would |
| * on a real commit, returning success or failure without actually modifying |
| * the running state. It does not return -EBUSY if there are pending updates |
| * in flight, so states may be tested at any point, however this means a |
| * state which passed testing may fail on a real commit if the timing is not |
| * respected (e.g. committing before the previous commit has completed). |
| * |
| * Without atomic modesetting, we have no way to check, so we optimistically |
| * claim it will work. |
| * |
| * Unlike drm_pending_state_apply() and drm_pending_state_apply_sync(), this |
| * function does _not_ take ownership of pending_state, nor does it clear |
| * state_invalid. |
| */ |
static int
drm_pending_state_test(struct drm_pending_state *pending_state)
{
	int ret = 0;

#ifdef HAVE_DRM_ATOMIC
	struct drm_backend *b = pending_state->backend;

	if (b->atomic_modeset)
		ret = drm_pending_state_apply_atomic(pending_state,
						     DRM_STATE_TEST_ONLY);
#endif

	/* Legacy modesetting offers no way to test a state up front, so
	 * optimistically report success. */
	return ret;
}
| |
| /** |
| * Applies all of a pending_state asynchronously: the primary entry point for |
| * applying KMS state to a device. Updates the state for all outputs in the |
| * pending_state, as well as disabling any unclaimed outputs. |
| * |
| * Unconditionally takes ownership of pending_state, and clears state_invalid. |
| */ |
| static int |
| drm_pending_state_apply(struct drm_pending_state *pending_state) |
| { |
| struct drm_backend *b = pending_state->backend; |
| struct drm_output_state *output_state, *tmp; |
| uint32_t *unused; |
| |
| #ifdef HAVE_DRM_ATOMIC |
| if (b->atomic_modeset) |
| return drm_pending_state_apply_atomic(pending_state, |
| DRM_STATE_APPLY_ASYNC); |
| #endif |
| |
| if (b->state_invalid) { |
| /* If we need to reset all our state (e.g. because we've |
| * just started, or just been VT-switched in), explicitly |
| * disable all the CRTCs we aren't using. This also disables |
| * all connectors on these CRTCs, so we don't need to do that |
| * separately with the pre-atomic API. */ |
| wl_array_for_each(unused, &b->unused_crtcs) |
| drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0, |
| NULL); |
| } |
| |
| wl_list_for_each_safe(output_state, tmp, &pending_state->output_list, |
| link) { |
| struct drm_output *output = output_state->output; |
| int ret; |
| |
| ret = drm_output_apply_state_legacy(output_state); |
| if (ret != 0) { |
| weston_log("Couldn't apply state for output %s\n", |
| output->base.name); |
| } |
| } |
| |
| b->state_invalid = false; |
| |
| assert(wl_list_empty(&pending_state->output_list)); |
| |
| drm_pending_state_free(pending_state); |
| |
| return 0; |
| } |
| |
| /** |
| * The synchronous version of drm_pending_state_apply. May only be used to |
| * disable outputs. Does so synchronously: the request is guaranteed to have |
| * completed on return, and the output will not be touched afterwards. |
| * |
| * Unconditionally takes ownership of pending_state, and clears state_invalid. |
| */ |
| static int |
| drm_pending_state_apply_sync(struct drm_pending_state *pending_state) |
| { |
| struct drm_backend *b = pending_state->backend; |
| struct drm_output_state *output_state, *tmp; |
| uint32_t *unused; |
| |
| #ifdef HAVE_DRM_ATOMIC |
| if (b->atomic_modeset) |
| return drm_pending_state_apply_atomic(pending_state, |
| DRM_STATE_APPLY_SYNC); |
| #endif |
| |
| if (b->state_invalid) { |
| /* If we need to reset all our state (e.g. because we've |
| * just started, or just been VT-switched in), explicitly |
| * disable all the CRTCs we aren't using. This also disables |
| * all connectors on these CRTCs, so we don't need to do that |
| * separately with the pre-atomic API. */ |
| wl_array_for_each(unused, &b->unused_crtcs) |
| drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0, |
| NULL); |
| } |
| |
| wl_list_for_each_safe(output_state, tmp, &pending_state->output_list, |
| link) { |
| int ret; |
| |
| assert(output_state->dpms == WESTON_DPMS_OFF); |
| ret = drm_output_apply_state_legacy(output_state); |
| if (ret != 0) { |
| weston_log("Couldn't apply state for output %s\n", |
| output_state->output->base.name); |
| } |
| } |
| |
| b->state_invalid = false; |
| |
| assert(wl_list_empty(&pending_state->output_list)); |
| |
| drm_pending_state_free(pending_state); |
| |
| return 0; |
| } |
| |
/* Repaint hook for an output: render into the pending output state.
 *
 * Renders the damaged region into the scanout plane's framebuffer and
 * leaves the result in the pending state owned by repaint_data; the
 * actual KMS commit happens later in drm_repaint_flush().
 *
 * Returns 0 on success; -1 (freeing any state created here) if the
 * output is going away or no scanout framebuffer was produced.
 */
static int
drm_output_repaint(struct weston_output *output_base,
		   pixman_region32_t *damage,
		   void *repaint_data)
{
	struct drm_pending_state *pending_state = repaint_data;
	struct drm_output *output = to_drm_output(output_base);
	struct drm_output_state *state = NULL;
	struct drm_plane_state *scanout_state;

	if (output->disable_pending || output->destroy_pending)
		goto err;

	assert(!output->state_last);

	/* If planes have been disabled in the core, we might not have
	 * hit assign_planes at all, so might not have valid output state
	 * here. */
	state = drm_pending_state_get_output(pending_state, output);
	if (!state)
		state = drm_output_state_duplicate(output->state_cur,
						   pending_state,
						   DRM_OUTPUT_STATE_CLEAR_PLANES);
	state->dpms = WESTON_DPMS_ON;

	drm_output_render(state, damage);
	scanout_state = drm_output_state_get_plane(state,
						   output->scanout_plane);
	/* Rendering must have produced a scanout framebuffer. */
	if (!scanout_state || !scanout_state->fb)
		goto err;

	return 0;

err:
	drm_output_state_free(state);
	return -1;
}
| |
/* Kick off the repaint loop for an output.
 *
 * Tries, in order: an instant drmWaitVBlank query for a fresh vblank
 * timestamp; failing that, a no-op page flip of the current state so
 * the flip completion restarts the loop; failing that, finishes the
 * frame immediately with an invalid-timestamp feedback.
 */
static void
drm_output_start_repaint_loop(struct weston_output *output_base)
{
	struct drm_output *output = to_drm_output(output_base);
	struct drm_pending_state *pending_state;
	struct drm_plane *scanout_plane = output->scanout_plane;
	struct drm_backend *backend =
		to_drm_backend(output_base->compositor);
	struct timespec ts, tnow;
	struct timespec vbl2now;
	int64_t refresh_nsec;
	int ret;
	drmVBlank vbl = {
		.request.type = DRM_VBLANK_RELATIVE,
		.request.sequence = 0,
		.request.signal = 0,
	};

	if (output->disable_pending || output->destroy_pending)
		return;

	if (!output->scanout_plane->state_cur->fb) {
		/* We can't page flip if there's no mode set */
		goto finish_frame;
	}

	/* Need to smash all state in from scratch; current timings might not
	 * be what we want, page flip might not work, etc.
	 */
	if (backend->state_invalid)
		goto finish_frame;

	assert(scanout_plane->state_cur->output == output);

	/* Try to get current msc and timestamp via instant query */
	vbl.request.type |= drm_waitvblank_pipe(output);
	ret = drmWaitVBlank(backend->drm.fd, &vbl);

	/* Error ret or zero timestamp means failure to get valid timestamp */
	if ((ret == 0) && (vbl.reply.tval_sec > 0 || vbl.reply.tval_usec > 0)) {
		ts.tv_sec = vbl.reply.tval_sec;
		ts.tv_nsec = vbl.reply.tval_usec * 1000;

		/* Valid timestamp for most recent vblank - not stale?
		 * Stale ts could happen on Linux 3.17+, so make sure it
		 * is not older than 1 refresh duration since now.
		 */
		weston_compositor_read_presentation_clock(backend->compositor,
							  &tnow);
		timespec_sub(&vbl2now, &tnow, &ts);
		refresh_nsec =
			millihz_to_nsec(output->base.current_mode->refresh);
		if (timespec_to_nsec(&vbl2now) < refresh_nsec) {
			drm_output_update_msc(output, vbl.reply.sequence);
			weston_output_finish_frame(output_base, &ts,
						WP_PRESENTATION_FEEDBACK_INVALID);
			return;
		}
	}

	/* Immediate query didn't provide valid timestamp.
	 * Use pageflip fallback.
	 */

	assert(!output->page_flip_pending);
	assert(!output->state_last);

	/* Re-commit the current state unchanged; the resulting flip
	 * event restarts the repaint loop with a fresh timestamp. */
	pending_state = drm_pending_state_alloc(backend);
	drm_output_state_duplicate(output->state_cur, pending_state,
				   DRM_OUTPUT_STATE_PRESERVE_PLANES);

	ret = drm_pending_state_apply(pending_state);
	if (ret != 0) {
		weston_log("applying repaint-start state failed: %m\n");
		goto finish_frame;
	}

	return;

finish_frame:
	/* if we cannot page-flip, immediately finish frame */
	weston_output_finish_frame(output_base, NULL,
				   WP_PRESENTATION_FEEDBACK_INVALID);
}
| |
| static void |
| drm_output_update_msc(struct drm_output *output, unsigned int seq) |
| { |
| uint64_t msc_hi = output->base.msc >> 32; |
| |
| if (seq < (output->base.msc & 0xffffffff)) |
| msc_hi++; |
| |
| output->base.msc = (msc_hi << 32) + seq; |
| } |
| |
/* DRM event handler for the vblank events queued per overlay plane in
 * drm_output_apply_state_legacy().
 *
 * Decrements the output's pending-vblank count; when neither a page
 * flip nor further vblanks are outstanding, completes the update with
 * hardware-completion/clock feedback flags.
 */
static void
vblank_handler(int fd, unsigned int frame, unsigned int sec, unsigned int usec,
	       void *data)
{
	/* data is the plane state passed as vbl.request.signal. */
	struct drm_plane_state *ps = (struct drm_plane_state *) data;
	struct drm_output_state *os = ps->output_state;
	struct drm_output *output = os->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
			 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;

	/* Per-plane vblank events are only used on the legacy path. */
	assert(!b->atomic_modeset);

	drm_output_update_msc(output, frame);
	output->vblank_pending--;
	assert(output->vblank_pending >= 0);

	assert(ps->fb);

	/* Wait until the page flip and all other vblanks have landed
	 * before declaring the update complete. */
	if (output->page_flip_pending || output->vblank_pending)
		return;

	drm_output_update_complete(output, flags, sec, usec);
}
| |
| static void |
| page_flip_handler(int fd, unsigned int frame, |
| unsigned int sec, unsigned int usec, void *data) |
| { |
| struct drm_output *output = data; |
| struct drm_backend *b = to_drm_backend(output->base.compositor); |
| uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC | |
| WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION | |
| WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK; |
| |
| drm_output_update_msc(output, frame); |
| |
| assert(!b->atomic_modeset); |
| assert(output->page_flip_pending); |
| output->page_flip_pending = 0; |
| |
| if (output->vblank_pending) |
| return; |
| |
| drm_output_update_complete(output, flags, sec, usec); |
| } |
| |
| /** |
| * Begin a new repaint cycle |
| * |
| * Called by the core compositor at the beginning of a repaint cycle. Creates |
| * a new pending_state structure to own any output state created by individual |
| * output repaint functions until the repaint is flushed or cancelled. |
| */ |
| static void * |
| drm_repaint_begin(struct weston_compositor *compositor) |
| { |
| struct drm_backend *b = to_drm_backend(compositor); |
| struct drm_pending_state *ret; |
| |
| ret = drm_pending_state_alloc(b); |
| b->repaint_data = ret; |
| |
| return ret; |
| } |
| |
| /** |
| * Flush a repaint set |
| * |
| * Called by the core compositor when a repaint cycle has been completed |
| * and should be flushed. Frees the pending state, transitioning ownership |
| * of the output state from the pending state, to the update itself. When |
| * the update completes (see drm_output_update_complete), the output |
| * state will be freed. |
| */ |
| static void |
| drm_repaint_flush(struct weston_compositor *compositor, void *repaint_data) |
| { |
| struct drm_backend *b = to_drm_backend(compositor); |
| struct drm_pending_state *pending_state = repaint_data; |
| |
| drm_pending_state_apply(pending_state); |
| b->repaint_data = NULL; |
| } |
| |
| /** |
| * Cancel a repaint set |
| * |
| * Called by the core compositor when a repaint has finished, so the data |
| * held across the repaint cycle should be discarded. |
| */ |
| static void |
| drm_repaint_cancel(struct weston_compositor *compositor, void *repaint_data) |
| { |
| struct drm_backend *b = to_drm_backend(compositor); |
| struct drm_pending_state *pending_state = repaint_data; |
| |
| drm_pending_state_free(pending_state); |
| b->repaint_data = NULL; |
| } |
| |
| #ifdef HAVE_DRM_ATOMIC |
| static void |
| atomic_flip_handler(int fd, unsigned int frame, unsigned int sec, |
| unsigned int usec, unsigned int crtc_id, void *data) |
| { |
| struct drm_backend *b = data; |
| struct drm_output *output = drm_output_find_by_crtc(b, crtc_id); |
| uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC | |
| WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION | |
| WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK; |
| |
| /* During the initial modeset, we can disable CRTCs which we don't |
| * actually handle during normal operation; this will give us events |
| * for unknown outputs. Ignore them. */ |
| if (!output || !output->base.enabled) |
| return; |
| |
| drm_output_update_msc(output, frame); |
| |
| assert(b->atomic_modeset); |
| assert(output->atomic_complete_pending); |
| output->atomic_complete_pending = 0; |
| |
| drm_output_update_complete(output, flags, sec, usec); |
| } |
| #endif |
| |
/* Try to place a view on a hardware overlay (sprite) plane.
 *
 * Walks the backend's plane list looking for an available overlay
 * plane that supports the view's framebuffer format (and modifier,
 * when one is set), whose coordinates map without scaling. Unless in
 * planes-only mode, the candidate placement is test-committed via
 * drm_pending_state_test() before being accepted.
 *
 * Returns the claimed plane state, or NULL if no plane fits.
 */
static struct drm_plane_state *
drm_output_prepare_overlay_view(struct drm_output_state *output_state,
				struct weston_view *ev,
				enum drm_output_propose_state_mode mode)
{
	struct drm_output *output = output_state->output;
	struct weston_compositor *ec = output->base.compositor;
	struct drm_backend *b = to_drm_backend(ec);
	struct drm_plane *p;
	struct drm_plane_state *state = NULL;
	struct drm_fb *fb;
	unsigned int i;
	int ret;

	assert(!b->sprites_are_broken);

	fb = drm_fb_get_from_view(output_state, ev);
	if (!fb)
		return NULL;

	wl_list_for_each(p, &b->plane_list, link) {
		if (p->type != WDRM_PLANE_TYPE_OVERLAY)
			continue;

		if (!drm_plane_is_available(p, output))
			continue;

		/* Check whether the format is supported */
		for (i = 0; i < p->count_formats; i++) {
			unsigned int j;

			if (p->formats[i].format != fb->format->format)
				continue;

			/* No explicit modifier: a format match suffices. */
			if (fb->modifier == DRM_FORMAT_MOD_INVALID)
				break;

			/* Otherwise the plane must also advertise the
			 * framebuffer's modifier for this format. */
			for (j = 0; j < p->formats[i].count_modifiers; j++) {
				if (p->formats[i].modifiers[j] == fb->modifier)
					break;
			}
			if (j != p->formats[i].count_modifiers)
				break;
		}
		if (i == p->count_formats)
			continue;

		/* Plane already claimed by another view this cycle. */
		state = drm_output_state_get_plane(output_state, p);
		if (state->fb) {
			state = NULL;
			continue;
		}

		state->ev = ev;
		state->output = output;
		if (!drm_plane_state_coords_for_view(state, ev)) {
			drm_plane_state_put_back(state);
			state = NULL;
			continue;
		}
		/* Overlay planes are used without scaling only: source
		 * size (16.16 fixed point) must equal destination size. */
		if (state->src_w != state->dest_w << 16 ||
		    state->src_h != state->dest_h << 16) {
			drm_plane_state_put_back(state);
			state = NULL;
			continue;
		}

		/* We hold one reference for the lifetime of this function;
		 * from calling drm_fb_get_from_view, to the out label where
		 * we unconditionally drop the reference. So, we take another
		 * reference here to live within the state. */
		state->fb = drm_fb_ref(fb);

		/* In planes-only mode, we don't have an incremental state to
		 * test against, so we just hope it'll work. */
		if (mode == DRM_OUTPUT_PROPOSE_STATE_PLANES_ONLY)
			goto out;

		ret = drm_pending_state_test(output_state->pending_state);
		if (ret == 0)
			goto out;

		drm_plane_state_put_back(state);
		state = NULL;
	}

out:
	drm_fb_unref(fb);
	return state;
}
| |
| /** |
| * Update the image for the current cursor surface |
| * |
| * @param plane_state DRM cursor plane state |
| * @param ev Source view for cursor |
| */ |
| static void |
| cursor_bo_update(struct drm_plane_state *plane_state, struct weston_view *ev) |
| { |
| struct drm_backend *b = plane_state->plane->backend; |
| struct gbm_bo *bo = plane_state->fb-> |