pax_global_header00006660000000000000000000000064135710135370014517gustar00rootroot0000000000000052 comment=62db782929ccb794feeac0573fc9b0c57cce3eb0
wf-recorder-0.2/000077500000000000000000000000001357101353700135775ustar00rootroot00000000000000
wf-recorder-0.2/LICENSE000066400000000000000000000020701357101353700146030ustar00rootroot00000000000000
The MIT License (MIT)

Copyright (c) 2019 Ilia Bozhinov

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
wf-recorder-0.2/README.md000066400000000000000000000047301357101353700150620ustar00rootroot00000000000000
# wf-recorder

wf-recorder is a utility program for screen recording of `wlroots`-based compositors (more specifically, those that support `wlr-screencopy-v1` and `xdg-output`). Its dependencies are `ffmpeg`, `wayland-client` and `wayland-protocols`.

# installation

## archlinux

Arch users can use [wf-recorder-git](https://aur.archlinux.org/packages/wf-recorder-git/) from the AUR.
```
yay -S wf-recorder-git
```

## from source

```
git clone https://github.com/ammen99/wf-recorder.git && cd wf-recorder
meson build --prefix=/usr --buildtype=release
ninja -C build
```
Optionally configure with `-Ddefault_codec='codec'`. The default is libx264. Now you can just run `./build/wf-recorder` or install it with `sudo ninja -C build install`.

Optionally install `scdoc`, a tool by ddevault, to build the manpage.

# usage

In its simplest form, run `wf-recorder` to start recording and use Ctrl+C to stop. This will create a file called `recording.mp4` in the current working directory using the default codec.

Use `-f <filename>` to specify the output file. In case of multiple outputs, you'll first be prompted to select the output you want to record. If you know the output name beforehand, you can use the `-o <output>` option.

To select and record only a specific part of the screen, either pass the region directly with the `-g <geometry>` option, or use [slurp](https://github.com/emersion/slurp) to select the area interactively:
```
wf-recorder -g "$(slurp)"
```

To specify a codec, use the `-c <codec>` option. To modify codec parameters, use `-p <option>=<value>`.

To set a specific output format, use the `--muxer` option. For example, to output to a video4linux2 loopback you might use:
```
wf-recorder --muxer=v4l2 --codec=rawvideo --file=/dev/video2
```

To use GPU encoding, use a VAAPI codec (for example `h264_vaapi`) and specify a GPU device to use with the `-d` option:
```
wf-recorder -f test-vaapi.mkv -c h264_vaapi -d /dev/dri/renderD128
```

Some drivers report support for rgb0 data for vaapi input but really only support yuv planar formats.
In this case, use the `-t` or `--force-yuv` option in addition to the vaapi options to convert the data to a yuv planar format before sending it to the gpu.

The `-e` option attempts to use OpenCL if wf-recorder was built with OpenCL support and `-t` or `--force-yuv` is specified, even without vaapi gpu encoding. Use `-e#` or `--opencl=#` to use a specific OpenCL device, where `#` is one of the devices listed.
wf-recorder-0.2/config.h.in000066400000000000000000000001551357101353700156230ustar00rootroot00000000000000
#pragma once

#define DEFAULT_CODEC "@default_codec@"

#mesondefine HAVE_OPENCL
#mesondefine HAVE_LIBAVDEVICE
wf-recorder-0.2/manpage/000077500000000000000000000000001357101353700152075ustar00rootroot00000000000000
wf-recorder-0.2/manpage/wf-recorder.1.scd000066400000000000000000000043101357101353700202600ustar00rootroot00000000000000
wf-recorder(1)

# NAME

wf-recorder - A simple screen recording program for wlroots-based compositors

# SYNOPSIS

*wf-recorder* [options...] -f [file]

# OPTIONS

*-a, --audio [DEVICE]*
	Starts recording the screen with audio. The *[DEVICE]* argument is optional. If you want to specify the pulseaudio device that will capture the audio, run this command with the name of that device. You can find your device by running: *pactl list sinks | grep Name*

*-c, --codec*
	Specifies the codec of the video. GIF output is also supported. To modify codec parameters, use *-p <option>=<value>*.

*-d, --device*
	Selects the device to use when encoding the video. Some drivers report support for rgb0 data for vaapi input but really only support yuv; see *--force-yuv*.

*-f <filename>.ext*
	The output file will be named *filename.ext*, and the file format will be determined by the provided extension *.ext*. If the extension is not recognized by your FFmpeg muxers, the command will fail. You can check the muxers that your FFmpeg installation supports by running: *ffmpeg -muxers*

*-m, --muxer*
	Set the output format to a specific muxer instead of detecting it from the filename.

*-x, --pixel-format*
	Set the output pixel format. These can be found by running: *ffmpeg -pix_fmts*

*-g, --geometry*
	Selects a specific part of the screen.

*-h, --help*
	Prints this help screen.

*-l, --log*
	Generates a log on the current terminal, for debugging purposes.

*-o, --output*
	Specify the output where the video is to be recorded.

*-p, --codec-param*
	Change the codec parameters: *-p <option>=<value>*

*-t, --force-yuv*
	Use the -t or --force-yuv option in addition to the vaapi options to convert the data to yuv in software, before sending it to the gpu.

# DESCRIPTION

*wf-recorder* is a tool built to record your screen on Wayland compositors. It makes use of wlr-screencopy for capturing video and ffmpeg for encoding it.
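# EXAMPLES

The following invocations are taken from the project README.

Record an interactively selected region using slurp:

	wf-recorder -g "$(slurp)"

Output to a video4linux2 loopback device:

	wf-recorder --muxer=v4l2 --codec=rawvideo --file=/dev/video2

Record with VAAPI GPU encoding:

	wf-recorder -f test-vaapi.mkv -c h264_vaapi -d /dev/dri/renderD128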
# SEE ALSO *ffmpeg*(1) *pactl*(1) wf-recorder-0.2/meson.build000066400000000000000000000041671357101353700157510ustar00rootroot00000000000000project( 'wf-recorder', 'c', 'cpp', version: '0.2', license: 'MIT', meson_version: '>=0.47.0', default_options: [ 'cpp_std=c++11', 'c_std=c11', 'warning_level=2', 'werror=false', ], ) conf_data = configuration_data() conf_data.set('default_codec', get_option('default_codec')) include_directories(['.']) add_project_arguments(['-Wno-deprecated-declarations'], language: 'cpp') project_sources = ['src/frame-writer.cpp', 'src/main.cpp', 'src/pulse.cpp', 'src/averr.c'] wayland_client = dependency('wayland-client') wayland_protos = dependency('wayland-protocols') opencl = dependency('OpenCL', required : get_option('opencl')) if opencl.found() conf_data.set('HAVE_OPENCL', true) project_sources += 'src/opencl.cpp' endif libavutil = dependency('libavutil') libavcodec = dependency('libavcodec') libavformat = dependency('libavformat') libavdevice = dependency('libavdevice', required: false) sws = dependency('libswscale') swr = dependency('libswresample') threads = dependency('threads') pulse = dependency('libpulse-simple') conf_data.set('HAVE_LIBAVDEVICE', libavdevice.found()) configure_file(input: 'config.h.in', output: 'config.h', configuration: conf_data) scdoc = dependency('scdoc', version: '>=1.9.2', native: true, required: get_option('man-pages')) if scdoc.found() scdoc_bin=find_program(scdoc.get_pkgconfig_variable('scdoc'), native: true) sh = find_program('sh', native: true) mandir=get_option('mandir') filename ='manpage/wf-recorder.1.scd' project_name='wf-recorder' section='1' output= '@0@.@1@'.format(project_name, section) custom_target( project_name, input: filename, output:output, command: [ sh, '-c', '@0@ < @INPUT@ > @1@'.format(scdoc_bin.path(), output) ], install: true, install_dir: '@0@/man@1@'.format(mandir, section) ) endif subdir('proto') dependencies = [ wayland_client, wayland_protos, libavutil, libavcodec, libavformat, libavdevice, wf_protos, sws, threads, pulse, swr, opencl ] executable('wf-recorder', project_sources, dependencies: dependencies, install: true) wf-recorder-0.2/meson_options.txt000066400000000000000000000004371357101353700172400ustar00rootroot00000000000000option('default_codec', type: 'string', value: 'libx264', description: 'Codec that will be used by default') option('man-pages', type: 'feature', value: 'auto', description: 'Generate and install man pages') option('opencl', type: 'feature', value: 'auto', description: 'Enable OpenCL') wf-recorder-0.2/proto/000077500000000000000000000000001357101353700147425ustar00rootroot00000000000000wf-recorder-0.2/proto/meson.build000066400000000000000000000017701357101353700171110ustar00rootroot00000000000000wl_protocol_dir = wayland_protos.get_pkgconfig_variable('pkgdatadir') wayland_scanner = find_program('wayland-scanner') wayland_scanner_code = generator( wayland_scanner, output: '@BASENAME@-protocol.c', arguments: ['private-code', '@INPUT@', '@OUTPUT@'], ) wayland_scanner_client = generator( wayland_scanner, output: '@BASENAME@-client-protocol.h', arguments: ['client-header', '@INPUT@', '@OUTPUT@'], ) client_protocols = [ [wl_protocol_dir, 'unstable/xdg-output/xdg-output-unstable-v1.xml'], 'wlr-screencopy-unstable-v1.xml' ] wl_protos_client_src = [] wl_protos_headers = [] foreach p : client_protocols xml = join_paths(p) wl_protos_client_src += wayland_scanner_code.process(xml) wl_protos_headers += wayland_scanner_client.process(xml) endforeach lib_wl_protos = 
static_library('wl_protos', wl_protos_client_src + wl_protos_headers, dependencies: [wayland_client]) # for the include directory wf_protos = declare_dependency( link_with: lib_wl_protos, sources: wl_protos_headers, ) wf-recorder-0.2/proto/wlr-screencopy-unstable-v1.xml000066400000000000000000000167511357101353700226110ustar00rootroot00000000000000 Copyright © 2018 Simon Ser Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This protocol allows clients to ask the compositor to copy part of the screen content to a client buffer. Warning! The protocol described in this file is experimental and backward incompatible changes may be made. Backward compatible changes may be added together with the corresponding interface version bump. Backward incompatible changes are done by bumping the version number in the protocol and interface names and resetting the interface version. Once the protocol is to be declared stable, the 'z' prefix and the version number in the protocol and interface names are removed and the interface version number is reset. This object is a manager which offers requests to start capturing from a source. Capture the next frame of an entire output. Capture the next frame of an output's region. The region is given in output logical coordinates, see xdg_output.logical_size. The region will be clipped to the output's extents. All objects created by the manager will still remain valid, until their appropriate destroy request has been called. This object represents a single frame. When created, a "buffer" event will be sent. The client will then be able to send a "copy" request. If the capture is successful, the compositor will send a "flags" followed by a "ready" event. If the capture failed, the "failed" event is sent. This can happen anytime before the "ready" event. Once either a "ready" or a "failed" event is received, the client should destroy the frame. Provides information about the frame's buffer. This event is sent once as soon as the frame is created. The client should then create a buffer with the provided attributes, and send a "copy" request. Copy the frame to the supplied buffer. The buffer must have a the correct size, see zwlr_screencopy_frame_v1.buffer. The buffer needs to have a supported format. If the frame is successfully copied, a "flags" and a "ready" events are sent. Otherwise, a "failed" event is sent. Provides flags about the frame. This event is sent once before the "ready" event. Called as soon as the frame is copied, indicating it is available for reading. 
This event includes the time at which presentation happened. The timestamp is expressed as tv_sec_hi, tv_sec_lo, tv_nsec triples, each component being an unsigned 32-bit value. Whole seconds are in tv_sec, which is a 64-bit value combined from tv_sec_hi and tv_sec_lo, and the additional fractional part is in tv_nsec as nanoseconds. Hence, for valid timestamps tv_nsec must be in [0, 999999999]. The seconds part may have an arbitrary offset at start. After receiving this event, the client should destroy the object. This event indicates that the attempted frame copy has failed. After receiving this event, the client should destroy the object. Destroys the frame. This request can be sent at any time by the client. wf-recorder-0.2/src/000077500000000000000000000000001357101353700143665ustar00rootroot00000000000000wf-recorder-0.2/src/averr.c000066400000000000000000000001171357101353700156500ustar00rootroot00000000000000
#include "averr.h"

const char* averr(int err)
{
    return av_err2str(err);
}
wf-recorder-0.2/src/averr.h000066400000000000000000000003221357101353700156530ustar00rootroot00000000000000
#include <libavutil/error.h>

/* the macro av_err2str doesn't work in C++, so we have a wrapper for it here */
#ifdef __cplusplus
extern "C" {
#endif

const char* averr(int err);

#ifdef __cplusplus
}
#endif
wf-recorder-0.2/src/frame-writer.cpp000066400000000000000000000436001357101353700175010ustar00rootroot00000000000000
// Adapted from https://stackoverflow.com/questions/34511312/how-to-encode-a-video-from-several-images-generated-in-a-c-program-without-wri
// (Later) adapted from https://github.com/apc-llc/moviemaker-cpp
//
// Audio encoding - thanks to wlstream, a lot of the code/ideas are taken from there

#include <iostream>
#include "frame-writer.hpp"
#include <vector>
#include <cstring>
#include "averr.h"

#define FPS 60
#define AUDIO_RATE 44100

class FFmpegInitialize
{
public :

    FFmpegInitialize()
    {
        // Loads the whole database of available codecs and formats.
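        // Note: av_register_all() was deprecated in FFmpeg 4.0 and became a
        // no-op in later releases; it is kept here for compatibility with
        // older libavformat versions.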
        av_register_all();
    }
};

static FFmpegInitialize ffmpegInitialize;

void FrameWriter::init_hw_accel()
{
    int ret = av_hwdevice_ctx_create(&this->hw_device_context,
        av_hwdevice_find_type_by_name("vaapi"), params.hw_device.c_str(), NULL, 0);

    if (ret != 0)
    {
        std::cerr << "Failed to create hw encoding device " << params.hw_device << ": " << averr(ret) << std::endl;
        std::exit(-1);
    }

    this->hw_frame_context = av_hwframe_ctx_alloc(hw_device_context);
    if (!this->hw_frame_context)
    {
        std::cerr << "Failed to initialize hw frame context" << std::endl;
        av_buffer_unref(&hw_device_context);
        std::exit(-1);
    }

    AVHWFramesConstraints *cst;
    cst = av_hwdevice_get_hwframe_constraints(hw_device_context, NULL);
    if (!cst)
    {
        std::cerr << "Failed to get hwframe constraints" << std::endl;
        av_buffer_unref(&hw_device_context);
        std::exit(-1);
    }

    AVHWFramesContext *ctx = (AVHWFramesContext*)this->hw_frame_context->data;
    ctx->width = params.width;
    ctx->height = params.height;
    ctx->format = cst->valid_hw_formats[0];
    ctx->sw_format = AV_PIX_FMT_NV12;

    if ((ret = av_hwframe_ctx_init(hw_frame_context)))
    {
        std::cerr << "Failed to initialize hwframe context: " << averr(ret) << std::endl;
        av_buffer_unref(&hw_device_context);
        av_buffer_unref(&hw_frame_context);
        std::exit(-1);
    }
}

void FrameWriter::load_codec_options(AVDictionary **dict)
{
    static const std::map<std::string, std::string> default_x264_options = {
        {"tune", "zerolatency"},
        {"preset", "ultrafast"},
        {"crf", "20"},
    };

    if (params.codec.find("libx264") != std::string::npos ||
        params.codec.find("libx265") != std::string::npos)
    {
        for (const auto& param : default_x264_options)
        {
            if (!params.codec_options.count(param.first))
                params.codec_options[param.first] = param.second;
        }
    }

    for (auto& opt : params.codec_options)
    {
        std::cout << "Setting codec option: " << opt.first << "=" << opt.second << std::endl;
        av_dict_set(dict, opt.first.c_str(), opt.second.c_str(), 0);
    }
}

bool is_fmt_supported(AVPixelFormat fmt, const AVPixelFormat *supported)
{
    for (int i = 0; supported[i] != AV_PIX_FMT_NONE; i++)
    {
        if (supported[i] == fmt)
            return true;
    }

    return false;
}

AVPixelFormat FrameWriter::get_input_format()
{
    return params.format == INPUT_FORMAT_BGR0 ?
AV_PIX_FMT_BGR0 : AV_PIX_FMT_RGB0; } AVPixelFormat FrameWriter::lookup_pixel_format(std::string pix_fmt) { AVPixelFormat fmt = av_get_pix_fmt(pix_fmt.c_str()); if (fmt != AV_PIX_FMT_NONE) return fmt; std::cerr << "Failed to find the pixel format: " << pix_fmt << std::endl; std::exit(-1); } AVPixelFormat FrameWriter::choose_sw_format(AVCodec *codec) { auto in_fmt = get_input_format(); if (!params.pix_fmt.empty()) return lookup_pixel_format(params.pix_fmt); /* For codecs such as rawvideo no supported formats are listed */ if (!codec->pix_fmts) return in_fmt; /* If the codec supports getting the appropriate RGB format * directly, we want to use it since we don't have to convert data */ if (is_fmt_supported(in_fmt, codec->pix_fmts)) return in_fmt; /* Otherwise, try to use the already tested YUV420p */ if (is_fmt_supported(AV_PIX_FMT_YUV420P, codec->pix_fmts)) return AV_PIX_FMT_YUV420P; /* Lastly, use the first supported format */ return codec->pix_fmts[0]; } void FrameWriter::init_video_stream() { AVDictionary *options = NULL; load_codec_options(&options); AVCodec* codec = avcodec_find_encoder_by_name(params.codec.c_str()); if (!codec) { std::cerr << "Failed to find the given codec: " << params.codec << std::endl; std::exit(-1); } videoStream = avformat_new_stream(fmtCtx, codec); if (!videoStream) { std::cerr << "Failed to open stream" << std::endl; std::exit(-1); } videoCodecCtx = videoStream->codec; videoCodecCtx->width = params.width; videoCodecCtx->height = params.height; videoCodecCtx->time_base = (AVRational){ 1, FPS }; if (params.bframes != -1) videoCodecCtx->max_b_frames = params.bframes; if (params.codec.find("vaapi") != std::string::npos) { videoCodecCtx->pix_fmt = AV_PIX_FMT_VAAPI; init_hw_accel(); videoCodecCtx->hw_frames_ctx = av_buffer_ref(hw_frame_context); if (params.force_yuv) init_sws(AV_PIX_FMT_YUV420P); } else { videoCodecCtx->pix_fmt = choose_sw_format(codec); std::cout << "Choosing pixel format " << av_get_pix_fmt_name(videoCodecCtx->pix_fmt) << std::endl; init_sws(videoCodecCtx->pix_fmt); } if (fmtCtx->oformat->flags & AVFMT_GLOBALHEADER) videoCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; int ret; char err[256]; if ((ret = avcodec_open2(videoCodecCtx, codec, &options)) < 0) { av_strerror(ret, err, 256); std::cerr << "avcodec_open2 failed: " << err << std::endl; std::exit(-1); } av_dict_free(&options); } static uint64_t get_codec_channel_layout(AVCodec *codec) { int i = 0; if (!codec->channel_layouts) return AV_CH_LAYOUT_STEREO; while (1) { if (!codec->channel_layouts[i]) break; if (codec->channel_layouts[i] == AV_CH_LAYOUT_STEREO) return codec->channel_layouts[i]; i++; } return codec->channel_layouts[0]; } static enum AVSampleFormat get_codec_sample_fmt(AVCodec *codec) { int i = 0; if (!codec->sample_fmts) return AV_SAMPLE_FMT_S16; while (1) { if (codec->sample_fmts[i] == -1) break; if (av_get_bytes_per_sample(codec->sample_fmts[i]) >= 2) return codec->sample_fmts[i]; i++; } return codec->sample_fmts[0]; } void FrameWriter::init_audio_stream() { AVCodec* codec = avcodec_find_encoder_by_name("aac"); if (!codec) { std::cerr << "Failed to find the aac codec" << std::endl; std::exit(-1); } audioStream = avformat_new_stream(fmtCtx, codec); if (!audioStream) { std::cerr << "Failed to open audio stream" << std::endl; std::exit(-1); } audioCodecCtx = audioStream->codec; audioCodecCtx->bit_rate = lrintf(128000.0f); audioCodecCtx->sample_fmt = get_codec_sample_fmt(codec); audioCodecCtx->channel_layout = get_codec_channel_layout(codec); audioCodecCtx->sample_rate = AUDIO_RATE; 
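    /* conv_audio_pts() below produces PTS values in milliseconds (DST_RATE),
     * which is why the audio codec time base is set to 1/1000. */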
audioCodecCtx->time_base = (AVRational) { 1, 1000 }; audioCodecCtx->channels = av_get_channel_layout_nb_channels(audioCodecCtx->channel_layout); if (fmtCtx->oformat->flags & AVFMT_GLOBALHEADER) audioCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; int err; if ((err = avcodec_open2(audioCodecCtx, codec, NULL)) < 0) { std::cerr << "(audio) avcodec_open2 failed " << err << std::endl; std::exit(-1); } swrCtx = swr_alloc(); if (!swrCtx) { std::cerr << "Failed to allocate swr context" << std::endl; std::exit(-1); } av_opt_set_int(swrCtx, "in_sample_rate", AUDIO_RATE, 0); av_opt_set_int(swrCtx, "out_sample_rate", audioCodecCtx->sample_rate, 0); av_opt_set_sample_fmt(swrCtx, "in_sample_fmt", AV_SAMPLE_FMT_FLT, 0); av_opt_set_sample_fmt(swrCtx, "out_sample_fmt", audioCodecCtx->sample_fmt, 0); av_opt_set_channel_layout(swrCtx, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_channel_layout(swrCtx, "out_channel_layout", audioCodecCtx->channel_layout, 0); if (swr_init(swrCtx)) { std::cerr << "Failed to initialize swr" << std::endl; std::exit(-1); } } void FrameWriter::init_codecs() { init_video_stream(); if (params.enable_audio) init_audio_stream(); av_dump_format(fmtCtx, 0, params.file.c_str(), 1); if (avio_open(&fmtCtx->pb, params.file.c_str(), AVIO_FLAG_WRITE)) { std::cerr << "avio_open failed" << std::endl; std::exit(-1); } AVDictionary *dummy = NULL; char err[256]; int ret; if ((ret = avformat_write_header(fmtCtx, &dummy)) != 0) { std::cerr << "Failed to write file header" << std::endl; av_strerror(ret, err, 256); std::cerr << err << std::endl; std::exit(-1); } av_dict_free(&dummy); } void FrameWriter::init_sws(AVPixelFormat format) { swsCtx = sws_getContext(params.width, params.height, get_input_format(), params.width, params.height, format, SWS_FAST_BILINEAR, NULL, NULL, NULL); if (!swsCtx) { std::cerr << "Failed to create sws context" << std::endl; std::exit(-1); } } static const char* determine_output_format(const FrameWriterParams& params) { if (!params.muxer.empty()) return params.muxer.c_str(); if (params.file.find("rtmp") == 0) return "flv"; if (params.file.find("udp") == 0) return "mpegts"; return NULL; } FrameWriter::FrameWriter(const FrameWriterParams& _params) : params(_params) { if (params.enable_ffmpeg_debug_output) av_log_set_level(AV_LOG_DEBUG); #ifdef HAVE_LIBAVDEVICE avdevice_register_all(); #endif // Preparing the data concerning the format and codec, // in order to write properly the header, frame data and end of file. this->outputFmt = av_guess_format(NULL, params.file.c_str(), NULL); auto streamFormat = determine_output_format(params); auto context_ret = avformat_alloc_output_context2(&this->fmtCtx, NULL, streamFormat, params.file.c_str()); if (context_ret < 0) { std::cerr << "Failed to allocate output context" << std::endl; std::exit(-1); } #ifndef HAVE_OPENCL if (params.opencl) std::cerr << "This version of wf-recorder was built without OpenCL support. Ignoring OpenCL option." << std::endl; #endif init_codecs(); encoder_frame = av_frame_alloc(); if (hw_device_context) { encoder_frame->format = params.force_yuv ? 
AV_PIX_FMT_YUV420P : get_input_format(); } else { encoder_frame->format = videoCodecCtx->pix_fmt; } encoder_frame->width = params.width; encoder_frame->height = params.height; if (av_frame_get_buffer(encoder_frame, 1)) { std::cerr << "Failed to allocate frame buffer" << std::endl; std::exit(-1); } if (hw_device_context) { hw_frame = av_frame_alloc(); AVHWFramesContext *frctx = (AVHWFramesContext*)hw_frame_context->data; hw_frame->format = frctx->format; hw_frame->hw_frames_ctx = av_buffer_ref(hw_frame_context); hw_frame->width = params.width; hw_frame->height = params.height; if (av_hwframe_get_buffer(hw_frame_context, hw_frame, 0)) { std::cerr << "failed to hw frame buffer" << std::endl; std::exit(-1); } } } void FrameWriter::convert_pixels_to_yuv(const uint8_t *pixels, const uint8_t *formatted_pixels, int stride[]) { bool y_invert = (pixels != formatted_pixels); bool converted_with_opencl = false; #ifdef HAVE_OPENCL if (params.opencl && params.force_yuv) { int r = opencl->do_frame(pixels, encoder_frame, get_input_format(), y_invert); converted_with_opencl = (r == 0); } #else /* Silence compiler warning when opencl is disabled */ (void)(y_invert); #endif if (!converted_with_opencl) { sws_scale(swsCtx, &formatted_pixels, stride, 0, params.height, encoder_frame->data, encoder_frame->linesize); } } void FrameWriter::encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt) { int ret; /* send the frame to the encoder */ ret = avcodec_send_frame(enc_ctx, frame); if (ret < 0) { fprintf(stderr, "error sending a frame for encoding\n"); return; } while (ret >= 0) { ret = avcodec_receive_packet(enc_ctx, pkt); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { return; } if (ret < 0) { fprintf(stderr, "error during encoding\n"); return; } finish_frame(enc_ctx, *pkt); } } void FrameWriter::add_frame(const uint8_t* pixels, int64_t usec, bool y_invert) { /* Calculate data after y-inversion */ int stride[] = {int(4 * params.width)}; const uint8_t *formatted_pixels = pixels; if (y_invert) { formatted_pixels += stride[0] * (params.height - 1); stride[0] *= -1; } AVFrame **output_frame; AVBufferRef *saved_buf0 = NULL; if (hw_device_context) { if (params.force_yuv) { convert_pixels_to_yuv(pixels, formatted_pixels, stride); } else { encoder_frame->data[0] = (uint8_t*) formatted_pixels; encoder_frame->linesize[0] = stride[0]; } if (av_hwframe_transfer_data(hw_frame, encoder_frame, 0)) { std::cerr << "Failed to upload data to the gpu!" 
<< std::endl; return; } output_frame = &hw_frame; } else if(get_input_format() == videoCodecCtx->pix_fmt) { output_frame = &encoder_frame; encoder_frame->data[0] = (uint8_t*)formatted_pixels; encoder_frame->linesize[0] = stride[0]; /* Force ffmpeg to create a copy of the frame, if the codec needs it */ saved_buf0 = encoder_frame->buf[0]; encoder_frame->buf[0] = NULL; } else { convert_pixels_to_yuv(pixels, formatted_pixels, stride); /* Force ffmpeg to create a copy of the frame, if the codec needs it */ saved_buf0 = encoder_frame->buf[0]; encoder_frame->buf[0] = NULL; output_frame = &encoder_frame; } (*output_frame)->pts = usec; AVPacket pkt; av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; encode(videoCodecCtx, *output_frame, &pkt); /* Restore frame buffer, so that it can be properly freed in the end */ if (saved_buf0) encoder_frame->buf[0] = saved_buf0; } #define SRC_RATE 1e6 #define DST_RATE 1e3 static int64_t conv_audio_pts(SwrContext *ctx, int64_t in) { int64_t d = (int64_t) AUDIO_RATE * AUDIO_RATE; /* Convert from audio_src_tb to 1/(src_samplerate * dst_samplerate) */ in = av_rescale_rnd(in, d, SRC_RATE, AV_ROUND_NEAR_INF); /* In units of 1/(src_samplerate * dst_samplerate) */ in = swr_next_pts(ctx, in); /* Convert from 1/(src_samplerate * dst_samplerate) to audio_dst_tb */ return av_rescale_rnd(in, DST_RATE, d, AV_ROUND_NEAR_INF); } void FrameWriter::send_audio_pkt(AVFrame *frame) { AVPacket pkt; av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; encode(audioCodecCtx, frame, &pkt); } size_t FrameWriter::get_audio_buffer_size() { return audioCodecCtx->frame_size << 3; } void FrameWriter::add_audio(const void* buffer) { AVFrame *inputf = av_frame_alloc(); inputf->sample_rate = AUDIO_RATE; inputf->format = AV_SAMPLE_FMT_FLT; inputf->channel_layout = AV_CH_LAYOUT_STEREO; inputf->nb_samples = audioCodecCtx->frame_size; av_frame_get_buffer(inputf, 0); memcpy(inputf->data[0], buffer, get_audio_buffer_size()); AVFrame *outputf = av_frame_alloc(); outputf->format = audioCodecCtx->sample_fmt; outputf->sample_rate = audioCodecCtx->sample_rate; outputf->channel_layout = audioCodecCtx->channel_layout; outputf->nb_samples = audioCodecCtx->frame_size; av_frame_get_buffer(outputf, 0); outputf->pts = conv_audio_pts(swrCtx, INT64_MIN); swr_convert_frame(swrCtx, outputf, inputf); send_audio_pkt(outputf); av_frame_free(&inputf); av_frame_free(&outputf); } void FrameWriter::finish_frame(AVCodecContext *enc_ctx, AVPacket& pkt) { static std::mutex fmt_mutex, pending_mutex; if (enc_ctx == videoCodecCtx) { av_packet_rescale_ts(&pkt, (AVRational){ 1, 1000000 }, videoStream->time_base); pkt.stream_index = videoStream->index; } else { av_packet_rescale_ts(&pkt, (AVRational){ 1, 1000 }, audioStream->time_base); pkt.stream_index = audioStream->index; } /* We use two locks to ensure that if WLOG the audio thread is waiting for * the video one, when the video becomes ready the audio thread will be the * next one to obtain the lock */ if (params.enable_audio) { pending_mutex.lock(); fmt_mutex.lock(); pending_mutex.unlock(); } av_interleaved_write_frame(fmtCtx, &pkt); av_packet_unref(&pkt); if (params.enable_audio) fmt_mutex.unlock(); } FrameWriter::~FrameWriter() { // Writing the delayed frames: AVPacket pkt; av_init_packet(&pkt); encode(videoCodecCtx, NULL, &pkt); if (params.enable_audio) { encode(audioCodecCtx, NULL, &pkt); } // Writing the end of the file. av_write_trailer(fmtCtx); // Closing the file. 
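    // Muxers flagged AVFMT_NOFILE manage their own I/O, so only close the
    // AVIOContext we opened ourselves in init_codecs().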
    if (outputFmt && (!(outputFmt->flags & AVFMT_NOFILE)))
        avio_closep(&fmtCtx->pb);

    avcodec_close(videoStream->codec);

    // Freeing all the allocated memory:
    sws_freeContext(swsCtx);
    av_frame_free(&encoder_frame);

    if (params.enable_audio)
        avcodec_close(audioStream->codec);

    // TODO: free all the hw accel
    avformat_free_context(fmtCtx);
}
wf-recorder-0.2/src/frame-writer.hpp000066400000000000000000000064041357101353700175070ustar00rootroot00000000000000
// Adapted from https://stackoverflow.com/questions/34511312/how-to-encode-a-video-from-several-images-generated-in-a-c-program-without-wri
// (Later) adapted from https://github.com/apc-llc/moviemaker-cpp

#ifndef FRAME_WRITER
#define FRAME_WRITER

#include <string>
#include <map>
#include <cstdint>
#include <cstddef>
#include "config.h"

#define AUDIO_RATE 44100

extern "C"
{
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavformat/avio.h>
#ifdef HAVE_LIBAVDEVICE
    #include <libavdevice/avdevice.h>
#endif
    #include <libavutil/hwcontext.h>
    #include <libavutil/opt.h>
    #include <libavutil/pixdesc.h>
    #include <libswscale/swscale.h>
    #include <libswresample/swresample.h>
}

#include "config.h"

#ifdef HAVE_OPENCL
#include <memory>
#include "opencl.hpp"
class OpenCL;
#endif

enum InputFormat
{
    INPUT_FORMAT_BGR0,
    INPUT_FORMAT_RGB0
};

struct FrameWriterParams
{
    std::string file;
    int width;
    int height;
    InputFormat format;

    std::string codec;
    std::string muxer;
    std::string pix_fmt;
    std::string hw_device; // used only if codec contains vaapi
    std::map<std::string, std::string> codec_options;

    int64_t audio_sync_offset;

    bool enable_audio;
    bool enable_ffmpeg_debug_output;

    bool opencl;
    bool force_yuv;
    int opencl_device;

    int bframes;
};

class FrameWriter
{
    FrameWriterParams params;
    void load_codec_options(AVDictionary **dict);

    SwsContext* swsCtx;
    AVOutputFormat* outputFmt;
    AVStream* videoStream;
    AVCodecContext* videoCodecCtx;
    AVFormatContext* fmtCtx;

    AVBufferRef *hw_device_context = NULL;
    AVBufferRef *hw_frame_context = NULL;

    AVPixelFormat lookup_pixel_format(std::string pix_fmt);
    AVPixelFormat choose_sw_format(AVCodec *codec);
    AVPixelFormat get_input_format();
    void init_hw_accel();
    void init_sws(AVPixelFormat format);
    void init_codecs();
    void init_video_stream();

    AVFrame *encoder_frame = NULL;
    AVFrame *hw_frame = NULL;

    /**
     * Convert the given pixels to YUV and store in encoder_frame.
     * Calls OpenCL if it is enabled.
     *
     * @param formatted_pixels contains the same data as pixels but y-inverted
     * if the input format requires y-inversion.
     */
    void convert_pixels_to_yuv(const uint8_t *pixels,
        const uint8_t *formatted_pixels, int stride[]);

    void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt);

    SwrContext *swrCtx;
    AVStream *audioStream;
    AVCodecContext *audioCodecCtx;
    void init_swr();
    void init_audio_stream();
    void send_audio_pkt(AVFrame *frame);

    void finish_frame(AVCodecContext *enc_ctx, AVPacket& pkt);

public :
    FrameWriter(const FrameWriterParams& params);
    void add_frame(const uint8_t* pixels, int64_t usec, bool y_invert);

    /* Buffer must have size get_audio_buffer_size() */
    void add_audio(const void* buffer);
    size_t get_audio_buffer_size();

#ifdef HAVE_OPENCL
    std::unique_ptr<OpenCL> opencl;
#endif
    ~FrameWriter();
};

#include <mutex>
#include <memory>
#include <atomic>

extern std::mutex frame_writer_mutex, frame_writer_pending_mutex;
extern std::unique_ptr<FrameWriter> frame_writer;

extern std::atomic<bool> exit_main_loop;

#endif // FRAME_WRITER
wf-recorder-0.2/src/main.cpp000066400000000000000000000623251357101353700160260ustar00rootroot00000000000000
#define _XOPEN_SOURCE 700
#define _POSIX_C_SOURCE 199309L

#include <atomic>
#include <chrono>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <ctime>
#include <iostream>
#include <string>
#include <thread>
#include <vector>
#include <getopt.h>
#include <unistd.h>
#include <sys/mman.h>
#include <wayland-client.h>

#include "frame-writer.hpp"
#include "pulse.hpp"
#include "wlr-screencopy-unstable-v1-client-protocol.h"
#include "xdg-output-unstable-v1-client-protocol.h"
#include "config.h"

#ifdef HAVE_OPENCL
std::unique_ptr<OpenCL> opencl;
#endif

std::mutex frame_writer_mutex, frame_writer_pending_mutex;
std::unique_ptr<FrameWriter> frame_writer;

static struct wl_shm *shm = NULL;
static struct zxdg_output_manager_v1 *xdg_output_manager = NULL;
static struct zwlr_screencopy_manager_v1 *screencopy_manager = NULL;

struct wf_recorder_output
{
    wl_output *output;
    zxdg_output_v1 *zxdg_output;
    std::string name, description;
    int32_t x, y, width, height;
};

std::vector<wf_recorder_output> available_outputs;

static void handle_xdg_output_logical_position(void*,
    zxdg_output_v1* zxdg_output, int32_t x, int32_t y)
{
    for (auto& wo : available_outputs)
    {
        if (wo.zxdg_output == zxdg_output)
        {
            wo.x = x;
            wo.y = y;
        }
    }
}

static void handle_xdg_output_logical_size(void*,
    zxdg_output_v1* zxdg_output, int32_t w, int32_t h)
{
    for (auto& wo : available_outputs)
    {
        if (wo.zxdg_output == zxdg_output)
        {
            wo.width = w;
            wo.height = h;
        }
    }
}

static void handle_xdg_output_done(void*, zxdg_output_v1*) { }

static void handle_xdg_output_name(void*, zxdg_output_v1 *zxdg_output_v1,
    const char *name)
{
    for (auto& wo : available_outputs)
    {
        if (wo.zxdg_output == zxdg_output_v1)
            wo.name = name;
    }
}

static void handle_xdg_output_description(void*, zxdg_output_v1 *zxdg_output_v1,
    const char *description)
{
    for (auto& wo : available_outputs)
    {
        if (wo.zxdg_output == zxdg_output_v1)
            wo.description = description;
    }
}

const zxdg_output_v1_listener xdg_output_implementation = {
    .logical_position = handle_xdg_output_logical_position,
    .logical_size = handle_xdg_output_logical_size,
    .done = handle_xdg_output_done,
    .name = handle_xdg_output_name,
    .description = handle_xdg_output_description
};

struct wf_buffer
{
    struct wl_buffer *wl_buffer;
    void *data;
    enum wl_shm_format format;
    int width, height, stride;
    bool y_invert;

    timespec presented;
    uint64_t base_usec; // 64-bit, so long recordings don't overflow the timestamp

    std::atomic<bool> released{true}; // if the buffer can be used to store new pending frames
    std::atomic<bool> available{false}; // if the buffer can be used to feed the encoder
};

std::atomic<bool> exit_main_loop{false};

#define MAX_BUFFERS 16
wf_buffer buffers[MAX_BUFFERS];
size_t active_buffer = 0;

bool buffer_copy_done = false;

static int backingfile(off_t size)
{
    char name[] = "/tmp/wf-recorder-shared-XXXXXX";
    int fd = mkstemp(name);
    if (fd < 0)
    {
        return -1;
    }

    int ret;
    // Retry on EINTR: ftruncate() reports interruption by returning -1 with
    // errno set to EINTR, not by returning EINTR itself.
    while ((ret = ftruncate(fd, size)) == -1 && errno == EINTR)
    {
        // No-op
    }
    if (ret < 0)
    {
        close(fd);
        return -1;
    }

    unlink(name);
    return fd;
}

static struct wl_buffer *create_shm_buffer(uint32_t fmt,
    int width, int height, int stride, void **data_out)
{
    int size = stride * height;

    int fd = backingfile(size);
    if (fd < 0)
    {
        fprintf(stderr, "creating a buffer file for %d B failed: %m\n", size);
        return NULL;
    }

    void *data = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (data == MAP_FAILED)
    {
        fprintf(stderr, "mmap failed: %m\n");
        close(fd);
        return NULL;
    }

    struct wl_shm_pool *pool = wl_shm_create_pool(shm, fd, size);
    close(fd);

    struct wl_buffer *buffer = wl_shm_pool_create_buffer(pool, 0,
        width, height, stride, fmt);
    wl_shm_pool_destroy(pool);

    *data_out = data;
    return buffer;
}

static void frame_handle_buffer(void *, struct zwlr_screencopy_frame_v1 *frame,
    uint32_t format, uint32_t width, uint32_t height, uint32_t stride)
{
    auto& buffer = buffers[active_buffer];

    buffer.format = (wl_shm_format)format;
    buffer.width = width;
    buffer.height = height;
    buffer.stride = stride;

    if (!buffer.wl_buffer)
    {
        buffer.wl_buffer =
            create_shm_buffer(format, width, height, stride, &buffer.data);
    }

    if (buffer.wl_buffer == NULL)
    {
        fprintf(stderr, "failed to create buffer\n");
        exit(EXIT_FAILURE);
    }

    zwlr_screencopy_frame_v1_copy(frame, buffer.wl_buffer);
}

static void frame_handle_flags(void*, struct zwlr_screencopy_frame_v1 *, uint32_t flags)
{
    buffers[active_buffer].y_invert = flags & ZWLR_SCREENCOPY_FRAME_V1_FLAGS_Y_INVERT;
}

static void frame_handle_ready(void *, struct zwlr_screencopy_frame_v1 *,
    uint32_t tv_sec_hi, uint32_t tv_sec_low, uint32_t tv_nsec)
{
    auto& buffer = buffers[active_buffer];
    buffer_copy_done = true;
    buffer.presented.tv_sec = ((1ll * tv_sec_hi) << 32ll) | tv_sec_low;
    buffer.presented.tv_nsec = tv_nsec;
}

static void frame_handle_failed(void *, struct zwlr_screencopy_frame_v1 *)
{
    fprintf(stderr, "failed to copy frame\n");
    exit_main_loop = true;
}

static const struct zwlr_screencopy_frame_v1_listener frame_listener = {
    .buffer = frame_handle_buffer,
    .flags = frame_handle_flags,
    .ready = frame_handle_ready,
    .failed = frame_handle_failed,
};

static void handle_global(void*, struct wl_registry *registry,
    uint32_t name, const char *interface, uint32_t)
{
    if (strcmp(interface, wl_output_interface.name) == 0)
    {
        auto output = (wl_output*)wl_registry_bind(registry, name,
            &wl_output_interface, 1);

        wf_recorder_output wro;
        wro.output = output;
        available_outputs.push_back(wro);
    }
    else if (strcmp(interface, wl_shm_interface.name) == 0)
    {
        shm = (wl_shm*) wl_registry_bind(registry, name, &wl_shm_interface, 1);
    }
    else if (strcmp(interface, zwlr_screencopy_manager_v1_interface.name) == 0)
    {
        screencopy_manager = (zwlr_screencopy_manager_v1*) wl_registry_bind(registry,
            name, &zwlr_screencopy_manager_v1_interface, 1);
    }
    else if (strcmp(interface, zxdg_output_manager_v1_interface.name) == 0)
    {
        xdg_output_manager = (zxdg_output_manager_v1*) wl_registry_bind(registry,
            name, &zxdg_output_manager_v1_interface, 2); // version 2 for name & description, if available
    }
}

static void handle_global_remove(void*, struct wl_registry *, uint32_t)
{
    // Who cares?
}

static const struct wl_registry_listener registry_listener = {
    .global = handle_global,
    .global_remove = handle_global_remove,
};

static uint64_t timespec_to_usec (const timespec& ts)
{
    return ts.tv_sec * 1000000ll + 1ll * ts.tv_nsec / 1000ll;
}

static int next_frame(int frame)
{
    return (frame + 1) % MAX_BUFFERS;
}

static InputFormat get_input_format(wf_buffer& buffer)
{
    if (buffer.format == WL_SHM_FORMAT_ARGB8888)
        return INPUT_FORMAT_BGR0;
    if (buffer.format == WL_SHM_FORMAT_XRGB8888)
        return INPUT_FORMAT_BGR0;
    if (buffer.format == WL_SHM_FORMAT_XBGR8888)
        return INPUT_FORMAT_RGB0;
    if (buffer.format == WL_SHM_FORMAT_ABGR8888)
        return INPUT_FORMAT_RGB0;

    fprintf(stderr, "Unsupported buffer format %d, exiting.\n", buffer.format);
    std::exit(EXIT_FAILURE);
}

static void write_loop(FrameWriterParams params, PulseReaderParams pulseParams)
{
    /* Ignore SIGINT, main loop is responsible for the exit_main_loop signal */
    sigset_t sigset;
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGINT);
    pthread_sigmask(SIG_BLOCK, &sigset, NULL);

    int last_encoded_frame = 0;
    std::unique_ptr<PulseReader> pr;

    while(!exit_main_loop)
    {
        // wait for frame to become available
        while(buffers[last_encoded_frame].available != true) {
            std::this_thread::sleep_for(std::chrono::microseconds(1000));
        }

        auto& buffer = buffers[last_encoded_frame];

        frame_writer_pending_mutex.lock();
        frame_writer_mutex.lock();
        frame_writer_pending_mutex.unlock();

        if (!frame_writer)
        {
            /* This is the first time buffer attributes are available */
            params.format = get_input_format(buffer);
            params.width = buffer.width;
            params.height = buffer.height;
            frame_writer = std::unique_ptr<FrameWriter>(new FrameWriter(params));
#ifdef HAVE_OPENCL
            if (params.opencl && params.force_yuv)
            {
                frame_writer->opencl = std::move(opencl);
                frame_writer->opencl->init(params.width, params.height);
            }
#endif
            if (params.enable_audio)
            {
                pulseParams.audio_frame_size = frame_writer->get_audio_buffer_size();
                pr = std::unique_ptr<PulseReader>(new PulseReader(pulseParams));
                pr->start();
            }
        }

        frame_writer->add_frame((unsigned char*)buffer.data,
            buffer.base_usec, buffer.y_invert);
        frame_writer_mutex.unlock();

        buffer.available = false;
        buffer.released = true;

        last_encoded_frame = next_frame(last_encoded_frame);
    }

    std::lock_guard<std::mutex> lock(frame_writer_mutex);
    /* Free the PulseReader connection first. This way it'd flush any remaining
     * frames to the FrameWriter */
    pr = nullptr;
    frame_writer = nullptr;
}

void handle_sigint(int)
{
    exit_main_loop = true;
}

static void check_has_protos()
{
    if (shm == NULL)
    {
        fprintf(stderr, "compositor is missing wl_shm\n");
        exit(EXIT_FAILURE);
    }

    if (screencopy_manager == NULL)
    {
        fprintf(stderr, "compositor doesn't support wlr-screencopy-unstable-v1\n");
        exit(EXIT_FAILURE);
    }

    if (xdg_output_manager == NULL)
    {
        fprintf(stderr, "compositor doesn't support xdg-output-unstable-v1\n");
        exit(EXIT_FAILURE);
    }

    if (available_outputs.empty())
    {
        fprintf(stderr, "no outputs available\n");
        exit(EXIT_FAILURE);
    }
}

wl_display *display = NULL;
static void sync_wayland()
{
    wl_display_dispatch(display);
    wl_display_roundtrip(display);
}

static void load_output_info()
{
    for (auto& wo : available_outputs)
    {
        wo.zxdg_output = zxdg_output_manager_v1_get_xdg_output(
            xdg_output_manager, wo.output);
        zxdg_output_v1_add_listener(wo.zxdg_output,
            &xdg_output_implementation, NULL);
    }

    sync_wayland();
}

static wf_recorder_output* choose_interactive()
{
    fprintf(stdout, "Please select an output from the list to capture (enter output no.):\n");

    int i = 1;
    for (auto& wo : available_outputs)
    {
        printf("%d. 
Name: %s Description: %s\n", i++, wo.name.c_str(), wo.description.c_str()); } printf("Enter output no.:"); fflush(stdout); int choice; if (scanf("%d", &choice) != 1 || choice > (int)available_outputs.size() || choice <= 0) return nullptr; return &available_outputs[choice - 1]; } struct capture_region { int32_t x, y; int32_t width, height; capture_region() : capture_region(0, 0, 0, 0) {} capture_region(int32_t _x, int32_t _y, int32_t _width, int32_t _height) : x(_x), y(_y), width(_width), height(_height) { } /* Make sure that dimension is even, while trying to keep the segment * [coordinate, coordinate+dimension) as good as possible (i.e not going * out of the monitor) */ void make_even(int32_t& coordinate, int32_t& dimension) { if (dimension % 2 == 0) return; /* We need to increase dimension to make it an even number */ ++dimension; /* Try to decrease coordinate. If coordinate > 0, we can always lower it * by 1 pixel and stay inside the screen. */ coordinate = std::max(coordinate - 1, 0); } void set_from_string(std::string geometry_string) { if (sscanf(geometry_string.c_str(), "%d,%d %dx%d", &x, &y, &width, &height) != 4) { fprintf(stderr, "Bad geometry: %s, capturing whole output instead.\n", geometry_string.c_str()); x = y = width = height = 0; return; } /* ffmpeg requires even width and height */ make_even(x, width); make_even(y, height); printf("Adjusted geometry: %d,%d %dx%d\n", x, y, width, height); } bool is_selected() { return width > 0 && height > 0; } bool contained_in(const capture_region& output) const { return output.x <= x && output.x + output.width >= x + width && output.y <= y && output.y + output.height >= y + height; } }; static wf_recorder_output* detect_output_from_region(const capture_region& region) { for (auto& wo : available_outputs) { const capture_region output_region{wo.x, wo.y, wo.width, wo.height}; if (region.contained_in(output_region)) { std::cout << "Detected output based on geometry: " << wo.name << std::endl; return &wo; } } std::cerr << "Failed to detect output based on geometry (is your geometry overlapping outputs?)" << std::endl; return nullptr; } static void help() { printf(R"(Usage: wf-recorder [OPTION]... -f [FILE]... Screen recording of wlroots-based compositors With no FILE, start recording the current screen. -a, --audio [DEVICE] Starts recording the screen with audio. [DEVICE] argument is optional. In case you want to specify the pulseaudio device which will capture the audio, you can run this command with the name of that device. You can find your device by running: pactl list sinks | grep Name -c, --codec Specifies the codec of the video. Supports GIF output also. To modify codec parameters, use -p = -d, --device Selects the device to use when encoding the video Some drivers report support for rgb0 data for vaapi input but really only support yuv. -f .ext By using the -f option the output file will have the name : filename.ext and the file format will be determined by provided while extension .ext . If the extension .ext provided is not recognized by your FFmpeg muxers, the command will fail. You can check the muxers that your FFmpeg installation supports by running : ffmpeg -muxers -m, --muxer Set the output format to a specific muxer instead of detecting it from the filename. -x, --pixel-format Set the output pixel format. These can be found by running: *ffmpeg -pix_fmts* -g, --geometry Selects a specific part of the screen. -h, --help Prints this help screen. -l, --log Generates a log on the current terminal. Debug purposes. 
-o, --output Specify the output where the video is to be recorded. -p, --codec-param Change the codec parameters. -p =)"); #ifdef HAVE_OPENCL printf(R"( -e, --opencl Use the -e[#] or --opencl[=#] in conjunction with -t or --force-yuv option to use opencl for gpu accelerated conversion of data to yuv. # is one of the devices listed when running without specifying #.)"); #endif printf(R"( -t, --force-yuv Use the -t or --force-yuv option to force conversion of the data to yuv format, before sending it to the gpu. -b, --bframes This option is used to set the maximum number of b-frames to be used. If b-frames are not supported by your hardware, set this to 0.)" "\n\n" R"( Examples: Video Only: - wf-recorder Records the video. Use Ctrl+C to stop recording. The video file will be stored as recording.mp4 in the current working directory. - wf-recorder -f .ext Records the video. Use Ctrl+C to stop recording. The video file will be stored as .ext in the current working directory. Video and Audio: - wf-recorder -a Records the audio. Use Ctrl+C to stop recording. The video file will be stored as recording.mp4 in the current working directory. - wf-recorder -a -f .ext Records the audio. Use Ctrl+C to stop recording. The video file will be stored as .ext in the current working directory. )" "\n"); exit(EXIT_SUCCESS); } int main(int argc, char *argv[]) { FrameWriterParams params; params.file = "recording.mp4"; params.codec = DEFAULT_CODEC; params.enable_ffmpeg_debug_output = false; params.enable_audio = false; params.force_yuv = false; params.opencl = false; params.opencl_device = -1; params.bframes = -1; PulseReaderParams pulseParams; constexpr const char* default_cmdline_output = "interactive"; std::string cmdline_output = default_cmdline_output; capture_region selected_region{}; struct option opts[] = { { "output", required_argument, NULL, 'o' }, { "file", required_argument, NULL, 'f' }, { "muxer", required_argument, NULL, 'm' }, { "pixel-format", required_argument, NULL, 'x' }, { "geometry", required_argument, NULL, 'g' }, { "codec", required_argument, NULL, 'c' }, { "codec-param", required_argument, NULL, 'p' }, { "device", required_argument, NULL, 'd' }, { "log", no_argument, NULL, 'l' }, { "audio", optional_argument, NULL, 'a' }, { "help", no_argument, NULL, 'h' }, { "force-yuv", no_argument, NULL, 't' }, { "opencl", optional_argument, NULL, 'e' }, { "bframes", optional_argument, NULL, 'b' }, { 0, 0, NULL, 0 } }; int c, i; std::string param; size_t pos; while((c = getopt_long(argc, argv, "o:f:m:x:g:c:p:d:b:la::te::h", opts, &i)) != -1) { switch(c) { case 'f': params.file = optarg; break; case 'o': cmdline_output = optarg; break; case 'm': params.muxer = optarg; break; case 'x': params.pix_fmt = optarg; break; case 'g': selected_region.set_from_string(optarg); break; case 'c': params.codec = optarg; break; case 'd': params.hw_device = optarg; break; case 'b': params.bframes = optarg ? atoi(optarg) : -1; break; case 'l': params.enable_ffmpeg_debug_output = true; break; case 'a': params.enable_audio = true; pulseParams.audio_source = optarg ? strdup(optarg) : NULL; break; case 't': params.force_yuv = true; break; case 'h': help(); break; case 'e': params.opencl = true; params.opencl_device = optarg ? 
atoi(optarg) : -1; break; case 'p': param = optarg; pos = param.find("="); if (pos != std::string::npos && pos != param.length() - 1) { auto optname = param.substr(0, pos); auto optvalue = param.substr(pos + 1, param.length() - pos - 1); params.codec_options[optname] = optvalue; } else { printf("Invalid codec option %s\n", optarg); } break; default: printf("Unsupported command line argument %s\n", optarg); } } display = wl_display_connect(NULL); if (display == NULL) { fprintf(stderr, "failed to create display: %m\n"); return EXIT_FAILURE; } struct wl_registry *registry = wl_display_get_registry(display); wl_registry_add_listener(registry, ®istry_listener, NULL); sync_wayland(); check_has_protos(); load_output_info(); wf_recorder_output *chosen_output = nullptr; if (available_outputs.size() == 1) { chosen_output = &available_outputs[0]; if (chosen_output->name != cmdline_output && cmdline_output != default_cmdline_output) { std::cerr << "Couldn't find requested output " << cmdline_output << std::endl; return EXIT_FAILURE; } } else { for (auto& wo : available_outputs) { if (wo.name == cmdline_output) chosen_output = &wo; } if (chosen_output == NULL) { if (cmdline_output != default_cmdline_output) { std::cerr << "Couldn't find requested output " << cmdline_output.c_str() << std::endl; return EXIT_FAILURE; } if (selected_region.is_selected()) { chosen_output = detect_output_from_region(selected_region); } else { chosen_output = choose_interactive(); } } } if (chosen_output == nullptr) { fprintf(stderr, "Failed to select output, exiting\n"); return EXIT_FAILURE; } if (selected_region.is_selected()) { if (!selected_region.contained_in({chosen_output->x, chosen_output->y, chosen_output->width, chosen_output->height})) { fprintf(stderr, "Invalid region to capture: must be completely " "inside the output\n"); selected_region = capture_region{}; } } printf("selected region %d %d %d %d\n", selected_region.x, selected_region.y, selected_region.width, selected_region.height); #ifdef HAVE_OPENCL if (params.opencl && params.force_yuv) opencl = std::unique_ptr (new OpenCL(params.opencl_device)); #endif timespec first_frame; first_frame.tv_sec = -1; active_buffer = 0; for (auto& buffer : buffers) { buffer.wl_buffer = NULL; buffer.available = false; buffer.released = true; } bool spawned_thread = false; std::thread writer_thread; signal(SIGINT, handle_sigint); while(!exit_main_loop) { // wait for a free buffer while(buffers[active_buffer].released != true) { std::this_thread::sleep_for(std::chrono::microseconds(500)); } buffer_copy_done = false; struct zwlr_screencopy_frame_v1 *frame = NULL; /* Capture the whole output if the user hasn't provided a good geometry */ if (!selected_region.is_selected()) { frame = zwlr_screencopy_manager_v1_capture_output( screencopy_manager, 1, chosen_output->output); } else { frame = zwlr_screencopy_manager_v1_capture_output_region( screencopy_manager, 1, chosen_output->output, selected_region.x - chosen_output->x, selected_region.y - chosen_output->y, selected_region.width, selected_region.height); } zwlr_screencopy_frame_v1_add_listener(frame, &frame_listener, NULL); while (!buffer_copy_done && wl_display_dispatch(display) != -1) { // This space is intentionally left blank } auto& buffer = buffers[active_buffer]; //std::cout << "first buffer at " << timespec_to_usec(get_ct()) / 1.0e6<< std::endl; if (!spawned_thread) { writer_thread = std::thread([=] () { write_loop(params, pulseParams); }); spawned_thread = true; } if (first_frame.tv_sec == -1) first_frame = 
buffer.presented; buffer.base_usec = timespec_to_usec(buffer.presented) - timespec_to_usec(first_frame); buffer.released = false; buffer.available = true; active_buffer = next_frame(active_buffer); zwlr_screencopy_frame_v1_destroy(frame); } writer_thread.join(); for (auto& buffer : buffers) wl_buffer_destroy(buffer.wl_buffer); return EXIT_SUCCESS; } wf-recorder-0.2/src/opencl.cpp000066400000000000000000000304341357101353700163560ustar00rootroot00000000000000/* * Adapted from an example found here https://stackoverflow.com/questions/4979504/fast-rgb-yuv-conversion-in-opencl * Copyright 2019 Scott Moreau * */ #include #include "opencl.hpp" static char const *cl_source_str = R"( __kernel void rgbx_2_yuv420 (__global unsigned int *sourceImage, __global unsigned char *destImage, unsigned int srcWidth, unsigned int srcHeight, short rgb0) { int i, d; unsigned int pixels[4], posSrc[2]; unsigned int RGB, ValueY, ValueU, ValueV, c1, c2, c3, u_offset, v_offset; unsigned char r, g, b; unsigned int posX = get_global_id(0); unsigned int posY = get_global_id(1); unsigned int X2 = posX * 2; unsigned int Y2 = posY * 2; unsigned int size = srcWidth * srcHeight; unsigned int halfWidth = ((srcWidth + 1) >> 1); unsigned int halfHeight = ((srcHeight + 1) >> 1); if (posX >= halfWidth || posY >= halfHeight) return; posSrc[0] = (Y2 * srcWidth) + X2; posSrc[1] = ((Y2 + 1) * srcWidth) + X2; pixels[0] = sourceImage[posSrc[0] + 0]; pixels[1] = sourceImage[posSrc[0] + 1]; pixels[2] = sourceImage[posSrc[1] + 0]; pixels[3] = sourceImage[posSrc[1] + 1]; for (i = 0; i < 4; i++) { if (i == 1 && (X2 + 1) >= srcWidth) continue; if (i > 1 && (posSrc[1] + ((i - 1) >> 1)) >= size) break; RGB = pixels[i]; if (rgb0) { r = (RGB) & 0xff; g = (RGB >> 8) & 0xff; b = (RGB >> 16) & 0xff; } else //bgr0 { b = (RGB) & 0xff; g = (RGB >> 8) & 0xff; r = (RGB >> 16) & 0xff; } // Y plane - pack 1 * 8-bit Y within each 8-bit unit. ValueY = ((66 * r + 129 * g + 25 * b) >> 8) + 16; if (i < 2) destImage[(Y2 * srcWidth) + X2 + i] = ValueY; else destImage[((Y2 + 1) * srcWidth) + X2 + (i - 2)] = ValueY; } c1 = (pixels[0] & 0xff); c2 = ((pixels[0] >> 8) & 0xff); c3 = ((pixels[0] >> 16) & 0xff); d = 0; if ((X2 + 1) < srcWidth) { c1 += (pixels[1] & 0xff); c2 += ((pixels[1] >> 8) & 0xff); c3 += ((pixels[1] >> 16) & 0xff); d++; } if ((Y2 + 1) < srcHeight) { c1 += (pixels[2] & 0xff); c2 += ((pixels[2] >> 8) & 0xff); c3 += ((pixels[2] >> 16) & 0xff); d++; } if (d == 2) { c1 += (pixels[3] & 0xff); c2 += ((pixels[3] >> 8) & 0xff); c3 += ((pixels[3] >> 16) & 0xff); } if (rgb0) { r = c1 >> d; g = c2 >> d; b = c3 >> d; } else //bgr0 { b = c1 >> d; g = c2 >> d; r = c3 >> d; } // UV plane - pack 1 * 8-bit U and 1 * 8-bit V for each subsample average ValueU = ((-38 * r - 74 * g + 112 * b) >> 8) + 128; ValueV = ((112 * r - 94 * g - 18 * b) >> 8) + 128; u_offset = size + (posY * halfWidth); v_offset = u_offset + (halfWidth * halfHeight); destImage[u_offset + posX] = ValueU; destImage[v_offset + posX] = ValueV; return; } )"; cl_device_id OpenCL::get_device_id(int device) { uint32_t i, j; char* value; size_t valueSize; cl_uint platformCount; cl_platform_id* platforms; cl_uint deviceCount; cl_device_id* devices; cl_device_id device_id; std::vector all_devices; ret = clGetPlatformIDs(0, NULL, &platformCount); if (ret) { std::cerr << "clGetPlatformIDs failed!" << std::endl; return NULL; } if (!platformCount) { std::cerr << "No OpenCL platforms detected." 
<< std::endl; return NULL; } platforms = (cl_platform_id*) malloc(sizeof(cl_platform_id) * platformCount); ret = clGetPlatformIDs(platformCount, platforms, NULL); if (ret) { std::cerr << "clGetPlatformIDs failed!" << std::endl; return NULL; } if (platformCount == 1 && device <= 0) { ret = clGetDeviceIDs(platforms[0], CL_DEVICE_TYPE_ALL, 0, NULL, &deviceCount); if (ret) { std::cerr << "clGetDeviceIDs failed!" << std::endl; return NULL; } if (!deviceCount) { std::cerr << "No OpenCL devices detected." << std::endl; return NULL; } if (deviceCount == 1) { ret = clGetDeviceIDs(platforms[0], CL_DEVICE_TYPE_DEFAULT, 1, &device_id, &deviceCount); if (ret) { std::cerr << "clGetDeviceIDs failed!" << std::endl; return NULL; } return device_id; } } if (device < 0) { std::cout << std::endl; std::cout << "Please choose an OpenCL device:" << std::endl; } for (i = 0; i < platformCount; i++) { ret = clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, 0, NULL, &deviceCount); switch (ret) { case CL_INVALID_PLATFORM: case CL_INVALID_DEVICE_TYPE: case CL_INVALID_VALUE: case CL_DEVICE_NOT_FOUND: continue; break; case CL_SUCCESS: default: break; } if (!deviceCount) { std::cerr << "No OpenCL devices detected for platform " << i + 1 << std::endl; continue; } devices = (cl_device_id*) malloc(sizeof(cl_device_id) * deviceCount); ret = clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, deviceCount, devices, NULL); switch (ret) { case CL_INVALID_PLATFORM: case CL_INVALID_DEVICE_TYPE: case CL_INVALID_VALUE: case CL_DEVICE_NOT_FOUND: continue; break; case CL_SUCCESS: default: break; } for (j = 0; j < deviceCount; j++) { ret = clGetDeviceInfo(devices[j], CL_DEVICE_NAME, 0, NULL, &valueSize); if (ret) { std::cerr << "clGetDeviceInfo failed!" << std::endl; return NULL; } value = (char*) malloc(valueSize); ret = clGetDeviceInfo(devices[j], CL_DEVICE_NAME, valueSize, value, NULL); if (ret) { std::cerr << "clGetDeviceInfo failed!" << std::endl; return NULL; } all_devices.push_back(devices[j]); if (device < 0) std::cout << all_devices.size() << ": " << value << std::endl; free(value); if (device == (int) all_devices.size()) break; } free(devices); if (device == (int) all_devices.size()) break; } free(platforms); if (device > (int) all_devices.size()) { std::cerr << "Max OpenCL device number is " << all_devices.size() << std::endl; return NULL; } if (!device) return all_devices[device]; if (device > 0) return all_devices[device - 1]; std::cout << "Enter device no.:"; fflush(stdout); int choice; if (scanf("%d", &choice) != 1 || choice > (int) all_devices.size() || choice <= 0) { std::cerr << "Bad choice." << std::endl; return NULL; } return all_devices[choice - 1]; } int OpenCL::init(int _width, int _height) { if (ret) return ret; width = _width; height = _height; halfWidth = ((width + 1) >> 1); halfHeight = ((height + 1) >> 1); unsigned int frameSize = width * height; unsigned int frameSizeUV = halfWidth * halfHeight; argbSize = frameSize * 4; // ARGB pixels yuv420Size = frameSize + frameSizeUV * 2; // Y+UV planes yuv420_buffer = clCreateBuffer(context, CL_MEM_WRITE_ONLY, yuv420Size * sizeof(uint8_t), 0, &ret); if (ret) { std::cerr << "clCreateBuffer (yuv420) failure!" << std::endl; return ret; } local_yuv420_buffer = (uint8_t *) malloc(yuv420Size * sizeof(uint8_t)); if (!local_yuv420_buffer) { std::cerr << "malloc failure!" 

OpenCL::OpenCL(int device)
{
    device_id = get_device_id(device);
    if (!device_id)
    {
        ret = -1;
        return;
    }

    // Create an OpenCL context
    context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &ret);
    if (ret)
    {
        std::cerr << "clCreateContext failed!" << std::endl;
        return;
    }

    // Create a command queue
    command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
    if (ret)
    {
        std::cerr << "clCreateCommandQueue failed!" << std::endl;
        return;
    }

    // Create a program from the kernel source
    program = clCreateProgramWithSource(context, 1, (const char **)&cl_source_str, NULL, &ret);
    if (ret)
    {
        std::cerr << "clCreateProgramWithSource failed!" << std::endl;
        return;
    }

    // Build the program
    ret |= clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
    if (ret)
    {
        std::cerr << "clBuildProgram failed!" << std::endl;
        char *build_log;
        size_t ret_val_size;
        clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
        build_log = new char[ret_val_size + 1];
        clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
        std::cout << build_log << std::endl;
        delete[] build_log;
    }

    // Create the OpenCL kernel
    kernel = clCreateKernel(program, "rgbx_2_yuv420", &ret);
    if (ret)
    {
        std::cerr << "clCreateKernel failed!" << std::endl;
        return;
    }
}

int OpenCL::do_frame(const uint8_t* pixels, AVFrame *encoder_frame,
    AVPixelFormat format, bool y_invert)
{
    const uint8_t *formatted_pixels;
    short rgb0 = format == AV_PIX_FMT_RGB0 ? 1 : 0;

    if (ret)
        return ret;

    rgb_buffer = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
        argbSize, (void *) pixels, &ret);
    if (ret)
    {
        std::cerr << "clCreateBuffer (rgb) failed!" << std::endl;
        return ret;
    }

    ret |= clSetKernelArg(kernel, 0, sizeof(cl_mem), &rgb_buffer);
    ret |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &yuv420_buffer);
    ret |= clSetKernelArg(kernel, 2, sizeof(unsigned int), &width);
    ret |= clSetKernelArg(kernel, 3, sizeof(unsigned int), &height);
    ret |= clSetKernelArg(kernel, 4, sizeof(short), &rgb0);
    if (ret)
    {
        std::cerr << "clSetKernelArg failed!" << std::endl;
        return ret;
    }

    const size_t global_ws[] = {halfWidth, halfHeight};
    ret |= clEnqueueNDRangeKernel(command_queue, kernel, 2, NULL, global_ws, NULL, 0, NULL, NULL);
    if (ret)
    {
        std::cerr << "clEnqueueNDRangeKernel failed!" << std::endl;
        return ret;
    }

    // Read yuv420 buffer from gpu
    ret |= clEnqueueReadBuffer(command_queue, yuv420_buffer, CL_TRUE, 0,
        yuv420Size * sizeof(uint8_t), local_yuv420_buffer, 0, NULL, NULL);
    if (ret)
    {
        std::cerr << "clEnqueueReadBuffer failed!" << std::endl;
        return ret;
    }

    ret |= clReleaseMemObject(rgb_buffer);
    if (ret)
    {
        std::cerr << "clReleaseMemObject failed!" << std::endl;
        return ret;
    }

    formatted_pixels = local_yuv420_buffer;
    if (y_invert)
        formatted_pixels += width * (height - 1);
    encoder_frame->data[0] = (uint8_t *) formatted_pixels;

    if (y_invert)
        formatted_pixels += (halfWidth) * (halfHeight - 1) + width;
    else
        formatted_pixels += width * height;
    encoder_frame->data[1] = (uint8_t *) formatted_pixels;

    formatted_pixels += halfWidth * halfHeight;
    encoder_frame->data[2] = (uint8_t *) formatted_pixels;

    short flip = y_invert ? -1 : 1;
    encoder_frame->linesize[0] = width * flip;
    encoder_frame->linesize[1] = halfWidth * flip;
    encoder_frame->linesize[2] = halfWidth * flip;

    return ret;
}
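
/*
 * Note on the y_invert path in do_frame() above: data[n] is pointed at the
 * last row of each plane and linesize[n] is made negative, so the encoder
 * walks the rows bottom-up. This flips the image vertically without copying
 * a single pixel.
 */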

OpenCL::~OpenCL()
{
    free(local_yuv420_buffer);
    if (ret)
        return;
    clFlush(command_queue);
    clFinish(command_queue);
    clReleaseKernel(kernel);
    clReleaseProgram(program);
    clReleaseMemObject(yuv420_buffer);
    clReleaseCommandQueue(command_queue);
    clReleaseContext(context);
}

wf-recorder-0.2/src/opencl.hpp

/* Copyright 2019 Scott Moreau */

#pragma once

#define CL_TARGET_OPENCL_VERSION 110

#include <CL/cl.h>
#include "frame-writer.hpp"

class OpenCL
{
    cl_device_id device_id;
    cl_mem yuv420_buffer, rgb_buffer;
    unsigned int argbSize, yuv420Size, width, height, halfWidth, halfHeight;
    cl_kernel kernel;
    cl_context context;
    cl_command_queue command_queue;
    cl_program program;
    cl_int ret = 0;
    uint8_t *local_yuv420_buffer;

    cl_device_id get_device_id(int device);

  public:
    OpenCL(int device);
    ~OpenCL();

    int init(int width, int height);
    int do_frame(const uint8_t* pixels, AVFrame *encoder_frame,
        AVPixelFormat format, bool y_invert);
};

wf-recorder-0.2/src/pulse.cpp

#include "pulse.hpp"
#include "frame-writer.hpp"
#include <pulse/error.h>
#include <cstring>
#include <iostream>
#include <vector>

PulseReader::PulseReader(PulseReaderParams _p)
    : params(_p)
{
    pa_channel_map map;
    std::memset(&map, 0, sizeof(map));
    pa_channel_map_init_stereo(&map);

    pa_buffer_attr attr;
    attr.maxlength = params.audio_frame_size * 4;
    attr.fragsize = params.audio_frame_size * 4;

    pa_sample_spec sample_spec = {
        .format = PA_SAMPLE_FLOAT32LE,
        .rate = 44100,
        .channels = 2,
    };

    int perr;
    std::cout << "Using PulseAudio device: " << (params.audio_source ?: "default") << std::endl;
    pa = pa_simple_new(NULL, "wf-recorder3", PA_STREAM_RECORD, params.audio_source,
        "wf-recorder3", &sample_spec, &map, &attr, &perr);

    if (!pa)
    {
        std::cerr << "Failed to connect to PulseAudio: " << pa_strerror(perr) <<
            "\nRecording won't have audio" << std::endl;
    }
}

bool PulseReader::loop()
{
    static std::vector<char> buffer;
    buffer.resize(params.audio_frame_size);

    int perr;
    if (pa_simple_read(pa, buffer.data(), buffer.size(), &perr) < 0)
    {
        std::cerr << "Failed to read from PulseAudio stream: "
            << pa_strerror(perr) << std::endl;
        return false;
    }

    frame_writer->add_audio(buffer.data());
    return !exit_main_loop;
}

void PulseReader::start()
{
    if (!pa)
        return;

    read_thread = std::thread([=] ()
    {
        while (loop());
    });
}

PulseReader::~PulseReader()
{
    if (pa)
        read_thread.join();
}

wf-recorder-0.2/src/pulse.hpp

#ifndef PULSE_HPP
#define PULSE_HPP

#include <pulse/simple.h>
#include <pulse/error.h>
#include <thread>

struct PulseReaderParams
{
    size_t audio_frame_size;

    /* Can be NULL */
    char *audio_source;
};

class PulseReader
{
    PulseReaderParams params;
    pa_simple *pa;

    bool loop();
    std::thread read_thread;

  public:
    PulseReader(PulseReaderParams params);
    ~PulseReader();

    void start();
};

#endif /* end of include guard: PULSE_HPP */
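
/*
 * Usage sketch for PulseReader (a hypothetical caller; the values below are
 * made up for illustration, and in wf-recorder the actual setup is done by
 * the recorder's main loop):
 *
 *     PulseReaderParams p;
 *     p.audio_frame_size = 4096;  // bytes per pa_simple_read(), caller's choice
 *     p.audio_source = NULL;      // NULL selects the default PulseAudio source
 *
 *     PulseReader reader{p};
 *     reader.start();             // no-op if the PulseAudio connection failed
 *     // ... capture video; the read thread exits once exit_main_loop is set.
 */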