Repository: CasparCG/Server Branch: master Commit: 7964b57a52fb Files: 492 Total size: 3.5 MB Directory structure: gitextract_p4etv4dp/ ├── .clang-format ├── .dockerignore ├── .editorconfig ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.yaml │ │ ├── config.yml │ │ └── feature_request.yaml │ ├── dependabot.yml │ └── workflows/ │ ├── linux-system.yml │ ├── linux.yml │ └── windows.yml ├── .gitignore ├── BUILDING.md ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── _typos.toml ├── src/ │ ├── CMakeLists.txt │ ├── CMakeModules/ │ │ ├── Bootstrap_Linux.cmake │ │ ├── Bootstrap_Windows.cmake │ │ ├── CasparCG_Util.cmake │ │ └── FindFFmpeg.cmake │ ├── CMakeSettings.json │ ├── accelerator/ │ │ ├── CMakeLists.txt │ │ ├── StdAfx.h │ │ ├── accelerator.cpp │ │ ├── accelerator.h │ │ ├── d3d/ │ │ │ ├── d3d_device.cpp │ │ │ ├── d3d_device.h │ │ │ ├── d3d_device_context.cpp │ │ │ ├── d3d_device_context.h │ │ │ ├── d3d_texture2d.cpp │ │ │ └── d3d_texture2d.h │ │ ├── ogl/ │ │ │ ├── image/ │ │ │ │ ├── image_kernel.cpp │ │ │ │ ├── image_kernel.h │ │ │ │ ├── image_mixer.cpp │ │ │ │ ├── image_mixer.h │ │ │ │ ├── image_shader.cpp │ │ │ │ ├── image_shader.h │ │ │ │ ├── shader.frag │ │ │ │ └── shader.vert │ │ │ └── util/ │ │ │ ├── buffer.cpp │ │ │ ├── buffer.h │ │ │ ├── context.cpp │ │ │ ├── context.h │ │ │ ├── device.cpp │ │ │ ├── device.h │ │ │ ├── matrix.cpp │ │ │ ├── matrix.h │ │ │ ├── shader.cpp │ │ │ ├── shader.h │ │ │ ├── texture.cpp │ │ │ ├── texture.h │ │ │ ├── transforms.cpp │ │ │ └── transforms.h │ │ └── vulkan/ │ │ ├── image/ │ │ │ ├── fragment_shader.frag │ │ │ ├── image_kernel.cpp │ │ │ ├── image_kernel.h │ │ │ ├── image_mixer.cpp │ │ │ ├── image_mixer.h │ │ │ └── vertex_shader.vert │ │ └── util/ │ │ ├── buffer.cpp │ │ ├── buffer.h │ │ ├── device.cpp │ │ ├── device.h │ │ ├── draw_params.h │ │ ├── matrix.cpp │ │ ├── matrix.h │ │ ├── pipeline.cpp │ │ ├── pipeline.h │ │ ├── renderpass.cpp │ │ ├── renderpass.h │ │ ├── texture.cpp │ │ ├── texture.h │ │ ├── 
transforms.cpp │ │ ├── transforms.h │ │ └── uniform_block.h │ ├── common/ │ │ ├── CMakeLists.txt │ │ ├── array.h │ │ ├── assert.h │ │ ├── base64.cpp │ │ ├── base64.h │ │ ├── bit_depth.h │ │ ├── compiler/ │ │ │ └── vs/ │ │ │ └── disable_silly_warnings.h │ │ ├── diagnostics/ │ │ │ ├── graph.cpp │ │ │ └── graph.h │ │ ├── endian.h │ │ ├── enum_class.h │ │ ├── env.cpp │ │ ├── env.h │ │ ├── except.h │ │ ├── executor.h │ │ ├── filesystem.cpp │ │ ├── filesystem.h │ │ ├── future.h │ │ ├── gl/ │ │ │ ├── gl_check.cpp │ │ │ └── gl_check.h │ │ ├── log.cpp │ │ ├── log.h │ │ ├── memory.h │ │ ├── memshfl.h │ │ ├── os/ │ │ │ ├── filesystem.h │ │ │ ├── linux/ │ │ │ │ ├── filesystem.cpp │ │ │ │ ├── prec_timer.cpp │ │ │ │ └── thread.cpp │ │ │ ├── thread.h │ │ │ └── windows/ │ │ │ ├── filesystem.cpp │ │ │ ├── prec_timer.cpp │ │ │ ├── thread.cpp │ │ │ └── windows.h │ │ ├── param.h │ │ ├── prec_timer.h │ │ ├── ptree.h │ │ ├── scope_exit.h │ │ ├── stdafx.cpp │ │ ├── stdafx.h │ │ ├── timer.h │ │ ├── tweener.cpp │ │ ├── tweener.h │ │ ├── utf.cpp │ │ └── utf.h │ ├── core/ │ │ ├── CMakeLists.txt │ │ ├── StdAfx.h │ │ ├── consumer/ │ │ │ ├── channel_info.h │ │ │ ├── frame_consumer.cpp │ │ │ ├── frame_consumer.h │ │ │ ├── frame_consumer_registry.cpp │ │ │ ├── frame_consumer_registry.h │ │ │ ├── output.cpp │ │ │ └── output.h │ │ ├── diagnostics/ │ │ │ ├── call_context.cpp │ │ │ ├── call_context.h │ │ │ ├── osd_graph.cpp │ │ │ └── osd_graph.h │ │ ├── frame/ │ │ │ ├── draw_frame.cpp │ │ │ ├── draw_frame.h │ │ │ ├── frame.cpp │ │ │ ├── frame.h │ │ │ ├── frame_factory.h │ │ │ ├── frame_transform.cpp │ │ │ ├── frame_transform.h │ │ │ ├── frame_visitor.h │ │ │ ├── geometry.cpp │ │ │ ├── geometry.h │ │ │ └── pixel_format.h │ │ ├── fwd.h │ │ ├── mixer/ │ │ │ ├── audio/ │ │ │ │ ├── audio_mixer.cpp │ │ │ │ ├── audio_mixer.h │ │ │ │ └── audio_util.h │ │ │ ├── image/ │ │ │ │ ├── blend_modes.cpp │ │ │ │ ├── blend_modes.h │ │ │ │ └── image_mixer.h │ │ │ ├── mixer.cpp │ │ │ └── mixer.h │ │ ├── 
module_dependencies.h │ │ ├── monitor/ │ │ │ └── monitor.h │ │ ├── producer/ │ │ │ ├── cg_proxy.cpp │ │ │ ├── cg_proxy.h │ │ │ ├── color/ │ │ │ │ ├── color_producer.cpp │ │ │ │ └── color_producer.h │ │ │ ├── frame_producer.cpp │ │ │ ├── frame_producer.h │ │ │ ├── frame_producer_registry.cpp │ │ │ ├── frame_producer_registry.h │ │ │ ├── layer.cpp │ │ │ ├── layer.h │ │ │ ├── route/ │ │ │ │ ├── route_producer.cpp │ │ │ │ └── route_producer.h │ │ │ ├── separated/ │ │ │ │ ├── separated_producer.cpp │ │ │ │ └── separated_producer.h │ │ │ ├── stage.cpp │ │ │ ├── stage.h │ │ │ └── transition/ │ │ │ ├── sting_producer.cpp │ │ │ ├── sting_producer.h │ │ │ ├── transition_producer.cpp │ │ │ └── transition_producer.h │ │ ├── video_channel.cpp │ │ ├── video_channel.h │ │ ├── video_format.cpp │ │ └── video_format.h │ ├── modules/ │ │ ├── CMakeLists.txt │ │ ├── artnet/ │ │ │ ├── CMakeLists.txt │ │ │ ├── artnet.cpp │ │ │ ├── artnet.h │ │ │ ├── consumer/ │ │ │ │ ├── artnet_consumer.cpp │ │ │ │ └── artnet_consumer.h │ │ │ └── util/ │ │ │ ├── fixture_calculation.cpp │ │ │ └── fixture_calculation.h │ │ ├── bluefish/ │ │ │ ├── CMakeLists.txt │ │ │ ├── StdAfx.h │ │ │ ├── bluefish.cpp │ │ │ ├── bluefish.h │ │ │ ├── consumer/ │ │ │ │ ├── bluefish_consumer.cpp │ │ │ │ └── bluefish_consumer.h │ │ │ ├── interop/ │ │ │ │ ├── BlueDriver_p.h │ │ │ │ ├── BlueTypes.h │ │ │ │ └── BlueVelvetCFuncPtr.h │ │ │ ├── producer/ │ │ │ │ ├── bluefish_producer.cpp │ │ │ │ └── bluefish_producer.h │ │ │ └── util/ │ │ │ ├── blue_velvet.cpp │ │ │ ├── blue_velvet.h │ │ │ └── memory.h │ │ ├── decklink/ │ │ │ ├── CMakeLists.txt │ │ │ ├── StdAfx.h │ │ │ ├── consumer/ │ │ │ │ ├── config.cpp │ │ │ │ ├── config.h │ │ │ │ ├── decklink_consumer.cpp │ │ │ │ ├── decklink_consumer.h │ │ │ │ ├── format_strategy.h │ │ │ │ ├── monitor.cpp │ │ │ │ ├── monitor.h │ │ │ │ ├── sdr_bgra_strategy.cpp │ │ │ │ ├── v210_strategies.cpp │ │ │ │ ├── vanc.cpp │ │ │ │ ├── vanc.h │ │ │ │ ├── vanc_op47_strategy.cpp │ │ │ │ └── 
vanc_scte104_strategy.cpp │ │ │ ├── decklink.cpp │ │ │ ├── decklink.h │ │ │ ├── decklink_api.h │ │ │ ├── interop/ │ │ │ │ ├── DeckLinkAPIVersion.h │ │ │ │ ├── DeckLinkAPI_i.c │ │ │ │ └── DecklinkAPI.h │ │ │ ├── linux_interop/ │ │ │ │ ├── DeckLinkAPI.h │ │ │ │ ├── DeckLinkAPIConfiguration.h │ │ │ │ ├── DeckLinkAPIConfiguration_v10_11.h │ │ │ │ ├── DeckLinkAPIConfiguration_v10_2.h │ │ │ │ ├── DeckLinkAPIConfiguration_v10_4.h │ │ │ │ ├── DeckLinkAPIConfiguration_v10_5.h │ │ │ │ ├── DeckLinkAPIConfiguration_v10_9.h │ │ │ │ ├── DeckLinkAPIDeckControl.h │ │ │ │ ├── DeckLinkAPIDiscovery.h │ │ │ │ ├── DeckLinkAPIDispatch.cpp │ │ │ │ ├── DeckLinkAPIDispatch_v10_11.cpp │ │ │ │ ├── DeckLinkAPIDispatch_v10_8.cpp │ │ │ │ ├── DeckLinkAPIDispatch_v7_6.cpp │ │ │ │ ├── DeckLinkAPIDispatch_v8_0.cpp │ │ │ │ ├── DeckLinkAPIModes.h │ │ │ │ ├── DeckLinkAPITypes.h │ │ │ │ ├── DeckLinkAPIVersion.h │ │ │ │ ├── DeckLinkAPIVideoEncoderInput_v10_11.h │ │ │ │ ├── DeckLinkAPIVideoInput_v10_11.h │ │ │ │ ├── DeckLinkAPIVideoInput_v11_4.h │ │ │ │ ├── DeckLinkAPIVideoInput_v11_5_1.h │ │ │ │ ├── DeckLinkAPIVideoOutput_v10_11.h │ │ │ │ ├── DeckLinkAPIVideoOutput_v11_4.h │ │ │ │ ├── DeckLinkAPI_v10_11.h │ │ │ │ ├── DeckLinkAPI_v10_2.h │ │ │ │ ├── DeckLinkAPI_v10_4.h │ │ │ │ ├── DeckLinkAPI_v10_5.h │ │ │ │ ├── DeckLinkAPI_v10_6.h │ │ │ │ ├── DeckLinkAPI_v10_9.h │ │ │ │ ├── DeckLinkAPI_v11_5.h │ │ │ │ ├── DeckLinkAPI_v11_5_1.h │ │ │ │ ├── DeckLinkAPI_v7_1.h │ │ │ │ ├── DeckLinkAPI_v7_3.h │ │ │ │ ├── DeckLinkAPI_v7_6.h │ │ │ │ ├── DeckLinkAPI_v7_9.h │ │ │ │ ├── DeckLinkAPI_v8_0.h │ │ │ │ ├── DeckLinkAPI_v8_1.h │ │ │ │ ├── DeckLinkAPI_v9_2.h │ │ │ │ ├── DeckLinkAPI_v9_9.h │ │ │ │ └── LinuxCOM.h │ │ │ ├── producer/ │ │ │ │ ├── decklink_producer.cpp │ │ │ │ └── decklink_producer.h │ │ │ └── util/ │ │ │ └── util.h │ │ ├── ffmpeg/ │ │ │ ├── CMakeLists.txt │ │ │ ├── StdAfx.h │ │ │ ├── consumer/ │ │ │ │ ├── ffmpeg_consumer.cpp │ │ │ │ └── ffmpeg_consumer.h │ │ │ ├── ffmpeg.cpp │ │ │ ├── ffmpeg.h │ │ │ ├── 
producer/ │ │ │ │ ├── av_input.cpp │ │ │ │ ├── av_input.h │ │ │ │ ├── av_producer.cpp │ │ │ │ ├── av_producer.h │ │ │ │ ├── ffmpeg_producer.cpp │ │ │ │ └── ffmpeg_producer.h │ │ │ └── util/ │ │ │ ├── audio_resampler.cpp │ │ │ ├── audio_resampler.h │ │ │ ├── av_assert.h │ │ │ ├── av_util.cpp │ │ │ └── av_util.h │ │ ├── flash/ │ │ │ ├── CMakeLists.txt │ │ │ ├── StdAfx.h │ │ │ ├── flash.cpp │ │ │ ├── flash.h │ │ │ ├── interop/ │ │ │ │ ├── Flash9e.IDL │ │ │ │ ├── Flash9e_i.c │ │ │ │ ├── TimerHelper.h │ │ │ │ └── axflash.h │ │ │ ├── producer/ │ │ │ │ ├── FlashAxContainer.cpp │ │ │ │ ├── FlashAxContainer.h │ │ │ │ ├── flash_producer.cpp │ │ │ │ └── flash_producer.h │ │ │ └── util/ │ │ │ ├── swf.cpp │ │ │ └── swf.h │ │ ├── html/ │ │ │ ├── CMakeLists.txt │ │ │ ├── html.cpp │ │ │ ├── html.h │ │ │ ├── producer/ │ │ │ │ ├── html_cg_proxy.cpp │ │ │ │ ├── html_cg_proxy.h │ │ │ │ ├── html_producer.cpp │ │ │ │ └── html_producer.h │ │ │ └── util.h │ │ ├── image/ │ │ │ ├── CMakeLists.txt │ │ │ ├── consumer/ │ │ │ │ ├── image_consumer.cpp │ │ │ │ └── image_consumer.h │ │ │ ├── image.cpp │ │ │ ├── image.h │ │ │ ├── producer/ │ │ │ │ ├── image_producer.cpp │ │ │ │ ├── image_producer.h │ │ │ │ ├── image_scroll_producer.cpp │ │ │ │ └── image_scroll_producer.h │ │ │ └── util/ │ │ │ ├── image_algorithms.cpp │ │ │ ├── image_algorithms.h │ │ │ ├── image_converter.cpp │ │ │ ├── image_converter.h │ │ │ ├── image_loader.cpp │ │ │ ├── image_loader.h │ │ │ └── image_view.h │ │ ├── newtek/ │ │ │ ├── CMakeLists.txt │ │ │ ├── StdAfx.h │ │ │ ├── consumer/ │ │ │ │ ├── newtek_ndi_consumer.cpp │ │ │ │ └── newtek_ndi_consumer.h │ │ │ ├── interop/ │ │ │ │ ├── Processing.NDI.DynamicLoad.h │ │ │ │ ├── Processing.NDI.Find.h │ │ │ │ ├── Processing.NDI.FrameSync.h │ │ │ │ ├── Processing.NDI.Lib.cplusplus.h │ │ │ │ ├── Processing.NDI.Lib.h │ │ │ │ ├── Processing.NDI.Recv.ex.h │ │ │ │ ├── Processing.NDI.Recv.h │ │ │ │ ├── Processing.NDI.RecvAdvertiser.h │ │ │ │ ├── Processing.NDI.RecvListener.h │ │ │ │ ├── 
Processing.NDI.Routing.h │ │ │ │ ├── Processing.NDI.Send.h │ │ │ │ ├── Processing.NDI.SendAdvertiser.h │ │ │ │ ├── Processing.NDI.SendListener.h │ │ │ │ ├── Processing.NDI.compat.h │ │ │ │ ├── Processing.NDI.deprecated.h │ │ │ │ ├── Processing.NDI.structs.h │ │ │ │ └── Processing.NDI.utilities.h │ │ │ ├── newtek.cpp │ │ │ ├── newtek.h │ │ │ ├── producer/ │ │ │ │ ├── newtek_ndi_producer.cpp │ │ │ │ └── newtek_ndi_producer.h │ │ │ └── util/ │ │ │ ├── ndi.cpp │ │ │ └── ndi.h │ │ ├── oal/ │ │ │ ├── CMakeLists.txt │ │ │ ├── consumer/ │ │ │ │ ├── oal_consumer.cpp │ │ │ │ └── oal_consumer.h │ │ │ ├── oal.cpp │ │ │ └── oal.h │ │ └── screen/ │ │ ├── CMakeLists.txt │ │ ├── consumer/ │ │ │ ├── screen.frag │ │ │ ├── screen.vert │ │ │ ├── screen_consumer.cpp │ │ │ └── screen_consumer.h │ │ ├── screen.cpp │ │ ├── screen.h │ │ └── util/ │ │ ├── x11_util.cpp │ │ └── x11_util.h │ ├── protocol/ │ │ ├── CMakeLists.txt │ │ ├── StdAfx.h │ │ ├── amcp/ │ │ │ ├── AMCPCommand.cpp │ │ │ ├── AMCPCommand.h │ │ │ ├── AMCPCommandQueue.cpp │ │ │ ├── AMCPCommandQueue.h │ │ │ ├── AMCPCommandsImpl.cpp │ │ │ ├── AMCPCommandsImpl.h │ │ │ ├── AMCPProtocolStrategy.cpp │ │ │ ├── AMCPProtocolStrategy.h │ │ │ ├── amcp_args.cpp │ │ │ ├── amcp_args.h │ │ │ ├── amcp_command_context.h │ │ │ ├── amcp_command_repository.cpp │ │ │ ├── amcp_command_repository.h │ │ │ ├── amcp_command_repository_wrapper.cpp │ │ │ ├── amcp_command_repository_wrapper.h │ │ │ └── amcp_shared.h │ │ ├── osc/ │ │ │ ├── client.cpp │ │ │ ├── client.h │ │ │ └── oscpack/ │ │ │ ├── MessageMappingOscPacketListener.h │ │ │ ├── OscException.h │ │ │ ├── OscHostEndianness.h │ │ │ ├── OscOutboundPacketStream.cpp │ │ │ ├── OscOutboundPacketStream.h │ │ │ ├── OscPacketListener.h │ │ │ ├── OscPrintReceivedElements.cpp │ │ │ ├── OscPrintReceivedElements.h │ │ │ ├── OscReceivedElements.cpp │ │ │ ├── OscReceivedElements.h │ │ │ ├── OscTypes.cpp │ │ │ └── OscTypes.h │ │ └── util/ │ │ ├── AsyncEventServer.cpp │ │ ├── AsyncEventServer.h │ │ ├── 
ClientInfo.h │ │ ├── ProtocolStrategy.h │ │ ├── http_request.cpp │ │ ├── http_request.h │ │ ├── lock_container.cpp │ │ ├── lock_container.h │ │ ├── protocol_strategy.h │ │ ├── strategy_adapters.cpp │ │ ├── strategy_adapters.h │ │ ├── tokenize.cpp │ │ └── tokenize.h │ ├── shell/ │ │ ├── CMakeLists.txt │ │ ├── casparcg.config │ │ ├── casparcg_auto_restart.bat │ │ ├── copy_deps.sh │ │ ├── included_modules.tmpl │ │ ├── liberation-fonts/ │ │ │ ├── AUTHORS │ │ │ ├── ChangeLog │ │ │ ├── LICENSE │ │ │ ├── README │ │ │ └── TODO │ │ ├── linux_specific.cpp │ │ ├── main.cpp │ │ ├── platform_specific.h │ │ ├── resource.h │ │ ├── run.sh │ │ ├── server.cpp │ │ ├── server.h │ │ ├── shell.rc │ │ └── windows_specific.cpp │ ├── tools/ │ │ ├── CMakeLists.txt │ │ └── bin2c.cpp │ └── version.tmpl └── tools/ ├── linux/ │ ├── Dockerfile │ ├── build-in-docker │ ├── deb/ │ │ ├── INSTALLING │ │ ├── debian-sid/ │ │ │ └── debian/ │ │ │ ├── .gitignore │ │ │ ├── casparcg-server-beta.docs │ │ │ ├── casparcg-server-beta.manpages │ │ │ ├── casparcg.txt2man │ │ │ ├── control │ │ │ ├── copyright │ │ │ ├── gbp.conf │ │ │ ├── patches/ │ │ │ │ ├── README │ │ │ │ └── series │ │ │ ├── rules │ │ │ ├── source/ │ │ │ │ └── format │ │ │ ├── upstream/ │ │ │ │ └── metadata │ │ │ └── watch │ │ ├── debian-trixie/ │ │ │ └── debian/ │ │ │ ├── .gitignore │ │ │ ├── casparcg-server-beta.docs │ │ │ ├── casparcg-server-beta.manpages │ │ │ ├── casparcg.txt2man │ │ │ ├── control │ │ │ ├── copyright │ │ │ ├── gbp.conf │ │ │ ├── patches/ │ │ │ │ ├── README │ │ │ │ └── series │ │ │ ├── rules │ │ │ ├── source/ │ │ │ │ └── format │ │ │ ├── upstream/ │ │ │ │ └── metadata │ │ │ └── watch │ │ ├── ubuntu-noble/ │ │ │ └── debian/ │ │ │ ├── .gitignore │ │ │ ├── casparcg-server-beta.docs │ │ │ ├── casparcg-server-beta.manpages │ │ │ ├── casparcg.txt2man │ │ │ ├── control │ │ │ ├── copyright │ │ │ ├── gbp.conf │ │ │ ├── patches/ │ │ │ │ ├── README │ │ │ │ └── series │ │ │ ├── rules │ │ │ ├── source/ │ │ │ │ └── format │ │ │ ├── 
upstream/ │ │ │ │ └── metadata │ │ │ └── watch │ │ └── ubuntu-resolute/ │ │ └── debian/ │ │ ├── .gitignore │ │ ├── casparcg-server-beta.docs │ │ ├── casparcg-server-beta.manpages │ │ ├── casparcg.txt2man │ │ ├── control │ │ ├── copyright │ │ ├── gbp.conf │ │ ├── patches/ │ │ │ ├── README │ │ │ └── series │ │ ├── rules │ │ ├── source/ │ │ │ └── format │ │ ├── upstream/ │ │ │ └── metadata │ │ └── watch │ ├── extract-from-docker │ ├── install-dependencies │ ├── run_docker.sh │ └── start_docker.sh ├── update-format.sh ├── verify-format.sh └── windows/ ├── build.bat └── package.bat ================================================ FILE CONTENTS ================================================ ================================================ FILE: .clang-format ================================================ --- # TODO BreakBeforeLambdaBody # TODO IncludeBlocks Language: Cpp AccessModifierOffset: -2 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: true AlignConsecutiveDeclarations: true AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true BinPackArguments: false BinPackParameters: false BreakBeforeBraces: Custom BraceWrapping: AfterClass: true AfterControlStatement: false AfterEnum: true AfterFunction: true AfterNamespace: false AfterObjCDeclaration: false AfterStruct: true AfterUnion: true #AfterExternBlock: true BeforeCatch: false BeforeElse: false IndentBraces: false SplitEmptyFunction: true SplitEmptyRecord: true SplitEmptyNamespace: true BreakBeforeBinaryOperators: None BreakBeforeInheritanceComma: true BreakBeforeTernaryOperators: true 
BreakConstructorInitializersBeforeComma: true BreakConstructorInitializers: BeforeColon BreakAfterJavaFieldAnnotations: false BreakStringLiterals: true ColumnLimit: 120 CommentPragmas: '^ IWYU pragma:' CompactNamespaces: true ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false FixNamespaceComments: true ForEachMacros: - foreach - Q_FOREACH - BOOST_FOREACH IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 - Regex: '^(<|"(gtest|gmock|isl|json)/)' Priority: 3 - Regex: '.*' Priority: 1 IncludeIsMainRegex: '(Test)?$' IndentCaseLabels: true #IndentPPDirectives: None IndentWidth: 4 IndentWrappedFunctionNames: false KeepEmptyLinesAtTheStartOfBlocks: false MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Left #RawStringFormats: # - Delimiter: pb # Language: TextProto # BasedOnStyle: google ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: true SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 UseTab: Never ... 
================================================ FILE: .dockerignore ================================================ src/packages src/cmake-build-* build-scripts build resources .git .vscode CMakeFiles casparcg_server dist build src/cmake-build-* ================================================ FILE: .editorconfig ================================================ root = true [*] charset = utf-8 indent_style = space indent_size = 4 end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true [*.diff] trim_trailing_whitespace = false [*.patch] trim_trailing_whitespace = false [debian/rules] indent_style = tab ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.yaml ================================================ name: Bug report 🐛 description: Use this if you've found a bug title: "Bug: [Short description of the bug]" labels: - type/bug body: - type: markdown attributes: value: | Before you post, make sure to check for existing bug reports of the issue: https://github.com/CasparCG/server/issues?q=+is%3Aissue+label%3Atype%2Fbug - type: textarea attributes: label: Observed Behavior description: What happened? validations: required: true - type: textarea attributes: label: Expected Behavior description: What did you expect to happen? validations: required: true - type: textarea attributes: label: Steps to reproduce description: How can we reproduce the issue? value: | 1. 2. 3. ... validations: required: true - type: textarea attributes: label: Environment description: What version of CasparCG and OS are you using? value: | * Commit: [e.g. ab1234c] * Server version: [e.g. v2.2] * Operating system: [e.g. Windows 10] validations: required: true ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: Question or need help? url: https://casparcgforum.org/ about: Ask the community here. 
================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.yaml ================================================ name: Feature request description: Suggest an idea for this project labels: - type/enhancement body: - type: markdown attributes: value: | Before you post, make sure to check for existing matching feature requests: https://github.com/CasparCG/server/issues?q=+is%3Aissue+label%3Atype%2Fenhancement - type: textarea attributes: label: Description description: How should the feature/enhancement work? validations: required: true - type: textarea attributes: label: Solution suggestion description: If you have any suggestions on how the solution should work, add that here. ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" ================================================ FILE: .github/workflows/linux-system.yml ================================================ name: Build Linux with system dependencies on: push: # branches: [ "master" ] pull_request: branches: ["master"] jobs: build: name: ${{ matrix.container || matrix.os }} ${{ matrix.arch }} (${{matrix.family}}:${{ matrix.distribution }}) strategy: fail-fast: false matrix: include: - os: ubuntu-24.04 family: ubuntu distribution: noble arch: amd64 container: "" apt_prefix: sudo upload_artifacts: true - os: ubuntu-latest family: ubuntu distribution: resolute arch: amd64 container: ubuntu:26.04 apt_prefix: "" upload_artifacts: false - os: ubuntu-24.04-arm family: ubuntu distribution: resolute arch: arm64 container: ubuntu:26.04 apt_prefix: "" upload_artifacts: false - os: ubuntu-latest family: debian distribution: trixie arch: amd64 container: debian:trixie # Debian 13 apt_prefix: "" upload_artifacts: false # Because of CEF, this is not built correctly - os: ubuntu-latest family: debian distribution: sid arch: 
amd64 container: debian:sid # Debian Testing apt_prefix: "" upload_artifacts: false # Because of CEF, this is not built correctly - os: ubuntu-latest family: debian distribution: trixie arch: amd64 container: debian:trixie apt_prefix: "" upload_artifacts: false # Because of CEF, this is not built correctly - os: ubuntu-latest family: archlinux distribution: latest arch: amd64 container: archlinux:latest apt_prefix: "" upload_artifacts: false # Not supported runs-on: ${{ matrix.os }} container: ${{ matrix.container }} defaults: run: shell: bash steps: - name: Install git (archlinux) if: ${{ matrix.family == 'archlinux' }} run: pacman -Sy --noconfirm git - uses: actions/checkout@v6 - name: Setup environment id: setup-env if: ${{ matrix.family == 'ubuntu' || matrix.family == 'debian' }} run: | ln -s tools/linux/deb/${{ matrix.family }}-${{ matrix.distribution }}/debian ./ # Build a version number for this build GH_REF="${GITHUB_REF##*/}" GH_REF=$(echo "$GH_REF" | sed 's/[\/]/_/g' | sed 's/ /_/g') VERSION_MAJOR=$(grep -oPi 'set\(CONFIG_VERSION_MAJOR \K\d+' src/CMakeLists.txt) VERSION_MINOR=$(grep -oPi 'set\(CONFIG_VERSION_MINOR \K\d+' src/CMakeLists.txt) VERSION_PATCH=$(grep -oPi 'set\(CONFIG_VERSION_BUG \K\d+' src/CMakeLists.txt) BUILD_VERSION="${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}~${GH_REF}-${{ matrix.distribution }}+${GITHUB_SHA:0:7}" echo "BUILD_VERSION=$BUILD_VERSION" >> $GITHUB_OUTPUT echo "Building as $BUILD_VERSION" BUILD_FILENAME="casparcg-server-${BUILD_VERSION}" echo "BUILD_FILENAME=$BUILD_FILENAME" >> $GITHUB_OUTPUT # install some needed tooling $APT_PREFIX apt-get update $APT_PREFIX apt-get -y install build-essential dpkg-dev debhelper devscripts if [[ "${{ matrix.family }}" == "ubuntu" ]]; then $APT_PREFIX apt-get -y install software-properties-common $APT_PREFIX add-apt-repository ppa:casparcg/ppa fi # Put something in the changelog export DEBEMAIL="builds@casparcg.com" export DEBFULLNAME="CasparCG Builds" dch -v "$BUILD_VERSION" --create -D 
${{ matrix.distribution }} --package casparcg-server-beta "Build" # Install build dependencies $APT_PREFIX apt-get -y build-dep . if [[ "${{ matrix.family }}" == "ubuntu" ]]; then # update the control file to reference the current cef version CASPARCG_CEF_VER=$(dpkg-query -W -f='${Version}' casparcg-cef-142) sed -i "s/@CASPARCG_CEF_VER@/${CASPARCG_CEF_VER}/" debian/control # Download required packages cd .. apt-get download casparcg-cef-142=$CASPARCG_CEF_VER apt-get download casparcg-scanner fi env: CI: 1 DEBIAN_FRONTEND: noninteractive APT_PREFIX: ${{ matrix.apt_prefix }} - name: Run build if: ${{ matrix.family == 'ubuntu' || matrix.family == 'debian' }} run: | # Perform build debuild -b -uc -us env: CI: 1 - name: Collect artifacts id: artifacts if: ${{ (matrix.family == 'ubuntu' || matrix.family == 'debian') && matrix.upload_artifacts }} run: | mkdir -p dist mv ../*.deb dist/ # collect some docs for the zip cp README.md dist/ cp tools/linux/deb/INSTALLING dist/ # check if a release branch, or master, or a tag if [[ "${{ github.ref_name }}" == "master" || "${{ github.ref_name }}" == "2.3.x-lts" ]] then # Only proceed if we have an sftp password if [ -n "${{ secrets.SFTP_PASSWORD }}" ] then zip -r "${{ steps.setup-env.outputs.BUILD_FILENAME }}.zip" dist set -x eval $(ssh-agent -s) mkdir -v -m 700 $HOME/.ssh ssh-keyscan -H ${{ secrets.SFTP_HOST }} > $HOME/.ssh/known_hosts sshpass -p '${{ secrets.SFTP_PASSWORD }}' rsync -avvz --mkpath "${{ steps.setup-env.outputs.BUILD_FILENAME }}.zip" "${{ secrets.SFTP_USERNAME }}@${{ secrets.SFTP_HOST }}:${{ secrets.SFTP_ROOT }}/${{ github.ref_name }}/${{ steps.setup-env.outputs.BUILD_FILENAME }}.zip" fi fi env: CI: 1 - uses: actions/upload-artifact@v7 if: ${{ (matrix.family == 'ubuntu' || matrix.family == 'debian') && matrix.upload_artifacts }} with: name: ${{ steps.setup-env.outputs.BUILD_FILENAME }} path: dist - name: Setup environment (archlinux) if: ${{ matrix.family == 'archlinux' }} run: | pacman -Syu --noconfirm pacman -S 
--noconfirm --needed \ base-devel cmake ninja pkg-config \ boost boost-libs ffmpeg glew libglvnd \ onetbb openal sfml \ libxrandr simde env: CI: 1 - name: Run build (archlinux) if: ${{ matrix.family == 'archlinux' }} run: | cmake -B build -S src \ -DCMAKE_BUILD_TYPE=Release \ -DDIAG_FONT_PATH="/usr/share/fonts/liberation/LiberationMono-Regular.ttf" \ -DENABLE_HTML=OFF \ -G Ninja cmake --build build env: CI: 1 ================================================ FILE: .github/workflows/linux.yml ================================================ name: Build Linux on: push: # branches: [ "master" ] pull_request: branches: ["master"] jobs: build: runs-on: ubuntu-latest permissions: packages: write steps: - uses: actions/checkout@v6 - name: Run build run: | ./tools/linux/build-in-docker env: CI: 1 ================================================ FILE: .github/workflows/windows.yml ================================================ name: Build Windows on: push: # branches: [ "master" ] pull_request: branches: ["master"] jobs: build: runs-on: windows-2022 steps: - uses: actions/checkout@v6 - name: Download media-scanner uses: robinraju/release-downloader@v1.12 with: repository: "casparcg/media-scanner" latest: true fileName: "*-win32-x64.zip" tarBall: false zipBall: false out-file-path: "media-scanner" extract: true - name: Tidy media-scanner download shell: bash run: | rm media-scanner/*.zip - name: Run build run: | ./tools/windows/build.bat env: CI: 1 MEDIA_SCANNER_FOLDER: "..\\media-scanner" - name: Rename build id: "rename-build" shell: bash run: | TARGET=casparcg-server-${{ github.sha }}-windows.zip mv dist/casparcg_server.zip "$TARGET" echo "artifactname=$TARGET" >> $GITHUB_OUTPUT # check if a release branch, or master, or a tag if [[ "${{ github.ref_name }}" == "master" || "${{ github.ref_name }}" == "2.3.x-lts" ]] then # Only report if we have an sftp password if [ -n "${{ secrets.SFTP_PASSWORD }}" ] then echo "uploadname=$TARGET" >> $GITHUB_OUTPUT fi fi - uses: 
actions/upload-artifact@v7 with: path: ${{ steps.rename-build.outputs.artifactname }} archive: false - name: Copy single file to remote uses: garygrossgarten/github-action-scp@0.10.0 if: ${{ steps.rename-build.outputs.uploadname }} timeout-minutes: 5 with: local: "${{ steps.rename-build.outputs.uploadname }}" remote: "${{ secrets.SFTP_ROOT }}/${{ github.ref_name }}/${{ steps.rename-build.outputs.uploadname }}" host: ${{ secrets.SFTP_HOST }} username: ${{ secrets.SFTP_USERNAME }} password: ${{ secrets.SFTP_PASSWORD }} ================================================ FILE: .gitignore ================================================ *.dir *.vcxproj *.vcxproj.filters *.tlog *.sln .settings .vscode .idea .vs build dist CMakeFiles casparcg_server *.spv src/cmake-build-* src/.idea # debian build artifacts /debian /obj-* ================================================ FILE: BUILDING.md ================================================ # Building the CasparCG Server The CasparCG Server source code uses the CMake build system in order to easily generate build systems for multiple platforms. CMake is basically a build system for generating build systems. On Windows we can use CMake to generate a .sln file and .vcproj files. On Linux CMake can generate make files or ninja files. Qt Creator has support for loading CMakeLists.txt files directly. # Dependency caching CMake will automatically download some dependencies as part of the build process. These are taken from https://github.com/CasparCG/dependencies/releases (make sure to expand the 'Assets' group under each release to see the files), most of which are direct copies of distributions from upstream. 
During the build, you can specify the CMake option `CASPARCG_DOWNLOAD_MIRROR` to download from an alternate HTTP server (such as an internally hosted mirror), or `CASPARCG_DOWNLOAD_CACHE` to use a specific path on disk for the local cache of these files, by default a folder called `external` will be created inside the build directory to cache these files. If you want to be able to build CasparCG offline, you may need to manually seed this cache. You can do so by placing the correct tar.gz or zip into a folder and using `CASPARCG_DOWNLOAD_CACHE` to tell CMake where to find it. You can figure out which files you need by looking at each of the `ExternalProject_Add` function calls inside of [Bootstrap_Linux.cmake](./src/CMakeModules/Bootstrap_Linux.cmake) or [Bootstrap_Windows.cmake](./src/CMakeModules/Bootstrap_Windows.cmake). Some of the ones listed are optional, depending on other CMake flags. # Windows ## Building distributable 1. Install Visual Studio 2022. 2. Install 7-zip (https://www.7-zip.org/). 3. `git clone --single-branch --branch master https://github.com/CasparCG/server casparcg-server-master` 4. `cd casparcg-server-master` 5. `.\tools\windows\build.bat` 6. Copy the `dist\casparcg_server.zip` file for distribution ## Development using Visual Studio 1. Install Visual Studio 2022. 2. `git clone --single-branch --branch master https://github.com/CasparCG/server casparcg-server-master` 3. Open the cloned folder in Visual Studio. 4. Build All and ensure it builds successfully # Linux ## Building on your system We only officially support Ubuntu LTS releases, other distros may work but often run into build issues. We are happy to accept PRs to resolve these issues, but are unlikely to write fixes ourselves. We currently document two approaches to building CasparCG. The recommended way is to use the `deb` packaging we have in the repository, but we only provide that for Ubuntu LTS releases. 
Other deb based distros can work with some tweaks to one of those, other distros will need something else which is not documented here. We also provide a script to produce a build in docker, but this is not recommended unless absolutely necessary. The resulting builds are often rather brittle depending on where they are used. To perform a custom build, follow the Development steps below, and you may need to do some extra packaging steps, or install steps on the target systems. ### Building inside Docker 1. `git clone --single-branch --branch master https://github.com/CasparCG/server casparcg-server-master` 2. `cd casparcg-server-master` 3. `./tools/linux/build-in-docker` If all goes to plan, a docker image `casparcg/server` has been created containing CasparCG Server. ### Extracting CasparCG Server from Docker 1. `./tools/linux/extract-from-docker` You will then find a folder called `casparcg_server` which should contain everything you need to run CasparCG Server. _Note: if you ran docker with sudo, CasparCG server will not be able to run without sudo out of the box. For security reasons we do not recommend to run CasparCG with sudo. Instead you can use chown to change the ownership of the CasparCG Server folder._ ## Development Before beginning, check the build options section below, to decide if you want to use any to simplify or customise your build. 1. `git clone --single-branch --branch master https://github.com/CasparCG/server casparcg-server-master` 2. `cd casparcg-server-master` 3. Install dependencies, this can be done with `sudo ./tools/linux/install-dependencies` 4. If using system CEF (default & recommended), `sudo add-apt-repository ppa:casparcg/ppa` and `sudo apt-get install casparcg-cef-142-dev` 5. `mkdir build && cd build` 6. `cmake ../src` You can add any of the build options from below to this command 7. `cmake --build . --parallel` 8. `cmake --install . 
--prefix staging` If all goes to plan, a folder called 'staging' has been created with everything you need to run CasparCG server. ## Build options -DENABLE_HTML=OFF - useful if you lack CEF, and would like to build without that module. -DUSE_STATIC_BOOST=ON - (Linux only, default OFF) statically link against Boost. -DUSE_SYSTEM_CEF=OFF - (Linux only, default ON) use the version of CEF from your OS. This expects to be using builds from https://launchpad.net/~casparcg/+archive/ubuntu/ppa -DENABLE_AVX2=ON (Linux only, default OFF) Enable the AVX and AVX2 instruction sets (requires a CPU that supports it) -DDIAG_FONT_PATH - Specify an alternate path/font to use for the DIAG window. On linux, this will often want to be set to an absolute path of a font -DCASPARCG_BINARY_NAME=casparcg-server - (Linux only) generate the executable with the specified name. This also reconfigures the install target to be a bit more friendly with system package managers. ================================================ FILE: CHANGELOG.md ================================================ CasparCG 2.5.0 Stable ========================================== ### Core ##### Improvements * Initial support for HDR. This is limited to a subset of producers and consumers at this stage. 
* Build for Windows with VS2022 * Rework linux builds to produce ubuntu deb files * Update ffmpeg to 7.0 * Reimplement mixer transforms, to handle routes correctly * Support more pixel formats from ffmpeg, to preserve colour accuracy better * Support running on headless linux * Transitions: Additional behaviours ##### Fixes * Build with boost 1.85/1.86/1.87/1.88 * Build with ffmpeg 7.1 * Only produce mixed frames on channels which have consumers * Routed channels not compositing correctly when channel used a MIXER KEY * Handle audio for fractional framerates properly * Gracefully exit on SIGINT and SIGTERM ### Producers ##### Improvements * FFmpeg: Support loading with a scaling-mode, to configure how clips get fit into the channel * FFmpeg: Support more pixel formats without cpu conversion * FFmpeg: Enable alpha for webm videos * Image: Support loading with a scaling-mode, to configure how images get fit into the channel * Image: Replace freeimage with ffmpeg * HTML: Update CEF to 142 * HTML: Support audio ##### Fixes * Route: Use full field rate when performing i->p channel route * HTML: Gracefully handle page load errors * HTML: Always set cache path ### Consumers ##### Improvements * Screen: Set size and position from AMCP * Screen: Improve performance * Image: Propagate AMCP parameters from PRINT command * FFmpeg: Remove unnecessary forced conversion to YUVA422 * Decklink: Support explicit yuv output (requires AVX2) * Decklink: Allow selecting device by hardware persistent id ##### Fixes * FFmpeg: Correctly handle PTS on frame drop CasparCG 2.4.3 Stable ========================================== ### Core ##### Fixes * Improve error handling for invalid config files #1571 * Flush logs before exit #1571 * Check audio cadence values look sane before accepting format #1588 * Cross-channel routes from progressive to interlaced showing lots of black #1576 * Transition: ignoring some transforms of input frames #1602 ### Producers ##### Fixes * FFmpeg: fix crash on 
invalid frame header * Decklink: Crash with ffmpeg 7 #1582 * HTML: Fix crash during uninit on exit * Image: update state during init #1601 ### Consumers ##### Fixes * FFmpeg: set frame_rate for rtmp streams #1462 CasparCG 2.4.2 Stable ========================================== ### Consumers ##### Fixes * Decklink: fix support for driver 14.3 and later CasparCG 2.4.1 Stable ========================================== ### Core ##### Fixes * Fix bad config file examples * Fix `casparcg_auto_restart.bat` not starting scanner * Revert removal of tbbmalloc, due to notable performance loss on windows * Suppress some cmake build warnings * Build failure when doxygen installed on system * Build failures with ffmpeg 7.0 * Revert RPATH linking changes ### Producers ##### Fixes * FFmpeg: Ignore ndi:// urls * FFmpeg: Using both in and seek could result in incorrect duration * Route: Race condition during destruction * Image: Update freeimage on windows with some CVE fixes and failures with certain pngs * Image: Respect EXIF rotate flag * NDI: list local sources ### Consumers ##### Fixes * Decklink: subregion copy not respecting frame height * Decklink: subregion vertical offset * Decklink: subregion height limited with some formats CasparCG 2.4.0 Stable ========================================== ### Core ##### Improvements * Custom resolutions can be specified in casparcg.config * Interlaced mixer pipeline to ensure field accuracy * Preserve unicode characters in console input/output * Producers to be run at startup can be defined in casparcg.config * Support 8K frames * Support 4K DCI frames * Remove undocumented CII and CLK protocol implementations * Config parameter can be an absolute system path, not just relative to the working directory * AMCP: Add CLEAR ALL command * AMCP: Command batching syntax * AMCP: LOAD/LOADBG/PLAY commands accept a CLEAR_ON_404 parameter, to instruct the layer to be cleared when the requested file was not found * AMCP: Add commands to subscribe and 
unsubscribe to OSC on any port number * AMCP: Add CALLBG command to perform CALL on background producer * Build: Require C++17 for building * Build: Support newer versions of Boost * Build: Support newer versions of TBB * Build: Disable precompiled headers for linux * Build: Support VS2022 * Build: Replace nuget and locally committed dependencies with direct http downloads * Build: Allow configuring diag font path at build time * Linux: Support setting thread priorities * Linux: Initial ARM64 compatibility * Linux: Rework build to always use system boost * Linux: Rework build process to better support being built as a system package * Logging: add config option to disable logging to file and to disable column alignment * Transitions: Support additional audio fade properties for STING transition ##### Fixes * Crash upon exiting if HTML producer was running * AMCP: Ensure all consumers and producers are reported in `INFO` commands * AMCP: Deferred mixer operations were not being cleared after being applied * AMCP: `LOAD` command would show a frame or two of black while new producer was loading * OpenGL: Fix support for recent Linux drivers * Linux: Fix endless looping on stdin * Route: Fix error when clearing layer * Transitions: Fix wipe duration ### Producers ##### Improvements * Decklink: Require driver 11.0 or later * Decklink: Scale received frames on GPU * FFmpeg: Update to v5.1 * FFmpeg: Improve performance * FFmpeg: Allow specifying both SEEK and IN for PLAY commands * HTML: Update to CEF 117 * HTML: `CALL 1-10 RELOAD` to reload a renderer * HTML: Expose `cache-path` setting * NDI: Upgrade to NDI5 * System Audio: Allow specifying output device to use ##### Fixes * Decklink: Log spamming when using some input formats * FFmpeg: Prevent loading unreadable files * FFmpeg: Unable to play files with unicode filenames * FFmpeg: Don't lowercase filter parameters * FFmpeg: Support parameters with name containing a dash * HTML: media-stream permission denied * HTML: 
Expose angle backend config field, the best backend varies depending on the templates and machine * HTML: Crash when multiple iframes were loaded within a renderer * Image: Improve file loading algorithm to match the case insensitive and absolute path support already used by ffmpeg ### Consumers ##### Improvements * Artnet: New artnet consumer * Decklink: Configure device duplex modes in casparcg.config * Decklink: Output a subregion of the channel * Decklink: Add secondary outputs in a consumer, to ensure sync when used within a single card * iVGA: Remove consumer * NDI: Upgrade to NDI5 ##### Fixes * Decklink: Fix stutter when loading clips * FFmpeg: Fix RTMP streaming missing headers * NDI: dejitter CasparCG 2.3.3 LTS Stable ========================================== ### Producers ##### Improvements * Image Scroll Producer: Ported from 2.1 CasparCG 2.3.2 LTS Stable ========================================== ### Producers ##### Fixes * Packages: Update TBB library to v2021.1.1 - fixes CPU and memory growth when deleting threads * FFmpeg: Fix possible deadlock leading to producer not being cleaned up correctly CasparCG 2.3.2 Beta ========================================== ### Producers ##### Fixes * Packages: Update TBB library to v2021.1.1 - fixes CPU and memory growth when deleting threads * FFmpeg: Fix possible deadlock leading to producer not being cleaned up correctly CasparCG 2.3.1 Stable ========================================== ### Producers ##### Fixes * Flash: Use proper file urls when loading templates, to allow it to work after Flash Player EOL * FFmpeg: Various HTTP playback improvements CasparCG 2.3.0 Stable ========================================== ### Producers ##### Features * FFmpeg: Add more common file extensions to the supported list * NDI: Require minimum of NDI v4.0 ##### Fixes * HTML: Minimise performance impact on other producers CasparCG 2.3.0 RC ========================================== ### Producers ##### Features * Flash: Disable by 
default, requires enabling in the config file * FFmpeg: Remove fixed thread limit to better auto select a number ##### Fixes * Decklink: Downgrade severity of video-format not supported * FFmpeg: Correctly handle error codes. Ignore exit errors during initialisation * Route: Detect circular routes and break the loop ### Consumers ##### Features * Bluefish: Various improvements including support for Kronos K8 ### General ##### Fixes * Diag not reflecting channel videoformat changes CasparCG 2.3.0 Beta 1 ========================================== ### Producers ##### Features * Decklink: Detect and update input format when no format is specified in AMCP * Decklink: Improve performance (gpu colour conversion & less heavy deinterlacing when possible) * Decklink: `LOAD DECKLINK` will display live frames instead of black * FFmpeg: Update to 4.2.2 * HTML: Better performance for gpu-enabled mode * HTML: `window.remove()` has been partially reimplemented * NDI: Native NDI producer * Route: Allow routing first frame of background producer * Route: zero delay routes when within a channel, with 1 frame when cross-channel * Transition: Add sting transitions * Add frames_left field to osc/info for progress towards autonext ##### Fixes * Colour: parsing too much of amcp string as list of colours * FFmpeg: Always resample clips to 48khz * FFmpeg: Ensure frame time reaches the end of the clip * FFmpeg: RTMP stream playback * FFmpeg: SEEK and LENGTH parameters causing issues with AUTONEXT * FFmpeg: Ensure packets/frames after the decided end of the clip are not displayed * FFmpeg: Incorrect seek for audio when not 48khz * FFmpeg: Some cases where it would not be destroyed if playing a bad stream * HTML: unlikely but possible exception when handling frames * HTML: set autoplay-policy * HTML: animations being ticked too much * Route: Sending empty frame into a route would cause the destination to reuse the last frame ### Consumers ##### Features * Audio: Fix audio crackling * Audio: 
Fix memory leak * Bluefish: Various improvements including supporting more channels and UHD. * NDI: Native NDI consumer * Screen: Add side by side key output * Screen: Add support for Datavideo TC-100/TC-200 ##### Fixes * Decklink: Tick channel at roughly consistent rate when running interlaced output * Possible crash when adding/removing consumers ### General ##### Features * Add mixer colour invert property * Restore `INFO CONFIG` and `INFO PATHS` commands * Linux: Update docker images to support running in docker (not recommended for production use) ##### Fixes * NTSC audio cadence * Ignore empty lines in console input * Fix building with clang on linux * Fix building with vs2019 * Better error when startup fails due to AMCP port being in use * Backslash is a valid trailing slash for windows CasparCG 2.2.0 ========================================== General ------- * C++14 * Major refactoring, cleanup, optimization and stability improvements. * Removed unmaintained documentation API. * Removed unmaintained program options API. * Removed unused frame age API. * Removed misc unused and/or unmaintained APIs. * Removed TCP logger. * Fixed memory leak in transition producer. * Removed PSD Producer (moved to 3.0.0). * Removed Text Producer (moved to 3.0.0). * Removed SyncTo consumer. * Removed channel layout in favor of 8 channel passthrough and FFMPEG audio filters. * Major stability and performance improvements of GPU code. * Requires OpenGL 4.5. * Repo cleanup (>2GB => <100MB when cloning). * Misc cleanup and fixes. Build ----- * Linux build redone with Docker. * Windows build redone with Nuget. HTML ---- * Updated to Chromium 63 (Julusian). * Allow running templates from arbitrary urls (Julusian). DECKLINK -------- * Fixed broken Linux. * Misc cleanup and fixes. * Complex FFMPEG filters (VF, AF). MIXER ----- * Performance improvements. * Removed straight output (moved to 3.0.0). * Proper OpenGL pipelining. * Blend modes are always enabled. 
* Misc cleanup and fixes. * Removed CPU mixer. * Mixer always runs in progressive mode. Consumers are expected to convert to interlaced if required. IMAGE ----- * Correctly apply alpha to base64 encoded pngs from AMCP (Julusian). * Unmultiply frame before writing to png (Julusian). * Removed scroll producer (moved to 3.0.0) ROUTE ----- * Reimplemented, simplified. * Cross channel routing will render full stage instead of simply copying channel output. * Reduced overhead and latency. FFMPEG ------ * Rewritten from scratch for better accuracy, stability and performance. * Update freezed frame during seeking. * FFMPEG 3.4.1. * Reduce blocking during initialization. * Fixed timestamp handling. * Fixed V/A sync. * Fixed interlacing. * Fixed framerate handling. * Fixed looping. * Fixed seeking. * Fixed duration. * Audio resampling to match timestamps. * Fixed invalid interlaced YUV (411, 420) handling. * Added YUV(A)444. * Added IO timeout. * Added HTTP reconnect. * FFMPEG video filter support. * FFMPEG audio filter support. * Complex FFMPEG filters (VF, AF). * CALL SEEK return actually sought value. * All AMCP options are based on channel format. * Misc improvements, cleanup and fixes. Bluefish -------- * Misc cleanup and fixes. OAL ------------ * Added audio sample compensation to avoid audio distortions during time drift. * Misc cleanup and fixes. Screen --------------- * Proper OpenGL pipelining. * Misc cleanup and fixes. AMCP ---- * Added PING command (Julusian). * Removed INFO commands in favor of OSC. * Moved CLS, CINF, TLS, FLS, TLS, THUMBNAIL implementations into a separate NodeJS service which is proxied through an HTTP API. * Misc cleanup and fixes. CasparCG 2.1.0 Next (w.r.t 2.1.0 Beta 2) ========================================== General ------- * Removed asmlib dependency in favor of using standard library std::memcpy and std::memset, because of better performance. 
CasparCG 2.1.0 Beta 2 (w.r.t 2.1.0 Beta 1) ========================================== General ------- * Fail early with clear error message if configured paths are not creatable/writable. * Added backwards compatibility (with deprecation warning) for using thumbnails-path instead of thumbnail-path in casparcg.config. * Suppress the logging of full path names in stack traces so that only the relative path within the source tree is visible. * General stability improvements. * Native thread id is now logged in Linux as well. Finally they are mappable against INFO THREADS, ps and top. * Created automatically generated build number, so that it is easier to see whether a build is newer or older than another. * Changed configuration element mipmapping_default_on to mipmapping-default-on for consistency with the rest of the configuration (Jesper Stærkær). * Handle stdin EOF as EXIT. * Added support for RESTART in Linux startup script run.sh. * Copy casparcg_auto_restart.bat into Windows releases. * Fixed bug with thumbnail generation when there are .-files in the media folder. * Removed CMake platform specification in Linux build script (Krzysztof Pyrkosz). * Build script for building FFmpeg for Linux now part of the repository. Contributions during development (not w.r.t 2.1.0 Beta 1): * Fix ffmpeg build dependencies on clean Ubuntu desktop amd64 14.04.3 or higher (Walter Sonius). * Added support for video modes 2160p5000, 2160p5994 and 2160p6000 (Antonio Ruano Cuesta). * Fixed serious buffer overrun in FFmpeg logging code. Consumers --------- * FFmpeg consumer: * Fixed long overdue bug where HD material was always recorded using the BT.601 color matrix instead of the BT.709 color matrix. RGB codecs like qtrle were never affected but all the YCbCr based codecs were. * Fixed bug in parsing of paths containing -. * Fixed bugs where previously effective arguments like -pix_fmt were ignored. * Fixed bug where interlaced channels were not recorded correctly for some codecs. 
* DeckLink consumer: * Rewrote the frame hand-off between send() and ScheduledFrameCompleted() in a way that hopefully resolves all dead-lock scenarios previously possible. * Bluefish consumer: * Largely rewritten against newest SDK Driver 5.11.0.47 (Satchit Nambiar and James Wise sponsored by Bluefish444): * Added support for Epoch Neutron and Supernova CG. All current Epoch cards are now supported. * Added support for multiple SDI channels per card. 1 to 4 channels per Bluefish444 card depending on model and firmware. * Added support for single SDI output, complementing existing external key output support. * Added support for internal key using the Bluefish444 hardware keyer. * Screen consumer: * Fixed full screen mode. Producers --------- * FFmpeg producer: * Increased the max number of frames that audio/video can be badly interleaved with (Dimitry Ishenko). * Fixed bug where decoders sometimes require more than one video packet to decode the first frame. * Added support for IN and OUT parameters (Dimitry Ishenko). * Added DV/HDV video device support under Linux (Walter Sonius). * Remove unused flags variable in queued_seek (Dimitry Ishenko). * Now recognizes .ts files without probing contents (Ovidijus Striaukas). * Fixed uninitialized value causing initial log printout to usually say that clips are interlaced when they are not. * Destroy producer proxy: * Created workaround for bug in FFmpeg where every new thread used to cleanup caused handles to leak (not sure why). Reduced the effect by using only one thread for all producer destructions. * Framerate producer: * Fixed bug when INFO was used on a not yet playing framerate producer. * HTML producer: * Fixed bug where only URLs with . in them were recognized. * Image producer: * Added LENGTH parameter to allow for queueing with LOADBG AUTO. * Fixed inconsistency in what file extensions are supported vs listed in CLS/CINF. 
* Layer producer: * Fixed serious bug where a circular reference of layer producers caused a stack overflow and server crash. * Can now route from layer on a channel with an incompatible framerate. * Channel producer: * Can now route from channel with an incompatible framerate. * Deinterlaces interlaced content from source channel. * Added optional NO_AUTO_DEINTERLACE parameter to opt out of the mentioned deinterlacing. * Scene producer: * Added abs(), floor(), to_lower(), to_upper() and length() functions to the expression language. * Created XML Schema for the *.scene XML format. Allows for IDE-like auto-completion, API documentation and validation. * Added possibility to specify the width and height of a layer instead of letting the producer on the layer decide. * Added global variables scene_width, scene_height and fps. * Made it possible to use expressions in keyframe values. * Fixed serious bug where uninitialized values were used. * Created more example scenes. * Can now forward CALL, CG PLAY, CG STOP, CG NEXT and CG INVOKE to the producer on a layer. * CG proxy wrapper producer: * New in 2.1.0. * Allows all CG producers to be used as an ordinary producer inside a layer in a scene. * Allows the Scene producer to know what variables are available in a template. * Color producer: * Now has support for gradients. * PSD producer: * Added support for centered and right justified text. * Text producer: * Fixed bug where tracking contributed to the overall text width on the last character. Mixer ----- * Fixed bug in the contrast/saturation/brightness code where the wrong luma coefficients were used. * Rewrote the chroma key code to support variable hue, instead of fixed green or blue. Threshold setting was removed in favour of separate hue width, minimum saturation and minimum brightness constraints. Also a much more effective spill suppression method was implemented. 
* Fixed bug where glReadPixels() was done from the last drawn to texture instead of always from the target texture. This means that for example a MIXER KEYER layer without a layer above to key, as well as a separate alpha file with MIXER OPACITY 0 now works as expected. * Fixed bug where already drawn GL_QUADS were not composited against, causing for example italic texts to be rendered incorrectly in the text_producer. AMCP ---- * INFO PATHS now adds all the path elements even if they are using the default values. * MIXER CHROMA syntax deprecated (still supported) in favour of the more advanced syntax required by the rewritten chroma key code. * Added special command REQ that can be prepended before any command to identify the response with a client specified request id, allowing a client to know exactly what asynchronous response matched a specific request. * Added support for listing contents of a specific directory for CLS, TLS, DATA LIST and THUMBNAIL LIST. * Fixed bug where CINF only returned the first match. * Fixed bug where a client closing the connection after BYE instead of letting the server close the connection caused an exception to be logged. CasparCG 2.1.0 Beta 1 (w.r.t 2.0.7 Stable) ========================================== General ------- * 64 bit! * Linux support! * Moved to CMake build system for better platform independence. * Contributions before build system switch (not w.r.t 2.0.7 Stable): * gitrev.bat adaptions for 2.1 (Thomas Kaltz III). * Thanks to our already heavy use of the pimpl idiom, abstracting platform specifics was easily done by having different versions of the .cpp files included in the build depending on target platform. No #ifdef necessary, except for in header only platform specific code. * Flash, Bluefish and NewTek modules are not ported to the Linux build. * Contributions during development (not w.r.t 2.0.7 Stable): * Fixed compilation problems in Linux build (Dimitry Ishenko). 
* Fixed compilation problem in GCC 5 (Krzysztof Pyrkosz). * Fixed thumbnail image saving on Linux (Krzysztof Pyrkosz). * Fixed compilation problem in PSD module (Krzysztof Pyrkosz). * Major code refactoring: * Mixer abstraction so different implementations can be created. Currently CPU mixer and GPU mixer (previously the usage of the GPU was mandatory) exist. * Flattened folder structure for easier inclusion of header files. * Many classes renamed to better describe the abstractions they provide. * Sink parameters usually taken by value and moved into place instead of taken by const reference as previously done. * Old Windows specific AsyncEventServer class has been replaced by platform independent implementation based on Boost.Asio. * Pimpl classes are now stack allocated with internal shared_ptr to implementation, instead of both handle and body being dynamically allocated. This means that objects are now often passed by value instead of via safe_ptr/shared_ptr, because they are internally reference counted. * Protocol strategies are now easier to implement correctly, because of separation of state between different client connections. * Complete AMCP command refactoring. * On-line help system that forces the developer to document AMCP commands, producer syntaxes and consumer syntaxes making the documentation coupled to the code, which is great. * Added missing help for VERSION command (Jesper Stærkær). * Upgraded Windows build to target Visual Studio 2015 making it possible to use the C++11 features also supported by GCC 4.8 which is targeted on Linux. * Fixed compilation problems in Visual Studio 2015 Update 1 (Roman Tarasov) * Created abstraction of the different forms of templates (flash, html, psd and scene). Each module registers itself as a CG producer provider. All CG commands transparently work with all of them. * Audio mixer now uses double samples instead of float samples to fully accommodate all int32 samples. 
* Reduced coupling between core and modules (and modules and modules): * Modules can register system info providers to contribute to INFO SYSTEM. * XML configuration factories for adding support for new consumer elements in casparcg.config. * Server startup hooks can be registered (used by HTML producer to fork its sub process). * Version providers can contribute content to the VERSION command. * Refactored multichannel audio support to use FFmpeg's PAN filter and simplified the configuration a lot. * Upgraded most third party libraries we depend on. * Some unit tests have been created. * Renamed README.txt to README, CHANGES.txt to CHANGELOG and LICENSE.txt to LICENSE * Created README.md for github front page in addition to README which is distributed with builds. * README file updates (Jonas Hummelstrand). * Created BUILDING file describing how to build the server on Windows and Linux. * Diagnostics: * Now also sent over OSC. * Diag window is now scrollable and without squeezing of graphs. * Contextual information such as video channel and video layer now included in graphs. * Logging: * Implemented a TCP server, simply sending every log line to each connected client. Default port is 3250. * Changed default log level to info and moved debug statements that are interesting in a production system to info. * Try to not log full stack traces when user error is the cause. Stacktraces should ideally only be logged when a system error or a programming error has occurred. * More contextual information about an error added to exceptions. An example of this is that XML configuration errors now cause the XPath of the error to be logged. * Improved the readability of the log format. * Added optional calltrace.log for logging method calls. Allows for trace logging to be enabled while calltracing is disabled etc. OSC --- * Improved message formatting performance. * Added possibility to disable sending OSC to connected AMCP clients. 
* Fixed inconsistent element name predefined_client to predefined-client in casparcg.config (Krzysztof Pyrkosz). Consumers --------- * System audio consumer: * Pushes data to openal instead of being callbacked by SFML when data is needed. * Added possibility to specify the expected delay in the sound card. Might help get better consumer synchronization. * Screen consumer: * Added mouse interaction support, usable by the producers running on the video channel. * FFmpeg consumer: * Replaced by Streaming Consumer after it was adapted to support everything that FFmpeg Consumer did. * Added support for recording all audio channels into separate mono audio streams. * Now sends recording progress via OSC. * SyncTo consumer: * New in 2.1.0. * Allows the pace of a channel to follow another channel. This is useful for virtual "precomp" channels without a DeckLink consumer to pace it. * DeckLink consumer: * Added workaround for timescale bug found in Decklink SDK 10.7. * Now ScheduledFrameCompleted is no longer only used for video scheduling but for audio as well, simplifying the code a lot. * iVGA consumer: * No longer provides sync to the video channel. * Supports NewTek NDI out of the box just by upgrading the Processing.AirSend library. Producers --------- * Scene producer: * New in 2.1.0. * Utilizes CasparCG concepts such as producers, mixer transforms and uses them in a nested way to form infinite number of sub layers. Think movie clip in Flash. * A scene consists of variables, layers, timelines and marks (intro and outro for example). * Mostly for use by other producers but comes with a XML based producer that is a registered CG producer and shows up in TLS. * Enables frame accurate compositions and animations. * Has a powerful variable binding system (think expressions in After Effects or JavaFX Bindings). * PSD producer: * New in 2.1.0. * Parses PSD files and sets up a scene for the Scene producer to display. * Text layers based on CG parameters. 
* Supports Photoshop timeline. * Uses Photoshop comment key-frames to describe where intro and outro (CG PLAY and CG STOP) should be in the timeline. * Shows up as regular templates in TLS. * Text producer: * New in 2.1.0. * Renders text using FreeType library. * Is used by the PSD producer for dynamic text layers. * Image scroll producer: * Speed can be changed while running using a CALL. The speed change can be tweened. * Added support for an absolute end time so that the duration is calculated based on when PLAY is called, for shows when an exact end time is important. * Image producer: * Fixed bug where too large (OpenGL limit) images were accepted, causing problems during thumbnail generation. * Framerate producer: * New in 2.1.0. * Wraps a producer with one framerate and converts it to another. It is not usable on its own but is utilized in the FFmpeg producer and the DeckLink consumer. * Supports different interpolation algorithms. Currently a no-op drop-and-repeat mode and two different frame blending modes. * It also supports changing the speed on demand with tweening support. * FFmpeg producer: * Supports decoding all audio streams from a clip. Useful with .mxf files which usually have separate mono streams for every audio channel. * No longer does framerate conversion (half or double), but delegates that task to the Framerate producer. * Added support for v4l2 devices. * Added relative and "from end" seeking (Dimitry Ishenko). * Contributions during development (not w.r.t 2.0.7 Stable): * Fixed 100% CPU problem on clip EOF (Peter Keuter, Robert Nagy). * Constrained SEEK within the length of a clip (Dimitry Ishenko). * Fixed a regular expression (Dimitry Ishenko). * DeckLink producer: * No longer does framerate conversion (half or double), but delegates that task to the Framerate producer. * Route producer: * Added possibility to delay frames routed from a layer or a channel. * HTML Producer: * Disabled web security in HTML Producer (Robert Nagy). 
* Reimplemented requestAnimationFrame handling in Javascript instead of C++. * Implemented cancelAnimationFrame. * Increased animation smoothness in HTML Producer with interlaced video modes. * Added remote debugging support. * Added mouse interaction support by utilizing the Screen consumer's new interaction support. * Flash Producer: * Contributions during development (not w.r.t 2.0.7 Stable): * Workaround for flickering with high CPU usage and CPU accelerator (Robert Nagy) AMCP ---- * TLS has a new column for "template type" for clients that want to differentiate between html and flash for example. * SET CHANNEL_LAYOUT added to be able to change the audio channel layout of a video channel at runtime. * HELP command added for accessing the new on-line help system. * FLS added to list the fonts usable by the Text producer. * LOCK command added for controlling/gaining exclusive access to a video channel. * LOG CATEGORY command added to enable/disable the new log categories. * SWAP command now optionally supports swapping the transforms as well as the layers. * VERSION command can now provide CEF version. CasparCG Server 2.0.7 Stable (as compared to CasparCG Server 2.0.7 Beta 2) ========================================================================== General ------- * Added support for using a different configuration file at startup than the default casparcg.config by simply adding the name of the file to use as the first command line argument to casparcg.exe. * Upgraded FFmpeg to latest stable. * Created build script. * Fixed bug where both layer_producer and channel_producer displayed an empty/late first frame when the producer is called before the consumer at the other end has received the first frame. * Added rudimentary support for audio for layer_producer and channel_producer. * Upgraded DeckLink SDK to 10.1.4, bringing new 2K and 4K DCI video modes. New template hosts also available for those modes. 
* General bug fixes (mostly memory and resource leaks, some serious). * Updated Boost to version 1.57 * Frontend no longer maintained and therefore not included in the release. Mixer ----- * Added support for rotation. * Added support for changing the anchor point around which fill_translation, fill_scale and rotation will be done from. * Added support for perspective correct corner pinning. * Added support for mipmapped textures with anisotropic filtering for increased downscaling quality. Whether to enable by default can be configured in casparcg.config. * Added support for cropping a layer. Not the same as clipping. AMCP ---- * Added RESUME command to complement PAUSE. (Peter Keuter) * To support the new mixer features the following commands has been added: * MIXER ANCHOR -- will return or modify the anchor point for a layer (default is 0 0 for backwards compatibility). Example: MIXER 1-10 ANCHOR 0.5 0.5 ...for changing the anchor to the middle of the layer (a MIXER 1-10 FILL 0.5 0.5 1 1 will be necessary to place the layer at the same place on screen as it was before). * MIXER ROTATION -- will return or modify the angle of which a layer is rotated by (clockwise degrees) around the point specified by ANCHOR. * MIXER PERSPECTIVE -- will return or modify the corners of the perspective transformation of a layer. One X Y pair for each corner (order upper left, upper right, lower right and lower left). Example: MIXER 1-10 PERSPECTIVE 0.4 0.4 0.6 0.4 1 1 0 1 * MIXER MIPMAP -- will return or modify whether to enable mipmapping of textures produced on a layer. Only frames produced after a change will be affected. So for example image_producer will not be affected while the image is displayed. * MIXER CROP -- will return or modify how textures on a layer will be cropped. One X Y pair each for the upper left corner and for the lower right corner. * Added INFO QUEUES command for debugging AMCP command queues. 
Useful for debugging command queue overflows, where a command is deadlocked. Hopefully always accessible via console, even though the TCP command queue may be full. * Added GL command: - GL INFO prints information about device buffers and host buffers. - GL GC garbage collects pooled but unused GL resources. * Added INFO THREADS command listing the known threads and their descriptive names. Can be matched against the thread id column of log entries. Consumers --------- * Removed blocking_decklink_consumer. It was more like an experiment at best and its usefulness was questionable. * Added a 10 second time-out for consumer sends, to detect/recover from blocked consumers. * Some consumers which are usually added and removed during playout (for example ffmpeg_consumer, streaming_consumer and channel_consumer) no longer affect the presentation time on other consumers. Previously a lag on the SDI output could be seen when adding such consumers. HTML producer ------------- * No longer tries to play all files with a . in their name. (Georgi Chorbadzhiyski) * Reimplemented using CEF3 instead of Berkelium, which enables use of WebGL and more. CEF3 is actively maintained, which Berkelium is not. (Robert Nagy) * Implements a custom version of window.requestAnimationFrame which will follow the pace of the channel, for perfectly smooth animations. * No longer manually interlaces frames, to allow for mixer fill transforms without artifacts. * Now uses CEF3 event loop to avoid 100% CPU core usage. CasparCG Server 2.0.7 Beta 2 (as compared to CasparCG Server 2.0.7 Beta 1) ========================================================================== General ------- * Added sending of OSC messages for channel_grid channel in addition to regular channels. Producers --------- * FFmpeg: Reports correct nb_frames() when using SEEK (Thomas Kaltz III) * Flash: Fixed bug where CG PLAY, CG INVOKE did not work. 
Consumers --------- * channel_consumer: Added support for more than one channel_consumer per channel. * decklink_consumer: Added support for a single instance of the consumer to manage a separate key output for use with DeckLink Duo/Quad cards: 1 2 external_separate_device ...in the configuration will enable the feature. The value of defaults to the value of + 1. * synchronizing_consumer: Removed in favour of a single decklink_consumer managing both fill and key device. * streaming_consumer: A new implementation of ffmpeg_consumer with added support for streaming and other PTS dependent protocols. Examples: udp://localhost:5004 -vcodec libx264 -tune zerolatency -preset ultrafast -crf 25 -format mpegts -vf scale=240:180 ...in configuration or: ADD 1 STREAM udp://localhost:5004 -vcodec libx264 -tune zerolatency -preset ultrafast -crf 25 -format mpegts -vf scale=240:180 ...via AMCP. (Robert Nagy sponsored by Ericsson Broadcasting Services) * newtek_ivga_consumer: Added support for iVGA consumer to not provide channel sync even though connected. Useful for iVGA clients that downloads as fast as possible instead of in frame-rate pace, like Wirecast. To enable: false ...in config to not provide channel sync when connected. The default is true. AMCP ---- * Added support in ADD and REMOVE for a placeholder which will resolve to the connected AMCP client's IPV4 address. * Fixed bug where AMCP commands split into multiple TCP packets where not correctly parsed (http://casparcg.com/forum/viewtopic.php?f=3&t=2480) CasparCG Server 2.0.7 Beta 1 (as compared to 2.0.6 Stable) ========================================================== General ------- * FFmpeg: Upgraded to master and adapted CasparCG to FFmpeg API changes (Robert Nagy sponsored by SVT) * FFmpeg: Fixed problem with frame count calculation (Thomas Kaltz III) * Fixed broken CG UPDATE. 
Producers --------- * New HTML producer has been created (Robert Nagy sponsored by Flemish Radio and Television Broadcasting Organization, VRT) CasparCG Server 2.0.6 Stable (as compared to 2.0.4 Stable) ========================================================== General ------- * iVGA: Allow for the server to work without Processing.AirSend.x86.dll to prevent a possible GPL violation. It is available as a separate optional download. * iVGA: Only provide sync to channel while connected, to prevent channel ticking too fast. * FFmpeg: Fixed bug during deinterlace-bob-reinterlace where output fields were offset by one field in relation to input fields. * FFmpeg: Fixed bug in ffmpeg_consumer where an access violation occurred during destruction. * FFmpeg: Improved seeking. (Robert Nagy and Thomas Kaltz III) * Frontend: Only writes elements to casparcg.config which overrides a default value to keep the file as compact as possible. * System audio: Patched sfml-audio to work better with oal-consumer and therefore removed PortAudio as the system audio implementation and went back to oal. * Flash: Changed so that the initial buffer fill of frames is rendered at a frame-duration pace instead of as fast as possible. Otherwise time based animations render incorrectly. During buffer recovery, a higher paced rendering takes place, but still not as fast as possible, which can cause animations to be somewhat incorrectly rendered. This is the only way though if we want the buffer to be able to recover after depletion. * Fixed race condition during server shutdown. * OSC: outgoing audio levels from the audio mixer for each audio channel is now transmitted (pFS and dBFS). (Thomas Kaltz III) * Stage: Fixed bug where tweened transforms were only ticked when a corresponding layer existed. * Screen consumer: Added borderless option and correct handling of name option. (Thomas Kaltz III) * AMCP: CLS now reports duration and framerate for MOVIE files were information is possible to extract. 
(Robert Nagy) * Version bump to keep up with CasparCG Client version. CasparCG Server 2.0.4 Stable (as compared to 2.0.4 Beta 1) ========================================================== General ------- * Can now open media with file names that only consist of digits. (Cambell Prince) * Miscellaneous stability and performance improvements. Video mixer ----------- * Conditional compilation of chroma key support and straight alpha output support in shader (just like with blend-modes) because of performance impact even when not in use on a layer or on a channel. New element added to configuration for turning on mixer features that not everybody would want to pay for (performance-wise.) blend-modes also moved into this element. * Fixed bug where MIXER LEVELS interpreted arguments in the wrong order, so that gamma was interpreted as max_input and vice versa. Consumers --------- * Added support for NewTek iVGA, which enables the use of CasparCG Server fill+key output(s) as input source(s) to a NewTek TriCaster without requiring video card(s) in the CasparCG Server machine, or taking up inputs in the TriCaster. element in config enables iVGA on a channel. (Robert Nagy sponsored by NewTek) * DeckLink: Created custom decklink allocator to reduce the memory footprint. * Replaced usage of SFML for with PortAudio, because of problems with SFML since change to static linkage. Also PortAudio seems to give lower latency. Producers --------- * FFmpeg: Added support for arbitrary FFmpeg options/parameters in ffmpeg_producer. (Cambell Prince) * Flash: Flash Player 11.8 now tested and fully supported. * Flash: No longer starts a Flash Player to service CG commands that mean nothing without an already running Flash Player. * Flash: globally serialize initialization and destruction of Flash Players, to avoid race conditions in Flash. * Flash: changed so that the Flash buffer is filled with Flash Player generated content at initialization instead of empty frames. 
OSC --- * Performance improvements. (Robert Nagy sponsored by Boffins Technologies) * Never sends old values to OSC receivers. Collects the latest value of each path logged since last UDP send, and sends the new UDP packet (to each subscribing OSC receiver) with the values collected. (Robert Nagy sponsored by Boffins Technologies) * Batches as many OSC messages as possible in an OSC bundle to reduce the number of UDP packets sent. Breakup into separate packages if necessary to avoid fragmentation. (Robert Nagy sponsored by Boffins Technologies) * Removed usage of Microsoft Agents library (Server ran out of memory after a while) in favour of direct synchronous invocations. CasparCG Server 2.0.4 Beta 1 (as compared to 2.0.3 Stable) ========================================================== General ------- * Front-end GUI for simplified configuration and easy access to common tasks. (Thomas Kaltz III and Jeff Lafforgue) * Added support for video and images file thumbnail generation. By default the media directory is scanned every 5 seconds for new/modified/removed files and thumbnails are generated/regenerated/removed accordingly. * Support for new video modes: 1556p2398, 1556p2400, 1556p2500, 2160p2398, 2160p2400, 2160p2500, 2160p2997 and 2160p3000. * Experimental ATI graphics card support by using static linking against SFML instead of dynamic. Should improve ATI GPU support, but needs testing. * Added support for playback and pass-through of up to 16 audio channels. See http://casparcg.com/forum/viewtopic.php?f=3&t=1453 for more information. * Optimizations in AMCP protocol implementations for large incoming messages, for example base64 encoded PNG images. * Logging output now includes milliseconds and has modified format: YYYY-MM-DD hh:mm:ss.zzz * Improved audio playback with 720p5994 and 720p6000 channels. * An attempt to improve output synchronization of consumers has been made. 
Use for example: 1 true 2 true ...to instruct the server to keep both DeckLink consumers in sync with each other. Consider this experimental, so don't wrap everything in unless synchronization of consumer outputs is needed. For synchronization to be effective all synchronized cards must have genlock reference signal connected. * Transfer of source code and issue tracker to github. (Thomas Kaltz III) Layer ----- * Fixed a problem where the first frame was not always shown on LOAD. (Robert Nagy) Stage ----- * Support for layer consumers for listening to frames coming out of producers. (Cambell Prince) Audio mixer ----------- * Added support for a master volume mixer setting for each channel. Video mixer ----------- * Added support for chroma keying. (Cambell Prince) * Fixed bug where MIXER CONTRAST set to < 1 can cause transparency issues. * Experimental support for straight alpha output. Consumers --------- * Avoid that the FFmpeg consumer blocks the channel output when it can't keep up with the frame rate (drops frames instead). * Added support for to create a separate key and fill file when recording with the FFmpeg consumer. Add the SEPARATE_KEY parameter to the FFmpeg consumer parameter list. The key file will get the _A file name suffix to be picked up by the separated_producer when doing playback. * The Image consumer now writes to the media folder instead of the data folder. * Fixed bug in DeckLink consumer where we submit too few audio samples to the driver when the video format has a frame rate > 50. * Added another experimental DeckLink consumer implementation where scheduled playback is not used, but a similar approach as in the bluefish consumer where we wait for a frame to be displayed and then display the next frame. It is configured via a consumer element. The benefits of this consumer is lower latency and more deterministic synchronization between multiple instances (should not need to be wrapped in a element when separated key/fill is used). 
Producers --------- * Added support for playing .swf files using the Flash producer. (Robert Nagy) * Image producer premultiplies PNG images with their alpha. * Image producer can load a PNG image encoded as base64 via: PLAY 1-0 [PNG_BASE64] * FFmpeg producer can now use a directshow input filters: PLAY 1-10 "dshow://video=Some Camera" (Cambell Prince, Julian Waller and Robert Nagy) * New layer producer which directs the output of a layer to another layer via a layer consumer. (Cambell Prince) AMCP ---- * The master volume feature is controlled via the MASTERVOLUME MIXER parameter. Example: MIXER 1 MASTERVOLUME 0.5 * THUMBNAIL LIST/RETRIEVE/GENERATE/GENERATE_ALL command was added to support the thumbnail feature. * ADD 1 FILE output.mov SEPARATE_KEY activates the separate key feature of the FFmpeg consumer creating an additional output_a.mov containing only the key. * Added KILL command for shutting down the server without console access. * Added RESTART command for shutting down the server in the same way as KILL except that the return code from CasparCG Server is 5 instead of 0, which can be used by parent process to take other actions. The 'casparcg_auto_restart.bat' script restarts the server if the return code is 5. * DATA RETRIEVE now returns linefeeds encoded as an actual linefeed (the single character 0x0a) instead of the previous two characters: \ followed by n. * MIXER CHROMA command added to control the chroma keying. Example: MIXER 1-1 CHROMA GREEN|BLUE 0.10 0.04 (Cambell Prince) * Fixed bug where MIXER FILL overrides any previous MIXER CLIP on the same layer. The bug-fix also has the side effect of supporting negative scale on MIXER FILL, causing the image to be flipped. * MIXER STRAIGHT_ALPHA_OUTPUT added to control whether to output straight alpha or not. * Added INFO DELAY and INFO - DELAY commands for showing some delay measurements. * PLAY 1-1 2-10 creates a layer producer on 1-1 redirecting the output of 2-10. 
(Cambell Prince) OSC --- * Support for sending OSC messages over UDP to either a predefined set of clients (servers in the OSC sense) or dynamically to the ip addresses of the currently connected AMCP clients. (Robert Nagy sponsored by Boffins Technologies) * /channel/[1-9]/stage/layer/[0-9] * always /paused [paused or not] * color producer /color [color string] * ffmpeg producer /profiler/time [render time] [frame duration] * ffmpeg producer /file/time [elapsed seconds] [total seconds] * ffmpeg producer /file/frame [frame] [total frames] * ffmpeg producer /file/fps [fps] * ffmpeg producer /file/path [file path] * ffmpeg producer /loop [looping or not] * during transitions /transition/frame [current frame] [total frames] * during transitions /transition/type [transition type] * flash producer /host/path [filename] * flash producer /host/width [width] * flash producer /host/height [height] * flash producer /host/fps [fps] * flash producer /buffer [buffered] [buffer size] * image producer /file/path [file path] CasparCG Server 2.0.3 Stable (as compared to 2.0.3 Alpha) ========================================================= Stage ----- * Fixed dead-lock that can occur with multiple mixer tweens. (Robert Nagy) AMCP ---- * DATA STORE now supports creating folders of path specified if they does not exist. (Jeff Lafforgue) * DATA REMOVE command was added. (Jeff Lafforgue) CasparCG Server 2.0.3 Alpha (as compared to 2.0 Stable) ======================================================= General ------- * Data files are now stored in UTF-8 with BOM. Latin1 files are still supported for backwards compatibility. * Commands written in UTF-8 to log file but only ASCII characters to console. 
* Added supported video formats: * 720p2398 (not supported by DeckLink) * 720p2400 (not supported by DeckLink) * 1080p5994 * 1080p6000 * 720p30 (not supported by DeckLink) * 720p29.976 (not supported by DeckLink) CLK --- * CLK protocol implementation can now serve more than one connection at a time safely. * Added timeline support to the CLK protocol. * Refactored parts of the CLK parser implementation. Consumers --------- * Consumers on same channel now invoked asynchronously to allow for proper sync of multiple consumers. * System audio consumer: * no longer provides sync to the video channel. * Screen consumer: * Support for multiple screen consumers on the same channel * No longer spin-waits for vsync. * Now deinterlaces to two separate frames so for example 50i will no longer be converted to 25p but instead to 50p for smooth playback of interlaced content. * DeckLink consumer now logs whether a reference signal is detected or not. Producers --------- * Image scroll producer: * Field-rate motion instead of frame-rate motion with interlaced video formats. This can be overridden by giving the PROGRESSIVE parameter. * SPEED parameter now defines pixels per frame/field instead of half pixels per frame. The scrolling direction is also reversed so SPEED 0.5 is the previous equivalent of SPEED -1. Movements are done with sub-pixel accuracy. * Fixed incorrect starting position of image. * Rounding error fixes to allow for more exact scrolling. * Added support for motion blur via a new BLUR parameter * Added PREMULTIPLY parameter to support images stored with straight alpha. CasparCG Server 2.0 Stable (as compared to Beta 3) ================================================== General ------- * Misc stability and performance fixes. Consumers --------- * File Consumer * Changed semantics to more closely follow FFmpeg (see forums). * Added options, -r, -acodec, -s, -pix_fmt, -f and more. * Screen Consumer * Added vsync support. 
CasparCG Server 2.0 Beta 3 (as compared to Beta 1) ================================================== Formats ------- * ProRes Support * Both encoding and decoding. * NTSC Support * Updated audio-pipeline for native NTSC support. Previous implementation did not fully support NTSC audio and could cause incorrect behaviour or even crashes. Consumers --------- * File Consumer added * See updated wiki or ask in forum for more information. * Should support anything FFmpeg supports. However, we will work mainly with DNxHD, PRORES and H264. - Key-only is not supported. * Bluefish Consumer * 24 bit audio support. - Embedded-audio does not work with Epoch cards. * DeckLink Consumer * Low latency enabled by default. * Added graphs for driver buffers. * Screen Consumer * Changed screen consumer square PAL to the more common wide-square PAL. * Can now be closed. * Fixed interpolation artifacts when running non-square video-modes. * Automatically deinterlace interlaced input. Producers --------- * DeckLink Producer * Improved color quality be avoiding unnecessary conversion to BGRA. * FFMPEG Producer * Fixed missing alpha for (RGB)A formats when deinterlacing. * Updated buffering to work better with files with long audio/video interleaving. * Seekable while running and after reaching EOF. CALL 1-1 SEEK 200. * Enable/disable/query looping while running. CALL 1-1 LOOP 1. * Fixed bug with duration calculation. * Fixed bug with fps calculation. * Improved auto-transcode accuracy. * Improved seeking accuracy. * Fixed bug with looping and LENGTH. * Updated to newer FFmpeg version. * Fixed incorrect scaling of NTSC DV files. * Optimized color conversion when using YADIF filters. * Flash Producer * Release Flash Player when empty. * Use native resolution TemplateHost. * TemplateHosts are now chosen automatically if not configured. The TemplateHost with the corresponding video-mode name is now chosen. * Use square pixel dimensions. 
AMCP ---- * When possible, commands will no longer wait for rendering pipeline. This reduces command execution latencies, especially when sending a lot of commands in a short timespan. * Fixed CINF command. * ADD/REMOVE no longer require subindex, e.g. "ADD 1 SCREEN" / "REMOVE 1 SCREEN" instead of "ADD 1-1 SCREEN" / ... * PARAM is renamed to CALL. * STATUS command is replaced by INFO. * INFO command has been extended: * INFO (lists channels). * INFO 1 (channel info). * INFO 1-1 (layer info). * INFO 1-1 F (foreground producer info). * INFO 1-1 B (background producer info). * INFO TEMPLATE mytemplate (template meta-data info, e.g. field names). * CG INFO command has been extended. * CG INFO 1 (template-host information, e.g. what layers are occupied). Mixer ----- * Fixed alpha with blend modes. * Automatically deinterlace for MIXER FILL commands. Channel ------- * SET MODE now reverts back to old video-mode on failure. Diagnostics ----------- * Improved graphs and added more status information. * Print configuration into log at startup. * Use the same log file for the entire day, instead of one per startup as previously. * Diagnostics window is now closable. CasparCG Server 2.0 Beta 1 (as compared to Alpha) ================================================= * Blending Modes (needs to be explicitly enabled) * overlay * screen * multiply * and many more. * Added additive keyer in addition to linear keyer. * Image adjustments * saturation * brightness * contrast * min input-level * max input-level * min output-level * max output-level * gamma * Support for FFmpeg-filters such as (ee http://ffmpeg.org/libavfilter.html) * yadif deinterlacer (optimized in CasparCG for full multi-core support) * de-noising * dithering * box blur * and many more * 32-bit SSE optimized audio pipeline. * DeckLink-Consumer uses external-key by default. * DeckLink-Consumer has 24 bit embedded-audio support. * DeckLink-Producer has 24 bit embedded-audio support. 
* LOADBG with AUTO feature which automatically plays queued clip when foreground clip has ended. * STATUS command for layers. * LOG LEVEL command for log filtering. * MIX transition works with transparent clips. * Freeze on last frame. * Producer buffering is now configurable. * Consumer buffering is now configurable. * Now possible to configure template-hosts for different video-modes. * Added auto transcoder for FFmpeg producer which automatically transcodes input video into compatible video format for the channel. * interlacing (50p -> 50i) * deinterlacing (50i -> 25p) * bob-deinterlacing (50i -> 50p) * bob-deinterlacing and reinterlacing (w1xh150i -> w2xh250i) * doubling (25p -> 50p) * halfing (50p -> 25p) * field-order swap (upper <-> lower) * Screen consumer now automatically deinterlaces when receiving interlaced content. * Optimized renderer. * Renderer can now be run asynchronously with producer by using a producer-buffer size greater than 0. * Improved error and crash recovery. * Improved logging. * Added Image-Scroll-Producer. * Key-only has now near zero performance overhead. * Reduced memory requirements. * Removed "warm up lag" which occurred when playing the first media clip after the server has started. * Added read-back fence for OpenGL device for improved multi-channel performance. * Memory support increased from standard 2 GB to 4 GB on 64 bit Win 7 OS. * Added support for 2* DeckLink cards in Full HD. * Misc bugs fixes and performance improvements. * Color producer now support some color codes in addition to color codes, e.g. EMPTY, BLACK, RED etc... * Alpha value in color codes is now optional. * More than 2 DeckLink cards might be possible but have not yet been tested. CasparCG Server 2.0 Alpha (as compared to 1.8) ============================================== General ------- * Mayor refactoring for improved readability and maintainability. * Some work towards platform-independence. 
Currently the greatest challenge for full platform-independence is flash-producer. * Misc improved scalability. * XML-configuration. * DeckLink * Support for multiple DeckLink cards. Core ---- * Multiple producers per video_channel. * Multiple consumers per video_channel. * Swap producers between layers and channels during run-time. * Support for upper-field and lower-field interlacing. * Add and remove consumers during run-time. * Preliminary support for NTSC. AMCP ---- * Query flash and template-host version. * Recursive media-folder listing. * Misc changes. Mixer ----- * Animated tween transforms. * Image-Mixer * Fully GPU accelerated (all features listed below are done on the GPU), * Layer composition. * Color spaces (rgba, bgra, argb, yuv, yuva, yuv-hd, yuva-hd). * Interlacing. * Per-layer image transforms: * Opacity * Gain * Scaling * Clipping * Translation * Audio Mixer * Per-layer and per-sample audio transforms: * Gain * Fully internal audio mixing. Single output video_channel. Consumers --------- * DeckLink Consumer * Embedded audio. * HD support. * Hardware clock. * Bluefish Consumer * Drivers are loaded on-demand (server now runs on computers without installed Bluefish drivers). * Embedded audio. * Allocated frames are no longer leaked. Producers --------- * Decklink Producer * Embedded audio. * HD support. * Color Producer * GPU accelerated. * FFMPEG Producer * Asynchronous file IO. * Parallel decoding of audio and video. * Color space transform are moved to GPU. * Transition Producer * Fully interlaced transition (previously only progressive, even when running in interlaced mode). * Per-sample mixing between source and destination clips. * Tween transitions. * Flash Producer * DirectDraw access (slightly improved performance). * Improved time-sync. Smoother animations and proper interlacing. * Image Producer * Support for various image formats through FreeImage library. Diagnostics ----------- * Graphs for monitoring performance and events. 
* Misc logging improvements. * Separate log file for every run of the server. * Error logging provides full exception details, instead of only printing that an error has occurred. * Console with real-time logging output. * Console with AMCP input. Removed ------- * Registry configuration (replaced by XML Configuration). * TGA Producer (replaced by Image Producer). * TGA Scroll Producer ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # CasparCG Code of Conduct > CasparCG is all about showing respect and professionalism one to another. ## Community We want a positive and inspiring community that welcomes and embraces new ideas with respect and professionalism. CasparCG community has arisen through hard work and tremendous enthusiasm of hundreds of people around the world. This Code of Conduct is about how we behave, act and cooperate in this positive and inspiring community. Contributors and users who engage in the CasparCG project in one way or another share this Code of Conduct. ## We strive to: ### Be considerate CasparCG is used by other people and in various environments such as production. Your actions will affect other community members and therefore we expect you to take those consequences into account when making decisions. ### Be respectful In a community there will always be some form of disagreement, but that is no excuse for poor behavior and poor manners. Treat one another with respect and professionalism to make community members feel comfortable. We work together, as a union, to resolve conflicts and to maintain the positive and inspiring atmosphere in our community. We do not tolerate personal attacks, racism, sexism or any other form of discrimination or abuses. Maintainers of the project have right to suspend the person who persistently breaks to our Code of Conduct. 
### Be collaborative Collaboration helps to reduce redundancy while improving the quality of the project and the software produced. To avoid misunderstanding, try to be clear when requesting for help, giving help or asking for feedback. If something is unclear ask for clarification. ### Be supportive CasparCG community is made up by mutual respect, collaboration and professional behavior. If you witness others being discriminated or abused, think about how you can be supportive. If the identified situation is beyond your ability, ask the victim privately if some form of official intervention is needed to solve the uncomfortable situation. ### Ask for help By asking questions early we can avoid many problems that can arise later on. We encourage you to ask questions. Those who are asked should be helpful and answer with professionalism in mind. ================================================ FILE: LICENSE ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. 
If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . ================================================ FILE: README.md ================================================ CasparCG Server =============== Thank you for your interest in CasparCG Server, a professional software used to play out and record professional graphics, audio and video to multiple outputs. CasparCG Server has been in 24/7 broadcast production since 2006. The CasparCG Server works on Windows and Linux. System Requirements ------------------- - A graphics card (GPU) capable of OpenGL 4.5 is required. - An Nvidia GPU is recommended, but other GPU's will likely work fine. - Intel and AMD CPU's have been tested and are known to work - PCIE bandwidth is important between your GPU and CPU, as well as Decklink and CPU. Avoid chipset lanes when possible. ### Windows - Windows 11 is recommended - Windows 10 is supported as best effort ### Linux - Ubuntu 24.04 is recommended - Other distributions and releases will work but you will need to compile it yourself and have not been tested Getting Started --------------- 1. Download a release from (http://casparcg.com/downloads). 
Alternatively, newer testing versions can be downloaded from (http://builds.casparcg.com) or [built from source](BUILDING.md) 2. Install any optional non-GPL modules - Flash template support (Windows only): 1. Uninstall any previous version of the Adobe Flash Player using this file: (http://download.macromedia.com/get/flashplayer/current/support/uninstall_flash_player.exe) 2. Download and unpack (http://download.macromedia.com/pub/flashplayer/installers/archive/fp_11.8.800.94_archive.zip) 3. Install Adobe Flash Player 11.8.800.94 from the unpacked archive: fp_11.8.800.94_archive\11_8_r800_94\flashplayer11_8r800_94_winax.exe 3. Configure the server by editing the self-documented "casparcg.config" file in a text editor. 4. 1. Windows: start `casparcg_auto_restart.bat`, or `casparcg.exe` and `scanner.exe` separately. 1. Linux: start the `run.sh` program or use tools/linux/start_docker.sh to run within docker (documentation is at the top of the file). 5. Connect to the Server from a client software, such as the "CasparCG Client" which is available as a separate download. Documentation ------------- The most up-to-date documentation is always available at https://github.com/CasparCG/help/wiki Ask questions in the forum: https://casparcgforum.org/ Development ----------- See [BUILDING](BUILDING.md) for instructions on how to build the CasparCG Server from source manually. License --------- CasparCG Server is distributed under the GNU General Public License GPLv3 or higher, see [LICENSE](LICENSE) for details. CasparCG Server uses the following third party libraries: - FFmpeg (http://ffmpeg.org/) under the GPLv2 Licence. FFmpeg is a trademark of Fabrice Bellard, originator of the FFmpeg project. - Threading Building Blocks (http://www.threadingbuildingblocks.org/) library under the GPLv2 Licence. - SFML (http://www.sfml-dev.org/) under the zlib/libpng License. - GLEW (http://glew.sourceforge.net) under the modified BSD License. 
- boost (http://www.boost.org/) under the Boost Software License, version 1.0. ================================================ FILE: _typos.toml ================================================ [default] # Ignore lines following the commment `// typos: ignore-next extend-ignore-re = [".*// typos: ignore-next\\n[^\\n]*\\n"] [default.extend-words] # NDI is a common acronym in the context of video and audio over IP ndi = "ndi" # it was seeoing NDIfoo as ND-Ifoo and turning it into ANDIfoo nd = "nd" # ANC is short for "ancillary" and is used in the bluefish drivers anc = "anc" ================================================ FILE: src/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.16) # Project settings project("CasparCG Server") set(CONFIG_VERSION_MAJOR 2) set(CONFIG_VERSION_MINOR 6) set(CONFIG_VERSION_BUG 0) set(CONFIG_VERSION_TAG "Dev") # support for language servers (eg, clangd or ccls) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CASPARCG_DOWNLOAD_MIRROR https://github.com/CasparCG/dependencies/releases/download/ CACHE STRING "Source/mirror to use for external dependencies") set(CASPARCG_DOWNLOAD_CACHE ${CMAKE_CURRENT_BINARY_DIR}/external CACHE STRING "Download cache directory for cmake ExternalProjects") option(ENABLE_HTML "Enable HTML module, require CEF" ON) set(DIAG_FONT_PATH "LiberationMono-Regular.ttf" CACHE STRING "Path to font that will be used to load diag font at runtime. By default this loads the font distributed with the application from the working directory. It can be set to an absolute path to instead load a font from the system." 
) add_compile_definitions("DIAG_FONT_PATH=\"${DIAG_FONT_PATH}\"") # Add custom cmake modules path set(CASPARCG_PATCH_DIR ${CMAKE_SOURCE_DIR}/CMakeModules/patches) list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/CMakeModules) # Determine the git hash find_package(Git) set(CONFIG_VERSION_GIT_HASH "N/A") if(DEFINED ENV{GIT_HASH} AND NOT $ENV{GIT_HASH} STREQUAL "") set(CONFIG_VERSION_GIT_HASH "$ENV{GIT_HASH}") elseif(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/../.git") execute_process( COMMAND "${GIT_EXECUTABLE}" rev-parse --verify --short HEAD WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}/../" OUTPUT_VARIABLE CONFIG_VERSION_GIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE ) endif() cmake_host_system_information(RESULT CONFIG_CPU_COUNT QUERY NUMBER_OF_PHYSICAL_CORES) include(CasparCG_Util) configure_file("${PROJECT_SOURCE_DIR}/version.tmpl" "${CMAKE_BINARY_DIR}/generated/version.h") if(MSVC) include(Bootstrap_Windows) else () include(Bootstrap_Linux) # Output build settings message(STATUS "Target architecture: ${CMAKE_SYSTEM_PROCESSOR}") message(STATUS "CPU Count: ${CONFIG_CPU_COUNT}") message(STATUS "Target build type: ${CMAKE_BUILD_TYPE}") message(STATUS "Build Version: ${CONFIG_VERSION_MAJOR}.${CONFIG_VERSION_MINOR}.${CONFIG_VERSION_BUG} ${CONFIG_VERSION_TAG} (Revision: ${CONFIG_VERSION_GIT_HASH})") endif () IF(ENABLE_VULKAN) add_definitions(-DENABLE_VULKAN) ENDIF() include_directories("${CMAKE_BINARY_DIR}/generated") add_subdirectory(tools) add_subdirectory(accelerator) add_subdirectory(common) add_subdirectory(core) add_subdirectory(modules) add_subdirectory(protocol) add_subdirectory(shell) ================================================ FILE: src/CMakeModules/Bootstrap_Linux.cmake ================================================ cmake_minimum_required (VERSION 3.28) include(ExternalProject) include(FetchContent) if(POLICY CMP0135) cmake_policy(SET CMP0135 NEW) endif() # Prefer the new boost helper if(POLICY CMP0167) cmake_policy(SET CMP0167 NEW) endif() set(ENABLE_HTML 
ON CACHE BOOL "Enable CEF and HTML producer") set(USE_STATIC_BOOST OFF CACHE BOOL "Use shared library version of Boost") set(USE_SYSTEM_CEF ON CACHE BOOL "Use the version of cef from your OS (only tested with Ubuntu)") set(CASPARCG_BINARY_NAME "casparcg" CACHE STRING "Custom name of the binary to build (this disables some install files)") set(ENABLE_AVX2 OFF CACHE BOOL "Enable the AVX2 instruction set (requires a CPU that supports it)") set(ENABLE_VULKAN OFF CACHE BOOL "Enable Vulkan support") # Determine build (target) platform SET (PLATFORM_FOLDER_NAME "linux") IF (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) MESSAGE (STATUS "Setting build type to 'Release' as none was specified.") SET (CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build." FORCE) SET_PROPERTY (CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") ENDIF () MARK_AS_ADVANCED (CMAKE_INSTALL_PREFIX) if (USE_STATIC_BOOST) SET (Boost_USE_STATIC_LIBS ON) endif() find_package(Boost 1.83.0 COMPONENTS thread filesystem log_setup log locale regex date_time coroutine REQUIRED) find_package(FFmpeg REQUIRED) find_package(OpenGL REQUIRED COMPONENTS OpenGL GLX EGL) find_package(GLEW REQUIRED) find_package(TBB REQUIRED) find_package(OpenAL REQUIRED) find_package(SFML 3 COMPONENTS Graphics System Window QUIET) if(NOT SFML_FOUND) find_package(SFML 2 COMPONENTS graphics system window REQUIRED) endif() IF (ENABLE_VULKAN) find_package(Vulkan REQUIRED) FetchContent_Declare(vk_bootstrap URL ${CASPARCG_DOWNLOAD_MIRROR}/vk-bootstrap/vk-bootstrap-1.4.328.tar.gz URL_HASH SHA256=3be0220de218dc3e692aeac552b2953860a0e0a48257f4a61c3f1c1472674744 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(vk_bootstrap) FetchContent_Declare(vma URL ${CASPARCG_DOWNLOAD_MIRROR}/VulkanMemoryAllocator/VulkanMemoryAllocator-3.3.0.tar.gz URL_HASH SHA256=c4f6bbe6b5a45c2eb610ca9d231158e313086d5b1a40c9922cb42b597419b14e DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) 
FetchContent_MakeAvailable(vma) endif() find_package(X11 REQUIRED) if (ENABLE_HTML) if (USE_SYSTEM_CEF) set(CEF_LIB_PATH "/usr/lib/casparcg-cef-142") add_library(CEF::CEF INTERFACE IMPORTED) target_include_directories(CEF::CEF INTERFACE "/usr/include/casparcg-cef-142" ) target_link_libraries(CEF::CEF INTERFACE "-Wl,-rpath,${CEF_LIB_PATH} ${CEF_LIB_PATH}/libcef.so" "${CEF_LIB_PATH}/libcef_dll_wrapper.a" ) else() casparcg_add_external_project(cef) ExternalProject_Add(cef URL ${CASPARCG_DOWNLOAD_MIRROR}/cef/cef_binary_142.0.17+g60aac24+chromium-142.0.7444.176_linux64_minimal.tar.bz2 URL_HASH SHA256=1d89e19b2f446105f9a1fe6fdc96bced86249b5884241dcc4013b7c94dabf424 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} CMAKE_ARGS -DUSE_SANDBOX=Off INSTALL_COMMAND "" BUILD_BYPRODUCTS "/Release/libcef.so" "/libcef_dll_wrapper/libcef_dll_wrapper.a" ) ExternalProject_Get_Property(cef SOURCE_DIR) ExternalProject_Get_Property(cef BINARY_DIR) add_library(CEF::CEF INTERFACE IMPORTED) target_include_directories(CEF::CEF INTERFACE "${SOURCE_DIR}" ) target_link_libraries(CEF::CEF INTERFACE # Note: All of these must be referenced in the BUILD_BYPRODUCTS above, to satisfy ninja "${SOURCE_DIR}/Release/libcef.so" "${BINARY_DIR}/libcef_dll_wrapper/libcef_dll_wrapper.a" ) install(DIRECTORY ${SOURCE_DIR}/Resources/locales TYPE LIB) install(FILES ${SOURCE_DIR}/Resources/chrome_100_percent.pak TYPE LIB) install(FILES ${SOURCE_DIR}/Resources/chrome_200_percent.pak TYPE LIB) install(FILES ${SOURCE_DIR}/Resources/icudtl.dat TYPE LIB) install(FILES ${SOURCE_DIR}/Resources/resources.pak TYPE LIB) install(FILES ${SOURCE_DIR}/Release/chrome-sandbox TYPE LIB) install(FILES ${SOURCE_DIR}/Release/libcef.so TYPE LIB) install(FILES ${SOURCE_DIR}/Release/libEGL.so TYPE LIB) install(FILES ${SOURCE_DIR}/Release/libGLESv2.so TYPE LIB) install(FILES ${SOURCE_DIR}/Release/libvk_swiftshader.so TYPE LIB) install(FILES ${SOURCE_DIR}/Release/libvulkan.so.1 TYPE LIB) install(FILES ${SOURCE_DIR}/Release/v8_context_snapshot.bin 
TYPE LIB) install(FILES ${SOURCE_DIR}/Release/vk_swiftshader_icd.json TYPE LIB) endif() endif () SET (BOOST_INCLUDE_PATH "${Boost_INCLUDE_DIRS}") SET (FFMPEG_INCLUDE_PATH "${FFMPEG_INCLUDE_DIRS}") LINK_DIRECTORIES("${FFMPEG_LIBRARY_DIRS}") SET_PROPERTY (GLOBAL PROPERTY USE_FOLDERS ON) ADD_DEFINITIONS (-DSFML_STATIC) ADD_DEFINITIONS (-DUNICODE) ADD_DEFINITIONS (-D_UNICODE) ADD_DEFINITIONS (-DGLEW_NO_GLU) ADD_DEFINITIONS (-DGLEW_EGL) ADD_DEFINITIONS (-D__NO_INLINE__) # Needed for precompiled headers to work ADD_DEFINITIONS (-DBOOST_NO_SWPRINTF) # swprintf on Linux seems to always use , as decimal point regardless of C-locale or C++-locale ADD_DEFINITIONS (-DTBB_USE_CAPTURED_EXCEPTION=1) ADD_DEFINITIONS (-DNDEBUG) # Needed for precompiled headers to work ADD_DEFINITIONS (-DBOOST_LOCALE_HIDE_AUTO_PTR) # Needed for C++17 in boost 1.67+ if (NOT USE_STATIC_BOOST) ADD_DEFINITIONS (-DBOOST_ALL_DYN_LINK) endif() IF (NOT CMAKE_BUILD_TYPE STREQUAL "Debug") ADD_COMPILE_OPTIONS (-O3) # Needed for precompiled headers to work endif() IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(i[3-6]86|x64|x86_64|amd64|e2k)") ADD_COMPILE_OPTIONS (-msse3) ADD_COMPILE_OPTIONS (-mssse3) ADD_COMPILE_OPTIONS (-msse4.1) IF (ENABLE_AVX2) ADD_COMPILE_OPTIONS (-mfma) ADD_COMPILE_OPTIONS (-mavx) ADD_COMPILE_OPTIONS (-mavx2) ENDIF () ELSE () ADD_COMPILE_DEFINITIONS (USE_SIMDE) # Enable OpenMP support in simde ADD_COMPILE_DEFINITIONS (SIMDE_ENABLE_OPENMP) # Enable OpenMP support in simde ADD_COMPILE_OPTIONS (-fopenmp-simd) # Enable OpenMP SIMD support ENDIF () ADD_COMPILE_OPTIONS (-fnon-call-exceptions) # Allow signal handler to throw exception ADD_COMPILE_OPTIONS (-Wno-deprecated-declarations -Wno-write-strings -Wno-multichar -Wno-cpp -Werror) IF (ENABLE_VULKAN) ADD_COMPILE_OPTIONS (-Wno-nonnull -Wno-nullability-completeness) ENDIF() IF (CMAKE_CXX_COMPILER_ID MATCHES "GNU") ADD_COMPILE_OPTIONS (-Wno-terminate) ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") # Help TBB figure out what compiler support for c++11 
features # https://github.com/01org/tbb/issues/22 string(REPLACE "." "0" TBB_USE_GLIBCXX_VERSION ${CMAKE_CXX_COMPILER_VERSION}) message(STATUS "ADDING: -DTBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION}") add_definitions(-DTBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION}) ENDIF () set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG") ================================================ FILE: src/CMakeModules/Bootstrap_Windows.cmake ================================================ cmake_minimum_required (VERSION 3.28) include(ExternalProject) include(FetchContent) if(POLICY CMP0135) cmake_policy(SET CMP0135 NEW) endif() # Prefer the new boost helper if(POLICY CMP0167) cmake_policy(SET CMP0167 NEW) endif() set(BOOST_USE_PRECOMPILED ON CACHE BOOL "Use precompiled boost") set(ENABLE_VULKAN OFF CACHE BOOL "Enable Vulkan support") set(CASPARCG_RUNTIME_DEPENDENCIES_RELEASE "" CACHE INTERNAL "") set(CASPARCG_RUNTIME_DEPENDENCIES_DEBUG "" CACHE INTERNAL "") set(CASPARCG_RUNTIME_DEPENDENCIES_RELEASE_DIRS "" CACHE INTERNAL "") set(CASPARCG_RUNTIME_DEPENDENCIES_DEBUG_DIRS "" CACHE INTERNAL "") function(casparcg_add_runtime_dependency FILE_TO_COPY) if ("${ARGV1}" STREQUAL "Release" OR NOT ARGV1) set(CASPARCG_RUNTIME_DEPENDENCIES_RELEASE "${CASPARCG_RUNTIME_DEPENDENCIES_RELEASE}" "${FILE_TO_COPY}" CACHE INTERNAL "") endif() if ("${ARGV1}" STREQUAL "Debug" OR NOT ARGV1) set(CASPARCG_RUNTIME_DEPENDENCIES_DEBUG "${CASPARCG_RUNTIME_DEPENDENCIES_DEBUG}" "${FILE_TO_COPY}" CACHE INTERNAL "") endif() endfunction() function(casparcg_add_runtime_dependency_dir FILE_TO_COPY) if ("${ARGV1}" STREQUAL "Release" OR NOT ARGV1) set(CASPARCG_RUNTIME_DEPENDENCIES_RELEASE_DIRS "${CASPARCG_RUNTIME_DEPENDENCIES_RELEASE_DIRS}" "${FILE_TO_COPY}" CACHE INTERNAL "") endif() if ("${ARGV1}" STREQUAL "Debug" OR NOT ARGV1) set(CASPARCG_RUNTIME_DEPENDENCIES_DEBUG_DIRS "${CASPARCG_RUNTIME_DEPENDENCIES_DEBUG_DIRS}" "${FILE_TO_COPY}" CACHE INTERNAL "") endif() endfunction() 
function(casparcg_add_runtime_dependency_from_target TARGET) get_target_property(_runtime_lib_name ${TARGET} IMPORTED_LOCATION_RELEASE) if (NOT "${_runtime_lib_name}" STREQUAL "") set(CASPARCG_RUNTIME_DEPENDENCIES_RELEASE "${CASPARCG_RUNTIME_DEPENDENCIES_RELEASE}" "${_runtime_lib_name}" CACHE INTERNAL "") endif() get_target_property(_runtime_lib_name ${TARGET} IMPORTED_LOCATION_DEBUG) if (NOT "${_runtime_lib_name}" STREQUAL "") set(CASPARCG_RUNTIME_DEPENDENCIES_DEBUG "${CASPARCG_RUNTIME_DEPENDENCIES_DEBUG}" "${_runtime_lib_name}" CACHE INTERNAL "") endif() endfunction() casparcg_add_runtime_dependency("${PROJECT_SOURCE_DIR}/shell/casparcg.config") # BOOST casparcg_add_external_project(boost) if (BOOST_USE_PRECOMPILED) ExternalProject_Add(boost URL ${CASPARCG_DOWNLOAD_MIRROR}/boost/boost_1_83_0-win32-x64-debug-release.zip URL_HASH MD5=0b9990a24259867c8c04ae30c423f86b DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" ) ExternalProject_Get_Property(boost SOURCE_DIR) set(BOOST_INCLUDE_PATH "${SOURCE_DIR}/include/boost-1_83") link_directories("${SOURCE_DIR}/lib") else () set(BOOST_INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/boost-install) ExternalProject_Add(boost URL ${CASPARCG_DOWNLOAD_MIRROR}/boost/boost_1_83_0.zip URL_HASH MD5=03d5aea72401ffed848cb5daf8cd2b9b DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND ./bootstrap.bat --with-libraries=filesystem --with-libraries=locale --with-libraries=log --with-libraries=log_setup --with-libraries=regex --with-libraries=system --with-libraries=thread BUILD_COMMAND ./b2 install debug release --prefix=${BOOST_INSTALL_DIR} link=static threading=multi runtime-link=shared -j ${CONFIG_CPU_COUNT} INSTALL_COMMAND "" ) set(BOOST_INCLUDE_PATH "${BOOST_INSTALL_DIR}/include/boost-1_83") link_directories("${BOOST_INSTALL_DIR}/lib") endif () add_definitions( -DBOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE ) add_definitions( -DBOOST_COROUTINES_NO_DEPRECATION_WARNING ) 
add_definitions( -DBOOST_LOCALE_HIDE_AUTO_PTR ) # FFMPEG casparcg_add_external_project(ffmpeg-lib) ExternalProject_Add(ffmpeg-lib URL ${CASPARCG_DOWNLOAD_MIRROR}/ffmpeg/ffmpeg-7.0.2-full_build-shared.7z URL_HASH MD5=c5127aeed36a9a86dd3b84346be182f8 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" ) ExternalProject_Get_Property(ffmpeg-lib SOURCE_DIR) set(FFMPEG_INCLUDE_PATH "${SOURCE_DIR}/include") set(FFMPEG_BIN_PATH "${SOURCE_DIR}/bin") link_directories("${SOURCE_DIR}/lib") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/avcodec-61.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/avdevice-61.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/avfilter-10.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/avformat-61.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/avutil-59.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/postproc-58.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/swresample-5.dll") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/swscale-8.dll") # for scanner: casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/ffmpeg.exe") casparcg_add_runtime_dependency("${FFMPEG_BIN_PATH}/ffprobe.exe") get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) set(EXTERNAL_CMAKE_ARGS "") if (is_multi_config) set(EXTERNAL_CMAKE_ARGS "-DCMAKE_BUILD_TYPE:STRING=$") else() set(EXTERNAL_CMAKE_ARGS "-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}") endif () # TBB FetchContent_Declare(tbb URL ${CASPARCG_DOWNLOAD_MIRROR}/tbb/oneapi-tbb-2022.3.0-win.zip URL_HASH SHA256=e1b2373f25558bf47d16b4c89cf0a31e6689aaf7221400d209e8527afc7c9eee DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(tbb) list(APPEND CMAKE_PREFIX_PATH ${tbb_SOURCE_DIR}/lib/cmake/tbb) find_package(tbb REQUIRED) casparcg_add_runtime_dependency_from_target(TBB::tbb) casparcg_add_runtime_dependency_from_target(TBB::tbbmalloc) 
casparcg_add_runtime_dependency_from_target(TBB::tbbmalloc_proxy) # GLEW FetchContent_Declare(glew URL ${CASPARCG_DOWNLOAD_MIRROR}/glew/glew-2.2.0-win32.zip URL_HASH MD5=1feddfe8696c192fa46a0df8eac7d4bf DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(glew) add_library(GLEW::glew INTERFACE IMPORTED) target_include_directories(GLEW::glew INTERFACE ${glew_SOURCE_DIR}/include) target_link_directories(GLEW::glew INTERFACE ${glew_SOURCE_DIR}/lib/Release/x64) target_link_libraries(GLEW::glew INTERFACE glew32) casparcg_add_runtime_dependency("${glew_SOURCE_DIR}/bin/Release/x64/glew32.dll") IF(ENABLE_VULKAN) find_package(Vulkan REQUIRED) FetchContent_Declare(vk_bootstrap URL ${CASPARCG_DOWNLOAD_MIRROR}/vk-bootstrap/vk-bootstrap-1.4.328.zip URL_HASH SHA256=10f257c30a0a49d30b28a72cf3a7942d93a61f977adaa04bee29304c6506dc12 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(vk_bootstrap) FetchContent_Declare(vma URL ${CASPARCG_DOWNLOAD_MIRROR}/VulkanMemoryAllocator/VulkanMemoryAllocator-3.3.0.zip URL_HASH SHA256=81755d8fcb411b97292c6682e828501315db319374c7c34ba6e1226452c6c392 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(vma) ENDIF() # SFML FetchContent_Declare(sfml URL ${CASPARCG_DOWNLOAD_MIRROR}/sfml/SFML-2.6.2-windows-vc17-64-bit.zip URL_HASH MD5=dee0602d6f94d1843eef4d7568d2c23d DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(sfml) list(APPEND CMAKE_PREFIX_PATH ${sfml_SOURCE_DIR}/lib/cmake/SFML) # set(SFML_STATIC_LIBRARIES TRUE) find_package(SFML 2 COMPONENTS graphics system window REQUIRED) casparcg_add_runtime_dependency_from_target(sfml-graphics) casparcg_add_runtime_dependency_from_target(sfml-system) casparcg_add_runtime_dependency_from_target(sfml-window) #ZLIB casparcg_add_external_project(zlib) ExternalProject_Add(zlib URL ${CASPARCG_DOWNLOAD_MIRROR}/zlib/zlib-1.3.tar.gz URL_HASH MD5=60373b133d630f74f4a1f94c1185a53f DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} CMAKE_ARGS 
${EXTERNAL_CMAKE_ARGS} INSTALL_COMMAND "" ) ExternalProject_Get_Property(zlib SOURCE_DIR) ExternalProject_Get_Property(zlib BINARY_DIR) set(ZLIB_INCLUDE_PATH "${SOURCE_DIR};${BINARY_DIR}") link_directories(${BINARY_DIR}) # OpenAL FetchContent_Declare(openal URL ${CASPARCG_DOWNLOAD_MIRROR}/openal/openal-soft-1.19.1-bin.zip URL_HASH MD5=b78ef1ba26f7108e763f92df6bbc3fa5 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} ) FetchContent_MakeAvailable(openal) file(COPY_FILE ${openal_SOURCE_DIR}/bin/Win64/soft_oal.dll ${openal_SOURCE_DIR}/bin/Win64/OpenAL32.dll) add_library(OpenAL::OpenAL INTERFACE IMPORTED) target_include_directories(OpenAL::OpenAL INTERFACE ${openal_SOURCE_DIR}/include) target_link_directories(OpenAL::OpenAL INTERFACE ${openal_SOURCE_DIR}/libs/Win64) target_link_libraries(OpenAL::OpenAL INTERFACE OpenAL32) casparcg_add_runtime_dependency("${openal_SOURCE_DIR}/bin/Win64/OpenAL32.dll") # flash template host casparcg_add_external_project(flashtemplatehost) ExternalProject_Add(flashtemplatehost URL ${CASPARCG_DOWNLOAD_MIRROR}/flash-template-host/flash-template-host-files.zip URL_HASH MD5=360184ce21e34d585d1d898fdd7a6bd8 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} BUILD_IN_SOURCE 1 CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" ) ExternalProject_Get_Property(flashtemplatehost SOURCE_DIR) set(TEMPLATE_HOST_PATH "${SOURCE_DIR}") # casparcg_add_runtime_dependency_dir("${TEMPLATE_HOST_PATH}") # LIBERATION_FONTS set(LIBERATION_FONTS_BIN_PATH "${PROJECT_SOURCE_DIR}/shell/liberation-fonts") casparcg_add_runtime_dependency("${LIBERATION_FONTS_BIN_PATH}/LiberationMono-Regular.ttf") # CEF if (ENABLE_HTML) casparcg_add_external_project(cef) ExternalProject_Add(cef URL ${CASPARCG_DOWNLOAD_MIRROR}/cef/cef_binary_142.0.17+g60aac24+chromium-142.0.7444.176_windows64_minimal.tar.bz2 URL_HASH SHA256=16c072a44484fe521037c74d03a339a77573b1fc0146cf44cc71e79fd0cc0198 DOWNLOAD_DIR ${CASPARCG_DOWNLOAD_CACHE} CMAKE_ARGS -DUSE_SANDBOX=Off -DCEF_RUNTIME_LIBRARY_FLAG=/MD 
${EXTERNAL_CMAKE_ARGS} INSTALL_COMMAND "" ) ExternalProject_Get_Property(cef SOURCE_DIR) ExternalProject_Get_Property(cef BINARY_DIR) add_library(CEF::CEF INTERFACE IMPORTED) add_dependencies(CEF::CEF cef) target_include_directories(CEF::CEF INTERFACE "${SOURCE_DIR}" ) set(CEF_RESOURCE_PATH ${SOURCE_DIR}/Resources) set(CEF_BIN_PATH ${SOURCE_DIR}/Release) if (is_multi_config) target_link_libraries(CEF::CEF INTERFACE ${SOURCE_DIR}/Release/libcef.lib optimized ${BINARY_DIR}/libcef_dll_wrapper/Release/libcef_dll_wrapper.lib debug ${BINARY_DIR}/libcef_dll_wrapper/Debug/libcef_dll_wrapper.lib) else() link_directories(${SOURCE_DIR}/Release ${BINARY_DIR}/libcef_dll_wrapper) target_link_libraries(CEF::CEF INTERFACE libcef.lib libcef_dll_wrapper.lib) endif() casparcg_add_runtime_dependency_dir("${CEF_RESOURCE_PATH}/locales") casparcg_add_runtime_dependency("${CEF_RESOURCE_PATH}/chrome_100_percent.pak") casparcg_add_runtime_dependency("${CEF_RESOURCE_PATH}/chrome_200_percent.pak") casparcg_add_runtime_dependency("${CEF_RESOURCE_PATH}/resources.pak") casparcg_add_runtime_dependency("${CEF_RESOURCE_PATH}/icudtl.dat") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/v8_context_snapshot.bin") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/libcef.dll") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/chrome_elf.dll") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/d3dcompiler_47.dll") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/libEGL.dll") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/libGLESv2.dll") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/vk_swiftshader.dll") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/vk_swiftshader_icd.json") casparcg_add_runtime_dependency("${CEF_BIN_PATH}/vulkan-1.dll") endif () set_property(GLOBAL PROPERTY USE_FOLDERS ON) set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT casparcg) add_definitions(-DUNICODE) add_definitions(-D_UNICODE) add_definitions(-DCASPAR_SOURCE_PREFIX="${CMAKE_CURRENT_SOURCE_DIR}") 
add_definitions(-D_WIN32_WINNT=0x601) # ignore boost deprecated headers, as these are often reported inside boost add_definitions("-DBOOST_ALLOW_DEPRECATED_HEADERS") # Ensure /EHsc is not defined as it clashes with EHa below string(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHa /Zi /W4 /WX /MP /fp:fast /Zm192 /FIcommon/compiler/vs/disable_silly_warnings.h") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /D TBB_USE_ASSERT=1 /D TBB_USE_DEBUG /bigobj") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oi /arch:AVX /Ot /Gy /bigobj") ================================================ FILE: src/CMakeModules/CasparCG_Util.cmake ================================================ # Collect all ExternalProjects that have been defined set(CASPARCG_EXTERNAL_PROJECTS "" CACHE INTERNAL "") FUNCTION (casparcg_add_external_project NAME) SET (CASPARCG_EXTERNAL_PROJECTS "${CASPARCG_EXTERNAL_PROJECTS}" "${NAME}" CACHE INTERNAL "") ENDFUNCTION() # Mark a project as depending on all of the ExternalProjects, to ensure build order FUNCTION(casparcg_add_build_dependencies TARGET) if (CASPARCG_EXTERNAL_PROJECTS) ADD_DEPENDENCIES (${TARGET} ${CASPARCG_EXTERNAL_PROJECTS}) endif() ENDFUNCTION() SET (CASPARCG_MODULE_INCLUDE_STATEMENTS "" CACHE INTERNAL "") SET (CASPARCG_MODULE_INIT_STATEMENTS "" CACHE INTERNAL "") SET (CASPARCG_MODULE_UNINIT_STATEMENTS "" CACHE INTERNAL "") SET (CASPARCG_MODULE_COMMAND_LINE_ARG_INTERCEPTORS_STATEMENTS "" CACHE INTERNAL "") SET (CASPARCG_MODULE_TARGETS "" CACHE INTERNAL "") # CasparCG version of CMake `add_library` FUNCTION (casparcg_add_library TARGET) cmake_parse_arguments( PARSED_ARGS # prefix of output variables "" # list of names of the boolean arguments (only defined ones will be true) "" # list of names of mono-valued arguments "SOURCES" # list of names of multi-valued arguments (output variables are lists) ${ARGN} # arguments of the function to parse, here we take the all original ones ) 
if(NOT TARGET) message(FATAL_ERROR "You must provide a target name") endif() # Setup the library and some default config ADD_LIBRARY (${TARGET} ${PARSED_ARGS_SOURCES}) target_compile_features (${TARGET} PRIVATE cxx_std_20) target_include_directories(${TARGET} SYSTEM PRIVATE ${BOOST_INCLUDE_PATH} ) target_link_libraries(${TARGET} PRIVATE TBB::tbb) if (CASPARCG_EXTERNAL_PROJECTS) # Setup dependency on ExternalProject ADD_DEPENDENCIES (${TARGET} ${CASPARCG_EXTERNAL_PROJECTS}) endif() ENDFUNCTION () # CasparCG version of CMake `add_library` specifically for modules SET (CASPARCG_MODULE_TARGETS "" CACHE INTERNAL "") FUNCTION (casparcg_add_module_project TARGET) cmake_parse_arguments( PARSED_ARGS # prefix of output variables "" # list of names of the boolean arguments (only defined ones will be true) "NAME;HEADER_FILE;INIT_FUNCTION;UNINIT_FUNCTION;CLI_INTERCEPTOR" # list of names of mono-valued arguments "SOURCES" # list of names of multi-valued arguments (output variables are lists) ${ARGN} # arguments of the function to parse, here we take the all original ones ) # Use target if name is missing if (NOT PARSED_ARGS_NAME) set (PARSED_ARGS_NAME ${TARGET}) endif() # Use default path if header not defined if (NOT PARSED_ARGS_HEADER_FILE) set (PARSED_ARGS_HEADER_FILE "modules/${TARGET}/${TARGET}.h") endif() # Use default init name if not d if (NOT PARSED_ARGS_INIT_FUNCTION) message(FATAL_ERROR "You must provide an INIT_FUNCTION") endif() # Setup the library and some default config casparcg_add_library (${TARGET} SOURCES ${PARSED_ARGS_SOURCES}) target_link_libraries(${TARGET} PRIVATE common core) target_include_directories(${TARGET} PRIVATE # TODO: This should be replaced by the linked libraries eventually ../.. 
) # Setup linker and code loading SET (CASPARCG_MODULE_TARGETS "${CASPARCG_MODULE_TARGETS}" "${TARGET}" CACHE INTERNAL "") SET (CASPARCG_MODULE_INCLUDE_STATEMENTS "${CASPARCG_MODULE_INCLUDE_STATEMENTS}" "#include <${PARSED_ARGS_HEADER_FILE}>" CACHE INTERNAL "" ) SET (CASPARCG_MODULE_INIT_STATEMENTS "${CASPARCG_MODULE_INIT_STATEMENTS}" " ${PARSED_ARGS_INIT_FUNCTION}(dependencies)\;" " CASPAR_LOG(info) << L\"Initialized ${PARSED_ARGS_NAME} module.\"\;" "" CACHE INTERNAL "" ) IF (PARSED_ARGS_UNINIT_FUNCTION) SET (CASPARCG_MODULE_UNINIT_STATEMENTS " ${PARSED_ARGS_UNINIT_FUNCTION}()\;" "${CASPARCG_MODULE_UNINIT_STATEMENTS}" CACHE INTERNAL "" ) ENDIF () IF (PARSED_ARGS_CLI_INTERCEPTOR) SET (CASPARCG_MODULE_COMMAND_LINE_ARG_INTERCEPTORS_STATEMENTS "${CASPARCG_MODULE_COMMAND_LINE_ARG_INTERCEPTORS_STATEMENTS}" " if (${PARSED_ARGS_CLI_INTERCEPTOR}(argc, argv))" " return true\;" "" CACHE INTERNAL "" ) ENDIF () ENDFUNCTION () # http://stackoverflow.com/questions/7172670/best-shortest-way-to-join-a-list-in-cmake FUNCTION (join_list VALUES GLUE OUTPUT) STRING (REGEX REPLACE "([^\\]|^);" "\\1${GLUE}" _TMP_STR "${VALUES}") STRING (REGEX REPLACE "[\\](.)" "\\1" _TMP_STR "${_TMP_STR}") #fixes escaping SET (${OUTPUT} "${_TMP_STR}" PARENT_SCOPE) ENDFUNCTION () ================================================ FILE: src/CMakeModules/FindFFmpeg.cmake ================================================ # vim: ts=2 sw=2 # - Try to find the required ffmpeg components(default: AVFORMAT, AVUTIL, AVCODEC) # # Once done this will define # FFMPEG_FOUND - System has the all required components. # FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers. # FFMPEG_LIBRARIES - Link these to use the required ffmpeg components. # FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components. # # For each of the components it will additionally set. 
# - AVCODEC # - AVDEVICE # - AVFORMAT # - AVFILTER # - AVUTIL # - SWSCALE # the following variables will be defined # _FOUND - System has # _INCLUDE_DIRS - Include directory necessary for using the headers # _LIBRARIES - Link these to use # _DEFINITIONS - Compiler switches required for using # _VERSION - The components version # # Copyright (c) 2006, Matthias Kretz, # Copyright (c) 2008, Alexander Neundorf, # Copyright (c) 2011, Michael Jansen, # # Redistribution and use is allowed according to the terms of the BSD license. # For details see the accompanying COPYING-CMAKE-SCRIPTS file. include(FindPackageHandleStandardArgs) # The default components were taken from a survey over other FindFFMPEG.cmake files if (NOT FFmpeg_FIND_COMPONENTS) set(FFmpeg_FIND_COMPONENTS AVCODEC AVFORMAT AVUTIL AVDEVICE AVFILTER SWSCALE SWRESAMPLE) endif () # ### Macro: set_component_found # # Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present. # macro(set_component_found _component ) if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS) # message(STATUS " - ${_component} found.") set(${_component}_FOUND TRUE) else () # message(STATUS " - ${_component} not found.") endif () endmacro() # ### Macro: find_component # # Checks for the given component by invoking pkgconfig and then looking up the libraries and # include directories. 
# macro(find_component _component _pkgconfig _library _header) if (NOT WIN32) # use pkg-config to get the directories and then use these values # in the FIND_PATH() and FIND_LIBRARY() calls find_package(PkgConfig) if (PKG_CONFIG_FOUND) pkg_check_modules(${_component} ${_pkgconfig}) endif () endif (NOT WIN32) find_path(${_component}_INCLUDE_DIRS ${_header} HINTS ${PC_LIB${_component}_INCLUDEDIR} ${PC_LIB${_component}_INCLUDE_DIRS} PATH_SUFFIXES ffmpeg ) find_library(${_component}_LIBRARIES NAMES ${_library} HINTS ${PC_LIB${_component}_LIBDIR} ${PC_LIB${_component}_LIBRARY_DIRS} ) set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.") set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.") set_component_found(${_component}) mark_as_advanced( ${_component}_LIBRARY_DIRS ${_component}_INCLUDE_DIRS ${_component}_LIBRARIES ${_component}_DEFINITIONS ${_component}_VERSION ) endmacro() # Check for cached results. If there are skip the costly part. if (NOT FFMPEG_LIBRARIES) # Check for all possible component. find_component(AVCODEC libavcodec avcodec libavcodec/avcodec.h) find_component(AVFORMAT libavformat avformat libavformat/avformat.h) find_component(AVDEVICE libavdevice avdevice libavdevice/avdevice.h) find_component(AVUTIL libavutil avutil libavutil/avutil.h) find_component(AVFILTER libavfilter avfilter libavfilter/avfilter.h) find_component(SWSCALE libswscale swscale libswscale/swscale.h) find_component(SWRESAMPLE libswresample swresample libswresample/swresample.h) # Check if the required components were found and add their stuff to the FFMPEG_* vars. 
foreach (_component ${FFmpeg_FIND_COMPONENTS}) if (${_component}_FOUND) # message(STATUS "Required component ${_component} present.") set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} ${${_component}_LIBRARIES}) set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} ${${_component}_DEFINITIONS}) list(APPEND FFMPEG_LIBRARY_DIRS ${${_component}_LIBRARY_DIRS}) list(APPEND FFMPEG_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS}) else () # message(STATUS "Required component ${_component} missing.") endif () endforeach () # Build the include path with duplicates removed. if (FFMPEG_INCLUDE_DIRS) list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS) endif () # Build the lib path with duplicates removed. if (FFMPEG_LIBRARY_DIRS) list(REMOVE_DUPLICATES FFMPEG_LIBRARY_DIRS) endif () # cache the vars. set(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE) set(FFMPEG_LIBRARY_DIRS ${FFMPEG_LIBRARY_DIRS} CACHE STRING "The FFmpeg lib directories." FORCE) set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE) set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} CACHE STRING "The FFmpeg cflags." FORCE) mark_as_advanced( FFMPEG_INCLUDE_DIRS FFMPEG_LIBRARY_DIRS FFMPEG_LIBRARIES FFMPEG_DEFINITIONS ) endif () # Now set the noncached _FOUND vars for the components. foreach (_component AVCODEC AVDEVICE AVFORMAT AVUTIL SWSCALE) set_component_found(${_component}) endforeach () # Compile the list of required vars if (FFMPEG_INCLUDE_DIRS) set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES FFMPEG_INCLUDE_DIRS) else () set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES) endif () foreach (_component ${FFmpeg_FIND_COMPONENTS}) if (${_component}_INCLUDE_DIRS) list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES ${_component}_INCLUDE_DIRS) else () list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES) endif () endforeach () # Give a nice error message if some of the required vars are missing. 
find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS}) ================================================ FILE: src/CMakeSettings.json ================================================ { "configurations": [ { "name": "x64-Debug", "generator": "Ninja", "configurationType": "Debug", "inheritEnvironments": [ "msvc_x64_x64" ], "buildRoot": "${projectDir}\\out\\build\\${name}", "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "buildCommandArgs": "", "ctestCommandArgs": "" }, { "name": "x64-Release", "generator": "Ninja", "configurationType": "Release", "buildRoot": "${projectDir}\\out\\build\\${name}", "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "buildCommandArgs": "", "ctestCommandArgs": "", "inheritEnvironments": [ "msvc_x64_x64" ] }, { "name": "x64-RelWithDebInfo", "generator": "Ninja", "configurationType": "RelWithDebInfo", "buildRoot": "${projectDir}\\out\\build\\${name}", "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "buildCommandArgs": "", "ctestCommandArgs": "", "inheritEnvironments": [ "msvc_x64_x64" ], "variables": [] } ] } ================================================ FILE: src/accelerator/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 3.28) project (accelerator) set(SOURCES ogl/image/image_kernel.cpp ogl/image/image_mixer.cpp ogl/image/image_shader.cpp ogl/util/buffer.cpp ogl/util/context.cpp ogl/util/device.cpp ogl/util/shader.cpp ogl/util/texture.cpp ogl/util/matrix.cpp ogl/util/transforms.cpp accelerator.cpp ) set(HEADERS ogl/image/image_kernel.h ogl/image/image_mixer.h ogl/image/image_shader.h ogl/util/buffer.h ogl/util/context.h ogl/util/device.h ogl/util/shader.h ogl/util/texture.h ogl/util/matrix.h ogl/util/transforms.h ogl_image_vertex.h ogl_image_fragment.h accelerator.h StdAfx.h ) IF(ENABLE_VULKAN) list(APPEND SOURCES vulkan/image/image_kernel.cpp vulkan/image/image_mixer.cpp 
vulkan/util/buffer.cpp vulkan/util/device.cpp vulkan/util/pipeline.cpp vulkan/util/renderpass.cpp vulkan/util/texture.cpp vulkan/util/matrix.cpp vulkan/util/transforms.cpp ) list(APPEND HEADERS vulkan/image/image_kernel.h vulkan/image/image_mixer.h vulkan/util/buffer.h vulkan/util/device.h vulkan/util/draw_params.h vulkan/util/pipeline.h vulkan/util/texture.h vulkan/util/matrix.h vulkan/util/transforms.h vulkan/util/uniform_block.h vulkan_image_vertex.h vulkan_image_fragment.h ) ENDIF() if (MSVC) list(APPEND SOURCES d3d/d3d_device.cpp d3d/d3d_device_context.cpp d3d/d3d_texture2d.cpp ) list(APPEND HEADERS d3d/d3d_device.h d3d/d3d_device_context.h d3d/d3d_texture2d.h ) endif () IF (ENABLE_VULKAN) find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc) set(SHADER_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vulkan/image) set(SHADER_BINARY_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vulkan/image) file(GLOB SHADERS ${SHADER_SOURCE_DIR}/vertex_shader.vert ${SHADER_SOURCE_DIR}/fragment_shader.frag) foreach(source IN LISTS SHADERS) get_filename_component(FILENAME ${source} NAME_WLE) add_custom_command( COMMAND ${glslc_executable} # -MD -MF ${SHADER_BINARY_DIR}/${FILENAME}.d -o ${SHADER_BINARY_DIR}/${FILENAME}.spv ${source} OUTPUT ${SHADER_BINARY_DIR}/${FILENAME}.spv DEPENDS ${source} ${SHADER_BINARY_DIR} COMMENT "Compiling ${FILENAME}" ) list(APPEND SPV_SHADERS ${SHADER_BINARY_DIR}/${FILENAME}.spv) endforeach() add_custom_target(shaders ALL DEPENDS ${SPV_SHADERS}) bin2c("vulkan/image/vertex_shader.spv" "vulkan_image_vertex.h" "caspar::accelerator::vulkan" "vertex_shader") bin2c("vulkan/image/fragment_shader.spv" "vulkan_image_fragment.h" "caspar::accelerator::vulkan" "fragment_shader") ENDIF() bin2c("ogl/image/shader.vert" "ogl_image_vertex.h" "caspar::accelerator::ogl" "vertex_shader") bin2c("ogl/image/shader.frag" "ogl_image_fragment.h" "caspar::accelerator::ogl" "fragment_shader") casparcg_add_library(accelerator SOURCES ${SOURCES} ${HEADERS}) 
target_include_directories(accelerator PRIVATE .. ${CMAKE_CURRENT_BINARY_DIR}) target_precompile_headers(accelerator PRIVATE StdAfx.h) target_link_libraries(accelerator PRIVATE common core GLEW::glew sfml-window) IF(ENABLE_VULKAN) target_link_libraries(accelerator PRIVATE vk-bootstrap::vk-bootstrap GPUOpen::VulkanMemoryAllocator Vulkan::Headers) target_compile_definitions(accelerator PUBLIC VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) ENDIF() source_group(sources ./.*) source_group(sources\\cpu\\image cpu/image/.*) source_group(sources\\cpu\\util cpu/util/.*) source_group(sources\\ogl\\image ogl/image/.*) source_group(sources\\ogl\\util ogl/util/.*) IF(ENABLE_VULKAN) source_group(sources\\vulkan\\image vulkan/image/.*) source_group(sources\\vulkan\\util vulkan/util/.*) ENDIF() ================================================ FILE: src/accelerator/StdAfx.h ================================================ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ================================================ FILE: src/accelerator/accelerator.cpp ================================================ #include "accelerator.h" #include "ogl/image/image_mixer.h" #include "ogl/util/device.h" #ifdef ENABLE_VULKAN #include "vulkan/image/image_mixer.h" #include "vulkan/util/device.h" #endif #include #include #include #include #include #include #include namespace caspar { namespace accelerator { struct accelerator::impl { std::shared_ptr device_; const core::video_format_repository format_repository_; accelerator_backend backend_; impl(const core::video_format_repository format_repository) : format_repository_(format_repository) , 
backend_(accelerator_backend::invalid) { } void set_backend(accelerator_backend backend) { if (backend_ != accelerator_backend::invalid) { CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Accelerator backend already set")); } backend_ = backend; } std::unique_ptr create_image_mixer(int channel_id, common::bit_depth depth) { #ifdef ENABLE_VULKAN if (backend_ == accelerator_backend::vulkan) { return std::make_unique( spl::make_shared_ptr(std::dynamic_pointer_cast(get_device())), channel_id, format_repository_.get_max_video_format_size(), depth); } #endif return std::make_unique( spl::make_shared_ptr(std::dynamic_pointer_cast(get_device())), channel_id, format_repository_.get_max_video_format_size(), depth); } std::shared_ptr get_device() { if (backend_ == accelerator_backend::invalid) { CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Accelerator backend not set")); } #ifdef ENABLE_VULKAN if (backend_ == accelerator_backend::vulkan) { if (!device_) { device_ = std::dynamic_pointer_cast(std::make_shared()); } return device_; } #endif if (!device_) { device_ = std::dynamic_pointer_cast(std::make_shared()); } return device_; } }; accelerator::accelerator(const core::video_format_repository format_repository) : impl_(std::make_unique(format_repository)) { } accelerator::~accelerator() {} void accelerator::set_backend(accelerator_backend backend) { impl_->set_backend(backend); } std::unique_ptr accelerator::create_image_mixer(const int channel_id, common::bit_depth depth) { return impl_->create_image_mixer(channel_id, depth); } std::shared_ptr accelerator::get_device() const { return impl_->get_device(); } }} // namespace caspar::accelerator ================================================ FILE: src/accelerator/accelerator.h ================================================ #pragma once #include #include #include #include #include #include #include #include namespace caspar { namespace accelerator { class accelerator_device { public: virtual boost::property_tree::wptree 
info() const = 0; virtual std::future gc() = 0; }; enum class accelerator_backend { invalid = 0, opengl, #ifdef ENABLE_VULKAN vulkan, #endif }; class accelerator { public: explicit accelerator(const core::video_format_repository format_repository); accelerator(accelerator&) = delete; ~accelerator(); accelerator& operator=(accelerator&) = delete; void set_backend(accelerator_backend backend); std::unique_ptr create_image_mixer(int channel_id, common::bit_depth depth); std::shared_ptr get_device() const; private: struct impl; std::unique_ptr impl_; }; }} // namespace caspar::accelerator ================================================ FILE: src/accelerator/d3d/d3d_device.cpp ================================================ #include "d3d_device.h" #include "d3d_device_context.h" #include "d3d_texture2d.h" #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace accelerator { namespace d3d { struct d3d_device::impl : public std::enable_shared_from_this { using texture_queue_t = tbb::concurrent_bounded_queue>; mutable std::mutex device_pools_mutex_; tbb::concurrent_unordered_map device_pools_; std::wstring adaptor_name_ = L"N/A"; std::shared_ptr device_; std::shared_ptr ctx_; impl() { HRESULT hr; UINT flags = D3D11_CREATE_DEVICE_BGRA_SUPPORT; D3D_FEATURE_LEVEL feature_levels[] = { D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0, }; UINT num_feature_levels = sizeof(feature_levels) / sizeof(feature_levels[0]); D3D_FEATURE_LEVEL selected_level = D3D_FEATURE_LEVEL_9_3; ID3D11Device* pdev = nullptr; ID3D11DeviceContext* pctx = nullptr; hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags, feature_levels, num_feature_levels, D3D11_SDK_VERSION, &pdev, &selected_level, &pctx); if (hr == E_INVALIDARG) { // DirectX 11.0 platforms will not recognize D3D_FEATURE_LEVEL_11_1 // so we need to retry without it hr = D3D11CreateDevice(nullptr, 
D3D_DRIVER_TYPE_HARDWARE, nullptr, flags, &feature_levels[1], num_feature_levels - 1, D3D11_SDK_VERSION, &pdev, &selected_level, &pctx); } if (SUCCEEDED(hr)) { device_ = std::shared_ptr(pdev, [](ID3D11Device* p) { if (p) p->Release(); }); ctx_ = std::make_shared(pctx); CComQIPtr d3d11_1(device_.get()); if (!d3d11_1) { return; } /* needs to support extended resource sharing */ D3D11_FEATURE_DATA_D3D11_OPTIONS opts = {}; hr = d3d11_1->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS, &opts, sizeof(opts)); if (FAILED(hr) || !opts.ExtendedResourceSharing) { CASPAR_THROW_EXCEPTION(bad_alloc() << msg_info(L"Device does not support ExtendedResourceSharing")); } { CComQIPtr dxgi_dev = device_.get(); if (dxgi_dev) { IDXGIAdapter* dxgi_adapt = nullptr; hr = dxgi_dev->GetAdapter(&dxgi_adapt); if (SUCCEEDED(hr)) { DXGI_ADAPTER_DESC desc; hr = dxgi_adapt->GetDesc(&desc); dxgi_adapt->Release(); if (SUCCEEDED(hr)) { adaptor_name_ = u16(desc.Description); } } } } CASPAR_LOG(info) << L"D3D11: Selected adapter: " << adaptor_name_; CASPAR_LOG(info) << L"D3D11: Selected feature level: " << selected_level; } else CASPAR_THROW_EXCEPTION(bad_alloc() << msg_info(L"Failed to create d3d11 device")); } std::shared_ptr open_shared_texture(void* handle) { ID3D11Texture2D* tex = nullptr; CComQIPtr dev = device_.get(); if (dev) { auto hr = dev->OpenSharedResource1((HANDLE)(uintptr_t)handle, __uuidof(ID3D11Texture2D), (void**)(&tex)); if (SUCCEEDED(hr)) return std::make_shared(tex); } return nullptr; } }; d3d_device::d3d_device() : impl_(new impl()) { } d3d_device::~d3d_device() {} std::wstring d3d_device::adapter_name() const { return impl_->adaptor_name_; } void* d3d_device::device() const { return impl_->device_.get(); } std::shared_ptr d3d_device::immedidate_context() { return impl_->ctx_; } std::shared_ptr d3d_device::open_shared_texture(void* handle) { return impl_->open_shared_texture(handle); } const std::shared_ptr& d3d_device::get_device() { static std::shared_ptr device = []() -> 
std::shared_ptr { if (!WGLEW_NV_DX_interop2) { // Device doesn't support the extension, so skip return nullptr; } try { return std::make_shared(); } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); } return nullptr; }(); return device; } }}} // namespace caspar::accelerator::d3d ================================================ FILE: src/accelerator/d3d/d3d_device.h ================================================ #pragma once #include #include namespace caspar { namespace accelerator { namespace d3d { class d3d_device { public: d3d_device(); ~d3d_device(); d3d_device(const d3d_device&) = delete; d3d_device& operator=(const d3d_device&) = delete; std::wstring adapter_name() const; void* device() const; std::shared_ptr immedidate_context(); std::shared_ptr open_shared_texture(void* handle); static const std::shared_ptr& get_device(); private: struct impl; std::shared_ptr impl_; }; }}} // namespace caspar::accelerator::d3d ================================================ FILE: src/accelerator/d3d/d3d_device_context.cpp ================================================ #include "d3d_device_context.h" namespace caspar { namespace accelerator { namespace d3d { d3d_device_context::d3d_device_context(ID3D11DeviceContext* ctx) : ctx_(std::shared_ptr(ctx, [](ID3D11DeviceContext* p) { if (p) p->Release(); })) { } }}} // namespace caspar::accelerator::d3d ================================================ FILE: src/accelerator/d3d/d3d_device_context.h ================================================ #pragma once #undef NOMINMAX #define NOMINMAX #include #include namespace caspar { namespace accelerator { namespace d3d { class d3d_device_context { public: d3d_device_context(ID3D11DeviceContext* ctx); d3d_device_context(const d3d_device_context&) = delete; d3d_device_context& operator=(const d3d_device_context&) = delete; ID3D11DeviceContext* context() const { return ctx_.get(); } private: std::shared_ptr const ctx_; }; }}} // namespace caspar::accelerator::d3d 
================================================ FILE: src/accelerator/d3d/d3d_texture2d.cpp ================================================ #include "d3d_texture2d.h" #include #include #include #include #include "../ogl/util/device.h" namespace caspar { namespace accelerator { namespace d3d { d3d_texture2d::d3d_texture2d(ID3D11Texture2D* tex) : texture_(tex) { share_handle_ = nullptr; D3D11_TEXTURE2D_DESC desc; texture_->GetDesc(&desc); width_ = desc.Width; height_ = desc.Height; format_ = desc.Format; if ((desc.MiscFlags & D3D11_RESOURCE_MISC_SHARED_NTHANDLE) == 0) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("D3D texture is not sharable.")); } { CComQIPtr res = texture_; if (res) { res->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr, &share_handle_); } } if (share_handle_ == nullptr || !wglDXSetResourceShareHandleNV(texture_, share_handle_)) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to setup shared d3d texture.")); } } d3d_texture2d::~d3d_texture2d() { const std::shared_ptr ogl = ogl_.lock(); if (ogl != nullptr) { // The cleanup must happen be done on the opengl thread ogl->dispatch_sync([&] { const std::shared_ptr interop = ogl->d3d_interop(); if (texture_handle_ != nullptr && interop != nullptr) { wglDXUnlockObjectsNV(interop.get(), 1, &texture_handle_); wglDXUnregisterObjectNV(interop.get(), texture_handle_); texture_handle_ = nullptr; } if (gl_texture_id_ != 0) { GL(glDeleteTextures(1, &gl_texture_id_)); gl_texture_id_ = 0; } // TODO: This appears to be leaking something opengl, but it is not clear what that is. 
if (share_handle_ != nullptr) { CloseHandle(share_handle_); share_handle_ = nullptr; } }); } if (texture_ != nullptr) { texture_->Release(); texture_ = nullptr; } } void d3d_texture2d::gen_gl_texture(std::shared_ptr ogl) { if (gl_texture_id_ != 0 || texture_ == nullptr) return; ogl_ = ogl; const std::shared_ptr interop = ogl->d3d_interop(); if (!interop) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("d3d interop not setup to bind shared d3d texture.")); } ogl->dispatch_sync([&] { GL(glGenTextures(1, &gl_texture_id_)); texture_handle_ = wglDXRegisterObjectNV(interop.get(), texture_, gl_texture_id_, GL_TEXTURE_2D, WGL_ACCESS_READ_ONLY_NV); if (!texture_handle_) { GL(glDeleteTextures(1, &gl_texture_id_)); gl_texture_id_ = 0; CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to bind shared d3d texture.")); } if (!wglDXLockObjectsNV(interop.get(), 1, &texture_handle_)) { wglDXUnregisterObjectNV(interop.get(), texture_handle_); texture_handle_ = nullptr; GL(glDeleteTextures(1, &gl_texture_id_)); gl_texture_id_ = 0; CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to lock shared d3d texture.")); } }); } }}} // namespace caspar::accelerator::d3d ================================================ FILE: src/accelerator/d3d/d3d_texture2d.h ================================================ #pragma once #undef NOMINMAX #define NOMINMAX #include #include namespace caspar { namespace accelerator { namespace d3d { class d3d_texture2d { public: d3d_texture2d(ID3D11Texture2D* tex); ~d3d_texture2d(); d3d_texture2d(const d3d_texture2d&) = delete; d3d_texture2d& operator=(const d3d_texture2d&) = delete; uint32_t width() const { return width_; } uint32_t height() const { return height_; } DXGI_FORMAT format() const { return format_; } void* share_handle() const { return share_handle_; } ID3D11Texture2D* texture() const { return texture_; } uint32_t gl_texture_id() const { return gl_texture_id_; } void gen_gl_texture(std::shared_ptr); private: HANDLE 
share_handle_; ID3D11Texture2D* texture_; uint32_t width_ = 0; uint32_t height_ = 0; DXGI_FORMAT format_ = DXGI_FORMAT::DXGI_FORMAT_UNKNOWN; std::weak_ptr ogl_; HANDLE texture_handle_ = nullptr; uint32_t gl_texture_id_ = 0; }; }}} // namespace caspar::accelerator::d3d ================================================ FILE: src/accelerator/ogl/image/image_kernel.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "image_kernel.h" #include "image_shader.h" #include "../util/device.h" #include "../util/shader.h" #include "../util/texture.h" #include #include #include #include #include #include #include #include #include namespace caspar::accelerator::ogl { double get_precision_factor(common::bit_depth depth) { switch (depth) { case common::bit_depth::bit8: return 1.0; case common::bit_depth::bit10: return 64.0; case common::bit_depth::bit12: return 16.0; case common::bit_depth::bit16: return 1.0; default: return 1.0; } } bool is_above_screen(double y) { return y < 0.0; } bool is_below_screen(double y) { return y > 1.0; } bool is_left_of_screen(double x) { return x < 0.0; } bool is_right_of_screen(double x) { return x > 1.0; } bool is_outside_screen(const std::vector& coords) { auto x_coords = coords | boost::adaptors::transformed([](const core::frame_geometry::coord& c) { return c.vertex_x; }); auto y_coords = coords | boost::adaptors::transformed([](const core::frame_geometry::coord& c) { return c.vertex_y; }); return boost::algorithm::all_of(x_coords, &is_left_of_screen) || boost::algorithm::all_of(x_coords, &is_right_of_screen) || boost::algorithm::all_of(y_coords, &is_above_screen) || boost::algorithm::all_of(y_coords, &is_below_screen); } static const double epsilon = 0.001; struct image_kernel::impl { spl::shared_ptr ogl_; spl::shared_ptr shader_; GLuint vao_; GLuint vbo_; explicit impl(const spl::shared_ptr& ogl) : ogl_(ogl) , shader_(ogl_->dispatch_sync([&] { return get_image_shader(ogl); })) { ogl_->dispatch_sync([&] { GL(glGenVertexArrays(1, &vao_)); GL(glGenBuffers(1, &vbo_)); }); } ~impl() { ogl_->dispatch_sync([&] { GL(glDeleteVertexArrays(1, &vao_)); GL(glDeleteBuffers(1, &vbo_)); }); } void draw(draw_params params) { CASPAR_ASSERT(params.pix_desc.planes.size() == params.textures.size()); if (params.textures.empty() || !params.background) { return; } if (params.transforms.image_transform.opacity < epsilon) { 
return; } auto coords = params.geometry.data(); if (coords.empty()) { return; } auto transforms = params.transforms; auto const first_plane = params.pix_desc.planes.at(0); if (params.geometry.mode() != core::frame_geometry::scale_mode::stretch && first_plane.width > 0 && first_plane.height > 0) { auto width_scale = static_cast(params.target_width) / static_cast(first_plane.width); auto height_scale = static_cast(params.target_height) / static_cast(first_plane.height); core::image_transform transform; double target_scale; switch (params.geometry.mode()) { case core::frame_geometry::scale_mode::fit: target_scale = std::min(width_scale, height_scale); transform.fill_scale[0] *= target_scale / width_scale; transform.fill_scale[1] *= target_scale / height_scale; break; case core::frame_geometry::scale_mode::fill: target_scale = std::max(width_scale, height_scale); transform.fill_scale[0] *= target_scale / width_scale; transform.fill_scale[1] *= target_scale / height_scale; break; case core::frame_geometry::scale_mode::original: transform.fill_scale[0] /= width_scale; transform.fill_scale[1] /= height_scale; break; case core::frame_geometry::scale_mode::hfill: transform.fill_scale[1] *= width_scale / height_scale; break; case core::frame_geometry::scale_mode::vfill: transform.fill_scale[0] *= height_scale / width_scale; break; default:; } transforms = transforms.combine_transform(transform, params.aspect_ratio); } coords = transforms.transform_coords(coords); // Skip drawing if all the coordinates will be outside the screen. 
if (coords.size() < 3 || is_outside_screen(coords)) { return; } double precision_factor[4] = {1, 1, 1, 1}; // Bind textures for (int n = 0; n < params.textures.size(); ++n) { params.textures[n]->bind(n); precision_factor[n] = get_precision_factor(params.textures[n]->depth()); } if (params.local_key) { params.local_key->bind(static_cast(texture_id::local_key)); } if (params.layer_key) { params.layer_key->bind(static_cast(texture_id::layer_key)); } const auto is_hd = params.pix_desc.planes.at(0).height > 700; const auto color_space = is_hd ? params.pix_desc.color_space : core::color_space::bt601; const float color_matrices[3][9] = { {1.0, 0.0, 1.402, 1.0, -0.344, -0.509, 1.0, 1.772, 0.0}, // bt.601 {1.0, 0.0, 1.5748, 1.0, -0.1873, -0.4681, 1.0, 1.8556, 0.0}, // bt.709 {1.0, 0.0, 1.4746, 1.0, -0.16455312684366, -0.57135312684366, 1.0, 1.8814, 0.0}}; // bt.2020 const auto color_matrix = color_matrices[static_cast(color_space)]; const float luma_coefficients[3][3] = {{0.299, 0.587, 0.114}, // bt.601 {0.2126, 0.7152, 0.0722}, // bt.709 {0.2627, 0.6780, 0.0593}}; // bt.2020 const auto luma_coeff = luma_coefficients[static_cast(color_space)]; // Setup shader shader_->use(); shader_->set("is_straight_alpha", params.pix_desc.is_straight_alpha); shader_->set("plane[0]", texture_id::plane0); shader_->set("plane[1]", texture_id::plane1); shader_->set("plane[2]", texture_id::plane2); shader_->set("plane[3]", texture_id::plane3); shader_->set("precision_factor[0]", precision_factor[0]); shader_->set("precision_factor[1]", precision_factor[1]); shader_->set("precision_factor[2]", precision_factor[2]); shader_->set("precision_factor[3]", precision_factor[3]); shader_->set("local_key", texture_id::local_key); shader_->set("layer_key", texture_id::layer_key); shader_->set_matrix3("color_matrix", color_matrix); shader_->set("luma_coeff", luma_coeff[0], luma_coeff[1], luma_coeff[2]); shader_->set("has_local_key", static_cast(params.local_key)); shader_->set("has_layer_key", 
static_cast(params.layer_key)); shader_->set("pixel_format", params.pix_desc.format); shader_->set("opacity", transforms.image_transform.is_key ? 1.0 : transforms.image_transform.opacity); if (transforms.image_transform.chroma.enable) { shader_->set("chroma", true); shader_->set("chroma_show_mask", transforms.image_transform.chroma.show_mask); shader_->set("chroma_target_hue", transforms.image_transform.chroma.target_hue / 360.0); shader_->set("chroma_hue_width", transforms.image_transform.chroma.hue_width); shader_->set("chroma_min_saturation", transforms.image_transform.chroma.min_saturation); shader_->set("chroma_min_brightness", transforms.image_transform.chroma.min_brightness); shader_->set("chroma_softness", 1.0 + transforms.image_transform.chroma.softness); shader_->set("chroma_spill_suppress", transforms.image_transform.chroma.spill_suppress / 360.0); shader_->set("chroma_spill_suppress_saturation", transforms.image_transform.chroma.spill_suppress_saturation); } else { shader_->set("chroma", false); } // Setup blend_func if (transforms.image_transform.is_key) { params.blend_mode = core::blend_mode::normal; } params.background->bind(static_cast(texture_id::background)); shader_->set("background", texture_id::background); shader_->set("blend_mode", params.blend_mode); shader_->set("keyer", params.keyer); // Setup image-adjustments shader_->set("invert", transforms.image_transform.invert); if (transforms.image_transform.levels.min_input > epsilon || transforms.image_transform.levels.max_input < 1.0 - epsilon || transforms.image_transform.levels.min_output > epsilon || transforms.image_transform.levels.max_output < 1.0 - epsilon || std::abs(transforms.image_transform.levels.gamma - 1.0) > epsilon) { shader_->set("levels", true); shader_->set("min_input", transforms.image_transform.levels.min_input); shader_->set("max_input", transforms.image_transform.levels.max_input); shader_->set("min_output", transforms.image_transform.levels.min_output); 
shader_->set("max_output", transforms.image_transform.levels.max_output); shader_->set("gamma", transforms.image_transform.levels.gamma); } else { shader_->set("levels", false); } if (std::abs(transforms.image_transform.brightness - 1.0) > epsilon || std::abs(transforms.image_transform.saturation - 1.0) > epsilon || std::abs(transforms.image_transform.contrast - 1.0) > epsilon) { shader_->set("csb", true); shader_->set("brt", transforms.image_transform.brightness); shader_->set("sat", transforms.image_transform.saturation); shader_->set("con", transforms.image_transform.contrast); } else { shader_->set("csb", false); } // Setup drawing area GL(glViewport(0, 0, params.background->width(), params.background->height())); glDisable(GL_DEPTH_TEST); // Set render target params.background->attach(); // Draw GL(glBindVertexArray(vao_)); GL(glBindBuffer(GL_ARRAY_BUFFER, vbo_)); GL(glBufferData(GL_ARRAY_BUFFER, static_cast(sizeof(core::frame_geometry::coord)) * coords.size(), coords.data(), GL_STATIC_DRAW)); auto stride = static_cast(sizeof(core::frame_geometry::coord)); auto vtx_loc = shader_->get_attrib_location("Position"); auto tex_loc = shader_->get_attrib_location("TexCoordIn"); GL(glEnableVertexAttribArray(vtx_loc)); GL(glEnableVertexAttribArray(tex_loc)); GL(glVertexAttribPointer(vtx_loc, 2, GL_DOUBLE, GL_FALSE, stride, nullptr)); GL(glVertexAttribPointer(tex_loc, 4, GL_DOUBLE, GL_FALSE, stride, (GLvoid*)(2 * sizeof(GLdouble)))); GL(glDrawArrays(GL_TRIANGLE_FAN, 0, static_cast(coords.size()))); GL(glTextureBarrier()); GL(glDisableVertexAttribArray(vtx_loc)); GL(glDisableVertexAttribArray(tex_loc)); GL(glBindVertexArray(0)); GL(glBindBuffer(GL_ARRAY_BUFFER, 0)); // Cleanup GL(glDisable(GL_SCISSOR_TEST)); GL(glDisable(GL_BLEND)); } }; image_kernel::image_kernel(const spl::shared_ptr& ogl) : impl_(new impl(ogl)) { } image_kernel::~image_kernel() {} void image_kernel::draw(const draw_params& params) { impl_->draw(params); } } // namespace caspar::accelerator::ogl 
================================================ FILE: src/accelerator/ogl/image/image_kernel.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include #include #include #include "../util/matrix.h" #include "../util/transforms.h" namespace caspar { namespace accelerator { namespace ogl { enum class keyer { linear = 0, additive, }; struct draw_params final { core::pixel_format_desc pix_desc = core::pixel_format_desc(core::pixel_format::invalid); std::vector> textures; draw_transforms transforms; core::frame_geometry geometry = core::frame_geometry::get_default(); core::blend_mode blend_mode = core::blend_mode::normal; ogl::keyer keyer = ogl::keyer::linear; std::shared_ptr background; std::shared_ptr local_key; std::shared_ptr layer_key; double aspect_ratio = 1.0; int target_width; int target_height; }; class image_kernel final { image_kernel(const image_kernel&); image_kernel& operator=(const image_kernel&); public: explicit image_kernel(const spl::shared_ptr& ogl); ~image_kernel(); void draw(const draw_params& params); private: struct impl; spl::unique_ptr impl_; }; }}} // namespace caspar::accelerator::ogl ================================================ FILE: 
src/accelerator/ogl/image/image_mixer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "image_mixer.h" #include "image_kernel.h" #include "../util/buffer.h" #include "../util/device.h" #include "../util/texture.h" #ifdef WIN32 #include "../../d3d/d3d_texture2d.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace accelerator { namespace ogl { using future_texture = std::shared_future>; struct item { core::pixel_format_desc pix_desc = core::pixel_format_desc(core::pixel_format::invalid); std::vector textures; draw_transforms transforms; core::frame_geometry geometry = core::frame_geometry::get_default(); }; struct layer { std::vector sublayers; std::vector items; core::blend_mode blend_mode; explicit layer(core::blend_mode blend_mode) : blend_mode(blend_mode) { } }; class image_renderer { spl::shared_ptr ogl_; image_kernel kernel_; const size_t max_frame_size_; common::bit_depth depth_; public: explicit image_renderer(const spl::shared_ptr& ogl, const size_t max_frame_size, common::bit_depth depth) : ogl_(ogl) , kernel_(ogl_) , max_frame_size_(max_frame_size) , depth_(depth) { } std::future, std::shared_ptr>> 
operator()(std::vector layers, const core::video_format_desc& format_desc) { if (layers.empty()) { // Bypass GPU with empty frame. static const std::vector> buffer(max_frame_size_, 0); return make_ready_future, std::shared_ptr>>( {array(buffer.data(), format_desc.size, true), nullptr}); } auto f = std::move( ogl_->dispatch_async([this, format_desc, layers = std::move(layers)]() mutable -> std::tuple>, std::shared_ptr> { auto target_texture = ogl_->create_texture(format_desc.width, format_desc.height, 4, depth_); draw(target_texture, std::move(layers), format_desc); return {ogl_->copy_async(target_texture), target_texture}; })); return std::async( std::launch::deferred, [f = std::move(f)]() mutable -> std::tuple, std::shared_ptr> { auto tuple = std::move(f.get()); return {std::move(std::get<0>(tuple).get()), std::move(std::get<1>(tuple))}; }); } common::bit_depth depth() const { return depth_; } private: void draw(std::shared_ptr& target_texture, std::vector layers, const core::video_format_desc& format_desc) { std::shared_ptr layer_key_texture; for (auto& layer : layers) { draw(target_texture, layer.sublayers, format_desc); draw(target_texture, std::move(layer), layer_key_texture, format_desc); } } void draw(std::shared_ptr& target_texture, layer layer, std::shared_ptr& layer_key_texture, const core::video_format_desc& format_desc) { if (layer.items.empty()) return; std::shared_ptr local_key_texture; std::shared_ptr local_mix_texture; if (layer.blend_mode != core::blend_mode::normal) { auto layer_texture = ogl_->create_texture(target_texture->width(), target_texture->height(), 4, depth_); for (auto& item : layer.items) draw(layer_texture, std::move(item), layer_key_texture, local_key_texture, local_mix_texture, format_desc); draw(layer_texture, std::move(local_mix_texture), format_desc, core::blend_mode::normal); draw(target_texture, std::move(layer_texture), format_desc, layer.blend_mode); } else // fast path { for (auto& item : layer.items) draw(target_texture, 
std::move(item), layer_key_texture, local_key_texture, local_mix_texture, format_desc); draw(target_texture, std::move(local_mix_texture), format_desc, core::blend_mode::normal); } layer_key_texture = std::move(local_key_texture); } void draw(std::shared_ptr& target_texture, item item, std::shared_ptr& layer_key_texture, std::shared_ptr& local_key_texture, std::shared_ptr& local_mix_texture, const core::video_format_desc& format_desc) { draw_params draw_params; draw_params.target_width = format_desc.square_width; draw_params.target_height = format_desc.square_height; // TODO: Pass the target color_space draw_params.pix_desc = std::move(item.pix_desc); draw_params.transforms = std::move(item.transforms); draw_params.geometry = std::move(item.geometry); draw_params.aspect_ratio = static_cast(format_desc.square_width) / static_cast(format_desc.square_height); for (auto& future_texture : item.textures) { draw_params.textures.push_back(spl::make_shared_ptr(future_texture.get())); } if (draw_params.transforms.image_transform .is_key) { // A key means we will use it for the next non-key item as a mask local_key_texture = local_key_texture ? local_key_texture : ogl_->create_texture(target_texture->width(), target_texture->height(), 1, depth_); draw_params.background = local_key_texture; draw_params.local_key = nullptr; draw_params.layer_key = nullptr; kernel_.draw(std::move(draw_params)); } else if (draw_params.transforms.image_transform .is_mix) { // A mix means precomp the items to a texture, before drawing to the channel local_mix_texture = local_mix_texture ? 
local_mix_texture : ogl_->create_texture(target_texture->width(), target_texture->height(), 4, depth_); draw_params.background = local_mix_texture; draw_params.local_key = std::move(local_key_texture); // Use and reset the key draw_params.layer_key = layer_key_texture; draw_params.keyer = keyer::additive; kernel_.draw(std::move(draw_params)); } else { // If there is a mix, this is the end so draw it and reset draw(target_texture, std::move(local_mix_texture), format_desc, core::blend_mode::normal); draw_params.background = target_texture; draw_params.local_key = std::move(local_key_texture); draw_params.layer_key = layer_key_texture; kernel_.draw(std::move(draw_params)); } } void draw(std::shared_ptr& target_texture, std::shared_ptr&& source_texture, core::video_format_desc format_desc, core::blend_mode blend_mode = core::blend_mode::normal) { if (!source_texture) return; draw_params draw_params; draw_params.target_width = format_desc.square_width; draw_params.target_height = format_desc.square_height; draw_params.pix_desc.format = core::pixel_format::bgra; draw_params.pix_desc.planes = {core::pixel_format_desc::plane( source_texture->width(), source_texture->height(), 4, source_texture->depth())}; draw_params.textures = {spl::make_shared_ptr(source_texture)}; draw_params.blend_mode = blend_mode; draw_params.background = target_texture; draw_params.geometry = core::frame_geometry::get_default(); kernel_.draw(std::move(draw_params)); } }; struct image_mixer::impl : public core::frame_factory , public std::enable_shared_from_this { spl::shared_ptr ogl_; image_renderer renderer_; std::vector transform_stack_; std::vector layers_; // layer/stream/items std::vector layer_stack_; double aspect_ratio_ = 1.0; public: impl(const spl::shared_ptr& ogl, const int channel_id, const size_t max_frame_size, common::bit_depth depth) : ogl_(ogl) , renderer_(ogl, max_frame_size, depth) , transform_stack_(1) { CASPAR_LOG(info) << L"Initialized OpenGL Accelerated GPU Image Mixer for 
channel " << channel_id; } void update_aspect_ratio(double aspect_ratio) { aspect_ratio_ = aspect_ratio; } void push(const core::frame_transform& transform) { auto previous_layer_depth = transform_stack_.back().image_transform.layer_depth; transform_stack_.push_back(transform_stack_.back().combine_transform(transform.image_transform, aspect_ratio_)); auto new_layer_depth = transform_stack_.back().image_transform.layer_depth; if (previous_layer_depth < new_layer_depth) { layer new_layer(transform_stack_.back().image_transform.blend_mode); if (layer_stack_.empty()) { layers_.push_back(std::move(new_layer)); layer_stack_.push_back(&layers_.back()); } else { layer_stack_.back()->sublayers.push_back(std::move(new_layer)); layer_stack_.push_back(&layer_stack_.back()->sublayers.back()); } } } void visit(const core::const_frame& frame) { if (frame.pixel_format_desc().format == core::pixel_format::invalid) return; if (frame.pixel_format_desc().planes.empty()) return; item item; item.pix_desc = frame.pixel_format_desc(); item.transforms = transform_stack_.back(); item.geometry = frame.geometry(); auto textures_ptr = std::any_cast>>(frame.opaque()); if (textures_ptr) { item.textures = *textures_ptr; } else { for (int n = 0; n < static_cast(item.pix_desc.planes.size()); ++n) { item.textures.emplace_back(ogl_->copy_async(frame.image_data(n), item.pix_desc.planes[n].width, item.pix_desc.planes[n].height, item.pix_desc.planes[n].stride, item.pix_desc.planes[n].depth)); } } layer_stack_.back()->items.push_back(item); } void pop() { transform_stack_.pop_back(); layer_stack_.resize(transform_stack_.back().image_transform.layer_depth); } std::future, std::shared_ptr>> render(const core::video_format_desc& format_desc) { return renderer_(std::move(layers_), format_desc); } core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override { return create_frame(tag, desc, common::bit_depth::bit8); } core::mutable_frame create_frame(const void* tag, const 
core::pixel_format_desc& desc, common::bit_depth depth) override { std::vector> image_data; for (auto& plane : desc.planes) { auto bytes_per_pixel = depth == common::bit_depth::bit8 ? 1 : 2; image_data.push_back(ogl_->create_array(plane.size * bytes_per_pixel)); } std::weak_ptr weak_self = shared_from_this(); return core::mutable_frame(tag, std::move(image_data), array{}, desc, [weak_self, desc](std::vector> image_data) -> std::any { auto self = weak_self.lock(); if (!self) { return std::any{}; } std::vector textures; for (int n = 0; n < static_cast(desc.planes.size()); ++n) { textures.emplace_back(self->ogl_->copy_async(image_data[n], desc.planes[n].width, desc.planes[n].height, desc.planes[n].stride, desc.planes[n].depth)); } return std::make_shared(std::move(textures)); }); } #ifdef WIN32 core::const_frame import_d3d_texture(const void* tag, const std::shared_ptr& d3d_texture, core::pixel_format format, common::bit_depth depth) override { // map directx texture with wgl texture if (d3d_texture->gl_texture_id() == 0) d3d_texture->gen_gl_texture(ogl_); // copy directx texture to gl texture auto gl_texture = ogl_->dispatch_sync([this, d3d_texture, depth]() { return ogl_->copy_async( d3d_texture->gl_texture_id(), d3d_texture->width(), d3d_texture->height(), 4, depth); }); // make gl texture to draw std::vector textures{make_ready_future(gl_texture.get())}; std::weak_ptr weak_self = shared_from_this(); core::pixel_format_desc desc(core::pixel_format::bgra); desc.planes.push_back(core::pixel_format_desc::plane(d3d_texture->width(), d3d_texture->height(), 4, depth)); auto frame = core::mutable_frame( tag, std::vector>{}, array{}, desc, [weak_self, texs = std::move(textures)](std::vector> image_data) -> std::any { auto self = weak_self.lock(); if (!self) { return std::any{}; } return std::make_shared(std::move(texs)); }); return core::const_frame(std::move(frame)); } #endif common::bit_depth depth() const { return renderer_.depth(); } }; image_mixer::image_mixer(const 
spl::shared_ptr& ogl, const int channel_id, const size_t max_frame_size, common::bit_depth depth) : impl_(std::make_unique(ogl, channel_id, max_frame_size, depth)) { } image_mixer::~image_mixer() {} void image_mixer::push(const core::frame_transform& transform) { impl_->push(transform); } void image_mixer::visit(const core::const_frame& frame) { impl_->visit(frame); } void image_mixer::pop() { impl_->pop(); } void image_mixer::update_aspect_ratio(double aspect_ratio) { impl_->update_aspect_ratio(aspect_ratio); } std::future, std::shared_ptr>> image_mixer::render(const core::video_format_desc& format_desc) { return impl_->render(format_desc); } core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) { return impl_->create_frame(tag, desc); } core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, common::bit_depth depth) { return impl_->create_frame(tag, desc, depth); } #ifdef WIN32 core::const_frame image_mixer::import_d3d_texture(const void* tag, const std::shared_ptr& d3d_texture, core::pixel_format format, common::bit_depth depth) { return impl_->import_d3d_texture(tag, d3d_texture, format, depth); } #endif common::bit_depth image_mixer::depth() const { return impl_->depth(); } }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/image/image_mixer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include #include #include #include #include namespace caspar { namespace accelerator { namespace ogl { class image_mixer final : public core::image_mixer { public: image_mixer(const spl::shared_ptr& ogl, int channel_id, const size_t max_frame_size, common::bit_depth depth); image_mixer(const image_mixer&) = delete; ~image_mixer(); image_mixer& operator=(const image_mixer&) = delete; std::future, std::shared_ptr>> render(const core::video_format_desc& format_desc) override; core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override; core::mutable_frame create_frame(const void* video_stream_tag, const core::pixel_format_desc& desc, common::bit_depth depth) override; #ifdef WIN32 core::const_frame import_d3d_texture(const void* tag, const std::shared_ptr& d3d_texture, core::pixel_format format, common::bit_depth depth) override; #endif void update_aspect_ratio(double aspect_ratio) override; // core::image_mixer void push(const core::frame_transform& frame) override; void visit(const core::const_frame& frame) override; void pop() override; common::bit_depth depth() const override; private: struct impl; std::shared_ptr impl_; }; }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/image/image_shader.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "image_shader.h" #include "../util/device.h" #include "../util/shader.h" #include "ogl_image_fragment.h" #include "ogl_image_vertex.h" namespace caspar { namespace accelerator { namespace ogl { std::weak_ptr g_shader; std::mutex g_shader_mutex; std::shared_ptr get_image_shader(const spl::shared_ptr& ogl) { std::lock_guard lock(g_shader_mutex); auto existing_shader = g_shader.lock(); if (existing_shader) { return existing_shader; } // The deleter is alive until the weak pointer is destroyed, so we have // to weakly reference ogl, to not keep it alive until atexit std::weak_ptr weak_ogl = ogl; auto deleter = [weak_ogl](shader* p) { auto ogl = weak_ogl.lock(); if (ogl) { ogl->dispatch_async([=] { delete p; }); } }; existing_shader.reset(new shader(std::string(reinterpret_cast(vertex_shader)), std::string(reinterpret_cast(fragment_shader))), deleter); g_shader = existing_shader; return existing_shader; } }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/image/image_shader.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include namespace caspar { namespace accelerator { namespace ogl { class shader; class device; enum class texture_id { plane0 = 0, plane1, plane2, plane3, local_key, layer_key, background }; std::shared_ptr get_image_shader(const spl::shared_ptr& ogl); }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/image/shader.frag ================================================ #version 450 in vec4 TexCoord; in vec4 TexCoord2; out vec4 fragColor; uniform sampler2D background; uniform sampler2D plane[4]; uniform sampler2D local_key; uniform sampler2D layer_key; uniform bool is_straight_alpha; uniform mat3 color_matrix; uniform vec3 luma_coeff; uniform bool has_local_key; uniform bool has_layer_key; uniform int blend_mode; uniform int keyer; uniform int pixel_format; uniform bool invert; uniform float opacity; uniform bool levels; uniform float min_input; uniform float max_input; uniform float gamma; uniform float min_output; uniform float max_output; uniform float precision_factor[4]; uniform bool csb; uniform float brt; uniform float sat; uniform float con; uniform bool chroma; uniform bool chroma_show_mask; uniform float chroma_target_hue; uniform float chroma_hue_width; uniform float chroma_min_saturation; uniform float chroma_min_brightness; uniform float 
chroma_softness;
uniform float chroma_spill_suppress;
uniform float chroma_spill_suppress_saturation;

/*
** Contrast, saturation, brightness
** Code of this function is from TGM's shader pack
** http://irrlicht.sourceforge.net/phpBB2/viewtopic.php?t=21057
*/
vec3 ContrastSaturationBrightness(vec4 color, float brt, float sat, float con)
{
    const float AvgLumR = 0.5;
    const float AvgLumG = 0.5;
    const float AvgLumB = 0.5;

    vec3 LumCoeff = luma_coeff.bgr;

    if (color.a > 0.0)
        color.rgb /= color.a;

    vec3 AvgLumin  = vec3(AvgLumR, AvgLumG, AvgLumB);
    vec3 brtColor  = color.rgb * brt;
    vec3 intensity = vec3(dot(brtColor, LumCoeff));
    vec3 satColor  = mix(intensity, brtColor, sat);
    vec3 conColor  = mix(AvgLumin, satColor, con);

    conColor.rgb *= color.a;

    return conColor;
}

/*
** Gamma correction
** Details: http://blog.mouaif.org/2009/01/22/photoshop-gamma-correction-shader/
*/
#define GammaCorrection(color, gamma) pow(color, vec3(1.0 / gamma))

/*
** Levels control (input (+gamma), output)
** Details: http://blog.mouaif.org/2009/01/28/levels-control-shader/
*/
#define LevelsControlInputRange(color, minInput, maxInput) min(max(color - vec3(minInput), vec3(0.0)) / (vec3(maxInput) - vec3(minInput)), vec3(1.0))
#define LevelsControlInput(color, minInput, gamma, maxInput) GammaCorrection(LevelsControlInputRange(color, minInput, maxInput), gamma)
#define LevelsControlOutputRange(color, minOutput, maxOutput) mix(vec3(minOutput), vec3(maxOutput), color)
#define LevelsControl(color, minInput, gamma, maxInput, minOutput, maxOutput) LevelsControlOutputRange(LevelsControlInput(color, minInput, gamma, maxInput), minOutput, maxOutput)

/*
** Photoshop & misc math
** Blending modes, RGB/HSL/Contrast/Desaturate, levels control
**
** Romain Dura | Romz
** Blog: http://blog.mouaif.org
** Post: http://blog.mouaif.org/?p=94
*/

/*
** Desaturation
*/
vec4 Desaturate(vec3 color, float Desaturation)
{
    vec3 grayXfer = vec3(0.3, 0.59, 0.11);
    vec3 gray     = vec3(dot(grayXfer, color));
    return vec4(mix(color, gray, Desaturation), 1.0);
}

/*
** Hue, saturation, luminance
*/
vec3 RGBToHSL(vec3 color)
{
    vec3 hsl;

    float fmin  = min(min(color.r, color.g), color.b);
    float fmax  = max(max(color.r, color.g), color.b);
    float delta = fmax - fmin;

    hsl.z = (fmax + fmin) / 2.0;

    if (delta == 0.0) {
        hsl.x = 0.0;
        hsl.y = 0.0;
    } else {
        if (hsl.z < 0.5)
            hsl.y = delta / (fmax + fmin);
        else
            hsl.y = delta / (2.0 - fmax - fmin);

        float deltaR = (((fmax - color.r) / 6.0) + (delta / 2.0)) / delta;
        float deltaG = (((fmax - color.g) / 6.0) + (delta / 2.0)) / delta;
        float deltaB = (((fmax - color.b) / 6.0) + (delta / 2.0)) / delta;

        if (color.r == fmax)
            hsl.x = deltaB - deltaG;
        else if (color.g == fmax)
            hsl.x = (1.0 / 3.0) + deltaR - deltaB;
        else if (color.b == fmax)
            hsl.x = (2.0 / 3.0) + deltaG - deltaR;

        if (hsl.x < 0.0)
            hsl.x += 1.0;
        else if (hsl.x > 1.0)
            hsl.x -= 1.0;
    }

    return hsl;
}

float HueToRGB(float f1, float f2, float hue)
{
    if (hue < 0.0)
        hue += 1.0;
    else if (hue > 1.0)
        hue -= 1.0;

    float res;

    if ((6.0 * hue) < 1.0)
        res = f1 + (f2 - f1) * 6.0 * hue;
    else if ((2.0 * hue) < 1.0)
        res = f2;
    else if ((3.0 * hue) < 2.0)
        res = f1 + (f2 - f1) * ((2.0 / 3.0) - hue) * 6.0;
    else
        res = f1;

    return res;
}

vec3 HSLToRGB(vec3 hsl)
{
    vec3 rgb;

    if (hsl.y == 0.0)
        rgb = vec3(hsl.z);
    else {
        float f2;

        if (hsl.z < 0.5)
            f2 = hsl.z * (1.0 + hsl.y);
        else
            f2 = (hsl.z + hsl.y) - (hsl.y * hsl.z);

        float f1 = 2.0 * hsl.z - f2;

        rgb.r = HueToRGB(f1, f2, hsl.x + (1.0/3.0));
        rgb.g = HueToRGB(f1, f2, hsl.x);
        rgb.b = HueToRGB(f1, f2, hsl.x - (1.0/3.0));
    }

    return rgb;
}

/*
** Float blending modes
** Adapted from here: http://www.nathanm.com/photoshop-blending-math/
** But I modified the HardMix (wrong condition), Overlay, SoftLight, ColorDodge, ColorBurn, VividLight, PinLight (inverted layers) ones to have correct results
*/
#define BlendLinearDodgef BlendAddf
#define BlendLinearBurnf BlendSubstractf
#define BlendAddf(base, blend) min(base + blend, 1.0)
#define BlendSubstractf(base, blend) max(base + blend - 1.0, 0.0)
#define BlendLightenf(base, blend) max(blend, base)
#define BlendDarkenf(base, blend) min(blend, base)
#define BlendLinearLightf(base, blend) (blend < 0.5 ? BlendLinearBurnf(base, (2.0 * blend)) : BlendLinearDodgef(base, (2.0 * (blend - 0.5))))
#define BlendScreenf(base, blend) (1.0 - ((1.0 - base) * (1.0 - blend)))
#define BlendOverlayf(base, blend) (base < 0.5 ? (2.0 * base * blend) : (1.0 - 2.0 * (1.0 - base) * (1.0 - blend)))
#define BlendSoftLightf(base, blend) ((blend < 0.5) ? (2.0 * base * blend + base * base * (1.0 - 2.0 * blend)) : (sqrt(base) * (2.0 * blend - 1.0) + 2.0 * base * (1.0 - blend)))
#define BlendColorDodgef(base, blend) ((blend == 1.0) ? blend : min(base / (1.0 - blend), 1.0))
#define BlendColorBurnf(base, blend) ((blend == 0.0) ? blend : max((1.0 - ((1.0 - base) / blend)), 0.0))
#define BlendVividLightf(base, blend) ((blend < 0.5) ? BlendColorBurnf(base, (2.0 * blend)) : BlendColorDodgef(base, (2.0 * (blend - 0.5))))
#define BlendPinLightf(base, blend) ((blend < 0.5) ? BlendDarkenf(base, (2.0 * blend)) : BlendLightenf(base, (2.0 *(blend - 0.5))))
#define BlendHardMixf(base, blend) ((BlendVividLightf(base, blend) < 0.5) ? 0.0 : 1.0)
#define BlendReflectf(base, blend) ((blend == 1.0) ? blend : min(base * base / (1.0 - blend), 1.0))

/*
** Vector3 blending modes
*/
#define Blend(base, blend, funcf) vec3(funcf(base.r, blend.r), funcf(base.g, blend.g), funcf(base.b, blend.b))
#define BlendNormal(base, blend) (blend)
#define BlendLighten BlendLightenf
#define BlendDarken BlendDarkenf
#define BlendMultiply(base, blend) (base * blend)
#define BlendAverage(base, blend) ((base + blend) / 2.0)
#define BlendAdd(base, blend) min(base + blend, vec3(1.0))
#define BlendSubstract(base, blend) max(base + blend - vec3(1.0), vec3(0.0))
#define BlendDifference(base, blend) abs(base - blend)
#define BlendNegation(base, blend) (vec3(1.0) - abs(vec3(1.0) - base - blend))
#define BlendExclusion(base, blend) (base + blend - 2.0 * base * blend)
#define BlendScreen(base, blend) Blend(base, blend, BlendScreenf)
#define BlendOverlay(base, blend) Blend(base, blend, BlendOverlayf)
#define BlendSoftLight(base, blend) Blend(base, blend, BlendSoftLightf)
#define BlendHardLight(base, blend) BlendOverlay(blend, base)
#define BlendColorDodge(base, blend) Blend(base, blend, BlendColorDodgef)
#define BlendColorBurn(base, blend) Blend(base, blend, BlendColorBurnf)
#define BlendLinearDodge BlendAdd
#define BlendLinearBurn BlendSubstract
#define BlendLinearLight(base, blend) Blend(base, blend, BlendLinearLightf)
#define BlendVividLight(base, blend) Blend(base, blend, BlendVividLightf)
#define BlendPinLight(base, blend) Blend(base, blend, BlendPinLightf)
#define BlendHardMix(base, blend) Blend(base, blend, BlendHardMixf)
#define BlendReflect(base, blend) Blend(base, blend, BlendReflectf)
#define BlendGlow(base, blend) BlendReflect(blend, base)
#define BlendPhoenix(base, blend) (min(base, blend) - max(base, blend) + vec3(1.0))
#define BlendOpacity(base, blend, F, O) (F(base, blend) * O + blend * (1.0 - O))

vec3 BlendHue(vec3 base, vec3 blend)
{
    vec3 baseHSL = RGBToHSL(base);
    return HSLToRGB(vec3(RGBToHSL(blend).r, baseHSL.g, baseHSL.b));
}

vec3 BlendSaturation(vec3 base, vec3 blend)
{
    vec3 baseHSL = RGBToHSL(base);
    return HSLToRGB(vec3(baseHSL.r, RGBToHSL(blend).g, baseHSL.b));
}

vec3 BlendColor(vec3 base, vec3 blend)
{
    vec3 blendHSL = RGBToHSL(blend);
    return HSLToRGB(vec3(blendHSL.r, blendHSL.g, RGBToHSL(base).b));
}

vec3 BlendLuminosity(vec3 base, vec3 blend)
{
    vec3 baseHSL = RGBToHSL(base);
    return HSLToRGB(vec3(baseHSL.r, baseHSL.g, RGBToHSL(blend).b));
}

// Chroma keying
// Author: Tim Eves
//
// This implements the Chroma key algorithm described in the paper:
//      'Software Chroma Keying in an Imersive Virtual Environment'
//      by F. van den Bergh & V. Lalioti
// but as a pixel shader algorithm.
//
vec4 grey_xfer = vec4(luma_coeff, 0);

// This allows us to implement the paper's alphaMap curve in software
// rather than a largeish array
float alpha_map(float d) { return 1.0 - smoothstep(1.0, chroma_softness, d); }

// http://stackoverflow.com/questions/15095909/from-rgb-to-hsv-in-opengl-glsl
vec3 rgb2hsv(vec3 c)
{
    vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
    vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
    vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));

    float d = q.x - min(q.w, q.y);
    float e = 1.0e-10;
    return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}

// From the same page
vec3 hsv2rgb(vec3 c)
{
    vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
    vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
    return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}

float AngleDiff(float angle1, float angle2) { return 0.5 - abs(abs(angle1 - angle2) - 0.5); }

float AngleDiffDirectional(float angle1, float angle2)
{
    float diff = angle1 - angle2;

    return diff < -0.5 ? diff + 1.0 : (diff > 0.5 ? diff - 1.0 : diff);
}

float Distance(float actual, float target) { return min(0.0, target - actual); }

float ColorDistance(vec3 hsv)
{
    float hueDiff        = AngleDiff(hsv.x, chroma_target_hue) * 2;
    float saturationDiff = Distance(hsv.y, chroma_min_saturation);
    float brightnessDiff = Distance(hsv.z, chroma_min_brightness);

    float saturationBrightnessScore = max(brightnessDiff, saturationDiff);
    float hueScore                  = hueDiff - chroma_hue_width;

    return -hueScore * saturationBrightnessScore;
}

vec3 supress_spill(vec3 c)
{
    float hue      = c.x;
    float diff     = AngleDiffDirectional(hue, chroma_target_hue);
    float distance = abs(diff) / chroma_spill_suppress;

    if (distance < 1) {
        c.x = diff < 0 ? chroma_target_hue - chroma_spill_suppress : chroma_target_hue + chroma_spill_suppress;
        c.y *= min(1.0, distance + chroma_spill_suppress_saturation);
    }

    return c;
}

// Key on any color
vec4 ChromaOnCustomColor(vec4 c)
{
    vec3  hsv        = rgb2hsv(c.rgb);
    float distance   = ColorDistance(hsv);
    float d          = distance * -2.0 + 1.0;
    vec4  suppressed = vec4(hsv2rgb(supress_spill(hsv)), 1.0);
    float alpha      = alpha_map(d);

    suppressed *= alpha;

    return chroma_show_mask ? vec4(suppressed.a, suppressed.a, suppressed.a, 1) : suppressed;
}

vec3 get_blend_color(vec3 back, vec3 fore)
{
    switch(blend_mode) {
        case  0: return BlendNormal(back, fore);
        case  1: return BlendLighten(back, fore);
        case  2: return BlendDarken(back, fore);
        case  3: return BlendMultiply(back, fore);
        case  4: return BlendAverage(back, fore);
        case  5: return BlendAdd(back, fore);
        case  6: return BlendSubstract(back, fore);
        case  7: return BlendDifference(back, fore);
        case  8: return BlendNegation(back, fore);
        case  9: return BlendExclusion(back, fore);
        case 10: return BlendScreen(back, fore);
        case 11: return BlendOverlay(back, fore);
//        case 12: return BlendSoftLight(back, fore);
        case 13: return BlendHardLight(back, fore);
        case 14: return BlendColorDodge(back, fore);
        case 15: return BlendColorBurn(back, fore);
        case 16: return BlendLinearDodge(back, fore);
        case 17: return BlendLinearBurn(back, fore);
        case 18: return BlendLinearLight(back, fore);
        case 19: return BlendVividLight(back, fore);
        case 20: return BlendPinLight(back, fore);
        case 21: return BlendHardMix(back, fore);
        case 22: return BlendReflect(back, fore);
        case 23: return BlendGlow(back, fore);
        case 24: return BlendPhoenix(back, fore);
        case 25: return BlendHue(back, fore);
        case 26: return BlendSaturation(back, fore);
        case 27: return BlendColor(back, fore);
        case 28: return BlendLuminosity(back, fore);
    }
    return BlendNormal(back, fore);
}

vec4 blend(vec4 fore)
{
    vec4 back = texture(background, TexCoord2.st).bgra;
    if (blend_mode != 0)
        fore.rgb = get_blend_color(back.rgb/(back.a+0.0000001), fore.rgb/(fore.a+0.0000001))*fore.a;
    switch(keyer) {
        case 1:  return fore + back; // additive
        default: return fore + (1.0-fore.a)*back; // linear
    }
}

vec4 chroma_key(vec4 c) { return ChromaOnCustomColor(c.bgra).bgra; }

vec4 ycbcra_to_rgba(float Y, float Cb, float Cr, float A)
{
    const float luma_coefficient   = 255.0/219.0;
    const float chroma_coefficient = 255.0/224.0;

    vec3 YCbCr = vec3(Y, Cb, Cr) * 255;
    YCbCr -= vec3(16.0, 128.0, 128.0);
    YCbCr *= vec3(luma_coefficient, chroma_coefficient, chroma_coefficient);

    return vec4(color_matrix * YCbCr / 255, A).bgra;
}

vec4 get_sample(sampler2D sampler, vec2 coords) { return texture(sampler, coords); }

vec4 get_rgba_color()
{
    switch(pixel_format) {
        case 0: //gray
            return vec4(get_sample(plane[0], TexCoord.st / TexCoord.q).rrr * precision_factor[0], 1.0);
        case 1: //bgra,
            return get_sample(plane[0], TexCoord.st / TexCoord.q).bgra * precision_factor[0];
        case 2: //rgba,
            return get_sample(plane[0], TexCoord.st / TexCoord.q).rgba * precision_factor[0];
        case 3: //argb,
            return get_sample(plane[0], TexCoord.st / TexCoord.q).argb * precision_factor[0];
        case 4: //abgr,
            return get_sample(plane[0], TexCoord.st / TexCoord.q).gbar * precision_factor[0];
        case 5: //ycbcr,
        {
            float y  = get_sample(plane[0], TexCoord.st / TexCoord.q).r * precision_factor[0];
            float cb = get_sample(plane[1], TexCoord.st / TexCoord.q).r * precision_factor[1];
            float cr = get_sample(plane[2], TexCoord.st / TexCoord.q).r * precision_factor[2];
            return ycbcra_to_rgba(y, cb, cr, 1.0);
        }
        case 6: //ycbcra
        {
            float y  = get_sample(plane[0], TexCoord.st / TexCoord.q).r * precision_factor[0];
            float cb = get_sample(plane[1], TexCoord.st / TexCoord.q).r * precision_factor[1];
            float cr = get_sample(plane[2], TexCoord.st / TexCoord.q).r * precision_factor[2];
            float a  = get_sample(plane[3], TexCoord.st / TexCoord.q).r * precision_factor[3];
            return ycbcra_to_rgba(y, cb, cr, a);
        }
        case 7: //luma
        {
            vec3 y3 = get_sample(plane[0], TexCoord.st / TexCoord.q).rrr * precision_factor[0];
            return vec4((y3-0.065)/0.859, 1.0);
        }
        case 8: //bgr,
            return vec4(get_sample(plane[0], TexCoord.st / TexCoord.q).bgr * precision_factor[0], 1.0);
        case 9: //rgb,
            return vec4(get_sample(plane[0], TexCoord.st / TexCoord.q).rgb * precision_factor[0], 1.0);
        case 10: // uyvy
        {
            float y  = get_sample(plane[0], TexCoord.st / TexCoord.q).g * precision_factor[0];
            float cb = get_sample(plane[1], TexCoord.st / TexCoord.q).b * precision_factor[1];
            float cr =
get_sample(plane[1], TexCoord.st / TexCoord.q).r * precision_factor[1];
            return ycbcra_to_rgba(y, cb, cr, 1.0);
        }
        case 11: // gbrp
        {
            float g = get_sample(plane[0], TexCoord.st / TexCoord.q).r * precision_factor[0];
            float b = get_sample(plane[1], TexCoord.st / TexCoord.q).r * precision_factor[1];
            float r = get_sample(plane[2], TexCoord.st / TexCoord.q).r * precision_factor[2];
            return vec4(b, g, r, 1.0);
        }
        case 12: // gbrap
        {
            float g = get_sample(plane[0], TexCoord.st / TexCoord.q).r * precision_factor[0];
            float b = get_sample(plane[1], TexCoord.st / TexCoord.q).r * precision_factor[1];
            float r = get_sample(plane[2], TexCoord.st / TexCoord.q).r * precision_factor[2];
            float a = get_sample(plane[3], TexCoord.st / TexCoord.q).r * precision_factor[3];
            return vec4(b, g, r, a);
        }
    }
    return vec4(0.0, 0.0, 0.0, 0.0);
}

void main()
{
    vec4 color = get_rgba_color();
    if (is_straight_alpha)
        color.rgb *= color.a;
    if (chroma)
        color = chroma_key(color);
    if (levels)
        color.rgb = LevelsControl(color.rgb, min_input, gamma, max_input, min_output, max_output);
    if (csb)
        color.rgb = ContrastSaturationBrightness(color, brt, sat, con);
    if (has_local_key)
        color *= texture(local_key, TexCoord2.st).r;
    if (has_layer_key)
        color *= texture(layer_key, TexCoord2.st).r;
    color *= opacity;

    if (invert)
        color = 1.0 - color;

    if (blend_mode >= 0)
        color = blend(color);

    fragColor = color.bgra;
}

================================================ FILE: src/accelerator/ogl/image/shader.vert ================================================
#version 450

in vec4 TexCoordIn;
in vec2 Position;

out vec4 TexCoord;
out vec4 TexCoord2;

void main()
{
    TexCoord = TexCoordIn;

    vec4 pos = vec4(Position, 0, 1);
    TexCoord2 = vec4(pos.xy, 0.0, 0.0);
    pos.x = pos.x*2.0 - 1.0;
    pos.y = pos.y*2.0 - 1.0;
    gl_Position = pos;
}

================================================ FILE: src/accelerator/ogl/util/buffer.cpp ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG
(www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "buffer.h" #include #include #include #include namespace caspar { namespace accelerator { namespace ogl { static std::atomic g_w_total_count; static std::atomic g_w_total_size; static std::atomic g_r_total_count; static std::atomic g_r_total_size; struct buffer::impl { GLuint id_ = 0; GLsizei size_ = 0; void* data_ = nullptr; bool write_ = false; GLenum target_ = 0; GLbitfield flags_ = 0; impl(const impl&) = delete; impl& operator=(const impl&) = delete; public: impl(int size, bool write) : size_(size) , write_(write) , target_(!write ? GL_PIXEL_PACK_BUFFER : GL_PIXEL_UNPACK_BUFFER) , flags_(GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT | (write ? GL_MAP_WRITE_BIT : GL_MAP_READ_BIT)) { GL(glCreateBuffers(1, &id_)); GL(glNamedBufferStorage(id_, size_, nullptr, flags_)); data_ = GL2(glMapNamedBufferRange(id_, 0, size_, flags_)); (write ? g_w_total_count : g_r_total_count)++; (write ? g_w_total_size : g_r_total_size) += size_; } ~impl() { GL(glUnmapNamedBuffer(id_)); glDeleteBuffers(1, &id_); (write_ ? g_w_total_size : g_r_total_size) -= size_; (write_ ? 
g_w_total_count : g_r_total_count)--; } void bind() { GL(glBindBuffer(target_, id_)); } void unbind() { GL(glBindBuffer(target_, 0)); } }; buffer::buffer(int size, bool write) : impl_(new impl(size, write)) { } buffer::buffer(buffer&& other) : impl_(std::move(other.impl_)) { } buffer::~buffer() {} buffer& buffer::operator=(buffer&& other) { impl_ = std::move(other.impl_); return *this; } void* buffer::data() { return impl_->data_; } bool buffer::write() const { return impl_->write_; } int buffer::size() const { return impl_->size_; } void buffer::bind() { return impl_->bind(); } void buffer::unbind() { return impl_->unbind(); } int buffer::id() const { return impl_->id_; } boost::property_tree::wptree buffer::info() { boost::property_tree::wptree info; info.add(L"total_read_count", g_r_total_count); info.add(L"total_write_count", g_w_total_count); info.add(L"total_read_size", g_r_total_size); info.add(L"total_write_size", g_w_total_size); return info; } }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/buffer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include namespace caspar { namespace accelerator { namespace ogl { class buffer final { public: static boost::property_tree::wptree info(); buffer(int size, bool write); buffer(const buffer&) = delete; buffer(buffer&& other); ~buffer(); buffer& operator=(const buffer&) = delete; buffer& operator=(buffer&& other); void bind(); void unbind(); int id() const; void* data(); int size() const; bool write() const; private: struct impl; std::unique_ptr impl_; }; }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/context.cpp ================================================ #include "context.h" #include #include #if SFML_VERSION_MAJOR >= 3 #include #endif #include // std::ignore #ifndef _MSC_VER #include #include #include #endif namespace caspar::accelerator::ogl { struct device_context::impl { virtual ~impl() {} virtual void bind() = 0; virtual void unbind() = 0; }; struct impl_sfml : public device_context::impl { sf::Context device_; impl_sfml() #if SFML_VERSION_MAJOR >= 3 : device_(sf::ContextSettings{.depthBits = 0, .stencilBits = 0, .antiAliasingLevel = 0, .majorVersion = 4, .minorVersion = 5, .attributeFlags = sf::ContextSettings::Attribute::Core}, {1, 1}) #else : device_(sf::ContextSettings(0, 0, 0, 4, 5, sf::ContextSettings::Attribute::Core), 1, 1) #endif { CASPAR_LOG(info) << L"Initializing OpenGL Device (sfml)."; } virtual ~impl_sfml() {} virtual void bind() override { std::ignore = device_.setActive(true); } virtual void unbind() override { std::ignore = device_.setActive(false); } }; #ifndef _MSC_VER struct impl_egl : public device_context::impl { EGLDisplay eglDisplay_; EGLContext eglContext_; impl_egl() : eglDisplay_(EGL_NO_DISPLAY) , eglContext_(EGL_NO_CONTEXT) { CASPAR_LOG(info) << L"Initializing OpenGL Device (EGL)."; eglDisplay_ = eglGetDisplay(EGL_DEFAULT_DISPLAY); EGLint major, minor; eglInitialize(eglDisplay_, 
&major, &minor); const EGLint configAttribs[] = {EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_RED_SIZE, 8, EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_NONE}; EGLint numConfigs; EGLConfig eglConfig; if (!eglChooseConfig(eglDisplay_, configAttribs, &eglConfig, 1, &numConfigs)) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize OpenGL: eglChooseConfig")); } if (!eglBindAPI(EGL_OPENGL_API)) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize OpenGL: eglBindAPI")); } eglContext_ = eglCreateContext(eglDisplay_, eglConfig, EGL_NO_CONTEXT, NULL); if (eglContext_ == EGL_NO_CONTEXT) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize OpenGL: eglCreateContext")); } if (!eglMakeCurrent(eglDisplay_, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext_)) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize OpenGL: eglMakeCurrent")); } } virtual ~impl_egl() { eglMakeCurrent(eglDisplay_, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); if (eglContext_ != EGL_NO_CONTEXT) { eglDestroyContext(eglDisplay_, eglContext_); } eglTerminate(eglDisplay_); } virtual void bind() override { eglMakeCurrent(eglDisplay_, EGL_NO_SURFACE, EGL_NO_SURFACE, eglContext_); } virtual void unbind() override { eglMakeCurrent(eglDisplay_, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); } }; #endif #ifndef _MSC_VER device_context::device_context() : impl_(std::getenv("DISPLAY") == nullptr ? 
spl::make_shared() : spl::make_shared()) { } #else device_context::device_context() : impl_(new impl_sfml()) { } #endif device_context::~device_context() {} void device_context::bind() { impl_->bind(); } void device_context::unbind() { impl_->unbind(); } } // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/context.h ================================================ #pragma once #include namespace caspar::accelerator::ogl { class device_context final { public: device_context(); ~device_context(); device_context(const device_context&) = delete; device_context& operator=(const device_context&) = delete; void bind(); void unbind(); struct impl; private: spl::shared_ptr impl_; }; } // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/device.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "device.h" #include "buffer.h" #include "context.h" #include "shader.h" #include "texture.h" #include #include #include #include #include #include #include #ifdef WIN32 #include "../../d3d/d3d_device.h" #include #endif #include #include #include #include #include #include #include #include #include namespace caspar { namespace accelerator { namespace ogl { using namespace boost::asio; struct device::impl : public std::enable_shared_from_this { using texture_queue_t = tbb::concurrent_bounded_queue>; using buffer_queue_t = tbb::concurrent_bounded_queue>; std::unique_ptr context_; std::array, 4>, 2> device_pools_; std::array, 2> host_pools_; GLuint fbo_; std::wstring version_; #ifdef WIN32 std::shared_ptr d3d_device_; std::shared_ptr interop_handle_; #endif io_context io_context_; decltype(make_work_guard(io_context_)) work_; std::thread thread_; impl() : context_(new device_context()) , work_(make_work_guard(io_context_)) { CASPAR_LOG(info) << L"Initializing OpenGL Device."; context_->bind(); auto err = glewInit(); if (err != GLEW_OK && err != 4) { // GLEW_ERROR_NO_GLX_DISPLAY std::stringstream str; str << "Failed to initialize GLEW (" << (int)err << "): " << glewGetErrorString(err) << std::endl; CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info(str.str())); } #ifdef WIN32 if (wglewInit() != GLEW_OK) { CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW.")); } #endif version_ = u16(reinterpret_cast(GL2(glGetString(GL_VERSION)))) + L" " + u16(reinterpret_cast(GL2(glGetString(GL_VENDOR)))); CASPAR_LOG(info) << L"Initialized OpenGL " << version(); if (!GLEW_VERSION_4_5 && !glewIsSupported("GL_ARB_sync GL_ARB_shader_objects GL_ARB_multitexture " "GL_ARB_direct_state_access GL_ARB_texture_barrier")) { CASPAR_THROW_EXCEPTION(not_supported() << msg_info("Your graphics card does not meet the minimum hardware requirements " "since it does not support OpenGL 4.5 or higher.")); } 
GL(glCreateFramebuffers(1, &fbo_)); GL(glBindFramebuffer(GL_FRAMEBUFFER, fbo_)); context_->unbind(); #ifdef WIN32 if (env::properties().get(L"configuration.html.enable-gpu", false)) { d3d_device_ = d3d::d3d_device::get_device(); } if (d3d_device_) { interop_handle_ = std::shared_ptr(wglDXOpenDeviceNV(d3d_device_->device()), [](void* p) { if (p) wglDXCloseDeviceNV(p); }); if (!interop_handle_) CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize d3d interop.")); } #endif thread_ = std::thread([&] { context_->bind(); set_thread_name(L"OpenGL Device"); io_context_.run(); context_->unbind(); }); } ~impl() { work_.reset(); thread_.join(); context_->bind(); for (auto& pool : host_pools_) pool.clear(); for (auto& pools : device_pools_) for (auto& pool : pools) pool.clear(); GL(glDeleteFramebuffers(1, &fbo_)); } template auto spawn_async(Func&& func) { using result_type = decltype(func(std::declval())); using task_type = std::packaged_task; auto task = task_type(std::forward(func)); auto future = task.get_future(); boost::asio::spawn(io_context_, std::move(task), [](std::exception_ptr e) { if (e) std::rethrow_exception(e); }); return future; } template auto dispatch_async(Func&& func) { using result_type = decltype(func()); using task_type = std::packaged_task; auto task = task_type(std::forward(func)); auto future = task.get_future(); boost::asio::dispatch(io_context_, std::move(task)); return future; } template auto dispatch_sync(Func&& func) -> decltype(func()) { return dispatch_async(std::forward(func)).get(); } std::wstring version() { return version_; } std::shared_ptr create_texture(int width, int height, int stride, common::bit_depth depth, bool clear) { CASPAR_VERIFY(stride > 0 && stride < 5); CASPAR_VERIFY(width > 0 && height > 0); auto depth_pool_index = depth == common::bit_depth::bit8 ? 0 : 1; // TODO (perf) Shared pool. 
auto pool = &device_pools_[depth_pool_index][stride - 1][(width << 16 & 0xFFFF0000) | (height & 0x0000FFFF)]; std::shared_ptr tex; if (!pool->try_pop(tex)) { tex = std::make_shared(width, height, stride, depth); } tex->set_depth(depth); if (clear) { tex->clear(); } auto ptr = tex.get(); return std::shared_ptr( ptr, [tex = std::move(tex), pool, self = shared_from_this()](texture*) mutable { pool->push(tex); }); } std::shared_ptr create_buffer(int size, bool write) { CASPAR_VERIFY(size > 0); // TODO (perf) Shared pool. auto pool = &host_pools_[static_cast(write ? 1 : 0)][size]; std::shared_ptr buf; if (!pool->try_pop(buf)) { // TODO (perf) Avoid blocking in create_array. dispatch_sync([&] { buf = std::make_shared(size, write); }); } auto ptr = buf.get(); return std::shared_ptr(ptr, [buf = std::move(buf), self = shared_from_this()](buffer*) mutable { auto pool = &self->host_pools_[static_cast(buf->write() ? 1 : 0)][buf->size()]; pool->push(std::move(buf)); }); } array create_array(int size) { auto buf = create_buffer(size, true); auto ptr = reinterpret_cast(buf->data()); return array(ptr, buf->size(), std::move(buf)); } std::future> copy_async(const array& source, int width, int height, int stride, common::bit_depth depth) { return dispatch_async([=, this] { std::shared_ptr buf; auto tmp = source.storage>(); if (tmp) { buf = *tmp; } else { buf = create_buffer(static_cast(source.size()), true); // TODO (perf) Copy inside a TBB worker. 
#ifdef WIN32
// Copy a foreign GL texture (e.g. from the D3D interop path) into a pooled
// texture, completing the returned future once the GPU copy has finished.
// Waits by polling a fence from the device's coroutine context so other work
// keeps running.
std::future<std::shared_ptr<texture>> copy_async(GLuint source, int width, int height, int stride, common::bit_depth depth)
{
    return spawn_async([=, this](yield_context yield) {
        auto tex = create_texture(width, height, stride, depth, false);
        tex->copy_from(source);

        auto fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        GL(glFlush());

        deadline_timer timer(io_context_);
        // Poll the fence every 2 ms until the copy has been signalled.
        // (Removed an unused loop counter that was never read.)
        while (true) {
            // TODO (perf) Smarter non-polling solution?
            timer.expires_from_now(boost::posix_time::milliseconds(2));
            timer.async_wait(yield);

            auto wait = glClientWaitSync(fence, 0, 1);
            if (wait == GL_ALREADY_SIGNALED || wait == GL_CONDITION_SATISFIED) {
                break;
            }
        }
        glDeleteSync(fence);

        return tex;
    });
}
#endif
L"write_only" : L"read_only"); pool_info.add(L"size", size); pool_info.add(L"count", count); pooled_host_buffers.add_child(L"host_buffer_pool", pool_info); (is_write ? total_write_count : total_read_count) += count; (is_write ? total_write_size : total_read_size) += size * count; } } info.add_child(L"gl.details.pooled_host_buffers", pooled_host_buffers); info.add(L"gl.summary.pooled_device_buffers.total_count", total_pooled_device_buffer_count); info.add(L"gl.summary.pooled_device_buffers.total_size", total_pooled_device_buffer_size); // info.add_child(L"gl.summary.all_device_buffers", texture::info()); info.add(L"gl.summary.pooled_host_buffers.total_read_count", total_read_count); info.add(L"gl.summary.pooled_host_buffers.total_write_count", total_write_count); info.add(L"gl.summary.pooled_host_buffers.total_read_size", total_read_size); info.add(L"gl.summary.pooled_host_buffers.total_write_size", total_write_size); info.add_child(L"gl.summary.all_host_buffers", buffer::info()); return info; } std::future gc() { return spawn_async([this](yield_context yield) { CASPAR_LOG(info) << " ogl: Running GC."; try { for (auto& depth_pools : device_pools_) { for (auto& pools : depth_pools) { for (auto& pool : pools) pool.second.clear(); } } for (auto& pools : host_pools_) { for (auto& pool : pools) pool.second.clear(); } } catch (...) 
{ CASPAR_LOG_CURRENT_EXCEPTION(); } }); } }; device::device() : impl_(new impl()) { } device::~device() {} std::shared_ptr device::create_texture(int width, int height, int stride, common::bit_depth depth) { return impl_->create_texture(width, height, stride, depth, true); } array device::create_array(int size) { return impl_->create_array(size); } std::future> device::copy_async(const array& source, int width, int height, int stride, common::bit_depth depth) { return impl_->copy_async(source, width, height, stride, depth); } std::future> device::copy_async(const std::shared_ptr& source) { return impl_->copy_async(source); } #ifdef WIN32 std::shared_ptr device::d3d_interop() const { return impl_->interop_handle_; } std::future> device::copy_async(GLuint source, int width, int height, int stride, common::bit_depth depth) { return impl_->copy_async(source, width, height, stride, depth); } #endif void device::dispatch(std::function func) { boost::asio::dispatch(impl_->io_context_, std::move(func)); } std::wstring device::version() const { return impl_->version(); } boost::property_tree::wptree device::info() const { return impl_->info(); } std::future device::gc() { return impl_->gc(); } }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/device.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include #include #ifdef WIN32 #include #endif namespace caspar { namespace accelerator { namespace ogl { class device final : public std::enable_shared_from_this , public accelerator_device { public: device(); ~device(); device(const device&) = delete; device& operator=(const device&) = delete; std::shared_ptr create_texture(int width, int height, int stride, common::bit_depth depth); array create_array(int size); std::future> copy_async(const array& source, int width, int height, int stride, common::bit_depth depth); std::future> copy_async(const std::shared_ptr& source); #ifdef WIN32 std::shared_ptr d3d_interop() const; std::future> copy_async(GLuint source, int width, int height, int stride, common::bit_depth depth); #endif template auto dispatch_async(Func&& func) { using result_type = decltype(func()); using task_type = std::packaged_task; auto task = std::make_shared(std::forward(func)); auto future = task->get_future(); dispatch([=] { (*task)(); }); return future; } template auto dispatch_sync(Func&& func) { return dispatch_async(std::forward(func)).get(); } std::wstring version() const; boost::property_tree::wptree info() const; std::future gc(); private: void dispatch(std::function func); struct impl; std::shared_ptr impl_; }; }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/matrix.cpp ================================================ #include #include #include #include #include #include #include #include #include #include #include "matrix.h" namespace caspar::accelerator::ogl { t_matrix create_matrix(std::vector> data) { if (data.empty()) CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"data cannot be empty")); t_matrix matrix(data.size(), data.at(0).size()); for (int y = 0; y < 
matrix.size1(); ++y) { if (data.at(y).size() != matrix.size2()) CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Each row must be of the same length")); for (int x = 0; x < matrix.size2(); ++x) matrix(x, y) = data.at(y).at(x); } return matrix; } t_matrix get_vertex_matrix(const core::image_transform& transform, double aspect_ratio) { using namespace boost::numeric::ublas; auto anchor_matrix = create_matrix({{1.0, 0.0, -transform.anchor[0]}, {0.0, 1.0, -transform.anchor[1]}, {0.0, 0.0, 1.0}}); auto scale_matrix = create_matrix({{transform.fill_scale[0], 0.0, 0.0}, {0.0, transform.fill_scale[1], 0.0}, {0.0, 0.0, 1.0}}); auto aspect_matrix = create_matrix({{1.0, 0.0, 0.0}, {0.0, 1.0 / aspect_ratio, 0.0}, {0.0, 0.0, 1.0}}); auto aspect_inv_matrix = create_matrix({{1.0, 0.0, 0.0}, {0.0, aspect_ratio, 0.0}, {0.0, 0.0, 1.0}}); auto rotation_matrix = create_matrix({{std::cos(transform.angle), -std::sin(transform.angle), 0.0}, {std::sin(transform.angle), std::cos(transform.angle), 0.0}, {0.0, 0.0, 1.0}}); auto translation_matrix = create_matrix( {{1.0, 0.0, transform.fill_translation[0]}, {0.0, 1.0, transform.fill_translation[1]}, {0.0, 0.0, 1.0}}); return anchor_matrix * aspect_matrix * scale_matrix * rotation_matrix * aspect_inv_matrix * translation_matrix; } } // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/matrix.h ================================================ #pragma once #include #include #include namespace caspar::accelerator::ogl { typedef boost::numeric::ublas::matrix> t_matrix; typedef boost::numeric::ublas::vector> t_point; t_matrix get_vertex_matrix(const core::image_transform& transform, double aspect_ratio); } // namespace caspar::accelerator::ogl namespace boost::numeric::ublas { template boost::numeric::ublas::matrix operator*(const boost::numeric::ublas::matrix& lhs, const boost::numeric::ublas::matrix& rhs) { return boost::numeric::ublas::matrix(boost::numeric::ublas::prod(lhs, 
rhs)); } template boost::numeric::ublas::vector operator*(const boost::numeric::ublas::vector& lhs, const boost::numeric::ublas::matrix& rhs) { return boost::numeric::ublas::vector(boost::numeric::ublas::prod(lhs, rhs)); } template bool operator==(const boost::numeric::ublas::matrix& lhs, const boost::numeric::ublas::matrix& rhs) { if (lhs.size1() != rhs.size1() || lhs.size2() != rhs.size2()) return false; for (int y = 0; y < lhs.size1(); ++y) for (int x = 0; x < lhs.size2(); ++x) if (lhs(y, x) != rhs(y, x)) return false; return true; } } // namespace boost::numeric::ublas ================================================ FILE: src/accelerator/ogl/util/shader.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "shader.h" #include #include #include namespace caspar { namespace accelerator { namespace ogl { struct shader::impl { GLuint program_; std::unordered_map uniform_locations_; std::unordered_map attrib_locations_; impl(const impl&) = delete; impl& operator=(const impl&) = delete; public: impl(const std::string& vertex_source_str, const std::string& fragment_source_str) : program_(0) { GLint success; const char* vertex_source = vertex_source_str.c_str(); auto vertex_shader = glCreateShaderObjectARB(GL_VERTEX_SHADER_ARB); GL(glShaderSourceARB(vertex_shader, 1, &vertex_source, NULL)); GL(glCompileShaderARB(vertex_shader)); GL(glGetObjectParameterivARB(vertex_shader, GL_OBJECT_COMPILE_STATUS_ARB, &success)); if (success == GL_FALSE) { char info[2048]; GL(glGetInfoLogARB(vertex_shader, sizeof(info), 0, info)); GL(glDeleteObjectARB(vertex_shader)); std::stringstream str; str << "Failed to compile vertex shader:" << std::endl << info << std::endl; CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(str.str())); } const char* fragment_source = fragment_source_str.c_str(); auto fragmemt_shader = glCreateShaderObjectARB(GL_FRAGMENT_SHADER_ARB); GL(glShaderSourceARB(fragmemt_shader, 1, &fragment_source, NULL)); GL(glCompileShaderARB(fragmemt_shader)); GL(glGetObjectParameterivARB(fragmemt_shader, GL_OBJECT_COMPILE_STATUS_ARB, &success)); if (success == GL_FALSE) { char info[2048]; GL(glGetInfoLogARB(fragmemt_shader, sizeof(info), 0, info)); GL(glDeleteObjectARB(fragmemt_shader)); std::stringstream str; str << "Failed to compile fragment shader:" << std::endl << info << std::endl; CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(str.str())); } program_ = glCreateProgramObjectARB(); GL(glAttachObjectARB(program_, vertex_shader)); GL(glAttachObjectARB(program_, fragmemt_shader)); GL(glLinkProgramARB(program_)); GL(glDeleteObjectARB(vertex_shader)); GL(glDeleteObjectARB(fragmemt_shader)); 
GL(glGetObjectParameterivARB(program_, GL_OBJECT_LINK_STATUS_ARB, &success)); if (success == GL_FALSE) { char info[2048]; GL(glGetInfoLogARB(program_, sizeof(info), 0, info)); GL(glDeleteObjectARB(program_)); std::stringstream str; str << "Failed to link shader program:" << std::endl << info << std::endl; CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(str.str())); } GL(glUseProgramObjectARB(program_)); } ~impl() { glDeleteProgram(program_); } GLint get_uniform_location(const char* name) { auto it = uniform_locations_.find(name); if (it == uniform_locations_.end()) it = uniform_locations_.insert(std::make_pair(name, glGetUniformLocation(program_, name))).first; return it->second; } GLint get_attrib_location(const char* name) { auto it = attrib_locations_.find(name); if (it == attrib_locations_.end()) it = attrib_locations_.insert(std::make_pair(name, glGetAttribLocation(program_, name))).first; return it->second; } void set(const std::string& name, bool value) { set(name, value ? 1 : 0); } void set(const std::string& name, int value) { GL(glUniform1i(get_uniform_location(name.c_str()), value)); } void set(const std::string& name, float value) { GL(glUniform1f(get_uniform_location(name.c_str()), value)); } void set(const std::string& name, double value0, double value1) { GL(glUniform2f(get_uniform_location(name.c_str()), static_cast(value0), static_cast(value1))); } void set(const std::string& name, double value0, double value1, double value2) { GL(glUniform3f(get_uniform_location(name.c_str()), static_cast(value0), static_cast(value1), static_cast(value1))); } void set(const std::string& name, double value) { GL(glUniform1f(get_uniform_location(name.c_str()), static_cast(value))); } void set_matrix3(const std::string& name, const float* value) { GL(glUniformMatrix3fv(get_uniform_location(name.c_str()), 1, GL_TRUE, value)); } void use() { GL(glUseProgramObjectARB(program_)); } }; shader::shader(const std::string& vertex_source_str, const std::string& 
fragment_source_str) : impl_(new impl(vertex_source_str, fragment_source_str)) { } shader::~shader() {} void shader::set(const std::string& name, bool value) { impl_->set(name, value); } void shader::set(const std::string& name, int value) { impl_->set(name, value); } void shader::set(const std::string& name, float value) { impl_->set(name, value); } void shader::set(const std::string& name, double value0, double value1) { impl_->set(name, value0, value1); } void shader::set(const std::string& name, double value0, double value1, double value2) { impl_->set(name, value0, value1, value2); } void shader::set(const std::string& name, double value) { impl_->set(name, value); } void shader::set_matrix3(const std::string& name, const float* value) { impl_->set_matrix3(name, value); } GLint shader::get_attrib_location(const char* name) { return impl_->get_attrib_location(name); } int shader::id() const { return impl_->program_; } void shader::use() const { impl_->use(); } }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/shader.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include namespace caspar { namespace accelerator { namespace ogl { class shader final { shader(const shader&); shader& operator=(const shader&); public: shader(const std::string& vertex_source_str, const std::string& fragment_source_str); ~shader(); void set(const std::string& name, bool value); void set(const std::string& name, int value); void set(const std::string& name, float value); void set(const std::string& name, double value0, double value1); void set(const std::string& name, double value0, double value1, double value2); void set(const std::string& name, double value); void set_matrix3(const std::string& name, const float* value); GLint get_attrib_location(const char* name); template typename std::enable_if::value, void>::type set(const std::string& name, E value) { set(name, static_cast::type>(value)); } void use() const; int id() const; private: struct impl; std::unique_ptr impl_; }; }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/texture.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "texture.h" #include "buffer.h" #include #include #include namespace caspar { namespace accelerator { namespace ogl { static GLenum FORMAT[] = {0, GL_RED, GL_RG, GL_BGR, GL_BGRA}; static GLenum INTERNAL_FORMAT[][5] = {{0, GL_R8, GL_RG8, GL_RGB8, GL_RGBA8}, {0, GL_R16, GL_RG16, GL_RGB16, GL_RGBA16}}; static GLenum TYPE[][5] = {{0, GL_UNSIGNED_BYTE, GL_UNSIGNED_BYTE, GL_UNSIGNED_BYTE, GL_UNSIGNED_INT_8_8_8_8_REV}, {0, GL_UNSIGNED_SHORT, GL_UNSIGNED_SHORT, GL_UNSIGNED_SHORT, GL_UNSIGNED_SHORT}}; struct texture::impl { GLuint id_ = 0; GLsizei width_ = 0; GLsizei height_ = 0; GLsizei stride_ = 0; GLsizei size_ = 0; common::bit_depth depth_; impl(const impl&) = delete; impl& operator=(const impl&) = delete; public: impl(int width, int height, int stride, common::bit_depth depth) : width_(width) , height_(height) , stride_(stride) , depth_(depth) , size_(width * height * stride * (depth == common::bit_depth::bit8 ? 1 : 2)) { GL(glCreateTextures(GL_TEXTURE_2D, 1, &id_)); GL(glTextureParameteri(id_, GL_TEXTURE_MIN_FILTER, GL_LINEAR)); GL(glTextureParameteri(id_, GL_TEXTURE_MAG_FILTER, GL_LINEAR)); GL(glTextureParameteri(id_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)); GL(glTextureParameteri(id_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)); GL(glTextureStorage2D( id_, 1, INTERNAL_FORMAT[depth_ == common::bit_depth::bit8 ? 0 : 1][stride_], width_, height_)); } ~impl() { glDeleteTextures(1, &id_); } void bind() { GL(glBindTexture(GL_TEXTURE_2D, id_)); } void bind(int index) { GL(glActiveTexture(GL_TEXTURE0 + index)); bind(); } void unbind() { GL(glBindTexture(GL_TEXTURE_2D, 0)); } void attach() { GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, id_, 0)); } void clear() { GL(glClearTexImage(id_, 0, FORMAT[stride_], TYPE[depth_ == common::bit_depth::bit8 ? 
0 : 1][stride_], nullptr)); } #ifdef WIN32 void copy_from(int texture_id) { GL(glCopyImageSubData( texture_id, GL_TEXTURE_2D, 0, 0, 0, 0, id_, GL_TEXTURE_2D, 0, 0, 0, 0, width_, height_, 1)); } #endif void copy_from(buffer& src) { src.bind(); if (width_ % 16 > 0) { glPixelStorei(GL_UNPACK_ALIGNMENT, 1); } else { glPixelStorei(GL_UNPACK_ALIGNMENT, 4); } GL(glTextureSubImage2D(id_, 0, 0, 0, width_, height_, FORMAT[stride_], TYPE[depth_ == common::bit_depth::bit8 ? 0 : 1][stride_], nullptr)); src.unbind(); } void copy_to(buffer& dst) { dst.bind(); GL(glGetTextureImage( id_, 0, FORMAT[stride_], TYPE[depth_ == common::bit_depth::bit8 ? 0 : 1][stride_], size_, nullptr)); dst.unbind(); } }; texture::texture(int width, int height, int stride, common::bit_depth depth) : impl_(new impl(width, height, stride, depth)) { } texture::texture(texture&& other) : impl_(std::move(other.impl_)) { } texture::~texture() {} texture& texture::operator=(texture&& other) { impl_ = std::move(other.impl_); return *this; } void texture::bind(int index) { impl_->bind(index); } void texture::unbind() { impl_->unbind(); } void texture::attach() { impl_->attach(); } void texture::clear() { impl_->clear(); } #ifdef WIN32 void texture::copy_from(int source) { impl_->copy_from(source); } #endif void texture::copy_from(buffer& source) { impl_->copy_from(source); } void texture::copy_to(buffer& dest) { impl_->copy_to(dest); } int texture::width() const { return impl_->width_; } int texture::height() const { return impl_->height_; } int texture::stride() const { return impl_->stride_; } common::bit_depth texture::depth() const { return impl_->depth_; } void texture::set_depth(common::bit_depth depth) { impl_->depth_ = depth; } int texture::size() const { return impl_->size_; } int texture::id() const { return impl_->id_; } }}} // namespace caspar::accelerator::ogl ================================================ FILE: src/accelerator/ogl/util/texture.h ================================================ /* 
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

// NOTE(review): #include targets were stripped by the extraction
// (likely <common/bit_depth.h>, <core/frame/...> and <memory>).
#include
#include
#include

namespace caspar { namespace accelerator { namespace ogl {

// GPU texture owned by the OpenGL accelerator. Movable, non-copyable pimpl.
class texture final : public core::texture
{
  public:
    texture(int width, int height, int stride, common::bit_depth depth = common::bit_depth::bit8);
    texture(const texture&) = delete;
    texture(texture&& other);
    ~texture();

    texture& operator=(const texture&) = delete;
    texture& operator=(texture&& other);

#ifdef WIN32
    // Copy from another GL texture id (Windows-only interop path).
    void copy_from(int source);
#endif
    void copy_from(class buffer& source);
    void copy_to(class buffer& dest);

    void attach();
    void clear();
    virtual void bind(int index) override;
    virtual void unbind() override;

    int width() const;
    int height() const;
    int stride() const;
    common::bit_depth depth() const;
    void set_depth(common::bit_depth depth);
    int size() const;
    int id() const;

  private:
    struct impl;
    std::unique_ptr impl_;
};

}}} // namespace caspar::accelerator::ogl

================================================ FILE: src/accelerator/ogl/util/transforms.cpp ================================================
#include "transforms.h"

// NOTE(review): two #include targets were stripped by the extraction.
#include
#include

namespace caspar::accelerator::ogl {

// Build the crop quad in homogeneous (x, y, 1) coordinates; corner order is
// upper-left, upper-right, lower-right, lower-left.
draw_crop_region::draw_crop_region(double left, double top, double right, double bottom)
{
    // upper left
    coords[0] = t_point(3);
    coords[0](0) = left;
    coords[0](1) = top;
    coords[0](2) = 1;

    // upper right
    coords[1] = t_point(3);
    coords[1](0) = right;
    coords[1](1) = top;
    coords[1](2) = 1;

    // lower right
    coords[2] = t_point(3);
    coords[2](0) = right;
    coords[2](1) = bottom;
    coords[2](2) = 1;

    // lower left
    coords[3] = t_point(3);
    coords[3](0) = left;
    coords[3](1) = bottom;
    coords[3](2) = 1;
}

// Transform all four corners by `matrix` (row-vector convention: p * M).
void draw_crop_region::apply_transform(const caspar::accelerator::ogl::t_matrix& matrix)
{
    coords[0] = coords[0] * matrix;
    coords[1] = coords[1] * matrix;
    coords[2] = coords[2] * matrix;
    coords[3] = coords[3] * matrix;
}

// Merge the colour-related fields of `other` into `self` (multiplicative for
// gains, min/max for clamping-style fields, OR for boolean flags).
void apply_transform_colour_values(core::image_transform& self, const core::image_transform& other)
{
    // Note: this intentionally does not affect any geometry-related fields, they follow a separate flow
    self.opacity *= other.opacity;
    self.brightness *= other.brightness;
    self.contrast *= other.contrast;
    self.saturation *= other.saturation;

    self.levels.min_input  = std::max(self.levels.min_input, other.levels.min_input);
    self.levels.max_input  = std::min(self.levels.max_input, other.levels.max_input);
    self.levels.min_output = std::max(self.levels.min_output, other.levels.min_output);
    self.levels.max_output = std::min(self.levels.max_output, other.levels.max_output);
    self.levels.gamma *= other.levels.gamma;

    self.chroma.enable |= other.chroma.enable;
    self.chroma.show_mask |= other.chroma.show_mask;
    self.chroma.target_hue     = std::max(other.chroma.target_hue, self.chroma.target_hue);
    self.chroma.min_saturation = std::max(other.chroma.min_saturation, self.chroma.min_saturation);
    self.chroma.min_brightness = std::max(other.chroma.min_brightness, self.chroma.min_brightness);
    self.chroma.hue_width      = std::max(other.chroma.hue_width, self.chroma.hue_width);
    self.chroma.softness       = std::max(other.chroma.softness, self.chroma.softness);
    self.chroma.spill_suppress = std::max(other.chroma.spill_suppress, self.chroma.spill_suppress);
    self.chroma.spill_suppress_saturation = std::min(other.chroma.spill_suppress_saturation,
self.chroma.spill_suppress_saturation); self.is_key |= other.is_key; self.invert |= other.invert; self.is_mix |= other.is_mix; self.blend_mode = std::max(self.blend_mode, other.blend_mode); self.layer_depth += other.layer_depth; } bool is_default_perspective(const core::corners& perspective) { return perspective.ul[0] == 0 && perspective.ul[1] == 0 && perspective.ur[0] == 1 && perspective.ur[1] == 0 && perspective.ll[0] == 0 && perspective.ll[1] == 1 && perspective.lr[0] == 1 && perspective.lr[1] == 1; } draw_transforms draw_transforms::combine_transform(const core::image_transform& transform, double aspect_ratio) const { draw_transforms new_transform(image_transform, steps); auto transform_before = new_transform.current().vertex_matrix; // Get matrix for turning coords in 'transform' into the parent frame. auto new_matrix = get_vertex_matrix(transform, aspect_ratio); apply_transform_colour_values(new_transform.image_transform, transform); new_transform.current().vertex_matrix = new_matrix * new_transform.current().vertex_matrix; // Only enable this for some transforms, to avoid applying crops when a draw_frame is just being used to flatten // other draw_frames if (transform.enable_geometry_modifiers) { // Push the new clip before the new transform applied draw_crop_region new_clip(transform.clip_translation[0], transform.clip_translation[1], transform.clip_translation[0] + transform.clip_scale[0], transform.clip_translation[1] + transform.clip_scale[1]); new_clip.apply_transform(transform_before); new_transform.current().crop_regions.push_back(std::move(new_clip)); if (!is_default_perspective(transform.perspective)) { // Split into a new step new_transform.steps.emplace_back(transform.perspective, boost::numeric::ublas::identity_matrix(3, 3)); } // Push the new crop region with the new transform applied draw_crop_region new_crop( transform.crop.ul[0], transform.crop.ul[1], transform.crop.lr[0], transform.crop.lr[1]); 
new_crop.apply_transform(new_transform.current().vertex_matrix); new_transform.current().crop_regions.push_back(std::move(new_crop)); } return std::move(new_transform); } void apply_perspective_to_vertex(t_point& vertex, const core::corners& perspective) { const double x = vertex(0); const double y = vertex(1); // ul: x' = (1-y) * a + (1 - a * (1-y)) * x vertex(0) += (1 - y) * perspective.ul[0] + (1 - perspective.ul[0] + perspective.ul[0] * y) * x - x; vertex(1) += (1 - x) * perspective.ul[1] + (1 - perspective.ul[1] + perspective.ul[1] * x) * y - y; // ur/ll: x' = x * (a * (1-y) + y) vertex(0) += x * (perspective.ur[0] * (1 - y) + y) - x; vertex(1) += y * (perspective.ll[1] * (1 - x) + x) - y; // ur/ll: x' = y * a + x * (1 - a * y) vertex(0) += y * perspective.ll[0] + x * (1 - perspective.ll[0] * y) - x; vertex(1) += x * perspective.ur[1] + y * (1 - perspective.ur[1] * x) - y; // lr: x' = x * (y * a + (1-y)) vertex(0) += x * (y * perspective.lr[0] + (1 - y)) - x; vertex(1) += y * (x * perspective.lr[1] + (1 - x)) - y; } struct wrapped_vertex { explicit wrapped_vertex(const core::frame_geometry::coord& coord) { vertex(0) = coord.vertex_x; vertex(1) = coord.vertex_y; vertex(2) = 1; texture_x = coord.texture_x; texture_y = coord.texture_y; texture_r = coord.texture_r; texture_q = coord.texture_q; } explicit wrapped_vertex() { vertex(2) = 1; }; [[nodiscard]] core::frame_geometry::coord as_geometry() const { core::frame_geometry::coord res = {vertex(0), vertex(1), texture_x, texture_y}; res.texture_r = texture_r; res.texture_q = texture_q; return res; } t_point vertex = t_point(3); double texture_x = 0.0; double texture_y = 0.0; double texture_r = 0.0; double texture_q = 1.0; }; static const double epsilon = 0.001; bool inline point_is_outside_of_line(const t_point& line_1, const t_point& line_2, const t_point& vertex, bool invert_winding) { // use a cross product to check if the point is outside the crop region auto cross = (line_2(0) - line_1(0)) * (vertex(1) - 
line_1(1)) - (line_2(1) - line_1(1)) * (vertex(0) - line_1(0)); return invert_winding ? cross > epsilon : cross < -epsilon; } // http://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect bool get_intersection_with_crop_line(const t_point& crop0, const t_point& crop1, const t_point& p0, const t_point& p1, t_point& result) { double s1_x = crop1(0) - crop0(0); double s1_y = crop1(1) - crop0(1); double s2_x = p1(0) - p0(0); double s2_y = p1(1) - p0(1); double s = (-s1_y * (crop0(0) - p0(0)) + s1_x * (crop0(1) - p0(1))) / (-s2_x * s1_y + s1_x * s2_y); double t = (s2_x * (crop0(1) - p0(1)) - s2_y * (crop0(0) - p0(0))) / (-s2_x * s1_y + s1_x * s2_y); if (s >= 0 && s <= 1) { // Collision detected result(0) = crop0(0) + t * s1_x; result(1) = crop0(1) + t * s1_y; return true; } return false; // No collision } double hypotenuse(double x1, double y1, double x2, double y2) { auto x = x2 - x1; auto y = y2 - y1; return std::sqrt(x * x + y * y); } double calc_q(double close_diagonal, double distant_diagonal) { return (close_diagonal + distant_diagonal) / distant_diagonal; } void crop_texture_for_vertex(const wrapped_vertex& line_a, const wrapped_vertex& line_b, wrapped_vertex& vertex) { auto delta_point = vertex.vertex - line_a.vertex; auto delta_line = line_b.vertex - line_a.vertex; // Calculate the dot product auto dot_product = delta_point(0) * delta_line(0) + delta_point(1) * delta_line(1); auto line_len_squared = delta_line(0) * delta_line(0) + delta_line(1) * delta_line(1); // Skip if line has no length if (line_len_squared == 0) { vertex.texture_x = line_a.texture_x; vertex.texture_y = line_a.texture_y; return; } auto dist_delta = dot_product / line_len_squared; vertex.texture_x = line_a.texture_x + dist_delta * (line_b.texture_x - line_a.texture_x); vertex.texture_y = line_a.texture_y + dist_delta * (line_b.texture_y - line_a.texture_y); vertex.texture_q = line_a.texture_q + dist_delta * (line_b.texture_q - line_a.texture_q); } void 
fill_texture_q_for_quad(std::vector& coords) { if (coords.size() != 4) return; // Based on formula from: // http://www.reedbeta.com/blog/2012/05/26/quadrilateral-interpolation-part-1/ double s1_x = coords[2].vertex(0) - coords[0].vertex(0); double s1_y = coords[2].vertex(1) - coords[0].vertex(1); double s2_x = coords[3].vertex(0) - coords[1].vertex(0); double s2_y = coords[3].vertex(1) - coords[1].vertex(1); double s = (-s1_y * (coords[0].vertex(0) - coords[1].vertex(0)) + s1_x * (coords[0].vertex(1) - coords[1].vertex(1))) / (-s2_x * s1_y + s1_x * s2_y); double t = (s2_x * (coords[0].vertex(1) - coords[1].vertex(1)) - s2_y * (coords[0].vertex(0) - coords[1].vertex(0))) / (-s2_x * s1_y + s1_x * s2_y); if (s >= 0 && s <= 1 && t >= 0 && t <= 1) { // Collision detected double diagonal_intersection_x = coords[0].vertex(0) + t * s1_x; double diagonal_intersection_y = coords[0].vertex(1) + t * s1_y; auto d0 = hypotenuse(coords[3].vertex(0), coords[3].vertex(1), diagonal_intersection_x, diagonal_intersection_y); auto d1 = hypotenuse(coords[2].vertex(0), coords[2].vertex(1), diagonal_intersection_x, diagonal_intersection_y); auto d2 = hypotenuse(coords[1].vertex(0), coords[1].vertex(1), diagonal_intersection_x, diagonal_intersection_y); auto d3 = hypotenuse(coords[0].vertex(0), coords[0].vertex(1), diagonal_intersection_x, diagonal_intersection_y); auto ulq = calc_q(d3, d1); auto urq = calc_q(d2, d0); auto lrq = calc_q(d1, d3); auto llq = calc_q(d0, d2); std::vector q_values = {ulq, urq, lrq, llq}; int corner = 0; for (auto& coord : coords) { coord.texture_q = q_values[corner]; coord.texture_x *= q_values[corner]; coord.texture_y *= q_values[corner]; if (++corner == 4) corner = 0; } } } void transform_vertex(const draw_transform_step& step, t_point& vertex) { // Apply basic transforms of this step vertex = vertex * step.vertex_matrix; // Apply perspective. 
These rely on x and y of the coord, so can't be done as a shared matrix
    apply_perspective_to_vertex(vertex, step.perspective); }

// Apply the full step stack to `coords`: transform vertices (innermost step
// first), apply perspective q-correction, then clip the polygon against every
// accumulated crop region (Sutherland-Hodgman style, one edge at a time).
// Returns an empty vector when the geometry is entirely cropped away.
// NOTE(review): container element types were lost in the extraction
// (std::vector<...>, std::unordered_set<size_t>); code reproduced as found.
std::vector draw_transforms::transform_coords(const std::vector& coords) const
{
    // Convert to matrix representations
    std::vector cropped_coords;
    cropped_coords.reserve(coords.size());
    for (const auto& coord : coords) {
        cropped_coords.emplace_back(coord);
    }

    std::vector transformed_regions;

    // Apply the transforms
    for (int i = (int)steps.size() - 1; i >= 0; i--) {
        for (auto& coord : cropped_coords) {
            transform_vertex(steps[i], coord.vertex);
        }

        // Transform existing regions
        for (auto& region : transformed_regions) {
            for (int l = 0; l < 4; ++l) {
                transform_vertex(steps[i], region.coords[l]);
            }
        }

        // Push new regions
        for (auto& region : steps[i].crop_regions) {
            draw_crop_region new_region = region;
            for (int l = 0; l < 4; ++l) {
                // Only apply perspective for new ones
                apply_perspective_to_vertex(new_region.coords[l], steps[i].perspective);
            }
            transformed_regions.push_back(new_region);
        }
    }

    // Apply the perspective correction
    fill_texture_q_for_quad(cropped_coords);

    // Perform the crop
    for (auto& crop_region : transformed_regions) {
        // Determine the winding order of the crop region
        // Calculate the signed area to determine if the region is clockwise or counter-clockwise
        double signed_area = 0.0;
        for (int l = 0; l < 4; ++l) {
            int next_l = (l + 1) % 4;
            signed_area += (crop_region.coords[next_l](0) - crop_region.coords[l](0)) *
                           (crop_region.coords[next_l](1) + crop_region.coords[l](1));
        }
        // In screen coordinates (Y down), normal clockwise winding gives negative signed area
        // We need to invert the test when the winding is flipped (positive signed area)
        bool invert_winding = signed_area > 0;

        for (int l = 0; l < 4; ++l) {
            // Apply the crop, one edge at a time
            int to_index = l == 3 ? 0 : l + 1;

            t_point from_point = crop_region.coords[l];
            t_point to_point   = crop_region.coords[to_index];

            std::unordered_set points_outside_of_line;

            // Figure out which points are outside the crop region
            for (size_t j = 0; j < cropped_coords.size(); ++j) {
                bool v = point_is_outside_of_line(from_point, to_point, cropped_coords[j].vertex, invert_winding);
                if (v)
                    points_outside_of_line.insert(j);
            }

            if (points_outside_of_line.empty()) {
                // Line has no effect, skip
                continue;
            } else if (points_outside_of_line.size() == cropped_coords.size()) {
                // All are outside, shape has no geometry
                return {};
            }

            std::vector new_coords;
            new_coords.reserve(cropped_coords.size() * 2); // Avoid reallocs for complex shapes

            // Iterate through the coords
            for (size_t j = 0; j < cropped_coords.size(); ++j) {
                if (points_outside_of_line.count(j) == 0) {
                    // Inside: keep as-is.
                    new_coords.push_back(cropped_coords[j]);
                    continue;
                }

                size_t prev_index = j == 0 ? cropped_coords.size() - 1 : j - 1;
                size_t next_index = j == cropped_coords.size() - 1 ? 0 : j + 1;

                bool prev_is_outside = points_outside_of_line.count(prev_index) == 1;
                bool next_is_outside = points_outside_of_line.count(next_index) == 1;
                if (prev_is_outside && next_is_outside) {
                    // Vertex and its edges are completely outside, skip
                    continue;
                }

                if (!prev_is_outside) {
                    // This edge intersects the crop line, calculate the new coordinates
                    wrapped_vertex new_coord;
                    if (get_intersection_with_crop_line(to_point,
                                                        from_point,
                                                        cropped_coords[prev_index].vertex,
                                                        cropped_coords[j].vertex,
                                                        new_coord.vertex)) {
                        crop_texture_for_vertex(cropped_coords[prev_index], cropped_coords[j], new_coord);
                        new_coords.emplace_back(std::move(new_coord));
                    } else {
                        // Geometry error! skip coordinate
                    }
                }

                if (!next_is_outside) {
                    // This edge intersects the crop line, calculate the new coordinates
                    wrapped_vertex new_coord;
                    if (get_intersection_with_crop_line(to_point,
                                                        from_point,
                                                        cropped_coords[j].vertex,
                                                        cropped_coords[next_index].vertex,
                                                        new_coord.vertex)) {
                        crop_texture_for_vertex(cropped_coords[j], cropped_coords[next_index], new_coord);
                        new_coords.emplace_back(std::move(new_coord));
                    } else {
                        // Geometry error! skip coordinate
                    }
                }
            }

            // Polygon is cropped, update state
            cropped_coords = new_coords;
        }

        {
            static const double pixel_epsilon = 0.0001; // less than a pixel at 8k

            // Prune duplicate coords
            std::vector new_coords;
            new_coords.reserve(cropped_coords.size()); // Avoid reallocs

            for (size_t j = 0; j < cropped_coords.size(); ++j) {
                size_t prev_index = j == 0 ? cropped_coords.size() - 1 : j - 1;

                auto delta = cropped_coords[j].vertex - cropped_coords[prev_index].vertex;
                if (std::abs(delta(0)) > pixel_epsilon || std::abs(delta(1)) > pixel_epsilon) {
                    new_coords.emplace_back(cropped_coords[j]);
                }
            }

            if (new_coords.size() < 3) {
                // Not enough coords to draw anything
                return {};
            }

            cropped_coords = new_coords;
        }
    }

    // Convert back to frame_geometry types
    std::vector result;
    result.reserve(cropped_coords.size());
    for (auto& coord : cropped_coords) {
        result.push_back(coord.as_geometry());
    }
    return result;
}

} // namespace caspar::accelerator::ogl

================================================ FILE: src/accelerator/ogl/util/transforms.h ================================================
#pragma once

// NOTE(review): #include targets were stripped by the extraction.
#include
#include
#include
#include
#include
#include
#include "matrix.h"

namespace caspar::accelerator::ogl {

// A quad (homogeneous 2D coordinates) used to clip drawn geometry.
struct draw_crop_region
{
    explicit draw_crop_region(double left, double top, double right, double bottom);

    void apply_transform(const t_matrix& matrix);

    std::array coords;
};

// One level of the transform stack: a vertex matrix plus the perspective quad
// and crop regions introduced at that level.
struct draw_transform_step
{
    draw_transform_step()
        : vertex_matrix(boost::numeric::ublas::identity_matrix(3, 3))
    {
    }

    draw_transform_step(const core::corners& perspective, const
t_matrix& vertex_matrix)
        : perspective(perspective)
        , vertex_matrix(vertex_matrix)
    {
    }

    core::corners perspective;
    // Crop regions pushed at this step (element type lost in extraction).
    std::vector crop_regions;
    t_matrix vertex_matrix;
};

// Accumulated drawing state for one frame: merged colour transform plus a
// stack of geometry steps, combined/applied by the out-of-line methods.
struct draw_transforms
{
    std::vector steps;

    draw_transforms()
        : image_transform(core::image_transform())
        , steps({draw_transform_step()})
    {
    }

    explicit draw_transforms(core::image_transform transform, std::vector steps)
        : image_transform(transform)
        , steps(std::move(steps))
    {
    }

    core::image_transform image_transform;

    // The innermost (most recently pushed) step.
    draw_transform_step& current() { return steps.back(); }

    [[nodiscard]] draw_transforms combine_transform(const core::image_transform& transform, double aspect_ratio) const;

    [[nodiscard]] std::vector transform_coords(const std::vector& coords) const;
};

} // namespace caspar::accelerator::ogl

================================================ FILE: src/accelerator/vulkan/image/fragment_shader.frag ================================================
#version 450
#extension GL_ARB_fragment_shader_interlock : enable
#extension GL_EXT_nonuniform_qualifier : enable

layout(location = 0) in vec4 TexCoord;
layout(location = 1) in vec4 TexCoord2;
layout(location = 0) out vec4 fragColor;

// bindless texture array
layout(binding = 0) uniform sampler2D textures[8];
layout(binding = 1, input_attachment_index = 0) uniform subpassInput background;

// Slot indices within `textures`.
const uint PLANE0 = 0;
const uint PLANE1 = 1;
const uint PLANE2 = 2;
const uint PLANE3 = 3;
const uint LOCAL_KEY = 4;
const uint LAYER_KEY = 5;

// Bit positions within ParamsBlock.flags.
const uint is_straight_alpha_mask = 1u << 0;
const uint has_local_key_mask = 1u << 1;
const uint has_layer_key_mask = 1u << 2;
const uint invert_mask = 1u << 3;
const uint levels_mask = 1u << 4;
const uint csb_mask = 1u << 5;
const uint chroma_mask = 1u << 6;
const uint chroma_show_mask_mask = 1u << 7;

// Per-draw parameters, mirrored from the CPU side.
layout(push_constant) uniform ParamsBlock
{
    uint color_space_index;
    float precision_factor[4];
    int blend_mode;
    int keyer;
    int pixel_format;
    float opacity;
    /* levels */
    float min_input;
    float max_input;
    float gamma;
    float
min_output;
    float max_output;
    /* contrast, saturation & brightness */
    float brt;
    float sat;
    float con;
    /* Chroma */
    float chroma_target_hue;
    float chroma_hue_width;
    float chroma_min_saturation;
    float chroma_min_brightness;
    float chroma_softness;
    float chroma_spill_suppress;
    float chroma_spill_suppress_saturation;
    uint flags;
};

// YCbCr->RGB conversion matrices, indexed by color_space_index:
// [0] Rec. 601, [1] Rec. 709, [2] Rec. 2020.
const mat3[3] color_matrices = mat3[3](
    mat3(1.0, 0.0, 1.402, 1.0, -0.344, -0.509, 1.0, 1.772, 0.0),
    mat3(1.0, 0.0, 1.5748, 1.0, -0.1873, -0.4681, 1.0, 1.8556, 0.0),
    mat3(1.0, 0.0, 1.4746, 1.0, -0.16455312684366, -0.57135312684366, 1.0, 1.8814, 0.0)
);
const vec3[3] luma_coefficients = vec3[3](
    vec3(0.299, 0.587, 0.114),    // Rec. 601
    vec3(0.2126, 0.7152, 0.0722), // Rec. 709
    vec3(0.2627, 0.6780, 0.0593)  // Rec. 2020
);

/*
** Contrast, saturation, brightness
** Code of this function is from TGM's shader pack
** http://irrlicht.sourceforge.net/phpBB2/viewtopic.php?t=21057
*/
vec3 ContrastSaturationBrightness(vec4 color, float brt, float sat, float con)
{
    vec3 luma_coeff = luma_coefficients[color_space_index];
    const float AvgLumR = 0.5;
    const float AvgLumG = 0.5;
    const float AvgLumB = 0.5;

    // Un-premultiply, adjust in straight alpha, then re-premultiply.
    if (color.a > 0.0)
        color.rgb /= color.a;

    vec3 AvgLumin = vec3(AvgLumR, AvgLumG, AvgLumB);
    vec3 brtColor = color.rgb * brt;
    vec3 intensity = vec3(dot(brtColor, luma_coeff));
    vec3 satColor = mix(intensity, brtColor, sat);
    vec3 conColor = mix(AvgLumin, satColor, con);
    conColor.rgb *= color.a;
    return conColor;
}

/*
** Gamma correction
** Details: http://blog.mouaif.org/2009/01/22/photoshop-gamma-correction-shader/
*/
#define GammaCorrection(color, gamma) pow(color, vec3(1.0 / gamma))

/*
** Levels control (input (+gamma), output)
** Details: http://blog.mouaif.org/2009/01/28/levels-control-shader/
*/
#define LevelsControlInputRange(color, minInput, maxInput) min(max(color - vec3(minInput), vec3(0.0)) / (vec3(maxInput) - vec3(minInput)), vec3(1.0))
#define LevelsControlInput(color, minInput, gamma, maxInput) GammaCorrection(LevelsControlInputRange(color, minInput, maxInput), gamma)
#define LevelsControlOutputRange(color, minOutput, maxOutput) mix(vec3(minOutput), vec3(maxOutput), color)
#define LevelsControl(color, minInput, gamma, maxInput, minOutput, maxOutput) LevelsControlOutputRange(LevelsControlInput(color, minInput, gamma, maxInput), minOutput, maxOutput)

/*
** Photoshop & misc math
** Blending modes, RGB/HSL/Contrast/Desaturate, levels control
**
** Romain Dura | Romz
** Blog: http://blog.mouaif.org
** Post: http://blog.mouaif.org/?p=94
*/

/*
** Desaturation
*/
vec4 Desaturate(vec3 color, float Desaturation)
{
    vec3 grayXfer = vec3(0.3, 0.59, 0.11);
    vec3 gray = vec3(dot(grayXfer, color));
    return vec4(mix(color, gray, Desaturation), 1.0);
}

/*
** Hue, saturation, luminance
*/
vec3 RGBToHSL(vec3 color)
{
    vec3 hsl;
    float fmin = min(min(color.r, color.g), color.b);
    float fmax = max(max(color.r, color.g), color.b);
    float delta = fmax - fmin;
    hsl.z = (fmax + fmin) / 2.0;
    if (delta == 0.0) {
        // Achromatic: hue and saturation are zero.
        hsl.x = 0.0;
        hsl.y = 0.0;
    } else {
        if (hsl.z < 0.5)
            hsl.y = delta / (fmax + fmin);
        else
            hsl.y = delta / (2.0 - fmax - fmin);
        float deltaR = (((fmax - color.r) / 6.0) + (delta / 2.0)) / delta;
        float deltaG = (((fmax - color.g) / 6.0) + (delta / 2.0)) / delta;
        float deltaB = (((fmax - color.b) / 6.0) + (delta / 2.0)) / delta;
        if (color.r == fmax )
            hsl.x = deltaB - deltaG;
        else if (color.g == fmax)
            hsl.x = (1.0 / 3.0) + deltaR - deltaB;
        else if (color.b == fmax)
            hsl.x = (2.0 / 3.0) + deltaG - deltaR;
        if (hsl.x < 0.0)
            hsl.x += 1.0;
        else if (hsl.x > 1.0)
            hsl.x -= 1.0;
    }
    return hsl;
}

float HueToRGB(float f1, float f2, float hue)
{
    if (hue < 0.0)
        hue += 1.0;
    else if (hue > 1.0)
        hue -= 1.0;
    float res;
    if ((6.0 * hue) < 1.0)
        res = f1 + (f2 - f1) * 6.0 * hue;
    else if ((2.0 * hue) < 1.0)
        res = f2;
    else if ((3.0 * hue) < 2.0)
        res = f1 + (f2 - f1) * ((2.0 / 3.0) - hue) * 6.0;
    else
        res = f1;
    return res;
}

vec3 HSLToRGB(vec3 hsl)
{
    vec3 rgb;
    if (hsl.y == 0.0)
        rgb = vec3(hsl.z);
    else {
        float f2;
        if (hsl.z < 0.5)
            f2 = hsl.z * (1.0 + hsl.y);
        else
            f2 = (hsl.z + hsl.y) - (hsl.y * hsl.z);
        float f1 = 2.0 * hsl.z - f2;
        rgb.r = HueToRGB(f1, f2, hsl.x + (1.0/3.0));
        rgb.g = HueToRGB(f1, f2, hsl.x);
        rgb.b= HueToRGB(f1, f2, hsl.x - (1.0/3.0));
    }
    return rgb;
}

/*
** Float blending modes
** Adapted from here: http://www.nathanm.com/photoshop-blending-math/
** But I modified the HardMix (wrong condition), Overlay, SoftLight, ColorDodge, ColorBurn, VividLight, PinLight (inverted layers) ones to have correct results
*/
#define BlendLinearDodgef BlendAddf
#define BlendLinearBurnf BlendSubstractf
#define BlendAddf(base, blend) min(base + blend, 1.0)
#define BlendSubstractf(base, blend) max(base + blend - 1.0, 0.0)
#define BlendLightenf(base, blend) max(blend, base)
#define BlendDarkenf(base, blend) min(blend, base)
#define BlendLinearLightf(base, blend) (blend < 0.5 ? BlendLinearBurnf(base, (2.0 * blend)) : BlendLinearDodgef(base, (2.0 * (blend - 0.5))))
#define BlendScreenf(base, blend) (1.0 - ((1.0 - base) * (1.0 - blend)))
#define BlendOverlayf(base, blend) (base < 0.5 ? (2.0 * base * blend) : (1.0 - 2.0 * (1.0 - base) * (1.0 - blend)))
#define BlendSoftLightf(base, blend) ((blend < 0.5) ? (2.0 * base * blend + base * base * (1.0 - 2.0 * blend)) : (sqrt(base) * (2.0 * blend - 1.0) + 2.0 * base * (1.0 - blend)))
#define BlendColorDodgef(base, blend) ((blend == 1.0) ? blend : min(base / (1.0 - blend), 1.0))
#define BlendColorBurnf(base, blend) ((blend == 0.0) ? blend : max((1.0 - ((1.0 - base) / blend)), 0.0))
#define BlendVividLightf(base, blend) ((blend < 0.5) ? BlendColorBurnf(base, (2.0 * blend)) : BlendColorDodgef(base, (2.0 * (blend - 0.5))))
#define BlendPinLightf(base, blend) ((blend < 0.5) ? BlendDarkenf(base, (2.0 * blend)) : BlendLightenf(base, (2.0 *(blend - 0.5))))
#define BlendHardMixf(base, blend) ((BlendVividLightf(base, blend) < 0.5) ? 0.0 : 1.0)
#define BlendReflectf(base, blend) ((blend == 1.0) ? blend : min(base * base / (1.0 - blend), 1.0))

/*
** Vector3 blending modes
*/
#define Blend(base, blend, funcf) vec3(funcf(base.r, blend.r), funcf(base.g, blend.g), funcf(base.b, blend.b))
#define BlendNormal(base, blend) (blend)
#define BlendLighten BlendLightenf
#define BlendDarken BlendDarkenf
#define BlendMultiply(base, blend) (base * blend)
#define BlendAverage(base, blend) ((base + blend) / 2.0)
#define BlendAdd(base, blend) min(base + blend, vec3(1.0))
#define BlendSubstract(base, blend) max(base + blend - vec3(1.0), vec3(0.0))
#define BlendDifference(base, blend) abs(base - blend)
#define BlendNegation(base, blend) (vec3(1.0) - abs(vec3(1.0) - base - blend))
#define BlendExclusion(base, blend) (base + blend - 2.0 * base * blend)
#define BlendScreen(base, blend) Blend(base, blend, BlendScreenf)
#define BlendOverlay(base, blend) Blend(base, blend, BlendOverlayf)
#define BlendSoftLight(base, blend) Blend(base, blend, BlendSoftLightf)
#define BlendHardLight(base, blend) BlendOverlay(blend, base)
#define BlendColorDodge(base, blend) Blend(base, blend, BlendColorDodgef)
#define BlendColorBurn(base, blend) Blend(base, blend, BlendColorBurnf)
#define BlendLinearDodge BlendAdd
#define BlendLinearBurn BlendSubstract
#define BlendLinearLight(base, blend) Blend(base, blend, BlendLinearLightf)
#define BlendVividLight(base, blend) Blend(base, blend, BlendVividLightf)
#define BlendPinLight(base, blend) Blend(base, blend, BlendPinLightf)
#define BlendHardMix(base, blend) Blend(base, blend, BlendHardMixf)
#define BlendReflect(base, blend) Blend(base, blend, BlendReflectf)
#define BlendGlow(base, blend) BlendReflect(blend, base)
#define BlendPhoenix(base, blend) (min(base, blend) - max(base, blend) + vec3(1.0))
#define BlendOpacity(base, blend, F, O) (F(base, blend) * O + blend * (1.0 - O))

// HSL-channel blends: combine hue/saturation/luminosity across the two inputs.
vec3 BlendHue(vec3 base, vec3 blend)
{
    vec3 baseHSL = RGBToHSL(base);
    return HSLToRGB(vec3(RGBToHSL(blend).r, baseHSL.g, baseHSL.b));
}

vec3 BlendSaturation(vec3 base, vec3 blend) { vec3
baseHSL = RGBToHSL(base); return HSLToRGB(vec3(baseHSL.r, RGBToHSL(blend).g, baseHSL.b)); } vec3 BlendColor(vec3 base, vec3 blend) { vec3 blendHSL = RGBToHSL(blend); return HSLToRGB(vec3(blendHSL.r, blendHSL.g, RGBToHSL(base).b)); } vec3 BlendLuminosity(vec3 base, vec3 blend) { vec3 baseHSL = RGBToHSL(base); return HSLToRGB(vec3(baseHSL.r, baseHSL.g, RGBToHSL(blend).b)); } // Chroma keying // Author: Tim Eves // // This implements the Chroma key algorithm described in the paper: // 'Software Chroma Keying in an Imersive Virtual Environment' // by F. van den Bergh & V. Lalioti // but as a pixel shader algorithm. // // This allows us to implement the paper's alphaMap curve in software // rather than a largeish array float alpha_map(float d) { return 1.0 - smoothstep(1.0, chroma_softness, d); } // http://stackoverflow.com/questions/15095909/from-rgb-to-hsv-in-opengl-glsl vec3 rgb2hsv(vec3 c) { vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0); vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g)); vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r)); float d = q.x - min(q.w, q.y); float e = 1.0e-10; return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x); } // From the same page vec3 hsv2rgb(vec3 c) { vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y); } float AngleDiff(float angle1, float angle2) { return 0.5 - abs(abs(angle1 - angle2) - 0.5); } float AngleDiffDirectional(float angle1, float angle2) { float diff = angle1 - angle2; return diff < -0.5 ? diff + 1.0 : (diff > 0.5 ? 
diff - 1.0 : diff); }

// Negative "shortfall" of actual below target; 0.0 when actual >= target.
float Distance(float actual, float target) { return min(0.0, target - actual); }

// Score how close a pixel (in HSV) is to the configured key color.
// Positive result means the pixel is inside the keyable region; magnitude
// grows with proximity to the key hue and with saturation/brightness shortfall.
float ColorDistance(vec3 hsv)
{
    float hueDiff                   = AngleDiff(hsv.x, chroma_target_hue) * 2;
    float saturationDiff            = Distance(hsv.y, chroma_min_saturation);
    float brightnessDiff            = Distance(hsv.z, chroma_min_brightness);
    float saturationBrightnessScore = max(brightnessDiff, saturationDiff);
    float hueScore                  = hueDiff - chroma_hue_width;
    return -hueScore * saturationBrightnessScore;
}

// Push hues falling within chroma_spill_suppress of the key hue out to the edge
// of that range and desaturate them, suppressing color spill on foreground edges.
// NOTE: historical (misspelled) name "supress_spill" kept as-is; callers use it.
vec3 supress_spill(vec3 c)
{
    float hue      = c.x;
    float diff     = AngleDiffDirectional(hue, chroma_target_hue);
    float distance = abs(diff) / chroma_spill_suppress;
    if (distance < 1) {
        c.x = diff < 0 ? chroma_target_hue - chroma_spill_suppress : chroma_target_hue + chroma_spill_suppress;
        c.y *= min(1.0, distance + chroma_spill_suppress_saturation);
    }
    return c;
}

// Key on any color
// Returns the spill-suppressed pixel premultiplied by the computed key alpha.
// When show_mask is set, the alpha itself is visualised as a grayscale image.
vec4 ChromaOnCustomColor(vec4 c, bool show_mask)
{
    vec3  hsv        = rgb2hsv(c.rgb);
    float distance   = ColorDistance(hsv);
    float d          = distance * -2.0 + 1.0;
    vec4  suppressed = vec4(hsv2rgb(supress_spill(hsv)), 1.0);
    float alpha      = alpha_map(d);
    suppressed *= alpha;
    return show_mask ?
vec4(suppressed.a, suppressed.a, suppressed.a, 1) : suppressed; }

// Dispatch on the blend_mode uniform to the matching Photoshop-style blend.
// Unknown values fall through to normal blending at the bottom.
vec3 get_blend_color(vec3 back, vec3 fore)
{
    switch(blend_mode) {
        case 0: return BlendNormal(back, fore);
        case 1: return BlendLighten(back, fore);
        case 2: return BlendDarken(back, fore);
        case 3: return BlendMultiply(back, fore);
        case 4: return BlendAverage(back, fore);
        case 5: return BlendAdd(back, fore);
        case 6: return BlendSubstract(back, fore);
        case 7: return BlendDifference(back, fore);
        case 8: return BlendNegation(back, fore);
        case 9: return BlendExclusion(back, fore);
        case 10: return BlendScreen(back, fore);
        case 11: return BlendOverlay(back, fore);
        // NOTE(review): soft light (mode 12) is disabled here, so it silently
        // falls through to BlendNormal below — confirm this is intentional and
        // matches the OpenGL accelerator's shader.
        // case 12: return BlendSoftLight(back, fore);
        case 13: return BlendHardLight(back, fore);
        case 14: return BlendColorDodge(back, fore);
        case 15: return BlendColorBurn(back, fore);
        case 16: return BlendLinearDodge(back, fore);
        case 17: return BlendLinearBurn(back, fore);
        case 18: return BlendLinearLight(back, fore);
        case 19: return BlendVividLight(back, fore);
        case 20: return BlendPinLight(back, fore);
        case 21: return BlendHardMix(back, fore);
        case 22: return BlendReflect(back, fore);
        case 23: return BlendGlow(back, fore);
        case 24: return BlendPhoenix(back, fore);
        case 25: return BlendHue(back, fore);
        case 26: return BlendSaturation(back, fore);
        case 27: return BlendColor(back, fore);
        case 28: return BlendLuminosity(back, fore);
    }
    return BlendNormal(back, fore);
}

// Composite the (premultiplied) foreground over the background subpass input.
vec4 blend(vec4 fore)
{
    // attachments have bgra format to match opengl accelerator
    vec4 back = subpassLoad(background).bgra;
    if(blend_mode != 0)
        // Un-premultiply both operands before blending (epsilon avoids /0),
        // then re-premultiply by the foreground alpha.
        fore.rgb = get_blend_color(back.rgb/(back.a+0.0000001), fore.rgb/(fore.a+0.0000001))*fore.a;
    switch(keyer) {
        case 1:
            return fore + back; // additive
        default:
            return fore + (1.0-fore.a)*back; // linear
    }
}

// Convert studio-swing YCbCr (luma 16..235, chroma 16..240 in 8-bit terms) to
// RGBA using the matrix selected by color_space_index.
vec4 ycbcra_to_rgba(float Y, float Cb, float Cr, float A)
{
    mat3 color_matrix = transpose(color_matrices[color_space_index]);
    const float luma_coefficient   = 255.0/219.0;
    const float chroma_coefficient = 255.0/224.0;
    vec3 YCbCr = vec3(Y, Cb, Cr) *
255; YCbCr -= vec3(16.0, 128.0, 128.0); YCbCr *= vec3(luma_coefficient, chroma_coefficient, chroma_coefficient); return vec4(color_matrix * YCbCr / 255, A); }

// Thin wrapper around texture() so all plane fetches go through one place.
vec4 get_sample(sampler2D texSampler, vec2 coords) { return texture(texSampler, coords); }

// Fetch the current fragment and convert it to RGBA according to pixel_format.
// precision_factor[] rescales 10/12-bit content stored in wider textures back
// into the [0,1] range (see get_precision_factor on the CPU side).
vec4 get_rgba_color()
{
    vec2 uv = TexCoord.st / TexCoord.q; // perspective-correct texture lookup
    switch(pixel_format) {
        case 0: //gray
            return vec4(get_sample(textures[PLANE0], uv).rrr * precision_factor[0], 1.0);
        case 1: //bgra,
            return get_sample(textures[PLANE0], uv).bgra * precision_factor[0];
        case 2: //rgba,
            return get_sample(textures[PLANE0], uv).rgba * precision_factor[0];
        case 3: //argb,
            return get_sample(textures[PLANE0], uv).argb * precision_factor[0];
        case 4: //abgr,
            // NOTE(review): swizzle is .gbar here, not .abgr — confirm against
            // the OpenGL accelerator shader that this channel order is intended.
            return get_sample(textures[PLANE0], uv).gbar * precision_factor[0];
        case 5: //ycbcr,
        {
            float y  = get_sample(textures[PLANE0], uv).r * precision_factor[0];
            float cb = get_sample(textures[PLANE1], uv).r * precision_factor[1];
            float cr = get_sample(textures[PLANE2], uv).r * precision_factor[2];
            return ycbcra_to_rgba(y, cb, cr, 1.0);
        }
        case 6: //ycbcra
        {
            float y  = get_sample(textures[PLANE0], uv).r * precision_factor[0];
            float cb = get_sample(textures[PLANE1], uv).r * precision_factor[1];
            float cr = get_sample(textures[PLANE2], uv).r * precision_factor[2];
            float a  = get_sample(textures[PLANE3], uv).r * precision_factor[3];
            return ycbcra_to_rgba(y, cb, cr, a);
        }
        case 7: //luma
        {
            vec3 y3 = get_sample(textures[PLANE0], uv).rrr * precision_factor[0];
            // Expand studio swing (16/255 .. 235/255) to full range.
            return vec4((y3-0.065)/0.859, 1.0);
        }
        case 8: //bgr,
            return vec4(get_sample(textures[PLANE0], uv).bgr * precision_factor[0], 1.0);
        case 9: //rgb,
            return vec4(get_sample(textures[PLANE0], uv).rgb * precision_factor[0], 1.0);
        case 10: // uyvy
        {
            float y  = get_sample(textures[PLANE0], uv).g * precision_factor[0];
            float cb = get_sample(textures[PLANE1], uv).b * precision_factor[1];
            float cr = get_sample(textures[PLANE1], uv).r * precision_factor[1];
            return ycbcra_to_rgba(y, cb, cr, 1.0);
        }
        case 11: // gbrp
        {
            float g = get_sample(textures[PLANE0],
            uv).r * precision_factor[0];
            float b = get_sample(textures[PLANE1], uv).r * precision_factor[1];
            float r = get_sample(textures[PLANE2], uv).r * precision_factor[2];
            // NOTE(review): components are returned in (b, g, r) order here, not
            // (r, g, b) as the other cases do — confirm this ordering against the
            // OpenGL accelerator shader before relying on it.
            return vec4(b, g, r, 1.0);
        }
        case 12: // gbrap
        {
            float g = get_sample(textures[PLANE0], uv).r * precision_factor[0];
            float b = get_sample(textures[PLANE1], uv).r * precision_factor[1];
            float r = get_sample(textures[PLANE2], uv).r * precision_factor[2];
            float a = get_sample(textures[PLANE3], uv).r * precision_factor[3];
            return vec4(b, g, r, a);
        }
    }
    return vec4(0.0, 0.0, 0.0, 0.0); // unknown format -> transparent black
}

void main()
{
    // Unpack the packed shader_flags bitmask set by the CPU side.
    bool is_straight_alpha = (flags & is_straight_alpha_mask) == is_straight_alpha_mask;
    bool has_local_key     = (flags & has_local_key_mask) == has_local_key_mask;
    bool has_layer_key     = (flags & has_layer_key_mask) == has_layer_key_mask;
    bool invert            = (flags & invert_mask) == invert_mask;
    bool levels            = (flags & levels_mask) == levels_mask;
    bool csb               = (flags & csb_mask) == csb_mask;
    bool chroma            = (flags & chroma_mask) == chroma_mask;
    bool chroma_show_mask  = (flags & chroma_show_mask_mask) == chroma_show_mask_mask;

    vec4 color = get_rgba_color();
    if (is_straight_alpha)
        color.rgb *= color.a; // premultiply straight-alpha sources
    if (chroma)
        color = ChromaOnCustomColor(color, chroma_show_mask);
    if(levels)
        color.rgb = LevelsControl(color.rgb, min_input, gamma, max_input, min_output, max_output);
    if(csb)
        color.rgb = ContrastSaturationBrightness(color, brt, sat, con);
    if(has_local_key)
        color *= texture(textures[LOCAL_KEY], TexCoord2.st).r;
    if(has_layer_key)
        color *= texture(textures[LAYER_KEY], TexCoord2.st).r;
    color *= opacity;
    if (invert)
        color = 1.0 - color;
    if (blend_mode >= 0)
        color = blend(color);

    // attachments store color in bgra format to match opengl accelerator
    fragColor = color.bgra;
}

================================================ FILE: src/accelerator/vulkan/image/image_kernel.cpp ================================================

/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */

#include "image_kernel.h"

#include "../util/device.h"
#include "../util/pipeline.h"
#include "../util/renderpass.h"
#include "../util/texture.h"

// NOTE(review): the targets of the following system/library includes were lost
// when this file was extracted (angle-bracket contents stripped); restore them
// from the repository before building.
#include #include #include #include #include #include #include

namespace caspar::accelerator::vulkan {

// Scale factor applied to normalised texel values so that 10/12-bit content
// stored in wider textures maps back into the [0,1] range the shader expects
// (consumed as precision_factor[] in the fragment shader).
float get_precision_factor(common::bit_depth depth)
{
    switch (depth) {
        case common::bit_depth::bit8:
            return 1.0f;
        case common::bit_depth::bit10:
            return 64.0f;
        case common::bit_depth::bit12:
            return 16.0f;
        case common::bit_depth::bit16:
        default:
            return 1.0f;
    }
}

// Screen-space culling helpers: vertex coordinates are normalised so the
// visible screen is [0,1] in both axes.
bool is_above_screen(double y) { return y < 0.0; }
bool is_below_screen(double y) { return y > 1.0; }
bool is_left_of_screen(double x) { return x < 0.0; }
bool is_right_of_screen(double x) { return x > 1.0; }

// True when every vertex lies entirely off one and the same edge of the
// screen, in which case the draw can be skipped.
// NOTE(review): std::vector's element type was stripped in extraction.
bool is_outside_screen(const std::vector& coords)
{
    auto x_coords = coords | boost::adaptors::transformed([](const core::frame_geometry::coord& c) { return c.vertex_x; });
    auto y_coords = coords | boost::adaptors::transformed([](const core::frame_geometry::coord& c) { return c.vertex_y; });
    return boost::algorithm::all_of(x_coords, &is_left_of_screen) ||
           boost::algorithm::all_of(x_coords, &is_right_of_screen) ||
           boost::algorithm::all_of(y_coords, &is_above_screen) ||
           boost::algorithm::all_of(y_coords, &is_below_screen);
}

// Minimum opacity/levels/CSB delta considered significant.
static const double epsilon = 0.001;
// Number of in-flight frame contexts (triple buffering).
static const uint32_t frame_buffer_size = 3;

struct
image_kernel::impl
{
    // NOTE(review): template arguments (spl::shared_ptr<...>, std::vector<...>,
    // std::shared_ptr<...>, std::pair<...>) were stripped from this extract;
    // restore them from the repository before building.
    spl::shared_ptr vulkan_;
    common::bit_depth depth_;

    // Per-frame GPU state for one of the in-flight frame contexts: a lazily
    // grown, persistently mapped vertex buffer, the command buffer recorded
    // for the frame, and the fence signalled when its submission completes.
    struct frame_data : public frame_context
    {
        image_kernel::impl* parent = nullptr;
        vk::Buffer buffer = nullptr;       // vertex buffer (grown on demand)
        void* data = nullptr;              // persistent host mapping of `memory`
        vk::DeviceMemory memory = nullptr;
        size_t size = 0;                   // current capacity of `buffer` in bytes
        vk::CommandBuffer cmd_buffer = nullptr;
        vk::Fence fence = nullptr;         // created by submit(), recycled on reuse

        explicit frame_data(image_kernel::impl* parent)
            : parent(parent)
        {
        }

        // Copy vertex data into this frame's host-visible buffer and return it.
        virtual vk::Buffer upload_vertex_data(const std::vector& src)
        {
            return parent->upload_vertex_buffer(*this, (void*)src.data(), src.size() * sizeof(float));
        }

        virtual draw_data create_draw_data(const draw_params& params) { return parent->draw(params); }

        virtual std::shared_ptr get_pipeline() { return parent->vulkan_->get_pipeline(parent->depth_); }

        virtual vk::CommandBuffer get_command_buffer() { return cmd_buffer; }

        // Submit this frame's command buffer with a fresh fence tracking completion.
        virtual void submit()
        {
            fence = parent->vulkan_->getVkDevice().createFence({});
            vk::SubmitInfo submitInfo{};
            submitInfo.setCommandBuffers(cmd_buffer);
            parent->vulkan_->submit(submitInfo, fence);
        }

        virtual std::shared_ptr create_attachment(uint32_t width, uint32_t height, uint32_t components_count)
        {
            return parent->vulkan_->create_attachment(width, height, parent->depth_, components_count);
        }
    };

    frame_data frames_[frame_buffer_size];
    uint32_t   current_frame_index_ = 0;

    explicit impl(const spl::shared_ptr& vulkan, common::bit_depth depth)
        : vulkan_(vulkan)
        , depth_(depth)
        , frames_{frame_data{this}, frame_data{this}, frame_data{this}}
    {
        // One command buffer per in-flight frame context.
        auto cmd_buffers = vulkan_->allocateCommandBuffers(frame_buffer_size);
        for (uint32_t i = 0; i < frame_buffer_size; ++i) {
            frames_[i].cmd_buffer = cmd_buffers[i];
        }
    }

    ~impl()
    {
        auto vk_device = vulkan_->getVkDevice();
        for (auto& frame : frames_) {
            if (frame.buffer) {
                vk_device.unmapMemory(frame.memory);
                vk_device.destroyBuffer(frame.buffer);
                vk_device.freeMemory(frame.memory);
                // NOTE(review): the fence is only destroyed when a vertex buffer
                // exists; a frame whose fence was created by submit() but which
                // never uploaded vertex data would leak its fence on shutdown —
                // confirm whether the fence check should sit outside this branch.
                if (frame.fence) {
                    vk_device.destroyFence(frame.fence);
                }
            }
        }
    }

    // Advance to the next frame context, wait for (and recycle) its previous
    // submission, then start a new renderpass targeting width x height.
    spl::shared_ptr create_renderpass(uint32_t width, uint32_t height)
    {
        auto  device = vulkan_->getVkDevice();
        auto& ctx    = frames_[(++current_frame_index_) % frame_buffer_size];
        if (ctx.fence) {
            auto result = device.waitForFences(ctx.fence, true, 1000000000); // wait up to one second
            if (result == vk::Result::eTimeout) {
                CASPAR_LOG(warning) << L"[Vulkan image_kernel] Timeout waiting for fence";
            }
            device.destroyFence(ctx.fence);
            ctx.fence = nullptr;
        }
        ctx.cmd_buffer.reset({});
        return spl::make_shared(&ctx, width, height);
    }

    // Find a device memory type index permitted by `typeMask` that has all of
    // the requested property flags; throws if none exists.
    uint32_t findDedicatedMemoryType(uint32_t typeMask, vk::MemoryPropertyFlags properties)
    {
        auto memProperties = vulkan_->getMemoryProperties();
        for (uint32_t i = 0; i < memProperties.memoryTypeCount; ++i) {
            if ((typeMask & (1 << i)) && ((memProperties.memoryTypes[i].propertyFlags & properties) == properties)) {
                return i;
            }
        }
        throw std::runtime_error("[Vulkan image_kernel] Failed to find suitable memory type");
    }

    // Copy `size` bytes of vertex data into the frame's host-visible buffer,
    // first (re)allocating a larger buffer when the current one is too small.
    // The mapping is persistent and host-coherent, so no explicit flush is done.
    vk::Buffer upload_vertex_buffer(frame_data& vb, void* data, size_t size)
    {
        if (vb.size < size) {
            auto vk_device = vulkan_->getVkDevice();
            if (vb.buffer) {
                vk_device.unmapMemory(vb.memory);
                vk_device.destroyBuffer(vb.buffer);
                vk_device.freeMemory(vb.memory);
            }

            // staging buffer
            vk::BufferCreateInfo stagingInfo{};
            stagingInfo.size        = size;
            stagingInfo.usage       = vk::BufferUsageFlagBits::eVertexBuffer;
            stagingInfo.sharingMode = vk::SharingMode::eExclusive;
            vb.buffer               = vk_device.createBuffer(stagingInfo);

            auto stagingMemReq = vk_device.getBufferMemoryRequirements(vb.buffer);
            vk::MemoryAllocateInfo stagingAlloc{};
            stagingAlloc.allocationSize  = stagingMemReq.size;
            stagingAlloc.memoryTypeIndex = findDedicatedMemoryType(
                stagingMemReq.memoryTypeBits,
                vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
            vb.memory = vk_device.allocateMemory(stagingAlloc);
            vk_device.bindBufferMemory(vb.buffer, vb.memory, 0);
            vb.data = vk_device.mapMemory(vb.memory, 0, size);
            vb.size = size;
        }
        memcpy(vb.data, data, size);
        return vb.buffer;
    }

    // Build the vertex coordinates and uniform block for one draw call.
    std::pair, uniform_block> draw(const draw_params& params)
    {
        CASPAR_ASSERT(params.pix_desc.planes.size() == params.textures.size());

        // Nothing to draw: no textures/background, fully transparent, or no geometry.
        if (params.textures.empty() || !params.background) {
            return {};
        }
        if (params.transforms.image_transform.opacity < epsilon) {
            return {};
        }
        if (params.geometry.data().empty()) {
            return {};
        }

        auto coords     = params.geometry.data();
        auto transforms = params.transforms;

        // Apply the geometry scale mode (fit/fill/original/hfill/vfill) by
        // folding an extra fill_scale into the transform chain; stretch needs
        // no adjustment.
        auto const first_plane = params.pix_desc.planes.at(0);
        if (params.geometry.mode() != core::frame_geometry::scale_mode::stretch && first_plane.width > 0 &&
            first_plane.height > 0) {
            auto width_scale  = static_cast(params.target_width) / static_cast(first_plane.width);
            auto height_scale = static_cast(params.target_height) / static_cast(first_plane.height);

            core::image_transform transform;
            double                target_scale;
            switch (params.geometry.mode()) {
                case core::frame_geometry::scale_mode::fit:
                    target_scale = std::min(width_scale, height_scale);
                    transform.fill_scale[0] *= target_scale / width_scale;
                    transform.fill_scale[1] *= target_scale / height_scale;
                    break;
                case core::frame_geometry::scale_mode::fill:
                    target_scale = std::max(width_scale, height_scale);
                    transform.fill_scale[0] *= target_scale / width_scale;
                    transform.fill_scale[1] *= target_scale / height_scale;
                    break;
                case core::frame_geometry::scale_mode::original:
                    transform.fill_scale[0] /= width_scale;
                    transform.fill_scale[1] /= height_scale;
                    break;
                case core::frame_geometry::scale_mode::hfill:
                    transform.fill_scale[1] *= width_scale / height_scale;
                    break;
                case core::frame_geometry::scale_mode::vfill:
                    transform.fill_scale[0] *= height_scale / width_scale;
                    break;
                default:;
            }
            transforms = transforms.combine_transform(transform, params.aspect_ratio);
        }

        coords = transforms.transform_coords(coords);

        // Skip drawing if all the coordinates will be outside the screen.
        if (coords.size() < 3 || is_outside_screen(coords)) {
            return {};
        }

        // Populate the uniform block consumed by the fragment shader.
        uniform_block uniforms;
        for (int n = 0; n < params.textures.size(); ++n) {
            uniforms.precision_factor[n] = get_precision_factor(params.textures[n]->depth());
        }

        // Content up to 700 lines tall is always treated as bt601.
        const auto is_hd       = params.pix_desc.planes.at(0).height > 700;
        const auto color_space = is_hd ? params.pix_desc.color_space : core::color_space::bt601;
        uniforms.color_space_index = static_cast(color_space);

        if (params.pix_desc.is_straight_alpha) {
            uniforms.flags |= static_cast(shader_flags::is_straight_alpha);
        }
        if (static_cast(params.local_key)) {
            uniforms.flags |= static_cast(shader_flags::has_local_key);
        }
        if (static_cast(params.layer_key)) {
            uniforms.flags |= static_cast(shader_flags::has_layer_key);
        }

        uniforms.pixel_format = static_cast(params.pix_desc.format);
        // Key layers are always drawn at full opacity.
        uniforms.opacity = transforms.image_transform.is_key ? 1.0f : static_cast(transforms.image_transform.opacity);

        if (transforms.image_transform.chroma.enable) {
            uniforms.flags |= static_cast(shader_flags::chroma);
            if (transforms.image_transform.chroma.show_mask)
                uniforms.flags |= static_cast(shader_flags::chroma_show_mask);
            // Hue parameters arrive in degrees; the shader works on a [0,1) hue circle.
            uniforms.chroma_target_hue     = static_cast(transforms.image_transform.chroma.target_hue) / 360.0f;
            uniforms.chroma_hue_width      = static_cast(transforms.image_transform.chroma.hue_width);
            uniforms.chroma_min_saturation = static_cast(transforms.image_transform.chroma.min_saturation);
            uniforms.chroma_min_brightness = static_cast(transforms.image_transform.chroma.min_brightness);
            uniforms.chroma_softness       = 1.0f + static_cast(transforms.image_transform.chroma.softness);
            uniforms.chroma_spill_suppress = static_cast(transforms.image_transform.chroma.spill_suppress) / 360.0f;
            uniforms.chroma_spill_suppress_saturation =
                static_cast(transforms.image_transform.chroma.spill_suppress_saturation);
        }

        // Setup blend_func
        auto blend_mode = params.blend_mode;
        if (transforms.image_transform.is_key) {
            blend_mode = core::blend_mode::normal;
        }
        uniforms.blend_mode = static_cast(blend_mode);
        uniforms.keyer      = static_cast(params.keyer);

        if (transforms.image_transform.invert) {
            uniforms.flags |= static_cast(shader_flags::invert);
        }

        // Enable the levels pass only when some parameter deviates from identity.
        if (transforms.image_transform.levels.min_input > epsilon ||
            transforms.image_transform.levels.max_input < 1.0 - epsilon ||
            transforms.image_transform.levels.min_output > epsilon ||
            transforms.image_transform.levels.max_output < 1.0 - epsilon ||
            std::abs(transforms.image_transform.levels.gamma - 1.0) > epsilon) {
            uniforms.flags |= static_cast(shader_flags::levels);
            uniforms.min_input  = static_cast(transforms.image_transform.levels.min_input);
            uniforms.max_input  = static_cast(transforms.image_transform.levels.max_input);
            uniforms.min_output = static_cast(transforms.image_transform.levels.min_output);
            uniforms.max_output = static_cast(transforms.image_transform.levels.max_output);
            uniforms.gamma      = static_cast(transforms.image_transform.levels.gamma);
        }

        // Likewise for contrast/saturation/brightness.
        if (std::abs(transforms.image_transform.brightness - 1.0) > epsilon ||
            std::abs(transforms.image_transform.saturation - 1.0) > epsilon ||
            std::abs(transforms.image_transform.contrast - 1.0) > epsilon) {
            uniforms.flags |= static_cast(shader_flags::csb);
            uniforms.brt = static_cast(transforms.image_transform.brightness);
            uniforms.sat = static_cast(transforms.image_transform.saturation);
            uniforms.con = static_cast(transforms.image_transform.contrast);
        }

        return {std::move(coords), uniforms};
    }
};

image_kernel::image_kernel(const spl::shared_ptr& device, common::bit_depth depth)
    : impl_(new impl(device, depth))
{
}

image_kernel::~image_kernel() {}

spl::shared_ptr image_kernel::create_renderpass(uint32_t width, uint32_t height)
{
    return impl_->create_renderpass(width, height);
}

} // namespace caspar::accelerator::vulkan

================================================ FILE: src/accelerator/vulkan/image/image_kernel.h ================================================

/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */

#pragma once

// NOTE(review): the targets of the system/library includes below were lost in
// extraction (angle-bracket contents stripped); restore from the repository.
#include #include #include #include

#include "../util/draw_params.h"
#include "../util/matrix.h"
#include "../util/transforms.h"
#include "../util/uniform_block.h"

namespace caspar { namespace accelerator { namespace vulkan {

// Owns the per-frame Vulkan state (command buffers, vertex buffers, fences)
// used to record image-compositing renderpasses for one channel.
class image_kernel final : public std::enable_shared_from_this
{
    image_kernel(const image_kernel&);
    image_kernel& operator=(const image_kernel&);

  public:
    image_kernel(const spl::shared_ptr& device, common::bit_depth depth);
    ~image_kernel();

    spl::shared_ptr create_renderpass(uint32_t width, uint32_t height);

  private:
    struct impl;
    spl::unique_ptr impl_;
};

}}} // namespace caspar::accelerator::vulkan

================================================ FILE: src/accelerator/vulkan/image/image_mixer.cpp ================================================

/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */

#include "image_mixer.h"

#include "image_kernel.h"

#include "../util/buffer.h"
#include "../util/device.h"
#include "../util/renderpass.h"
#include "../util/texture.h"

#ifdef WIN32
#include "../../d3d/d3d_texture2d.h"
#endif

// NOTE(review): stripped system/library includes; restore from the repository.
#include #include #include #include #include #include #include #include #include #include #include #include

namespace caspar { namespace accelerator { namespace vulkan {

using future_texture = std::shared_future>;

// One drawable item: the uploaded plane textures plus the transform/geometry
// to draw them with.
struct item
{
    core::pixel_format_desc pix_desc = core::pixel_format_desc(core::pixel_format::invalid);
    std::vector textures;
    draw_transforms transforms;
    core::frame_geometry geometry = core::frame_geometry::get_default();
};

// A layer groups items (and nested sublayers) under one blend mode.
struct layer
{
    std::vector sublayers;
    std::vector items;
    core::blend_mode blend_mode;

    explicit layer(core::blend_mode blend_mode)
        : blend_mode(blend_mode)
    {
    }
};

// Renders a stack of layers into a target attachment via image_kernel and
// reads the composited frame back asynchronously.
class image_renderer
{
    spl::shared_ptr vulkan_;
    image_kernel kernel_;
    const size_t max_frame_size_;
    common::bit_depth depth_;

  public:
    explicit image_renderer(const spl::shared_ptr& vulkan, const size_t max_frame_size, common::bit_depth depth)
        : vulkan_(vulkan)
        , kernel_(vulkan_, depth)
        , max_frame_size_(max_frame_size)
        , depth_(depth)
    {
    }

    std::future, std::shared_ptr>> operator()(std::vector layers, const core::video_format_desc& format_desc)
    {
        if (layers.empty()) { // Bypass GPU with empty frame.
            static const std::vector> buffer(max_frame_size_, 0);
            return make_ready_future, std::shared_ptr>>(
                {array(buffer.data(), format_desc.size, true), nullptr});
        }

        // Record and submit the renderpass on the Vulkan worker thread...
        auto f = std::move(vulkan_->dispatch_async(
            [this, format_desc, layers = std::move(layers)]() mutable
                -> std::tuple>, std::shared_ptr> {
                auto pass   = kernel_.create_renderpass(format_desc.square_width, format_desc.square_height);
                auto target = pass->default_attachment();
                draw(target, std::move(layers), format_desc, pass);
                pass->commit();
                return {vulkan_->copy_async(target), nullptr};
            }));

        // ...then lazily wait for the readback when the caller consumes the future.
        return std::async(std::launch::deferred,
                          [f = std::move(f)]() mutable -> std::tuple, std::shared_ptr> {
                              auto tuple = std::move(f.get());
                              return {std::move(std::get<0>(tuple).get()), std::move(std::get<1>(tuple))};
                          });
    }

    common::bit_depth depth() const { return depth_; }

  private:
    // Draw all layers, depth-first: sublayers render before their parent layer.
    void draw(std::shared_ptr& target_texture,
              std::vector layers,
              const core::video_format_desc& format_desc,
              spl::shared_ptr pass)
    {
        std::shared_ptr layer_key_texture;
        for (auto& layer : layers) {
            draw(target_texture, layer.sublayers, format_desc, pass);
            draw(target_texture, std::move(layer), layer_key_texture, format_desc, pass);
        }
    }

    // Draw one layer; non-normal blend modes render into an intermediate
    // attachment which is then blended onto the target as a whole.
    void draw(std::shared_ptr& target_texture,
              layer layer,
              std::shared_ptr& layer_key_texture,
              const core::video_format_desc& format_desc,
              spl::shared_ptr pass)
    {
        if (layer.items.empty())
            return;

        std::shared_ptr local_key_texture;
        std::shared_ptr local_mix_texture;

        if (layer.blend_mode != core::blend_mode::normal) {
            auto layer_texture = pass->create_attachment();
            for (auto& item : layer.items)
                draw(layer_texture, std::move(item), layer_key_texture, local_key_texture, local_mix_texture, format_desc, pass);
            draw(layer_texture, std::move(local_mix_texture), format_desc, pass, core::blend_mode::normal);
            draw(target_texture, std::move(layer_texture), format_desc, pass, layer.blend_mode);
        } else // fast path
        {
            for (auto& item : layer.items)
                draw(target_texture, std::move(item), layer_key_texture, local_key_texture, local_mix_texture, format_desc, pass);
            draw(target_texture, std::move(local_mix_texture), format_desc, pass, core::blend_mode::normal);
        }

        // A key produced inside this layer masks the NEXT layer.
        layer_key_texture = std::move(local_key_texture);
    }

    // Draw a single item, routing key/mix items via their scratch attachments.
    void draw(std::shared_ptr& target_texture,
              item item,
              std::shared_ptr& layer_key_texture,
              std::shared_ptr& local_key_texture,
              std::shared_ptr& local_mix_texture,
              const core::video_format_desc& format_desc,
              spl::shared_ptr pass)
    {
        draw_params draw_params;
        draw_params.target_width  = format_desc.square_width;
        draw_params.target_height = format_desc.square_height;
        // TODO: Pass the target color_space
        draw_params.pix_desc     = std::move(item.pix_desc);
        draw_params.transforms   = std::move(item.transforms);
        draw_params.geometry     = std::move(item.geometry);
        draw_params.aspect_ratio = static_cast(format_desc.square_width) / static_cast(format_desc.square_height);

        for (auto& future_texture : item.textures) {
            draw_params.textures.push_back(spl::make_shared_ptr(future_texture.get()));
        }

        if (draw_params.transforms.image_transform.is_key) {
            // A key means we will use it for the next non-key item as a mask
            local_key_texture = local_key_texture ? local_key_texture : pass->create_attachment();

            draw_params.background = local_key_texture;
            draw_params.local_key  = nullptr;
            draw_params.layer_key  = nullptr;

            pass->draw(std::move(draw_params));
        } else if (draw_params.transforms.image_transform.is_mix) {
            // A mix means precomp the items to a texture, before drawing to the channel
            local_mix_texture = local_mix_texture ? local_mix_texture : pass->create_attachment();

            draw_params.background = local_mix_texture;
            draw_params.local_key  = std::move(local_key_texture); // Use and reset the key
            draw_params.layer_key  = layer_key_texture;
            draw_params.keyer      = keyer::additive;

            pass->draw(std::move(draw_params));
        } else {
            // If there is a mix, this is the end so draw it and reset
            draw(target_texture, std::move(local_mix_texture), format_desc, pass, core::blend_mode::normal);

            draw_params.background = target_texture;
            draw_params.local_key  = std::move(local_key_texture);
            draw_params.layer_key  = layer_key_texture;

            pass->draw(std::move(draw_params));
        }
    }

    // Draw a whole texture (a precomposed mix or layer) onto the target.
    void draw(std::shared_ptr& target_texture,
              std::shared_ptr&& source_texture,
              core::video_format_desc format_desc,
              spl::shared_ptr pass,
              core::blend_mode blend_mode = core::blend_mode::normal)
    {
        if (!source_texture)
            return;

        draw_params draw_params;
        draw_params.target_width    = format_desc.square_width;
        draw_params.target_height   = format_desc.square_height;
        draw_params.pix_desc.format = core::pixel_format::bgra;
        draw_params.pix_desc.planes = {core::pixel_format_desc::plane(source_texture->width(), source_texture->height(), 4, source_texture->depth())};
        draw_params.textures   = {spl::make_shared_ptr(source_texture)};
        draw_params.blend_mode = blend_mode;
        draw_params.background = target_texture;
        draw_params.geometry   = core::frame_geometry::get_default();

        pass->draw(std::move(draw_params));
    }
};

// frame_factory implementation that uploads frames to Vulkan textures and
// drives the renderer; one instance per channel.
struct image_mixer::impl
    : public core::frame_factory
    , public std::enable_shared_from_this
{
    spl::shared_ptr vulkan_;
    image_renderer renderer_;
    std::vector transform_stack_;
    std::vector layers_; // layer/stream/items
    std::vector layer_stack_;
    double aspect_ratio_ = 1.0;

  public:
    impl(const spl::shared_ptr& device, const int channel_id, const size_t max_frame_size, common::bit_depth depth)
        : vulkan_(device)
        , renderer_(device, max_frame_size, depth)
        , transform_stack_(1)
    {
        CASPAR_LOG(info) << L"Initialized Vulkan Accelerated GPU Image Mixer for channel " <<
            channel_id;
    }

    void update_aspect_ratio(double aspect_ratio) { aspect_ratio_ = aspect_ratio; }

    // Enter a nested transform scope; opens a new (sub)layer when layer_depth grows.
    void push(const core::frame_transform& transform)
    {
        auto previous_layer_depth = transform_stack_.back().image_transform.layer_depth;
        transform_stack_.push_back(transform_stack_.back().combine_transform(transform.image_transform, aspect_ratio_));
        auto new_layer_depth = transform_stack_.back().image_transform.layer_depth;

        if (previous_layer_depth < new_layer_depth) {
            layer new_layer(transform_stack_.back().image_transform.blend_mode);
            if (layer_stack_.empty()) {
                layers_.push_back(std::move(new_layer));
                layer_stack_.push_back(&layers_.back());
            } else {
                layer_stack_.back()->sublayers.push_back(std::move(new_layer));
                layer_stack_.push_back(&layer_stack_.back()->sublayers.back());
            }
        }
    }

    // Queue a frame for drawing under the current transform.
    void visit(const core::const_frame& frame)
    {
        if (frame.pixel_format_desc().format == core::pixel_format::invalid)
            return;
        if (frame.pixel_format_desc().planes.empty())
            return;

        item item;
        item.pix_desc   = frame.pixel_format_desc();
        item.transforms = transform_stack_.back();
        item.geometry   = frame.geometry();

        // Reuse textures already uploaded for this frame (stashed in the
        // frame's opaque slot by create_frame's completion callback below),
        // otherwise upload each plane asynchronously now.
        auto textures_ptr = std::any_cast>>(frame.opaque());
        if (textures_ptr) {
            item.textures = *textures_ptr;
        } else {
            for (int n = 0; n < static_cast(item.pix_desc.planes.size()); ++n) {
                item.textures.emplace_back(vulkan_->copy_async(frame.image_data(n),
                                                               item.pix_desc.planes[n].width,
                                                               item.pix_desc.planes[n].height,
                                                               item.pix_desc.planes[n].stride,
                                                               item.pix_desc.planes[n].depth));
            }
        }

        layer_stack_.back()->items.push_back(item);
    }

    // Leave the current transform scope, trimming the open-layer stack to match.
    void pop()
    {
        transform_stack_.pop_back();
        layer_stack_.resize(transform_stack_.back().image_transform.layer_depth);
    }

    // Render and consume the collected layers for this frame.
    std::future, std::shared_ptr>> render(const core::video_format_desc& format_desc)
    {
        return renderer_(std::move(layers_), format_desc);
    }

    core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override
    {
        return create_frame(tag, desc, common::bit_depth::bit8);
    }

    core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, common::bit_depth depth) override
    {
        std::vector> image_data;
        for (auto& plane : desc.planes) {
            auto bytes_per_pixel = depth == common::bit_depth::bit8 ? 1 : 2;
            image_data.push_back(vulkan_->create_array(plane.size * bytes_per_pixel));
        }

        // On frame completion, upload all planes and return the future textures
        // as the frame's opaque payload so visit() can reuse them. The weak_ptr
        // guards against the mixer being destroyed before the frame completes.
        std::weak_ptr weak_self = shared_from_this();
        return core::mutable_frame(tag,
                                   std::move(image_data),
                                   array{},
                                   desc,
                                   [weak_self, desc](std::vector> image_data) -> std::any {
                                       auto self = weak_self.lock();
                                       if (!self) {
                                           return std::any{};
                                       }
                                       std::vector textures;
                                       for (int n = 0; n < static_cast(desc.planes.size()); ++n) {
                                           textures.emplace_back(self->vulkan_->copy_async(image_data[n],
                                                                                           desc.planes[n].width,
                                                                                           desc.planes[n].height,
                                                                                           desc.planes[n].stride,
                                                                                           desc.planes[n].depth));
                                       }
                                       return std::make_shared(std::move(textures));
                                   });
    }

#ifdef WIN32
    core::const_frame import_d3d_texture(const void* tag,
                                         const std::shared_ptr& d3d_texture,
                                         core::pixel_format format,
                                         common::bit_depth depth) override
    {
        // Always throws: this path is unsupported by the Vulkan accelerator.
        throw std::runtime_error("d3d texture import not supported on vulkan accelerator");
    }
#endif

    common::bit_depth depth() const { return renderer_.depth(); }
};

// Public forwarding wrappers around impl.
image_mixer::image_mixer(const spl::shared_ptr& vulkan, const int channel_id, const size_t max_frame_size, common::bit_depth depth)
    : impl_(std::make_unique(vulkan, channel_id, max_frame_size, depth))
{
}
image_mixer::~image_mixer() {}
void image_mixer::push(const core::frame_transform& transform) { impl_->push(transform); }
void image_mixer::visit(const core::const_frame& frame) { impl_->visit(frame); }
void image_mixer::pop() { impl_->pop(); }
void image_mixer::update_aspect_ratio(double aspect_ratio) { impl_->update_aspect_ratio(aspect_ratio); }
std::future, std::shared_ptr>> image_mixer::render(const core::video_format_desc& format_desc)
{
    return impl_->render(format_desc);
}
core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc)
{
    return impl_->create_frame(tag, desc);
}
core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, common::bit_depth depth)
{
    return impl_->create_frame(tag, desc, depth);
}
#ifdef WIN32
core::const_frame image_mixer::import_d3d_texture(const void* tag,
                                                  const std::shared_ptr& d3d_texture,
                                                  core::pixel_format format,
                                                  common::bit_depth depth)
{
    return impl_->import_d3d_texture(tag, d3d_texture, format, depth);
}
#endif
common::bit_depth image_mixer::depth() const { return impl_->depth(); }

}}} // namespace caspar::accelerator::vulkan

================================================ FILE: src/accelerator/vulkan/image/image_mixer.h ================================================

/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */

#pragma once

// NOTE(review): stripped system/library includes; restore from the repository.
#include #include #include #include #include #include #include #include

namespace caspar { namespace accelerator { namespace vulkan {

// Vulkan implementation of core::image_mixer: collects frames for a channel,
// composites them on the GPU and returns the mixed frame asynchronously.
class image_mixer final : public core::image_mixer
{
  public:
    image_mixer(const spl::shared_ptr& vulkan, int channel_id, const size_t max_frame_size, common::bit_depth depth);
    image_mixer(const image_mixer&) = delete;
    ~image_mixer();

    image_mixer& operator=(const image_mixer&) = delete;

    std::future, std::shared_ptr>> render(const core::video_format_desc& format_desc) override;

    core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override;
    core::mutable_frame create_frame(const void* video_stream_tag, const core::pixel_format_desc& desc, common::bit_depth depth) override;

#ifdef WIN32
    core::const_frame import_d3d_texture(const void* tag,
                                         const std::shared_ptr& d3d_texture,
                                         core::pixel_format format,
                                         common::bit_depth depth) override;
#endif

    void update_aspect_ratio(double aspect_ratio) override;

    // core::image_mixer
    void push(const core::frame_transform& frame) override;
    void visit(const core::const_frame& frame) override;
    void pop() override;

    common::bit_depth depth() const override;

  private:
    struct impl;
    std::shared_ptr impl_;
};

}}} // namespace caspar::accelerator::vulkan

================================================ FILE: src/accelerator/vulkan/image/vertex_shader.vert ================================================

#version 450

layout(location = 0) in vec2 Position;
layout(location = 1) in vec4 TexCoordIn;

layout(location = 0) out vec4 TexCoord;
layout(location = 1) out vec4 TexCoord2;

void main()
{
    TexCoord = TexCoordIn;
    vec4 pos = vec4(Position, 0, 1);
    // TexCoord2 carries the untransformed [0,1] position, used by the fragment
    // shader for local/layer key-mask lookups.
    TexCoord2 = vec4(pos.xy, 0.0, 0.0);
    // Map the [0,1] position into clip space [-1,1].
    pos.x = pos.x*2.0 - 1.0;
    pos.y = pos.y*2.0 - 1.0;
    gl_Position = pos;
}

================================================ FILE: src/accelerator/vulkan/util/buffer.cpp ================================================

/*
 * Copyright 2025
 *
* This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Niklas Andersson, niklas@niklaspandersson.se */ #include "buffer.h" #include #include #pragma warning(push) #pragma warning(disable : 4189) #include #pragma warning(pop) #include namespace caspar { namespace accelerator { namespace vulkan { static std::atomic g_w_total_count; static std::atomic g_w_total_size; static std::atomic g_r_total_count; static std::atomic g_r_total_size; struct buffer::impl { int size_ = 0; bool write_ = false; VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; VmaAllocator allocator_; impl(const impl&) = delete; impl& operator=(const impl&) = delete; public: impl(int size, bool write, VmaAllocator allocator) : size_(size) , write_(write) , allocator_(allocator) { VkBufferCreateInfo bufCreateInfo = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; bufCreateInfo.size = size; bufCreateInfo.usage = write ? VK_BUFFER_USAGE_TRANSFER_SRC_BIT : VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = write ? VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT : VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; vmaCreateBuffer(allocator_, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); (write ? 
g_w_total_count : g_r_total_count)++; (write ? g_w_total_size : g_r_total_size) += size_; } ~impl() { vmaDestroyBuffer(allocator_, buf, alloc); (write_ ? g_w_total_size : g_r_total_size) -= size_; (write_ ? g_w_total_count : g_r_total_count)--; } }; buffer::buffer(int size, bool write, VmaAllocator allocator) : impl_(new impl(size, write, allocator)) { } buffer::buffer(buffer&& other) : impl_(std::move(other.impl_)) { } buffer::~buffer() {} buffer& buffer::operator=(buffer&& other) { impl_ = std::move(other.impl_); return *this; } void* buffer::data() { return impl_->allocInfo.pMappedData; } bool buffer::write() const { return impl_->write_; } int buffer::size() const { return static_cast(impl_->allocInfo.size); } VkBuffer buffer::id() const { return impl_->buf; } boost::property_tree::wptree buffer::info() { boost::property_tree::wptree info; info.add(L"total_read_count", g_r_total_count); info.add(L"total_write_count", g_w_total_count); info.add(L"total_read_size", g_r_total_size); info.add(L"total_write_size", g_w_total_size); return info; } }}} // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/buffer.h ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Niklas Andersson, niklas@niklaspandersson.se */ #pragma once #include #include #include namespace caspar { namespace accelerator { namespace vulkan { class buffer final { public: static boost::property_tree::wptree info(); buffer(int size, bool write, VmaAllocator allocator); buffer(const buffer&) = delete; buffer(buffer&& other); ~buffer(); buffer& operator=(const buffer&) = delete; buffer& operator=(buffer&& other); VkBuffer id() const; void* data(); int size() const; bool write() const; private: struct impl; std::unique_ptr impl_; }; }}} // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/device.cpp ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Niklas Andersson, niklas@niklaspandersson.se */ #include "device.h" #include "../image/image_kernel.h" #include "buffer.h" #include "pipeline.h" #include "texture.h" #include #include #include #include #include #include #include VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE #define VMA_STATIC_VULKAN_FUNCTIONS 0 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 #define VMA_IMPLEMENTATION #pragma warning(push) #pragma warning(disable : 4189) #include #pragma warning(pop) #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace accelerator { namespace vulkan { using namespace boost::asio; inline VKAPI_ATTR VkBool32 VKAPI_CALL default_debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void*) { auto ms = vkb::to_string_message_severity(messageSeverity); auto mt = vkb::to_string_message_type(messageType); if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) { CASPAR_LOG(info) << "[" << ms << ": " << mt << "] - " << pCallbackData->pMessageIdName << ", " << pCallbackData->pMessage; // printf("[%s: %s] - %s\n%s\n", ms, mt, pCallbackData->pMessageIdName, pCallbackData->pMessage); } else { if (pCallbackData->pMessage != nullptr) { CASPAR_LOG(info) << "[" << ms << ": " << mt << "] " << pCallbackData->pMessage; // printf("[%s: %s]\n%s\n", ms, mt, pCallbackData->pMessage); } } return VK_FALSE; // Applications must return false here (Except Validation, if return true, will skip calling to // driver) } void transitionImageLayout(const vk::Image& image, vk::ImageLayout oldLayout, vk::AccessFlags2 srcAccessMask, vk::PipelineStageFlags2 srcStage, vk::ImageLayout newLayout, vk::AccessFlags2 dstAccessMask, vk::PipelineStageFlags2 dstStage, vk::CommandBuffer cmdBuffer) { auto range = vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1); vk::ImageMemoryBarrier2 
barrier{}; barrier.oldLayout = oldLayout, barrier.newLayout = newLayout, barrier.srcQueueFamilyIndex = vk::QueueFamilyIgnored, barrier.dstQueueFamilyIndex = vk::QueueFamilyIgnored, barrier.image = image, barrier.subresourceRange = range; barrier.srcAccessMask = srcAccessMask; barrier.srcStageMask = srcStage; barrier.dstAccessMask = dstAccessMask; barrier.dstStageMask = dstStage; vk::DependencyInfo dep_info; dep_info.setImageMemoryBarriers(barrier); cmdBuffer.pipelineBarrier2(dep_info); } struct device::impl : public std::enable_shared_from_this { using texture_queue_t = tbb::concurrent_bounded_queue>; using buffer_queue_t = tbb::concurrent_bounded_queue>; std::array, 2> attachment_pools_; std::array, 4>, 2> device_pools_; std::array, 2> host_pools_; std::wstring version_; vkb::Instance _vkb_instance; vkb::PhysicalDevice _vkb_physical_device; vk::PhysicalDeviceMemoryProperties _memoryProperties; vk::PhysicalDevice _physical_device; vk::Device _device; vk::Queue _queue; vk::CommandPool _command_pool; VmaAllocator _allocator; std::array, 2> _pipelines; struct inflight_command_buffer { vk::CommandBuffer cmd; uint64_t semaphore_value; }; std::deque _transfer_cmd_buffers; vk::Semaphore _semaphore; uint64_t _semaphore_value{0}; io_context io_context_; decltype(make_work_guard(io_context_)) work_; std::thread thread_; impl() : work_(make_work_guard(io_context_)) { CASPAR_LOG(info) << L"Initializing Vulkan Device."; auto instance_builder = vkb::InstanceBuilder() #ifdef _DEBUG .enable_validation_layers(true) .set_debug_messenger_severity(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) .set_debug_messenger_type(VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) .set_debug_callback(default_debug_callback) #endif .set_app_name("CasparCG") .set_headless(true) .set_engine_name("CasparCG") .require_api_version(VK_API_VERSION_1_3); auto 
instance_ret = instance_builder.build(); if (!instance_ret) { CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to create Vulkan instance: " + instance_ret.error().message())); } _vkb_instance = instance_ret.value(); VULKAN_HPP_DEFAULT_DISPATCHER.init(_vkb_instance.fp_vkGetInstanceProcAddr); VULKAN_HPP_DEFAULT_DISPATCHER.init(vk::Instance(_vkb_instance.instance)); // Find suitable physical device auto gpu_selector = vkb::PhysicalDeviceSelector(_vkb_instance); vk::PhysicalDeviceVulkan12Features features12; features12.descriptorIndexing = true; features12.descriptorBindingPartiallyBound = true; features12.runtimeDescriptorArray = true; features12.shaderSampledImageArrayNonUniformIndexing = true; features12.timelineSemaphore = true; vk::PhysicalDeviceVulkan13Features features13; features13.dynamicRendering = true; features13.synchronization2 = true; vk::PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR localReadFeatures; localReadFeatures.dynamicRenderingLocalRead = true; auto gpu_res = gpu_selector.set_minimum_version(1, 3) .set_required_features_12(features12) .set_required_features_13(features13) .add_required_extension(VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_EXTENSION_NAME) .add_required_extension_features(localReadFeatures) .prefer_gpu_device_type(vkb::PreferredDeviceType::discrete) .select(); if (!gpu_res) { CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to select physical device: " + gpu_res.error().message())); } _vkb_physical_device = gpu_res.value(); CASPAR_LOG(info) << "Selected Vulkan device: " << _vkb_physical_device.properties.deviceName; vk::PhysicalDeviceRobustness2FeaturesEXT robustness2Features; robustness2Features.nullDescriptor = true; _vkb_physical_device.enable_extension_features_if_present(robustness2Features); // Create the logical device auto device_builder = vkb::DeviceBuilder(_vkb_physical_device); _physical_device = vk::PhysicalDevice(_vkb_physical_device.physical_device); auto device_res = device_builder.build(); if 
(!device_res) { CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to create device: " + device_res.error().message())); } auto vkb_device = device_res.value(); _device = vk::Device(vkb_device.device); VULKAN_HPP_DEFAULT_DISPATCHER.init(_device); _queue = vk::Queue(vkb_device.get_queue(vkb::QueueType::graphics).value()); auto queue_family = vkb_device.get_queue_index(vkb::QueueType::graphics).value(); vk::CommandPoolCreateInfo pool_info; pool_info.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer; pool_info.queueFamilyIndex = queue_family; _command_pool = _device.createCommandPool(pool_info); vk::SemaphoreTypeCreateInfo timeline_info{}; timeline_info.semaphoreType = vk::SemaphoreType::eTimeline; timeline_info.initialValue = 0; vk::SemaphoreCreateInfo semaphore_info{}; semaphore_info.pNext = &timeline_info; _semaphore = _device.createSemaphore(semaphore_info); VmaVulkanFunctions vulkanFunctions = {}; vulkanFunctions.vkGetInstanceProcAddr = _vkb_instance.fp_vkGetInstanceProcAddr; vulkanFunctions.vkGetDeviceProcAddr = vkb_device.fp_vkGetDeviceProcAddr; VmaAllocatorCreateInfo allocatorCreateInfo = {}; allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT; allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_3; allocatorCreateInfo.physicalDevice = _physical_device; allocatorCreateInfo.device = _device; allocatorCreateInfo.instance = _vkb_instance.instance; allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; vmaCreateAllocator(&allocatorCreateInfo, &_allocator); _memoryProperties = _physical_device.getMemoryProperties(); _pipelines[0] = std::make_shared(_device, vk::Format::eR8G8B8A8Unorm); _pipelines[1] = std::make_shared(_device, vk::Format::eR16G16B16A16Unorm); thread_ = std::thread([&] { set_thread_name(L"Vulkan Device"); io_context_.run(); }); } ~impl() { work_.reset(); thread_.join(); _device.waitIdle(); for (auto& pool : host_pools_) pool.clear(); for (auto& pool : attachment_pools_) pool.clear(); for (auto& pools : 
device_pools_) for (auto& pool : pools) pool.clear(); _transfer_cmd_buffers.clear(); _device.destroySemaphore(_semaphore); _device.destroyCommandPool(_command_pool); vmaDestroyAllocator(_allocator); for (auto& pipeline : _pipelines) { pipeline.reset(); } _device.destroy(); vkb::destroy_instance(_vkb_instance); } template auto spawn_async(Func&& func) { using result_type = decltype(func(std::declval())); using task_type = std::packaged_task; auto task = task_type(std::forward(func)); auto future = task.get_future(); boost::asio::spawn(io_context_, std::move(task) #if BOOST_VERSION >= 108000 , [](std::exception_ptr e) { if (e) std::rethrow_exception(e); } #endif ); return future; } template auto dispatch_async(Func&& func) { using result_type = decltype(func()); using task_type = std::packaged_task; auto task = task_type(std::forward(func)); auto future = task.get_future(); boost::asio::dispatch(io_context_, std::move(task)); return future; } template auto dispatch_sync(Func&& func) -> decltype(func()) { return dispatch_async(std::forward(func)).get(); } std::wstring version() { return version_; } uint32_t findDedicatedMemoryType(uint32_t typeMask, vk::MemoryPropertyFlags properties) { for (uint32_t i = 0; i < _memoryProperties.memoryTypeCount; ++i) { if ((typeMask & (1 << i)) && ((_memoryProperties.memoryTypes[i].propertyFlags & properties) == properties)) { return i; } } throw std::runtime_error("Failed to find suitable memory type"); } uint64_t submitSingleTimeCommands(std::function func) { vk::CommandBuffer cmd_buffer = nullptr; if (_transfer_cmd_buffers.size() > 1) { auto completed = _device.getSemaphoreCounterValue(_semaphore); // try to reuse the oldest existing command buffer if (_transfer_cmd_buffers.front().semaphore_value <= completed) { cmd_buffer = _transfer_cmd_buffers.front().cmd; cmd_buffer.reset(); _transfer_cmd_buffers.pop_front(); } } if (!cmd_buffer) { // create a new command buffer vk::CommandBufferAllocateInfo allocInfo{}; allocInfo.commandPool 
= _command_pool; allocInfo.level = vk::CommandBufferLevel::ePrimary; allocInfo.commandBufferCount = 1; cmd_buffer = _device.allocateCommandBuffers(allocInfo)[0]; } cmd_buffer.begin(vk::CommandBufferBeginInfo{vk::CommandBufferUsageFlagBits::eOneTimeSubmit}); func(cmd_buffer); cmd_buffer.end(); auto signal_value = ++_semaphore_value; vk::TimelineSemaphoreSubmitInfo timelineInfo{}; timelineInfo.setSignalSemaphoreValues(signal_value); vk::SubmitInfo submitInfo{}; submitInfo.setCommandBuffers(cmd_buffer); submitInfo.setSignalSemaphores(_semaphore); submitInfo.pNext = &timelineInfo; _queue.submit(submitInfo); _transfer_cmd_buffers.push_back({cmd_buffer, signal_value}); return signal_value; } std::vector allocateCommandBuffers(uint32_t count) { return _device.allocateCommandBuffers( vk::CommandBufferAllocateInfo(_command_pool, vk::CommandBufferLevel::ePrimary, count)); } void submit(const vk::SubmitInfo& submitInfo, vk::Fence fence) { _queue.submit(submitInfo, fence); } std::shared_ptr create_attachment(int width, int height, common::bit_depth depth, uint32_t components_count) { CASPAR_VERIFY(width > 0 && height > 0); auto depth_pool_index = depth == common::bit_depth::bit8 ? 0 : 1; auto format = depth == common::bit_depth::bit8 ? vk::Format::eR8G8B8A8Unorm : vk::Format::eR16G16B16A16Unorm; // TODO (perf) Shared pool. 
auto pool = &attachment_pools_[depth_pool_index][(width << 16 & 0xFFFF0000) | (height & 0x0000FFFF)]; auto extent = vk::Extent3D{static_cast(width), static_cast(height), 1}; std::shared_ptr tex; if (!pool->try_pop(tex)) { vk::ImageCreateInfo imageInfo{}; imageInfo.imageType = vk::ImageType::e2D; imageInfo.format = format; imageInfo.extent = extent; imageInfo.mipLevels = 1; imageInfo.arrayLayers = 1; imageInfo.initialLayout = vk::ImageLayout::eUndefined; imageInfo.samples = vk::SampleCountFlagBits::e1; imageInfo.tiling = vk::ImageTiling::eOptimal; imageInfo.usage = vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eInputAttachment | vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled; imageInfo.sharingMode = vk::SharingMode::eExclusive; auto image = _device.createImage(imageInfo); auto memReq = _device.getImageMemoryRequirements(image); vk::MemoryAllocateInfo allocInfo{}; allocInfo.allocationSize = memReq.size; allocInfo.memoryTypeIndex = findDedicatedMemoryType(memReq.memoryTypeBits, vk::MemoryPropertyFlagBits::eDeviceLocal); auto imageMemory = _device.allocateMemory(allocInfo); _device.bindImageMemory(image, imageMemory, 0); auto range = vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1); vk::ImageViewCreateInfo createInfo( {}, image, vk::ImageViewType::e2D, format, vk::ComponentMapping(), range); auto imageView = _device.createImageView(createInfo); tex = std::make_shared( width, height, components_count, depth, image, imageMemory, imageView, _device); } submitSingleTimeCommands([&](vk::CommandBuffer cmd) { transitionImageLayout( tex->id(), vk::ImageLayout::eUndefined, vk::AccessFlagBits2::eNone, vk::PipelineStageFlagBits2::eTopOfPipe, vk::ImageLayout::eRenderingLocalRead, vk::AccessFlagBits2::eColorAttachmentWrite | vk::AccessFlagBits2::eInputAttachmentRead, vk::PipelineStageFlagBits2::eColorAttachmentOutput | vk::PipelineStageFlagBits2::eFragmentShader, cmd); }); 
tex->set_depth(depth); auto ptr = tex.get(); return std::shared_ptr( ptr, [tex = std::move(tex), pool, self = shared_from_this()](texture*) mutable { pool->push(tex); }); } std::shared_ptr create_texture(int width, int height, int stride, common::bit_depth depth, bool clear) { CASPAR_VERIFY(stride > 0 && stride < 5); CASPAR_VERIFY(width > 0 && height > 0); static vk::Format INTERNAL_FORMAT[][5] = {{vk::Format::eUndefined, vk::Format::eR8Unorm, vk::Format::eR8G8Unorm, vk::Format::eR8G8B8Unorm, vk::Format::eR8G8B8A8Unorm}, {vk::Format::eUndefined, vk::Format::eR16Unorm, vk::Format::eR16G16Unorm, vk::Format::eR16G16B16Unorm, vk::Format::eR16G16B16A16Unorm}}; auto depth_pool_index = depth == common::bit_depth::bit8 ? 0 : 1; auto format = INTERNAL_FORMAT[depth_pool_index][stride]; auto pool = &device_pools_[depth_pool_index][stride - 1][(width << 16 & 0xFFFF0000) | (height & 0x0000FFFF)]; auto extent = vk::Extent3D{static_cast(width), static_cast(height), 1}; std::shared_ptr tex; if (!pool->try_pop(tex)) { vk::ImageCreateInfo imageInfo{}; imageInfo.imageType = vk::ImageType::e2D; imageInfo.format = format; imageInfo.extent = extent; imageInfo.mipLevels = 1; imageInfo.arrayLayers = 1; imageInfo.initialLayout = vk::ImageLayout::eUndefined; imageInfo.samples = vk::SampleCountFlagBits::e1; imageInfo.tiling = vk::ImageTiling::eOptimal; imageInfo.usage = vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled; imageInfo.sharingMode = vk::SharingMode::eExclusive; auto image = _device.createImage(imageInfo); auto memReq = _device.getImageMemoryRequirements(image); vk::MemoryAllocateInfo allocInfo{}; allocInfo.allocationSize = memReq.size; allocInfo.memoryTypeIndex = findDedicatedMemoryType(memReq.memoryTypeBits, vk::MemoryPropertyFlagBits::eDeviceLocal); auto imageMemory = _device.allocateMemory(allocInfo); _device.bindImageMemory(image, imageMemory, 0); auto clearValue = vk::ClearColorValue(std::array{0.0f, 0.0f, 0.0f, 1.0f}); auto range = 
vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1); vk::ImageViewCreateInfo createInfo( {}, image, vk::ImageViewType::e2D, format, vk::ComponentMapping(), range); auto imageView = _device.createImageView(createInfo); tex = std::make_shared(width, height, stride, depth, image, imageMemory, imageView, _device); } tex->set_depth(depth); auto ptr = tex.get(); return std::shared_ptr( ptr, [tex = std::move(tex), pool, self = shared_from_this()](texture*) mutable { pool->push(tex); }); } std::shared_ptr create_buffer(int size, bool write) { CASPAR_VERIFY(size > 0); // TODO (perf) Shared pool. auto pool = &host_pools_[static_cast(write ? 1 : 0)][size]; std::shared_ptr buf; if (!pool->try_pop(buf)) { buf = std::make_shared(size, write, _allocator); } auto ptr = buf.get(); return std::shared_ptr(ptr, [buf = std::move(buf), self = shared_from_this()](buffer*) mutable { auto pool = &self->host_pools_[static_cast(buf->write() ? 1 : 0)][buf->size()]; pool->push(std::move(buf)); }); } array create_array(int size) { auto buf = create_buffer(size, true); auto ptr = reinterpret_cast(buf->data()); return array(ptr, buf->size(), std::move(buf)); } std::future> copy_async(const array& source, int width, int height, int stride, common::bit_depth depth) { return dispatch_async([this, source, width, height, stride, depth]() { std::shared_ptr buf; auto tmp = source.storage>(); if (tmp) { buf = *tmp; } else { buf = create_buffer(static_cast(source.size()), true); std::memcpy(buf->data(), source.data(), source.size()); } auto tex = create_texture(width, height, stride, depth, false); vk::BufferImageCopy region(0, 0, 0, vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1), vk::Offset3D(0, 0, 0), vk::Extent3D(width, height, 1)); submitSingleTimeCommands([&](vk::CommandBuffer cmd) { transitionImageLayout(tex->id(), vk::ImageLayout::eUndefined, vk::AccessFlagBits2::eNone, vk::PipelineStageFlagBits2::eTopOfPipe, vk::ImageLayout::eTransferDstOptimal, 
vk::AccessFlagBits2::eTransferWrite, vk::PipelineStageFlagBits2::eTransfer, cmd); cmd.copyBufferToImage(buf->id(), tex->id(), vk::ImageLayout::eTransferDstOptimal, region); transitionImageLayout(tex->id(), vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits2::eTransferWrite, vk::PipelineStageFlagBits2::eTransfer, vk::ImageLayout::eShaderReadOnlyOptimal, vk::AccessFlagBits2::eShaderRead, vk::PipelineStageFlagBits2::eFragmentShader, cmd); }); // No need to wait here, GPU-GPU deps (the usage of this texture on the device) are enforced by the memory // barriers return tex; }); } std::future> copy_async(const std::shared_ptr& source) { auto f = dispatch_async([this, source]() -> std::pair, uint64_t> { auto buf = create_buffer(source->size(), false); vk::CopyImageToBufferInfo2 copyInfo{}; copyInfo.dstBuffer = buf->id(); copyInfo.srcImage = source->id(); copyInfo.srcImageLayout = vk::ImageLayout::eTransferSrcOptimal; vk::BufferImageCopy2 region{}; region.bufferOffset = 0; region.imageSubresource = vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1); region.imageOffset = vk::Offset3D{0, 0, 0}; region.imageExtent = vk::Extent3D{static_cast(source->width()), static_cast(source->height()), 1}; copyInfo.setRegions(region); auto signal_value = submitSingleTimeCommands([&](vk::CommandBuffer cmd) { transitionImageLayout(source->id(), vk::ImageLayout::eRenderingLocalRead, vk::AccessFlagBits2::eColorAttachmentWrite, vk::PipelineStageFlagBits2::eColorAttachmentOutput, vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits2::eHostRead, vk::PipelineStageFlagBits2::eHost, cmd); cmd.copyImageToBuffer2(copyInfo); }); return {buf, signal_value}; }); return std::async(std::launch::deferred, [this, f = std::move(f)]() mutable { auto [buf, signal_value] = f.get(); vk::SemaphoreWaitInfo waitInfo{}; waitInfo.setSemaphores(_semaphore); waitInfo.setValues(signal_value); auto res = _device.waitSemaphores(waitInfo, 1000000000); if (res != vk::Result::eSuccess) { 
CASPAR_LOG(warning) << L"[Vulkan] Timeout waiting for readback semaphore"; } auto ptr = reinterpret_cast(buf->data()); auto size = buf->size(); return array(ptr, size, std::move(buf)); }); } boost::property_tree::wptree info() const { boost::property_tree::wptree info; boost::property_tree::wptree pooled_device_buffers; size_t total_pooled_device_buffer_size = 0; size_t total_pooled_device_buffer_count = 0; for (size_t i = 0; i < device_pools_.size(); ++i) { auto& depth_pools = device_pools_.at(i); for (size_t j = 0; j < depth_pools.size(); ++j) { auto& pools = depth_pools.at(j); bool mipmapping = j > 3; auto stride = mipmapping ? j - 3 : j + 1; for (auto& pool : pools) { auto width = pool.first >> 16; auto height = pool.first & 0x0000FFFF; auto size = width * height * stride; auto count = pool.second.size(); if (count == 0) continue; boost::property_tree::wptree pool_info; pool_info.add(L"stride", stride); pool_info.add(L"mipmapping", mipmapping); pool_info.add(L"width", width); pool_info.add(L"height", height); pool_info.add(L"size", size); pool_info.add(L"count", count); total_pooled_device_buffer_size += size * count; total_pooled_device_buffer_count += count; pooled_device_buffers.add_child(L"device_buffer_pool", pool_info); } } } info.add_child(L"gl.details.pooled_device_buffers", pooled_device_buffers); boost::property_tree::wptree pooled_host_buffers; size_t total_read_size = 0; size_t total_write_size = 0; size_t total_read_count = 0; size_t total_write_count = 0; for (size_t i = 0; i < host_pools_.size(); ++i) { auto& pools = host_pools_.at(i); auto is_write = i == 1; for (auto& pool : pools) { auto size = pool.first; auto count = pool.second.size(); if (count == 0) continue; boost::property_tree::wptree pool_info; pool_info.add(L"usage", is_write ? L"write_only" : L"read_only"); pool_info.add(L"size", size); pool_info.add(L"count", count); pooled_host_buffers.add_child(L"host_buffer_pool", pool_info); (is_write ? 
total_write_count : total_read_count) += count; (is_write ? total_write_size : total_read_size) += size * count; } } info.add_child(L"gl.details.pooled_host_buffers", pooled_host_buffers); info.add(L"gl.summary.pooled_device_buffers.total_count", total_pooled_device_buffer_count); info.add(L"gl.summary.pooled_device_buffers.total_size", total_pooled_device_buffer_size); // info.add_child(L"gl.summary.all_device_buffers", texture::info()); info.add(L"gl.summary.pooled_host_buffers.total_read_count", total_read_count); info.add(L"gl.summary.pooled_host_buffers.total_write_count", total_write_count); info.add(L"gl.summary.pooled_host_buffers.total_read_size", total_read_size); info.add(L"gl.summary.pooled_host_buffers.total_write_size", total_write_size); info.add_child(L"gl.summary.all_host_buffers", buffer::info()); return info; } std::future gc() { return spawn_async([this](yield_context yield) { CASPAR_LOG(info) << " vulkan: Running GC."; try { for (auto& depth_pools : device_pools_) { for (auto& pools : depth_pools) { for (auto& pool : pools) pool.second.clear(); } } for (auto& pools : host_pools_) { for (auto& pool : pools) pool.second.clear(); } for (auto& pools : attachment_pools_) { for (auto& pool : pools) pool.second.clear(); } } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); } }); } }; device::device() : impl_(new impl()) { } device::~device() {} vk::PhysicalDeviceMemoryProperties device::getMemoryProperties() { return impl_->_memoryProperties; } std::vector device::allocateCommandBuffers(uint32_t count) { return impl_->allocateCommandBuffers(count); } void device::submit(const vk::SubmitInfo& submitInfo, vk::Fence fence) { impl_->submit(submitInfo, fence); } vk::Device device::getVkDevice() const { return impl_->_device; } std::shared_ptr device::get_pipeline(common::bit_depth depth) { return impl_->_pipelines[depth == common::bit_depth::bit8 ? 
0 : 1]; } std::shared_ptr device::create_attachment(int width, int height, common::bit_depth depth, uint32_t components_count) { return impl_->create_attachment(width, height, depth, components_count); } std::shared_ptr device::create_texture(int width, int height, int stride, common::bit_depth depth) { return impl_->create_texture(width, height, stride, depth, true); } array device::create_array(int size) { return impl_->create_array(size); } std::future> device::copy_async(const array& source, int width, int height, int stride, common::bit_depth depth) { return impl_->copy_async(source, width, height, stride, depth); } std::future> device::copy_async(const std::shared_ptr& source) { return impl_->copy_async(source); } void device::dispatch(std::function func) { boost::asio::dispatch(impl_->io_context_, std::move(func)); } std::wstring device::version() const { return impl_->version(); } boost::property_tree::wptree device::info() const { return impl_->info(); } std::future device::gc() { return impl_->gc(); } }}} // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/device.h ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */
#pragma once

// NOTE(review): the include targets below were lost during extraction; the
// bare #include lines are preserved as-is.
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace accelerator { namespace vulkan {

struct draw_params;
class image_kernel;

// Owns the Vulkan logical device plus its pooled textures/attachments/host
// buffers. All device work is funneled onto an internal executor thread via
// dispatch()/dispatch_async(); the public methods below are thin pimpl
// forwarders.
class device final
    : public std::enable_shared_from_this
    , public accelerator_device
{
  public:
    device();
    ~device();

    device(const device&)            = delete;
    device& operator=(const device&) = delete;

    // Returns the graphics pipeline for the requested bit depth (8/16 bit).
    std::shared_ptr get_pipeline(common::bit_depth depth);

    std::pair upload_vertex_buffer(const std::vector& coords);

    vk::PhysicalDeviceMemoryProperties getMemoryProperties();
    std::vector allocateCommandBuffers(uint32_t count);
    void submit(const vk::SubmitInfo& submitInfo, vk::Fence fence);
    vk::Device getVkDevice() const;

    // Render-target attachment (pooled by the impl).
    std::shared_ptr create_attachment(int width, int height, common::bit_depth depth, uint32_t components_count);
    // Sampled texture (pooled by the impl).
    std::shared_ptr create_texture(int width, int height, int stride, common::bit_depth depth);
    // Host-visible scratch storage for uploads.
    array create_array(int size);

    // Asynchronous host -> device / texture -> host copies.
    std::future> copy_async(const array& source, int width, int height, int stride, common::bit_depth depth);
    std::future> copy_async(const std::shared_ptr& source);

    // Schedules func on the device thread; returns a future for its result.
    template
    auto dispatch_async(Func&& func)
    {
        using result_type = decltype(func());
        using task_type   = std::packaged_task;

        auto task   = std::make_shared(std::forward(func));
        auto future = task->get_future();
        // Copy-capture keeps the shared task alive until it has executed.
        dispatch([=] { (*task)(); });
        return future;
    }

    // Same as dispatch_async, but blocks the caller for the result.
    template
    auto dispatch_sync(Func&& func)
    {
        return dispatch_async(std::forward(func)).get();
    }

    std::wstring version() const;
    boost::property_tree::wptree info() const;
    // Clears all resource pools asynchronously.
    std::future gc();

  private:
    void dispatch(std::function func);

    struct impl;
    std::shared_ptr impl_;
};

}}} // namespace caspar::accelerator::vulkan

================================================
FILE: src/accelerator/vulkan/util/draw_params.h
================================================
/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Niklas Andersson, niklas@niklaspandersson.se */ #pragma once #include "transforms.h" #include #include #include #include #include namespace caspar { namespace accelerator { namespace vulkan { enum class keyer { linear = 0, additive, }; struct draw_params final { core::pixel_format_desc pix_desc = core::pixel_format_desc(core::pixel_format::invalid); std::vector> textures; draw_transforms transforms; core::frame_geometry geometry = core::frame_geometry::get_default(); core::blend_mode blend_mode = core::blend_mode::normal; vulkan::keyer keyer = vulkan::keyer::linear; std::shared_ptr background; std::shared_ptr local_key; std::shared_ptr layer_key; double aspect_ratio = 1.0; int target_width; int target_height; }; }}} // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/matrix.cpp ================================================ #include #include #include #include #include #include #include #include #include #include #include "matrix.h" namespace caspar::accelerator::vulkan { t_matrix create_matrix(std::vector> data) { if (data.empty()) CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"data cannot be empty")); t_matrix matrix(data.size(), data.at(0).size()); for (int y = 0; y < matrix.size1(); ++y) { if (data.at(y).size() != matrix.size2()) CASPAR_THROW_EXCEPTION(invalid_argument() << 
msg_info(L"Each row must be of the same length")); for (int x = 0; x < matrix.size2(); ++x) matrix(x, y) = data.at(y).at(x); } return matrix; } t_matrix get_vertex_matrix(const core::image_transform& transform, double aspect_ratio) { using namespace boost::numeric::ublas; auto anchor_matrix = create_matrix({{1.0, 0.0, -transform.anchor[0]}, {0.0, 1.0, -transform.anchor[1]}, {0.0, 0.0, 1.0}}); auto scale_matrix = create_matrix({{transform.fill_scale[0], 0.0, 0.0}, {0.0, transform.fill_scale[1], 0.0}, {0.0, 0.0, 1.0}}); auto aspect_matrix = create_matrix({{1.0, 0.0, 0.0}, {0.0, 1.0 / aspect_ratio, 0.0}, {0.0, 0.0, 1.0}}); auto aspect_inv_matrix = create_matrix({{1.0, 0.0, 0.0}, {0.0, aspect_ratio, 0.0}, {0.0, 0.0, 1.0}}); auto rotation_matrix = create_matrix({{std::cos(transform.angle), -std::sin(transform.angle), 0.0}, {std::sin(transform.angle), std::cos(transform.angle), 0.0}, {0.0, 0.0, 1.0}}); auto translation_matrix = create_matrix( {{1.0, 0.0, transform.fill_translation[0]}, {0.0, 1.0, transform.fill_translation[1]}, {0.0, 0.0, 1.0}}); return anchor_matrix * aspect_matrix * scale_matrix * rotation_matrix * aspect_inv_matrix * translation_matrix; } } // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/matrix.h ================================================ #pragma once #include #include #include namespace caspar::accelerator::vulkan { typedef boost::numeric::ublas::matrix> t_matrix; typedef boost::numeric::ublas::vector> t_point; t_matrix get_vertex_matrix(const core::image_transform& transform, double aspect_ratio); } // namespace caspar::accelerator::vulkan namespace boost::numeric::ublas { template boost::numeric::ublas::matrix operator*(const boost::numeric::ublas::matrix& lhs, const boost::numeric::ublas::matrix& rhs) { return boost::numeric::ublas::matrix(boost::numeric::ublas::prod(lhs, rhs)); } template boost::numeric::ublas::vector operator*(const boost::numeric::ublas::vector& 
lhs, const boost::numeric::ublas::matrix& rhs)
{
    return boost::numeric::ublas::vector(boost::numeric::ublas::prod(lhs, rhs));
}

// Element-wise equality; sizes must match exactly.
template
bool operator==(const boost::numeric::ublas::matrix& lhs, const boost::numeric::ublas::matrix& rhs)
{
    if (lhs.size1() != rhs.size1() || lhs.size2() != rhs.size2())
        return false;
    for (int y = 0; y < lhs.size1(); ++y)
        for (int x = 0; x < lhs.size2(); ++x)
            if (lhs(y, x) != rhs(y, x))
                return false;
    return true;
}

} // namespace boost::numeric::ublas

================================================
FILE: src/accelerator/vulkan/util/pipeline.cpp
================================================
/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */
#include "pipeline.h"

#include "../image/image_kernel.h"
#include "texture.h"
#include "vulkan_image_fragment.h"
#include "vulkan_image_vertex.h"

// NOTE(review): include targets lost in extraction; bare lines kept as-is.
#include
#include
#include

namespace caspar { namespace accelerator { namespace vulkan {

// Builds vertex+fragment stage create-infos from the embedded SPIR-V blobs.
// The created shader modules are destroyed by the caller once the pipeline
// has been created (see impl's constructor).
std::vector create_shader_program(vk::Device device)
{
    // Helper to create shader module
    auto createShaderModule = [&](const uint8_t* code, size_t size) {
        vk::ShaderModuleCreateInfo createInfo{};
        createInfo.codeSize = size;
        createInfo.pCode    = reinterpret_cast(code);
        return device.createShaderModule(createInfo);
    };

    // NOTE(review): the "- 1" presumably strips a trailing byte added by the
    // byte-array embedding step; SPIR-V codeSize must be a multiple of 4 —
    // confirm against the shader-embedding build rule.
    auto vertShaderModule = createShaderModule(vertex_shader, sizeof(vertex_shader) - 1);
    auto fragShaderModule = createShaderModule(fragment_shader, sizeof(fragment_shader) - 1);

    vk::PipelineShaderStageCreateInfo vertShaderStageInfo;
    vertShaderStageInfo.stage  = vk::ShaderStageFlagBits::eVertex;
    vertShaderStageInfo.module = vertShaderModule;
    vertShaderStageInfo.pName  = "main";

    vk::PipelineShaderStageCreateInfo fragShaderStageInfo;
    fragShaderStageInfo.stage  = vk::ShaderStageFlagBits::eFragment;
    fragShaderStageInfo.module = fragShaderModule;
    fragShaderStageInfo.pName  = "main";

    return {vertShaderStageInfo, fragShaderStageInfo};
}

// Vertex layout: location 0 = vec2 position (offset 0), location 1 =
// vec4 texture coords (offset 2 floats); both interleaved in one binding.
std::array get_attribute_descriptions(uint32_t binding)
{
    std::array attributeDescriptions{
        {{0, binding, vk::Format::eR32G32Sfloat, 0}, {1, binding, vk::Format::eR32G32B32A32Sfloat, 2 * sizeof(float)}}};
    return attributeDescriptions;
}

// Number of pre-allocated descriptor sets, reused round-robin per draw.
const int DescriptorPoolSize = 64;
// Size of the bindless sampled-texture array declared by the fragment shader.
const int BindlessTextureCount = 8;

struct pipeline::impl
{
    vk::Device              device_;
    vk::Format              format_;
    vk::Sampler             textureSampler_; // linear filtering, colour planes
    vk::Sampler             keySampler_;     // nearest filtering, key mattes
    vk::DescriptorSetLayout descriptorSetLayout_;
    vk::DescriptorPool      descriptorPool_;
    std::vector             descriptorSets_;
    vk::PipelineLayout      pipelineLayout_;
    vk::Pipeline            pipeline_;
    size_t                  currentDescriptorSet_ = 0; // round-robin cursor

    impl(const impl&) = delete;
    impl& operator=(const impl&) = delete;

    // Creates the descriptor-set layout (binding 0: partially-bound bindless
    // sampled-texture array; binding 1: background input attachment), the
    // pool sized for DescriptorPoolSize sets, and pre-allocates all sets.
    void setup_descriptors()
    {
        // Binding 0: bindless texture array for planes (up to 4), local_key, and layer_key
        vk::DescriptorSetLayoutBinding texturesLayoutBinding{};
        texturesLayoutBinding.binding         = 0;
        texturesLayoutBinding.descriptorType  = vk::DescriptorType::eCombinedImageSampler;
        texturesLayoutBinding.descriptorCount = BindlessTextureCount;
        texturesLayoutBinding.stageFlags      = vk::ShaderStageFlagBits::eFragment;

        // Binding 1: input attachment for background
        vk::DescriptorSetLayoutBinding backgroundLayoutBinding{};
        backgroundLayoutBinding.binding         = 1;
        backgroundLayoutBinding.descriptorType  = vk::DescriptorType::eInputAttachment;
        backgroundLayoutBinding.descriptorCount = 1;
        backgroundLayoutBinding.stageFlags      = vk::ShaderStageFlagBits::eFragment;

        vk::DescriptorSetLayoutCreateInfo layoutInfo{};
        std::array bindings{texturesLayoutBinding, backgroundLayoutBinding};
        layoutInfo.setBindings(bindings);

        // ePartiallyBound on binding 0 so unused texture slots may stay
        // unwritten; binding 1 has no special flags.
        std::array bindingFlags{vk::DescriptorBindingFlagBits::ePartiallyBound, vk::DescriptorBindingFlags{}};
        vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsInfo;
        bindingFlagsInfo.setBindingFlags(bindingFlags);
        layoutInfo.pNext = &bindingFlagsInfo;

        descriptorSetLayout_ = device_.createDescriptorSetLayout(layoutInfo);

        // Create descriptor pool
        vk::DescriptorPoolSize samplerPoolSize(vk::DescriptorType::eCombinedImageSampler,
                                               BindlessTextureCount * DescriptorPoolSize);
        vk::DescriptorPoolSize inputAttachmentPoolSize(vk::DescriptorType::eInputAttachment, 1 * DescriptorPoolSize);
        std::array             poolSizes{samplerPoolSize, inputAttachmentPoolSize};
        vk::DescriptorPoolCreateInfo poolInfo{};
        poolInfo.maxSets = DescriptorPoolSize;
        poolInfo.setPoolSizes(poolSizes);
        descriptorPool_ = device_.createDescriptorPool(poolInfo);

        // Allocate descriptor sets
        std::vector layouts(DescriptorPoolSize, descriptorSetLayout_);
        vk::DescriptorSetAllocateInfo allocInfo;
        allocInfo.descriptorPool = descriptorPool_;
        allocInfo.setSetLayouts(layouts);
        descriptorSets_ = device_.allocateDescriptorSets(allocInfo);
    }

    // Creates the two immutable samplers: linear for colour planes and
    // nearest for key mattes (same settings otherwise).
    void setup_sampler()
    {
        vk::SamplerCreateInfo samplerInfo{};
        samplerInfo.magFilter               = vk::Filter::eLinear;
        samplerInfo.minFilter               = vk::Filter::eLinear;
        samplerInfo.mipmapMode              = vk::SamplerMipmapMode::eLinear;
        samplerInfo.addressModeU            = vk::SamplerAddressMode::eRepeat;
        samplerInfo.addressModeV            = vk::SamplerAddressMode::eRepeat;
        samplerInfo.addressModeW            = vk::SamplerAddressMode::eRepeat;
        samplerInfo.mipLodBias              = 0.0f;
        samplerInfo.anisotropyEnable        = VK_FALSE;
        samplerInfo.maxAnisotropy           = 2;
        samplerInfo.compareEnable           = VK_FALSE;
        samplerInfo.compareOp               = vk::CompareOp::eAlways;
        samplerInfo.minLod                  = 0.0f;
        samplerInfo.maxLod                  = 0.0f;
        samplerInfo.borderColor             = vk::BorderColor::eIntOpaqueBlack;
        samplerInfo.unnormalizedCoordinates = VK_FALSE;
        textureSampler_                     = device_.createSampler(samplerInfo);

        // Key sampler: same config but nearest filtering.
        samplerInfo.magFilter  = vk::Filter::eNearest;
        samplerInfo.minFilter  = vk::Filter::eNearest;
        samplerInfo.mipmapMode = vk::SamplerMipmapMode::eNearest;
        keySampler_            = device_.createSampler(samplerInfo);
    }

  public:
    // Builds the single graphics pipeline used for all layer composition:
    // triangle-fan topology with primitive restart, no fixed-function
    // blending (blending happens in the fragment shader via the background
    // input attachment), dynamic viewport/scissor, dynamic rendering
    // (renderPass == nullptr, colour format supplied via
    // vk::PipelineRenderingCreateInfo).
    impl(vk::Device device, vk::Format format)
        : device_(device)
        , format_(format)
    {
        setup_descriptors();
        setup_sampler();

        // Vertex input
        auto attributeDescriptions = get_attribute_descriptions(0);
        auto vertexBindings        = vk::VertexInputBindingDescription(0, sizeof(float) * 6, vk::VertexInputRate::eVertex);
        vk::PipelineVertexInputStateCreateInfo vertexInputInfo;
        vertexInputInfo.setVertexBindingDescriptions(vertexBindings);
        vertexInputInfo.setVertexAttributeDescriptions(attributeDescriptions);

        // Input assembly
        vk::PipelineInputAssemblyStateCreateInfo inputAssembly{};
        inputAssembly.topology               = vk::PrimitiveTopology::eTriangleFan;
        inputAssembly.primitiveRestartEnable = VK_TRUE;

        vk::PipelineViewportStateCreateInfo viewportState{};
        viewportState.scissorCount  = 1;
        viewportState.viewportCount = 1;

        vk::DynamicState dynamicStates[]{vk::DynamicState::eViewport, vk::DynamicState::eScissor};

        // Rasterizer
        vk::PipelineRasterizationStateCreateInfo rasterizer{};
        rasterizer.depthClampEnable        = VK_FALSE;
        rasterizer.rasterizerDiscardEnable = VK_FALSE;
        rasterizer.polygonMode             = vk::PolygonMode::eFill;
        rasterizer.cullMode                = vk::CullModeFlagBits::eNone;
        rasterizer.frontFace               = vk::FrontFace::eClockwise;
        rasterizer.depthBiasEnable         = VK_FALSE;
        rasterizer.lineWidth               = 1.0f;

        // Multisampling
        vk::PipelineMultisampleStateCreateInfo multisampling{};
        multisampling.rasterizationSamples = vk::SampleCountFlagBits::e1;
        multisampling.sampleShadingEnable  = VK_FALSE;

        // Color blending
        vk::PipelineColorBlendAttachmentState colorBlendAttachment{};
        colorBlendAttachment.blendEnable = vk::False;
        colorBlendAttachment.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
                                              vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA;

        vk::PipelineColorBlendStateCreateInfo colorBlending{};
        colorBlending.logicOpEnable = vk::False;
        colorBlending.logicOp       = vk::LogicOp::eCopy;
        colorBlending.setAttachments(colorBlendAttachment);

        // All shader parameters travel as one push-constant block.
        vk::PushConstantRange range{};
        range.stageFlags = vk::ShaderStageFlagBits::eFragment;
        range.offset     = 0;
        range.size       = sizeof(uniform_block);

        // Pipeline layout
        vk::PipelineLayoutCreateInfo pipelineLayoutInfo{};
        pipelineLayoutInfo.setSetLayouts(descriptorSetLayout_);
        pipelineLayoutInfo.setPushConstantRanges(range);
        pipelineLayout_ = device_.createPipelineLayout(pipelineLayoutInfo);

        vk::PipelineDynamicStateCreateInfo dynamicState{};
        dynamicState.setDynamicStates(dynamicStates);

        // Graphics pipeline
        vk::GraphicsPipelineCreateInfo pipelineInfo{};
        pipelineInfo.pVertexInputState   = &vertexInputInfo;
        pipelineInfo.pInputAssemblyState = &inputAssembly;
        pipelineInfo.pViewportState      = &viewportState;
        pipelineInfo.pRasterizationState = &rasterizer;
        pipelineInfo.pDynamicState       = &dynamicState;
        pipelineInfo.pMultisampleState   = &multisampling;
        pipelineInfo.pColorBlendState    = &colorBlending;
        pipelineInfo.layout              = pipelineLayout_;
        pipelineInfo.renderPass          = nullptr; // dynamic rendering
        pipelineInfo.subpass             = 0;

        auto shaderStages = std::move(create_shader_program(device_));
        pipelineInfo.setStages(shaderStages);

        vk::PipelineRenderingCreateInfo rendering_info{};
        rendering_info.setColorAttachmentFormats({format});
        pipelineInfo.pNext = &rendering_info;

        pipeline_ = device_.createGraphicsPipeline(nullptr, pipelineInfo).value;

        // Cleanup shader modules after pipeline creation
        for (auto& shaderStage : shaderStages) {
            device_.destroyShaderModule(shaderStage.module);
        }
    }

    // Takes the next pooled descriptor set (round-robin) and rewrites it for
    // this draw's image views.
    vk::DescriptorSet acquire_descriptor_set(const std::array& textures)
    {
        // C++ textures array layout:
        // [0] = background attachment, [1..4] = planes, [5] = local_key, [6] = layer_key
        // Shader bindless textures[N] layout:
        // [0..3] = planes, [4] = local_key, [5] = layer_key
        auto descriptorSet    = descriptorSets_[currentDescriptorSet_];
        currentDescriptorSet_ = (currentDescriptorSet_ + 1) % DescriptorPoolSize;

        // Bind planes, local_key, and layer_key to the bindless texture array
        // NOTE(review): slots whose view is absent are written with a null
        // imageView here — presumably relying on the nullDescriptor/
        // partially-bound behaviour; confirm the device enables it.
        std::array textureInfos;
        for (int i = 0; i < 6; ++i) {
            textureInfos[i].sampler     = textureSampler_;
            textureInfos[i].imageView   = textures[i + 1];
            textureInfos[i].imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
        }
        // Override samplers for local_key and layer_key to use nearest filtering
        textureInfos[4].sampler = keySampler_;
        textureInfos[5].sampler = keySampler_;

        vk::WriteDescriptorSet texturesWrite{};
        texturesWrite.dstSet          = descriptorSet;
        texturesWrite.dstBinding      = 0;
        texturesWrite.dstArrayElement = 0;
        texturesWrite.descriptorType  = vk::DescriptorType::eCombinedImageSampler;
        texturesWrite.setImageInfo(textureInfos);
        texturesWrite.descriptorCount = 6;

        // Bind background attachment as input attachment
        vk::DescriptorImageInfo backgroundInfo{};
        backgroundInfo.imageLayout = vk::ImageLayout::eRenderingLocalRead;
        backgroundInfo.imageView   = textures[0];

        vk::WriteDescriptorSet backgroundWrite{};
        backgroundWrite.dstSet          = descriptorSet;
        backgroundWrite.dstBinding      = 1;
        backgroundWrite.dstArrayElement = 0;
        backgroundWrite.descriptorType  = vk::DescriptorType::eInputAttachment;
        backgroundWrite.setImageInfo(backgroundInfo);

        vk::WriteDescriptorSet descriptorWrites[]{ backgroundWrite, texturesWrite };
        device_.updateDescriptorSets(descriptorWrites, nullptr);
        return descriptorSet;
    }

    // Records one draw: bind pipeline/vertex buffer/descriptors, push the
    // uniform block, and issue the (triangle-fan) draw call.
    void draw(vk::CommandBuffer commandBuffer,
              vk::Buffer vertexBuffer,
              uint32_t coords_count,
              uint32_t vertex_buffer_offset,
              const uniform_block& params,
              const std::array& textures)
    {
        auto descriptorSet = acquire_descriptor_set(textures);
        commandBuffer.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline_);
        commandBuffer.bindVertexBuffers(0, vertexBuffer, {vertex_buffer_offset});
        commandBuffer.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipelineLayout_, 0, descriptorSet, nullptr);
        // NOTE(review): "¶ms" below is extraction garbling of "&params".
        commandBuffer.pushConstants(
            pipelineLayout_, vk::ShaderStageFlagBits::eFragment, 0, sizeof(uniform_block), ¶ms);
        commandBuffer.draw(coords_count, 1, 0, 0);
    }

    // Destroys all owned Vulkan objects; pooled descriptor sets are released
    // implicitly when the pool is destroyed.
    ~impl()
    {
        device_.destroyDescriptorPool(descriptorPool_);
        device_.destroyDescriptorSetLayout(descriptorSetLayout_);
        device_.destroySampler(textureSampler_);
        device_.destroySampler(keySampler_);
        device_.destroyPipeline(pipeline_);
        device_.destroyPipelineLayout(pipelineLayout_);
    }
};

// Public pimpl forwarders.
pipeline::pipeline(vk::Device device, vk::Format format)
    : impl_(new impl(device, format))
{
}
pipeline::~pipeline() {}

void pipeline::draw(vk::CommandBuffer commandBuffer,
                    vk::Buffer vertexBuffer,
                    uint32_t coords_count,
                    uint32_t vertex_buffer_offset,
                    const uniform_block& params,
                    const std::array& textures)
{
    impl_->draw(commandBuffer, vertexBuffer, coords_count, vertex_buffer_offset, params, textures);
}

vk::Pipeline pipeline::id() const { return impl_->pipeline_; }

}}} // namespace caspar::accelerator::vulkan

================================================
FILE: src/accelerator/vulkan/util/pipeline.h
================================================
/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Niklas Andersson, niklas@niklaspandersson.se */ #pragma once #include "uniform_block.h" #include namespace caspar { namespace accelerator { namespace vulkan { enum class shader_flags : uint32_t { none = 0, is_straight_alpha = 1 << 0, has_local_key = 1 << 1, has_layer_key = 1 << 2, invert = 1 << 3, levels = 1 << 4, csb = 1 << 5, chroma = 1 << 6, chroma_show_mask = 1 << 7 }; class pipeline final { pipeline(const pipeline&); pipeline& operator=(const pipeline&); public: pipeline(vk::Device device, vk::Format format); ~pipeline(); void draw(vk::CommandBuffer commandBuffer, vk::Buffer vertexBuffer, uint32_t coords_count, uint32_t vertex_buffer_offset, const uniform_block& params, const std::array& textures); vk::Pipeline id() const; private: struct impl; std::unique_ptr impl_; }; }}} // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/renderpass.cpp ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */
#include "renderpass.h"

#include "../image/image_kernel.h"
#include "device.h"
#include "pipeline.h"
#include "texture.h"

namespace caspar { namespace accelerator { namespace vulkan {

// Packs every queued layer's coordinates into one interleaved float buffer
// (6 floats per vertex: position x/y + texture x/y/r/q, matching the vertex
// input layout in pipeline.cpp) and uploads it in a single transfer. Also
// records each layer's byte offset into the shared buffer.
vk::Buffer renderpass::upload_vertex_buffers()
{
    uint32_t total_coords = 0;
    for (auto& layer : layers_) {
        layer.vertex_buffer_offset = total_coords * 6 * sizeof(float);
        total_coords += static_cast(layer.coords.size());
    }

    std::vector fl(total_coords * 6);
    size_t      idx = 0;
    for (auto& layer : layers_) {
        for (auto& c : layer.coords) {
            fl[idx * 6 + 0] = static_cast(c.vertex_x);
            fl[idx * 6 + 1] = static_cast(c.vertex_y);
            fl[idx * 6 + 2] = static_cast(c.texture_x);
            fl[idx * 6 + 3] = static_cast(c.texture_y);
            fl[idx * 6 + 4] = static_cast(c.texture_r);
            fl[idx * 6 + 5] = static_cast(c.texture_q);
            ++idx;
        }
    }
    return _ctx->upload_vertex_data(fl);
}

renderpass::renderpass(frame_context* ctx, uint32_t width, uint32_t height)
    : _ctx(ctx)
    , _pipeline(ctx->get_pipeline())
    , _width(width)
    , _height(height)
    // Default composition target, RGBA (4 components).
    , _default_attachment(ctx->create_attachment(width, height, 4))
{
}

renderpass::~renderpass() {}

std::shared_ptr renderpass::create_attachment(uint32_t components_count)
{
    return _ctx->create_attachment(_width, _height, components_count);
}

// Queues one layer for rendering; no Vulkan commands are recorded until
// commit(). Layers with no geometry are dropped.
void renderpass::draw(const draw_params& params)
{
    auto attachment          = params.background;
    auto [coords, uniforms]  = _ctx->create_draw_data(params);
    if (coords.empty()) {
        return;
    }

    // Image-view layout consumed by pipeline::acquire_descriptor_set:
    // [0] = background, [1..4] = planes, [5] = local_key, [6] = layer_key.
    // NOTE(review): assumes params.textures.size() <= 4 — a fifth plane
    // would overwrite the key slots; confirm upstream guarantees this.
    std::array textures = {attachment->view(), nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
    for (int n = 0; n < params.textures.size(); ++n) {
        textures[1 + n] = params.textures[n]->view();
    }
    if (params.local_key) {
        textures[5] = params.local_key->view();
    }
    if (params.layer_key) {
        textures[6] = params.layer_key->view();
    }

    layers_.push_back({
        attachment,
        params.local_key,
        params.layer_key,
        std::move(textures),
        std::move(coords),
        uniforms,
    });
}

// Records and submits the whole frame: consecutive layers targeting the same
// attachment share one dynamic-rendering pass (separated by by-region
// barriers so the background input attachment is coherent); switching
// attachments ends the pass and, for non-default attachments, transitions
// the finished image to shader-read layout for later sampling.
void renderpass::commit()
{
    auto vertex_buffer = upload_vertex_buffers();
    auto cmd_buffer    = _ctx->get_command_buffer();
    cmd_buffer.begin(vk::CommandBufferBeginInfo(vk::CommandBufferUsageFlagBits::eOneTimeSubmit));

    // NOTE(review): clearColor is never used below (clears go through
    // RenderingAttachmentInfo's loadOp); candidate for removal.
    vk::ClearValue clearColor{vk::ClearColorValue(std::array{0.0f, 0.0f, 0.0f, 0.0f})};

    // Viewport and scissor
    vk::Viewport viewport{0.0f, 0.0f, static_cast(_width), static_cast(_height), 0.0f, 1.0f};
    vk::Extent2D extent = {_width, _height};
    vk::Rect2D   scissor{{0, 0}, extent};

    if (layers_.empty()) {
        // No layers, just clear the default attachment
        vk::RenderingAttachmentInfo attachment_info{};
        attachment_info.imageView   = _default_attachment->view();
        attachment_info.imageLayout = vk::ImageLayout::eRenderingLocalRead;
        attachment_info.loadOp      = vk::AttachmentLoadOp::eClear;
        attachment_info.storeOp     = vk::AttachmentStoreOp::eStore;

        vk::RenderingInfo rendering_info{};
        rendering_info.renderArea = scissor;
        rendering_info.layerCount = 1;
        rendering_info.setColorAttachments(attachment_info);

        cmd_buffer.beginRendering(rendering_info);
        cmd_buffer.setViewport(0, viewport);
        cmd_buffer.setScissor(0, scissor);
    } else {
        // create a renderpass for each layer
        bool                  default_cleared = false;
        std::shared_ptr       previous_attachment;
        for (auto& layer : layers_) {
            if (layer.attachment != previous_attachment) {
                // We need to start a new render pass
                if (previous_attachment) {
                    // If this is not the first pass, end the previous render pass
                    cmd_buffer.endRendering();
                    if (previous_attachment != _default_attachment) {
                        // If we're done with a non-default attachment, we need to transition it to a shader read layout
                        vk::ImageMemoryBarrier2 memoryBarrier{};
                        auto range                   = vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1);
                        memoryBarrier.subresourceRange = range;
                        memoryBarrier.srcStageMask   = vk::PipelineStageFlagBits2::eColorAttachmentOutput;
                        memoryBarrier.srcAccessMask  = vk::AccessFlagBits2::eColorAttachmentWrite;
                        memoryBarrier.dstStageMask   = vk::PipelineStageFlagBits2::eFragmentShader;
                        memoryBarrier.dstAccessMask  = vk::AccessFlagBits2::eInputAttachmentRead;
                        memoryBarrier.oldLayout      = vk::ImageLayout::eRenderingLocalRead;
                        memoryBarrier.newLayout      = vk::ImageLayout::eShaderReadOnlyOptimal;
                        memoryBarrier.image          = previous_attachment->id();
                        vk::DependencyInfo dependencyInfo{};
                        dependencyInfo.setImageMemoryBarriers(memoryBarrier);
                        cmd_buffer.pipelineBarrier2(dependencyInfo);
                    }
                }
                // We only want to clear the default attachment once
                bool do_clear = (layer.attachment != _default_attachment) || !default_cleared;

                vk::RenderingAttachmentInfo attachment_info{};
                attachment_info.imageView   = layer.attachment->view();
                attachment_info.imageLayout = vk::ImageLayout::eRenderingLocalRead;
                attachment_info.loadOp      = do_clear ? vk::AttachmentLoadOp::eClear : vk::AttachmentLoadOp::eLoad;
                attachment_info.storeOp     = vk::AttachmentStoreOp::eStore;

                if (layer.attachment == _default_attachment) {
                    default_cleared = true;
                }
                previous_attachment = layer.attachment;

                vk::RenderingInfo rendering_info{};
                rendering_info.renderArea = scissor;
                rendering_info.layerCount = 1;
                rendering_info.setColorAttachments(attachment_info);

                cmd_buffer.beginRendering(rendering_info);
                cmd_buffer.setViewport(0, viewport);
                cmd_buffer.setScissor(0, scissor);
            } else {
                // We are continuing in the same render pass, so we need a barrier to ensure the attachment is ready
                vk::MemoryBarrier2 memoryBarrier{};
                memoryBarrier.srcStageMask  = vk::PipelineStageFlagBits2::eColorAttachmentOutput;
                memoryBarrier.srcAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite;
                memoryBarrier.dstStageMask  = vk::PipelineStageFlagBits2::eFragmentShader;
                memoryBarrier.dstAccessMask = vk::AccessFlagBits2::eInputAttachmentRead;

                vk::DependencyInfo dependencyInfo{};
                dependencyInfo.dependencyFlags    = vk::DependencyFlagBits::eByRegion;
                dependencyInfo.memoryBarrierCount = 1;
                dependencyInfo.pMemoryBarriers    = &memoryBarrier;
                cmd_buffer.pipelineBarrier2(dependencyInfo);
            }
            _pipeline->draw(cmd_buffer,
                            vertex_buffer,
                            static_cast(layer.coords.size()),
                            layer.vertex_buffer_offset,
                            layer.uniforms,
                            layer.textures);
        }
    }
    cmd_buffer.endRendering();
    cmd_buffer.end();
    _ctx->submit();
}

}}} // namespace caspar::accelerator::vulkan

================================================
FILE: src/accelerator/vulkan/util/renderpass.h
================================================
/*
 * Copyright 2025
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Niklas Andersson, niklas@niklaspandersson.se */ #pragma once #include #include #include #include #include #include #include "draw_params.h" #include "uniform_block.h" namespace caspar { namespace accelerator { namespace vulkan { using draw_data = std::pair, uniform_block>; struct frame_context { virtual vk::Buffer upload_vertex_data(const std::vector& data) = 0; virtual draw_data create_draw_data(const draw_params& params) = 0; virtual std::shared_ptr get_pipeline() = 0; virtual vk::CommandBuffer get_command_buffer() = 0; virtual void submit() = 0; virtual std::shared_ptr create_attachment(uint32_t width, uint32_t height, uint32_t components_count) = 0; }; class renderpass { frame_context* _ctx; std::shared_ptr _pipeline; uint32_t _width; uint32_t _height; std::shared_ptr _default_attachment; struct layer_info { std::shared_ptr attachment; std::shared_ptr local_key_attachment; std::shared_ptr layer_key_attachment; std::array textures; std::vector coords; uniform_block uniforms; uint32_t vertex_buffer_offset = 0; }; std::vector layers_; public: renderpass(frame_context* ctx, uint32_t width, uint32_t height); renderpass() = delete; renderpass(const renderpass&) = delete; renderpass& operator=(const renderpass&) = delete; ~renderpass(); std::shared_ptr create_attachment(uint32_t components_count = 4); void draw(const draw_params& params); virtual void commit(); std::shared_ptr default_attachment() const { return _default_attachment; } private: vk::Buffer upload_vertex_buffers(); }; }}} // namespace caspar::accelerator::vulkan ================================================ FILE: 
src/accelerator/vulkan/util/texture.cpp ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Niklas Andersson, niklas@niklaspandersson.se */ #include "texture.h" #include "buffer.h" #include #include namespace caspar { namespace accelerator { namespace vulkan { struct texture::impl { vk::Image image_; vk::DeviceMemory memory_; vk::ImageView imageView_; vk::Device device_; int width_ = 0; int height_ = 0; int stride_ = 0; int size_ = 0; common::bit_depth depth_; impl(const impl&) = delete; impl& operator=(const impl&) = delete; public: impl(int width, int height, int stride, common::bit_depth depth, vk::Image image, vk::DeviceMemory memory, vk::ImageView imageView, vk::Device device) : image_(image) , memory_(memory) , imageView_(imageView) , device_(device) , width_(width) , height_(height) , stride_(stride) , depth_(depth) , size_(width * height * stride * (depth == common::bit_depth::bit8 ? 
1 : 2)) { } ~impl() { device_.destroyImageView(imageView_); device_.freeMemory(memory_); device_.destroyImage(image_); } }; texture::texture(int width, int height, int stride, common::bit_depth depth, vk::Image image, vk::DeviceMemory memory, vk::ImageView imageView, vk::Device device) : impl_(new impl(width, height, stride, depth, image, memory, imageView, device)) { } texture::texture(texture&& other) : impl_(std::move(other.impl_)) { } texture::~texture() {} texture& texture::operator=(texture&& other) { impl_ = std::move(other.impl_); return *this; } vk::ImageView texture::view() const { return impl_->imageView_; } int texture::width() const { return impl_->width_; } int texture::height() const { return impl_->height_; } int texture::stride() const { return impl_->stride_; } common::bit_depth texture::depth() const { return impl_->depth_; } void texture::set_depth(common::bit_depth depth) { impl_->depth_ = depth; } int texture::size() const { return impl_->size_; } VkImage texture::id() const { return impl_->image_; } }}} // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/texture.h ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */
#pragma once

// NOTE(review): the four #include targets below were lost in extraction —
// presumably <memory>, the vulkan C++ headers and common/bit_depth.h; restore
// from upstream before building.
#include
#include
#include
#include

namespace caspar { namespace accelerator { namespace vulkan {

// Move-only RAII wrapper around a Vulkan image + view; see texture.cpp for
// the owning pimpl. Copying is deleted because the impl owns GPU handles.
class texture final
{
  public:
    texture(int width,
            int height,
            int stride,
            common::bit_depth depth,
            vk::Image image,
            vk::DeviceMemory memory,
            vk::ImageView imageView,
            vk::Device device);
    texture(const texture&) = delete;
    texture(texture&& other);

    ~texture();

    texture& operator=(const texture&) = delete;
    texture& operator=(texture&& other);

    vk::ImageView view() const;

    int width() const;
    int height() const;
    int stride() const;
    common::bit_depth depth() const;
    // Updates only the recorded bit depth (no reallocation).
    void set_depth(common::bit_depth depth);
    int size() const;

    // Raw VkImage handle for C-API interop.
    VkImage id() const;

  private:
    struct impl;
    // NOTE(review): template argument (<impl>) lost in extraction.
    std::unique_ptr impl_;
};

}}} // namespace caspar::accelerator::vulkan

================================================
FILE: src/accelerator/vulkan/util/transforms.cpp
================================================
#include "transforms.h"

// NOTE(review): two #include targets lost in extraction — restore from upstream.
#include
#include

namespace caspar::accelerator::vulkan {

// Builds a crop quad from an axis-aligned rectangle as four homogeneous
// (x, y, 1) row vectors, ordered UL, UR, LR, LL.
draw_crop_region::draw_crop_region(double left, double top, double right, double bottom)
{
    // upper left
    coords[0]    = t_point(3);
    coords[0](0) = left;
    coords[0](1) = top;
    coords[0](2) = 1;
    // upper right
    coords[1]    = t_point(3);
    coords[1](0) = right;
    coords[1](1) = top;
    coords[1](2) = 1;
    // lower right
    coords[2]    = t_point(3);
    coords[2](0) = right;
    coords[2](1) = bottom;
    coords[2](2) = 1;
    // lower left
    coords[3]    = t_point(3);
    coords[3](0) = left;
    coords[3](1) = bottom;
    coords[3](2) = 1;
}

// Maps all four corners by `matrix` (row-vector * matrix convention).
void draw_crop_region::apply_transform(const caspar::accelerator::vulkan::t_matrix& matrix)
{
    coords[0] = coords[0] * matrix;
    coords[1] = coords[1] * matrix;
    coords[2] = coords[2] * matrix;
    coords[3] = coords[3] * matrix;
}

// Accumulates the colour-related parts of `other` into `self` (opacity,
// levels, chroma key, blend mode, ...). Continues on the next fragment.
void apply_transform_colour_values(core::image_transform& self, const core::image_transform& other)
{
    // Note: this intentionally does not affect any geometry-related fields, they follow a separate flow
    self.opacity *= other.opacity;
    self.brightness *=
other.brightness;
    self.contrast *= other.contrast;
    self.saturation *= other.saturation;

    // Levels combine by narrowing the input/output windows to the
    // intersection of both transforms; gamma multiplies.
    self.levels.min_input  = std::max(self.levels.min_input, other.levels.min_input);
    self.levels.max_input  = std::min(self.levels.max_input, other.levels.max_input);
    self.levels.min_output = std::max(self.levels.min_output, other.levels.min_output);
    self.levels.max_output = std::min(self.levels.max_output, other.levels.max_output);
    self.levels.gamma *= other.levels.gamma;

    // Chroma: enable/show_mask are sticky; the scalar parameters take the
    // most aggressive of the two values (max for thresholds, min for
    // spill-suppress saturation).
    self.chroma.enable |= other.chroma.enable;
    self.chroma.show_mask |= other.chroma.show_mask;
    self.chroma.target_hue     = std::max(other.chroma.target_hue, self.chroma.target_hue);
    self.chroma.min_saturation = std::max(other.chroma.min_saturation, self.chroma.min_saturation);
    self.chroma.min_brightness = std::max(other.chroma.min_brightness, self.chroma.min_brightness);
    self.chroma.hue_width      = std::max(other.chroma.hue_width, self.chroma.hue_width);
    self.chroma.softness       = std::max(other.chroma.softness, self.chroma.softness);
    self.chroma.spill_suppress = std::max(other.chroma.spill_suppress, self.chroma.spill_suppress);
    self.chroma.spill_suppress_saturation =
        std::min(other.chroma.spill_suppress_saturation, self.chroma.spill_suppress_saturation);

    self.is_key |= other.is_key;
    self.invert |= other.invert;
    self.is_mix |= other.is_mix;
    self.blend_mode = std::max(self.blend_mode, other.blend_mode);
    self.layer_depth += other.layer_depth;
}

// True when `perspective` is the identity corner-pin (unit square), so the
// perspective pass can be skipped entirely.
bool is_default_perspective(const core::corners& perspective)
{
    return perspective.ul[0] == 0 && perspective.ul[1] == 0 && perspective.ur[0] == 1 && perspective.ur[1] == 0 &&
           perspective.ll[0] == 0 && perspective.ll[1] == 1 && perspective.lr[0] == 1 && perspective.lr[1] == 1;
}

// Returns a copy of this transform stack with `transform` composed on top.
// Continues on the next fragment.
draw_transforms draw_transforms::combine_transform(const core::image_transform& transform, double aspect_ratio) const
{
    draw_transforms new_transform(image_transform, steps);
    auto            transform_before = new_transform.current().vertex_matrix;

    // Get matrix for turning coords in 'transform' into the parent frame.
auto new_matrix = get_vertex_matrix(transform, aspect_ratio); apply_transform_colour_values(new_transform.image_transform, transform); new_transform.current().vertex_matrix = new_matrix * new_transform.current().vertex_matrix; // Only enable this for some transforms, to avoid applying crops when a draw_frame is just being used to flatten // other draw_frames if (transform.enable_geometry_modifiers) { // Push the new clip before the new transform applied draw_crop_region new_clip(transform.clip_translation[0], transform.clip_translation[1], transform.clip_translation[0] + transform.clip_scale[0], transform.clip_translation[1] + transform.clip_scale[1]); new_clip.apply_transform(transform_before); new_transform.current().crop_regions.push_back(std::move(new_clip)); if (!is_default_perspective(transform.perspective)) { // Split into a new step new_transform.steps.emplace_back(transform.perspective, boost::numeric::ublas::identity_matrix(3, 3)); } // Push the new crop region with the new transform applied draw_crop_region new_crop( transform.crop.ul[0], transform.crop.ul[1], transform.crop.lr[0], transform.crop.lr[1]); new_crop.apply_transform(new_transform.current().vertex_matrix); new_transform.current().crop_regions.push_back(std::move(new_crop)); } return std::move(new_transform); } void apply_perspective_to_vertex(t_point& vertex, const core::corners& perspective) { const double x = vertex(0); const double y = vertex(1); // ul: x' = (1-y) * a + (1 - a * (1-y)) * x vertex(0) += (1 - y) * perspective.ul[0] + (1 - perspective.ul[0] + perspective.ul[0] * y) * x - x; vertex(1) += (1 - x) * perspective.ul[1] + (1 - perspective.ul[1] + perspective.ul[1] * x) * y - y; // ur/ll: x' = x * (a * (1-y) + y) vertex(0) += x * (perspective.ur[0] * (1 - y) + y) - x; vertex(1) += y * (perspective.ll[1] * (1 - x) + x) - y; // ur/ll: x' = y * a + x * (1 - a * y) vertex(0) += y * perspective.ll[0] + x * (1 - perspective.ll[0] * y) - x; vertex(1) += x * perspective.ur[1] + y * (1 - 
perspective.ur[1] * x) - y;

    // lr: x' = x * (y * a + (1-y))
    vertex(0) += x * (y * perspective.lr[0] + (1 - y)) - x;
    vertex(1) += y * (x * perspective.lr[1] + (1 - x)) - y;
}

// A frame_geometry coord lifted into homogeneous (x, y, 1) form together
// with its texture attributes, so vertex and texture data travel together
// through the crop/transform pipeline.
struct wrapped_vertex
{
    explicit wrapped_vertex(const core::frame_geometry::coord& coord)
    {
        vertex(0) = coord.vertex_x;
        vertex(1) = coord.vertex_y;
        vertex(2) = 1;
        texture_x = coord.texture_x;
        texture_y = coord.texture_y;
        texture_r = coord.texture_r;
        texture_q = coord.texture_q;
    }

    explicit wrapped_vertex() { vertex(2) = 1; };

    // Converts back to the frame_geometry representation.
    [[nodiscard]] core::frame_geometry::coord as_geometry() const
    {
        core::frame_geometry::coord res = {vertex(0), vertex(1), texture_x, texture_y};
        res.texture_r = texture_r;
        res.texture_q = texture_q;
        return res;
    }

    t_point vertex    = t_point(3);
    double  texture_x = 0.0;
    double  texture_y = 0.0;
    double  texture_r = 0.0;
    double  texture_q = 1.0; // homogeneous texture weight for perspective correction
};

// Tolerance so points exactly on a crop edge are not treated as outside.
static const double epsilon = 0.001;

bool inline point_is_to_left_of_line(const t_point& line_1, const t_point& line_2, const t_point& vertex)
{
    // use a cross product to check if the point is on the right side of the line
    return (line_2(0) - line_1(0)) * (vertex(1) - line_1(1)) - (line_2(1) - line_1(1)) * (vertex(0) - line_1(0)) <
           -epsilon;
}

// http://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect
// NOTE(review): only `s` is range-checked, not `t` — presumably intentional
// (the subject edge is treated as an infinite line); confirm against upstream.
bool get_intersection_with_crop_line(const t_point& crop0,
                                     const t_point& crop1,
                                     const t_point& p0,
                                     const t_point& p1,
                                     t_point&       result)
{
    double s1_x = crop1(0) - crop0(0);
    double s1_y = crop1(1) - crop0(1);
    double s2_x = p1(0) - p0(0);
    double s2_y = p1(1) - p0(1);

    double s = (-s1_y * (crop0(0) - p0(0)) + s1_x * (crop0(1) - p0(1))) / (-s2_x * s1_y + s1_x * s2_y);
    double t = (s2_x * (crop0(1) - p0(1)) - s2_y * (crop0(0) - p0(0))) / (-s2_x * s1_y + s1_x * s2_y);

    if (s >= 0 && s <= 1) {
        // Collision detected
        result(0) = crop0(0) + t * s1_x;
        result(1) = crop0(1) + t * s1_y;

        return true;
    }

    return false; // No collision
}

// Euclidean distance between (x1, y1) and (x2, y2).
double hypotenuse(double x1, double y1, double x2, double y2)
{
    auto x = x2 - x1;
    auto y =
y2 - y1;
    return std::sqrt(x * x + y * y);
}

// Perspective-correct q factor for a quad corner, from the distances of the
// close and distant corners to the diagonal intersection.
double calc_q(double close_diagonal, double distant_diagonal)
{
    return (close_diagonal + distant_diagonal) / distant_diagonal;
}

// Projects `vertex` onto the line a->b and linearly interpolates the
// endpoints' texture attributes at that parametric position.
void crop_texture_for_vertex(const wrapped_vertex& line_a, const wrapped_vertex& line_b, wrapped_vertex& vertex)
{
    auto delta_point = vertex.vertex - line_a.vertex;
    auto delta_line  = line_b.vertex - line_a.vertex;

    // Calculate the dot product
    auto dot_product      = delta_point(0) * delta_line(0) + delta_point(1) * delta_line(1);
    auto line_len_squared = delta_line(0) * delta_line(0) + delta_line(1) * delta_line(1);

    // Skip if line has no length
    if (line_len_squared == 0) {
        // Degenerate edge: fall back to endpoint a's texture coords.
        vertex.texture_x = line_a.texture_x;
        vertex.texture_y = line_a.texture_y;
        return;
    }

    auto dist_delta = dot_product / line_len_squared;

    vertex.texture_x = line_a.texture_x + dist_delta * (line_b.texture_x - line_a.texture_x);
    vertex.texture_y = line_a.texture_y + dist_delta * (line_b.texture_y - line_a.texture_y);
    vertex.texture_q = line_a.texture_q + dist_delta * (line_b.texture_q - line_a.texture_q);
}

// Computes perspective-correct q weights for a 4-vertex quad (no-op for any
// other vertex count).
// NOTE(review): the vector's template argument was lost in extraction —
// presumably std::vector<wrapped_vertex>; confirm upstream.
void fill_texture_q_for_quad(std::vector& coords)
{
    if (coords.size() != 4)
        return;

    // Based on formula from:
    // http://www.reedbeta.com/blog/2012/05/26/quadrilateral-interpolation-part-1/
    double s1_x = coords[2].vertex(0) - coords[0].vertex(0);
    double s1_y = coords[2].vertex(1) - coords[0].vertex(1);
    double s2_x = coords[3].vertex(0) - coords[1].vertex(0);
    double s2_y = coords[3].vertex(1) - coords[1].vertex(1);

    double s =
        (-s1_y * (coords[0].vertex(0) - coords[1].vertex(0)) + s1_x * (coords[0].vertex(1) - coords[1].vertex(1))) /
        (-s2_x * s1_y + s1_x * s2_y);
    double t =
        (s2_x * (coords[0].vertex(1) - coords[1].vertex(1)) - s2_y * (coords[0].vertex(0) - coords[1].vertex(0))) /
        (-s2_x * s1_y + s1_x * s2_y);

    if (s >= 0 && s <= 1 && t >= 0 && t <= 1) {
        // Collision detected
        double diagonal_intersection_x = coords[0].vertex(0) + t * s1_x;
        double diagonal_intersection_y = coords[0].vertex(1) + t * s1_y;

        auto d0 =
hypotenuse(coords[3].vertex(0), coords[3].vertex(1), diagonal_intersection_x, diagonal_intersection_y);
        auto d1 = hypotenuse(coords[2].vertex(0), coords[2].vertex(1), diagonal_intersection_x, diagonal_intersection_y);
        auto d2 = hypotenuse(coords[1].vertex(0), coords[1].vertex(1), diagonal_intersection_x, diagonal_intersection_y);
        auto d3 = hypotenuse(coords[0].vertex(0), coords[0].vertex(1), diagonal_intersection_x, diagonal_intersection_y);

        auto ulq = calc_q(d3, d1);
        auto urq = calc_q(d2, d0);
        auto lrq = calc_q(d1, d3);
        auto llq = calc_q(d0, d2);

        // NOTE(review): template argument lost in extraction — presumably
        // std::vector<double>.
        std::vector q_values = {ulq, urq, lrq, llq};

        // Scale each corner's texture coords by its q; the shader divides by
        // q again for perspective-correct interpolation.
        int corner = 0;
        for (auto& coord : coords) {
            coord.texture_q = q_values[corner];
            coord.texture_x *= q_values[corner];
            coord.texture_y *= q_values[corner];

            if (++corner == 4)
                corner = 0;
        }
    }
}

// Applies one transform step (matrix, then corner-pin) to a single vertex.
void transform_vertex(const draw_transform_step& step, t_point& vertex)
{
    // Apply basic transforms of this step
    vertex = vertex * step.vertex_matrix;

    // Apply perspective. These rely on x and y of the coord, so can't be done as a shared matrix
    apply_perspective_to_vertex(vertex, step.perspective);
}

// Runs the full pipeline: transform vertices through all steps (innermost
// first), apply perspective correction, then clip the polygon against every
// accumulated crop region (Sutherland–Hodgman style, one crop edge at a
// time), pruning degenerate output.
// NOTE(review): several template arguments below were lost in extraction
// (std::vector<core::frame_geometry::coord>, std::vector<wrapped_vertex>,
// std::vector<draw_crop_region>, std::unordered_set<size_t>) — confirm
// against upstream.
std::vector draw_transforms::transform_coords(const std::vector& coords) const
{
    // Convert to matrix representations
    std::vector cropped_coords;
    cropped_coords.reserve(coords.size());
    for (const auto& coord : coords) {
        cropped_coords.emplace_back(coord);
    }

    std::vector transformed_regions;

    // Apply the transforms
    for (int i = (int)steps.size() - 1; i >= 0; i--) {
        for (auto& coord : cropped_coords) {
            transform_vertex(steps[i], coord.vertex);
        }

        // Transform existing regions
        for (auto& region : transformed_regions) {
            for (int l = 0; l < 4; ++l) {
                transform_vertex(steps[i], region.coords[l]);
            }
        }

        // Push new regions
        for (auto& region : steps[i].crop_regions) {
            draw_crop_region new_region = region;
            for (int l = 0; l < 4; ++l) {
                // Only apply perspective for new ones
                apply_perspective_to_vertex(new_region.coords[l], steps[i].perspective);
            }
            transformed_regions.push_back(new_region);
        }
    }

    // Apply the perspective correction
    fill_texture_q_for_quad(cropped_coords);

    // Perform the crop
    for (auto& crop_region : transformed_regions) {
        for (int l = 0; l < 4; ++l) {
            // Apply the crop, one edge at a time
            int     to_index   = l == 3 ? 0 : l + 1;
            t_point from_point = crop_region.coords[l];
            t_point to_point   = crop_region.coords[to_index];

            std::unordered_set points_to_left_of_line;

            // Figure out which points are 'left' of the line (outside the crop region)
            for (size_t j = 0; j < cropped_coords.size(); ++j) {
                bool v = point_is_to_left_of_line(from_point, to_point, cropped_coords[j].vertex);
                if (v)
                    points_to_left_of_line.insert(j);
            }

            if (points_to_left_of_line.empty()) {
                // Line has no effect, skip
                continue;
            } else if (points_to_left_of_line.size() == cropped_coords.size()) {
                // All are to the left, shape has no geometry
                return {};
            }

            std::vector new_coords;
            new_coords.reserve(cropped_coords.size() * 2); // Avoid reallocs for complex shapes

            // Iterate through the coords
            for (size_t j = 0; j < cropped_coords.size(); ++j) {
                if (points_to_left_of_line.count(j) == 0) {
                    // Inside the crop region: keep as-is.
                    new_coords.push_back(cropped_coords[j]);
                    continue;
                }

                size_t prev_index = j == 0 ? cropped_coords.size() - 1 : j - 1;
                size_t next_index = j == cropped_coords.size() - 1 ? 0 : j + 1;

                bool prev_is_left_of_line = points_to_left_of_line.count(prev_index) == 1;
                bool next_is_left_of_line = points_to_left_of_line.count(next_index) == 1;

                if (prev_is_left_of_line && next_is_left_of_line) {
                    // Vertex and its edges are completely left of the line, skip
                    continue;
                }

                if (!prev_is_left_of_line) {
                    // This edge intersects the crop line, calculate the new coordinates
                    wrapped_vertex new_coord;
                    if (get_intersection_with_crop_line(to_point,
                                                        from_point,
                                                        cropped_coords[prev_index].vertex,
                                                        cropped_coords[j].vertex,
                                                        new_coord.vertex)) {
                        crop_texture_for_vertex(cropped_coords[prev_index], cropped_coords[j], new_coord);
                        new_coords.emplace_back(std::move(new_coord));
                    } else {
                        // Geometry error! skip coordinate
                    }
                }

                if (!next_is_left_of_line) {
                    // This edge intersects the crop line, calculate the new coordinates
                    wrapped_vertex new_coord;
                    if (get_intersection_with_crop_line(to_point,
                                                        from_point,
                                                        cropped_coords[j].vertex,
                                                        cropped_coords[next_index].vertex,
                                                        new_coord.vertex)) {
                        crop_texture_for_vertex(cropped_coords[j], cropped_coords[next_index], new_coord);
                        new_coords.emplace_back(std::move(new_coord));
                    } else {
                        // Geometry error! skip coordinate
                    }
                }
            }

            // Polygon is cropped, update state
            cropped_coords = new_coords;
        }

        {
            static const double pixel_epsilon = 0.0001; // less than a pixel at 8k

            // Prune duplicate coords
            std::vector new_coords;
            new_coords.reserve(cropped_coords.size()); // Avoid reallocs

            for (size_t j = 0; j < cropped_coords.size(); ++j) {
                size_t prev_index = j == 0 ? cropped_coords.size() - 1 : j - 1;
                auto   delta      = cropped_coords[j].vertex - cropped_coords[prev_index].vertex;
                if (std::abs(delta(0)) > pixel_epsilon || std::abs(delta(1)) > pixel_epsilon) {
                    new_coords.emplace_back(cropped_coords[j]);
                }
            }

            if (new_coords.size() < 3) {
                // Not enough coords to draw anything
                return {};
            }

            cropped_coords = new_coords;
        }
    }

    // Convert back to frame_geometry types
    std::vector result;
    result.reserve(cropped_coords.size());
    for (auto& coord : cropped_coords) {
        result.push_back(coord.as_geometry());
    }

    return result;
}

} // namespace caspar::accelerator::vulkan

================================================
FILE: src/accelerator/vulkan/util/transforms.h
================================================
#pragma once

// NOTE(review): six #include targets lost in extraction — restore from upstream.
#include
#include
#include
#include
#include
#include
#include "matrix.h"

namespace caspar::accelerator::vulkan {

// A crop quad: four homogeneous corner points (UL, UR, LR, LL).
struct draw_crop_region
{
    explicit draw_crop_region(double left, double top, double right, double bottom);

    void apply_transform(const t_matrix& matrix);

    // NOTE(review): template arguments lost in extraction — presumably
    // std::array<t_point, 4>.
    std::array coords;
};

// One step of the transform stack: a vertex matrix, a corner-pin perspective
// and any crop regions introduced at this step.
struct draw_transform_step
{
    draw_transform_step()
        : vertex_matrix(boost::numeric::ublas::identity_matrix(3, 3))
    {
    }
    draw_transform_step(const core::corners&
perspective, const t_matrix& vertex_matrix) : perspective(perspective) , vertex_matrix(vertex_matrix) { } core::corners perspective; std::vector crop_regions; t_matrix vertex_matrix; }; struct draw_transforms { std::vector steps; draw_transforms() : image_transform(core::image_transform()) , steps({draw_transform_step()}) { } explicit draw_transforms(core::image_transform transform, std::vector steps) : image_transform(transform) , steps(std::move(steps)) { } core::image_transform image_transform; draw_transform_step& current() { return steps.back(); } [[nodiscard]] draw_transforms combine_transform(const core::image_transform& transform, double aspect_ratio) const; [[nodiscard]] std::vector transform_coords(const std::vector& coords) const; }; } // namespace caspar::accelerator::vulkan ================================================ FILE: src/accelerator/vulkan/util/uniform_block.h ================================================ /* * Copyright 2025 * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
 *
 * Author: Niklas Andersson, niklas@niklaspandersson.se
 */
#pragma once

namespace caspar { namespace accelerator { namespace vulkan {

// CPU-side mirror of the fragment shader's uniform block; field order must
// match the shader's layout. (uint32_t/int32_t presumably come in via the
// precompiled header — confirm.)
struct uniform_block
{
    uint32_t color_space_index   = 0;
    float    precision_factor[4] = {1.0f, 1.0f, 1.0f, 1.0f};

    int32_t blend_mode   = 0;
    int32_t keyer        = 0;
    int32_t pixel_format = 0;
    float   opacity      = 1.0;

    /* levels */
    float min_input  = 0;
    float max_input  = 0;
    float gamma      = 0;
    float min_output = 0;
    float max_output = 0;

    /* contrast, saturation & brightness */
    float brt = 0;
    float sat = 0;
    float con = 0;

    /* Chroma */
    float chroma_target_hue                = 0;
    float chroma_hue_width                 = 0;
    float chroma_min_saturation            = 0;
    float chroma_min_brightness            = 0;
    float chroma_softness                  = 0;
    float chroma_spill_suppress            = 0;
    float chroma_spill_suppress_saturation = 0;

    uint32_t flags = 0;
};

}}} // namespace caspar::accelerator::vulkan

================================================
FILE: src/common/CMakeLists.txt
================================================
cmake_minimum_required (VERSION 3.28)
project (common)

# Platform-independent sources.
set(SOURCES
    diagnostics/graph.cpp
    gl/gl_check.cpp

    base64.cpp
    env.cpp
    filesystem.cpp
    log.cpp
    tweener.cpp
    utf.cpp
)
# OS-specific implementations of filesystem/prec_timer/thread.
if (MSVC)
    list(APPEND SOURCES
        compiler/vs/disable_silly_warnings.h

        os/windows/filesystem.cpp
        os/windows/prec_timer.cpp
        os/windows/thread.cpp
        os/windows/windows.h
    )
else ()
    list(APPEND SOURCES
        os/linux/filesystem.cpp
        os/linux/prec_timer.cpp
        os/linux/thread.cpp
    )
endif ()
set(HEADERS
    diagnostics/graph.h
    gl/gl_check.h
    os/filesystem.h
    os/thread.h

    array.h
    assert.h
    base64.h
    endian.h
    enum_class.h
    env.h
    executor.h
    except.h
    filesystem.h
    future.h
    log.h
    memory.h
    memshfl.h
    param.h
    prec_timer.h
    ptree.h
    scope_exit.h
    stdafx.h
    timer.h
    tweener.h
    utf.h
)

casparcg_add_library(common SOURCES ${SOURCES} ${HEADERS})
target_include_directories(common PRIVATE ..)
target_precompile_headers(common PRIVATE stdafx.h)
target_link_libraries(common PRIVATE GLEW::glew)

source_group(sources ./*)
source_group(sources\\gl gl/*)
source_group(sources\\diagnostics diagnostics/*)
source_group(sources\\compiler\\vs compiler/vs/*)
source_group(sources\\os\\windows os/windows/*)
source_group(sources\\os os/*)

================================================
FILE: src/common/array.h
================================================
#pragma once

// NOTE(review): the original #include targets were stripped during
// extraction; restored to the headers this file uses — confirm upstream.
#include <any>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>

namespace caspar {

// Move-only wrapper over a contiguous buffer. The (ptr_, size_) view is kept
// separate from an opaque keep-alive (`storage_`, a std::any held through a
// shared_ptr) so an array can reference memory owned by a malloc block, a
// vector, or any other storage object.
template <typename T>
class array final
{
    template <typename>
    friend class array;

  public:
    using iterator       = T*;
    using const_iterator = const T*;

    array() = default;

    // Allocates `size` bytes of zero-initialized malloc storage.
    explicit array(std::size_t size)
        : size_(size)
    {
        if (size_ > 0) {
            auto storage = std::shared_ptr<void>(std::malloc(size), std::free);
            ptr_         = reinterpret_cast<T*>(storage.get());
            std::memset(ptr_, 0, size_);
            storage_ = std::make_shared<std::any>(std::move(storage));
        }
    }

    // Takes ownership of a vector's buffer without copying the elements.
    array(std::vector<T> other)
    {
        auto storage = std::make_shared<std::vector<T>>(std::move(other));
        ptr_         = storage->data();
        size_        = storage->size();
        storage_     = std::make_shared<std::any>(std::move(storage));
    }

    // Wraps (ptr, size) and keeps `storage` alive for the array's lifetime.
    template <typename S>
    explicit array(T* ptr, std::size_t size, S&& storage)
        : ptr_(ptr)
        , size_(size)
        , storage_(std::make_shared<std::any>(std::forward<S>(storage)))
    {
    }

    array(const array&) = delete;

    array(array&& other) noexcept
        : ptr_(other.ptr_)
        , size_(other.size_)
        , storage_(std::move(other.storage_))
    {
        other.ptr_  = nullptr;
        other.size_ = 0;
    }

    array& operator=(const array&) = delete;

    array& operator=(array&& other) noexcept
    {
        // Fix: the previous implementation moved the keep-alive away but left
        // other.ptr_/other.size_ intact, so the moved-from array still
        // exposed a dangling begin()/end() range. Reset the source the same
        // way the move constructor does.
        ptr_     = std::exchange(other.ptr_, nullptr);
        size_    = std::exchange(other.size_, 0);
        storage_ = std::move(other.storage_);
        return *this;
    }

    T* begin() const { return ptr_; }
    T* data() const { return ptr_; }
    T* end() const { return ptr_ + size_; }

    std::size_t size() const { return size_; }

    explicit operator bool() const { return size_ > 0; };

    // Downcasts the opaque keep-alive back to its concrete storage type, or
    // nullptr if it holds a different type.
    template <typename S>
    S* storage() const
    {
        return std::any_cast<S>(storage_.get());
    }

  private:
    T*                        ptr_  = nullptr;
    std::size_t               size_ = 0;
    std::shared_ptr<std::any> storage_;
};

template <typename T>
class array
final { public: using iterator = const T*; using const_iterator = const T*; array() = default; array(std::size_t size) : size_(size) { if (size_ > 0) { auto storage = std::shared_ptr(std::malloc(size), std::free); ptr_ = reinterpret_cast(storage.get()); std::memset(ptr_, 0, size_); storage_ = std::make_shared(storage); } } array(const std::vector& other) { auto storage = std::make_shared>(std::move(other)); ptr_ = storage->data(); size_ = storage->size(); storage_ = std::make_shared(std::move(storage)); } template explicit array(const T* ptr, std::size_t size, S&& storage) : ptr_(ptr) , size_(size) , storage_(std::make_shared(std::forward(storage))) { } array(const array& other) : ptr_(other.ptr_) , size_(other.size_) , storage_(other.storage_) { } array(array&& other) : ptr_(other.ptr_) , size_(other.size_) , storage_(other.storage_) { other.ptr_ = nullptr; other.size_ = 0; other.storage_ = nullptr; } array& operator=(const array& other) { ptr_ = other.ptr_; size_ = other.size_; storage_ = other.storage_; return *this; } const T* begin() const { return ptr_; } const T* data() const { return ptr_; } const T* end() const { return ptr_ + size_; } std::size_t size() const { return size_; } explicit operator bool() const { return size_ > 0; } template S* storage() const { return std::any_cast(storage_.get()); } private: const T* ptr_ = nullptr; std::size_t size_ = 0; std::shared_ptr storage_; }; } // namespace caspar namespace std { template void swap(caspar::array& lhs, caspar::array& rhs) { lhs.swap(rhs); } } // namespace std ================================================ FILE: src/common/assert.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

#include "except.h"
#include "log.h"

// Break into the debugger under MSVC; no-op elsewhere.
#ifdef _MSC_VER
#define _CASPAR_DBG_BREAK _CrtDbgBreak()
#else
#define _CASPAR_DBG_BREAK
#endif

// Stringizes the asserted expression for log/exception messages.
#define CASPAR_VERIFY_EXPR_STR(str) #str

// Soft assertion: logs a warning (and breaks into the debugger under MSVC)
// but lets execution continue.
#define CASPAR_VERIFY(expr)                                                                                            \
    do {                                                                                                               \
        if (!(expr)) {                                                                                                 \
            CASPAR_LOG(warning) << "Assertion Failed: " << CASPAR_VERIFY_EXPR_STR(expr) << " "                         \
                                << "file:" << __FILE__                                                                 \
                                << " "                                                                                 \
                                << "line:" << __LINE__ << " ";                                                         \
            _CASPAR_DBG_BREAK;                                                                                         \
        }                                                                                                              \
    } while (0);

// Hard assertion: throws programming_error when the expression is false.
#define CASPAR_ENSURE(expr)                                                                                            \
    do {                                                                                                               \
        if (!(expr)) {                                                                                                 \
            CASPAR_THROW_EXCEPTION(programming_error()                                                                 \
                                   << msg_info(std::string("Assertion Failed: ") + CASPAR_VERIFY_EXPR_STR(expr)));     \
        }                                                                                                              \
    } while (0);

// CASPAR_ASSERT is a CASPAR_VERIFY that is compiled out of release builds.
#ifdef _DEBUG
#define CASPAR_ASSERT(expr) CASPAR_VERIFY(expr)
#else
#define CASPAR_ASSERT(expr)
#endif

================================================
FILE: src/common/base64.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Helge Norberg, helge.norberg@svt.se */ #include "base64.h" #include #include #include #include #include #include #include #include #include #include #include "except.h" namespace caspar { std::string to_base64(const char* data, size_t length) { using namespace boost::archive::iterators; // From http://www.webbiscuit.co.uk/2012/04/02/base64-encoder-and-boost/ using base64_iterator = insert_linebreaks< // insert line breaks every 76 characters base64_from_binary< // convert binary values to base64 characters transform_width>, // retrieve 6 bit integers from a sequence of 8 bit bytes 76>; // compose all the above operations in to a new iterator std::vector bytes; bytes.resize(length); std::memcpy(bytes.data(), data, length); int padding = 0; while (bytes.size() % 3 != 0) { ++padding; bytes.push_back(0x00); } std::string result(base64_iterator(bytes.data()), base64_iterator(bytes.data() + length)); result.insert(result.end(), padding, '='); return std::move(result); } std::vector from_base64(const std::string& data) { using namespace boost::archive::iterators; // The boost base64 iterator will over-iterate the string if not a multiple // of 4, so we have to short circuit before. 
auto length = std::count_if( data.begin(), data.end(), [](char c) { return std::isspace(static_cast(c)) == 0; }); if (length % 4 != 0) CASPAR_THROW_EXCEPTION(user_error() << msg_info("The length of a base64 sequence must be a multiple of 4")); int padding = 0; std::string zero_padding; // binary_from_base64 does not support padding characters so we have to append base64 0 -> 'A' and then remove it // after decoding if (data.length() >= 2) { if (data[data.length() - 1] == '=') { ++padding; zero_padding += 'A'; } if (data[data.length() - 2] == '=') { ++padding; zero_padding += 'A'; } } if (padding > 0) { auto concatenated = boost::join(data | boost::adaptors::sliced(0, data.length() - padding), boost::make_iterator_range(zero_padding.cbegin(), zero_padding.cend())); // From https://svn.boost.org/trac/boost/ticket/5624 using base64_iterator = transform_width>, 8, 6>; std::vector result(base64_iterator(concatenated.begin()), base64_iterator(concatenated.end())); result.resize(result.size() - padding); return std::move(result); } // From https://svn.boost.org/trac/boost/ticket/5624 using base64_iterator = transform_width>, 8, 6>; std::vector result(base64_iterator(data.begin()), base64_iterator(data.end())); return std::move(result); } } // namespace caspar ================================================ FILE: src/common/base64.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Helge Norberg, helge.norberg@svt.se
 */

#pragma once

// NOTE(review): two #include targets lost in extraction — presumably
// <string> and <vector>; restore from upstream.
#include
#include

namespace caspar {

// Encodes `length` bytes as base64 (with line breaks and '=' padding).
std::string to_base64(const char* data, size_t length);

// Decodes a base64 string; throws on malformed input.
// NOTE(review): the vector's template argument was lost in extraction —
// presumably unsigned char / uint8_t; confirm against base64.cpp.
std::vector from_base64(const std::string& data);

} // namespace caspar

================================================
FILE: src/common/bit_depth.h
================================================
#pragma once

// NOTE(review): two #include targets lost in extraction — presumably
// <cstdint> for uint8_t.
#include
#include

namespace caspar { namespace common {

// Per-channel bit depth of image data. 8-bit uses 1 byte per channel; the
// deeper depths are stored in 2 bytes (see texture size computations).
enum class bit_depth : uint8_t
{
    bit8 = 0,
    bit10,
    bit12,
    // bit14,
    bit16,
};

}} // namespace caspar::common

================================================
FILE: src/common/compiler/vs/disable_silly_warnings.h
================================================
/*
 * copyright (c) 2010 Sveriges Television AB
 *
 * This file is part of CasparCG.
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 */

#pragma once

// MSVC-only: silences warnings that are noisy but harmless for this codebase.
// Pulled in via the sources list in common/CMakeLists.txt under MSVC.
#pragma warning(disable : 4100) // unreferenced formal parameter
#pragma warning(disable : 4127) // conditional expression is constant
#pragma warning(disable : 4180) // qualifier applied to function type has no meaning; ignored
#pragma warning(disable : 4244) // conversion from 'type1' to 'type2', possible loss of data
#pragma warning(disable : 4245) // conversion from 'type1' to 'type2', signed/unsigned mismatch
#pragma warning(disable : 4324) // padding a structure for alignment
#pragma warning(disable : 4355) // 'this' : used in base member initializer list
#pragma warning(disable : 4503) // decorated name length exceeded, name was truncated
#pragma warning(disable : 4505) // unreferenced local function has been
#pragma warning(disable : 4512) // assignment operator could not be generated
#pragma warning(disable : 4702) // unreachable code
#pragma warning(disable : 4714) // marked as __forceinline not inlined
#pragma warning(disable : 4996) // function call with parameters that may be unsafe

================================================
FILE: src/common/diagnostics/graph.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
* * Author: Robert Nagy, ronag89@gmail.com */ #include "graph.h" #include #include namespace caspar { namespace diagnostics { int color(float r, float g, float b, float a) { int code = 0; code |= static_cast(r * 255.0f + 0.5f) << 24; code |= static_cast(g * 255.0f + 0.5f) << 16; code |= static_cast(b * 255.0f + 0.5f) << 8; code |= static_cast(a * 255.0f + 0.5f) << 0; return code; } std::tuple color(int code) { float r = static_cast(code >> 24 & 255) / 255.0f; float g = static_cast(code >> 16 & 255) / 255.0f; float b = static_cast(code >> 8 & 255) / 255.0f; float a = static_cast(code >> 0 & 255) / 255.0f; return std::make_tuple(r, g, b, a); } using sink_factories_t = std::vector; static std::mutex g_sink_factories_mutex; static sink_factories_t g_sink_factories; std::vector> create_sinks() { std::lock_guard lock(g_sink_factories_mutex); std::vector> result; for (auto sink : g_sink_factories) { result.push_back(sink()); } return result; } struct graph::impl { std::vector> sinks_ = create_sinks(); public: impl() {} void activate() { for (auto& sink : sinks_) sink->activate(); } void set_text(const std::wstring& value) { for (auto& sink : sinks_) sink->set_text(value); } void set_value(const std::string& name, double value) { for (auto& sink : sinks_) sink->set_value(name, value); } void set_tag(tag_severity severity, const std::string& name) { for (auto& sink : sinks_) sink->set_tag(severity, name); } void set_color(const std::string& name, int color) { for (auto& sink : sinks_) sink->set_color(name, color); } void auto_reset() { for (auto& sink : sinks_) sink->auto_reset(); } private: impl(impl&); impl& operator=(impl&); }; graph::graph() : impl_(new impl) { } void graph::set_text(const std::wstring& value) { impl_->set_text(value); } void graph::set_value(const std::string& name, double value) { impl_->set_value(name, value); } void graph::set_color(const std::string& name, int color) { impl_->set_color(name, color); } void graph::set_tag(tag_severity severity, 
const std::string& name) { impl_->set_tag(severity, name); } void graph::auto_reset() { impl_->auto_reset(); } void register_graph(const spl::shared_ptr& graph) { graph->impl_->activate(); } namespace spi { void register_sink_factory(sink_factory_t factory) { std::lock_guard lock(g_sink_factories_mutex); g_sink_factories.push_back(std::move(factory)); } } // namespace spi }} // namespace caspar::diagnostics ================================================ FILE: src/common/diagnostics/graph.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "../memory.h" #include #include #include namespace caspar { namespace diagnostics { int color(float r, float g, float b, float a = 1.0f); std::tuple color(int code); enum class tag_severity { WARNING, INFO, SILENT, }; class graph { friend void register_graph(const spl::shared_ptr& graph); public: graph(); void set_text(const std::wstring& value); void set_value(const std::string& name, double value); void set_color(const std::string& name, int color); void set_tag(tag_severity severity, const std::string& name); void auto_reset(); private: struct impl; std::shared_ptr impl_; graph(const graph&) = delete; graph& operator=(const graph&) = delete; }; void register_graph(const spl::shared_ptr& graph); namespace spi { class graph_sink { graph_sink(const graph_sink&) = delete; graph_sink& operator=(const graph_sink&) = delete; public: graph_sink() = default; virtual ~graph_sink() {}; virtual void activate() = 0; virtual void set_text(const std::wstring& value) = 0; virtual void set_value(const std::string& name, double value) = 0; virtual void set_color(const std::string& name, int color) = 0; virtual void set_tag(tag_severity severity, const std::string& name) = 0; virtual void auto_reset() = 0; }; using sink_factory_t = std::function()>; void register_sink_factory(sink_factory_t factory); } // namespace spi }} // namespace caspar::diagnostics ================================================ FILE: src/common/endian.h ================================================ /* * Copyright 2013 Sveriges Television AB http://casparcg.com/ * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Helge Norberg, helge.norberg@svt.se * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include namespace caspar { template typename std::enable_if::type swap_byte_order(const T& value) { return value; } template typename std::enable_if::type swap_byte_order(const T& value) { #ifdef _MSC_VER auto swapped = _byteswap_ushort(reinterpret_cast(value)); #elif __GNUC__ auto swapped = __builtin_bswap16(value); #endif return reinterpret_cast(swapped); } template typename std::enable_if::type swap_byte_order(const T& value) { #ifdef _MSC_VER auto swapped = _byteswap_ulong(reinterpret_cast(value)); #elif __GNUC__ auto swapped = __builtin_bswap32(value); #endif return reinterpret_cast(swapped); } template typename std::enable_if::type swap_byte_order(const T& value) { #ifdef _MSC_VER auto swapped = _byteswap_uint64(reinterpret_cast(value)); #elif __GNUC__ auto swapped = __builtin_bswap64(value); #endif return reinterpret_cast(swapped); } } // namespace caspar ================================================ FILE: src/common/enum_class.h ================================================ #pragma once #include #include #define ENUM_ENABLE_BITWISE(enum_class) \ static enum_class operator&(enum_class lhs, enum_class rhs) \ { \ return static_cast(static_cast::type>(lhs) & \ static_cast::type>(rhs)); \ }; \ static enum_class& operator&=(enum_class& lhs, enum_class rhs) \ { \ lhs = lhs & rhs; \ return lhs; \ }; \ static enum_class operator|(enum_class lhs, enum_class rhs) \ { \ return static_cast(static_cast::type>(lhs) | \ static_cast::type>(rhs)); \ }; \ static enum_class& operator|=(enum_class& lhs, enum_class rhs) 
\ { \ lhs = lhs | rhs; \ return lhs; \ }; \ static enum_class operator^(enum_class lhs, enum_class rhs) \ { \ return static_cast(static_cast::type>(lhs) ^ \ static_cast::type>(rhs)); \ }; \ static enum_class& operator^=(enum_class& lhs, enum_class rhs) \ { \ lhs = lhs ^ rhs; \ return lhs; \ }; \ static enum_class operator~(enum_class e) \ { \ return static_cast(~static_cast::type>(e)); \ }; namespace caspar { template const std::vector& enum_constants() { using integer = typename std::underlying_type::type; static auto res = [] { std::vector result; for (auto n = 0; n < static_cast(E::count); ++n) { result.push_back(static_cast(n)); } return result; }(); return res; } } // namespace caspar ================================================ FILE: src/common/env.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "env.h" #include "version.h" #include "except.h" #include "log.h" #include "os/filesystem.h" #include "ptree.h" #include #include #include #include #include #include namespace caspar { namespace env { std::wstring initial; std::wstring media; std::wstring log; bool log_enabled = true; std::wstring ftemplate; std::wstring data; boost::property_tree::wptree pt; void check_is_configured() { if (pt.empty()) CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(L"Environment properties has not been configured")); } std::wstring resolve_or_create(const std::wstring& folder) { auto found_path = find_case_insensitive(folder); if (found_path) return *found_path; boost::system::error_code ec; boost::filesystem::create_directories(folder, ec); if (ec) CASPAR_THROW_EXCEPTION(user_error() << msg_info("Failed to create directory " + u8(folder) + " (" + ec.message() + ")")); return folder; } void ensure_writable(const std::wstring& folder) { static const std::wstring CREATE_FILE_TEST = L"casparcg_test_writable.empty"; boost::system::error_code ec; boost::filesystem::path test_file(folder + L"/" + CREATE_FILE_TEST); boost::filesystem::ofstream out(folder + L"/" + CREATE_FILE_TEST); if (out.fail()) { boost::filesystem::remove(test_file, ec); CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Directory " + folder + L" is not writable.")); } out.close(); boost::filesystem::remove(test_file, ec); } void configure(const std::wstring& filename) { initial = clean_path(boost::filesystem::initial_path().wstring()); std::wstring fullpath = filename; if (!boost::filesystem::exists(fullpath)) { fullpath = initial + L"/" + filename; } if (!boost::filesystem::exists(fullpath)) { CASPAR_LOG(fatal) << L"### Configuration file " + filename + L" was not found. 
###"; CASPAR_THROW_EXCEPTION(expected_user_error() << msg_info(L"Configuration file " + fullpath + L" was not found.")); } try { boost::filesystem::wifstream file(fullpath); boost::property_tree::read_xml(file, pt, boost::property_tree::xml_parser::trim_whitespace | boost::property_tree::xml_parser::no_comments); auto paths = ptree_get_child(pt, L"configuration.paths"); media = clean_path(paths.get(L"media-path", initial + L"/media/")); auto log_path_node = paths.get_child(L"log-path"); log_enabled = !log_path_node.get(L".disabled", false); if (log_enabled) { log = clean_path(log_path_node.get_value(initial + L"/log/")); } ftemplate = clean_path(boost::filesystem::absolute(paths.get(L"template-path", initial + L"/template/")).wstring()); data = clean_path(paths.get(L"data-path", initial + L"/data/")); } catch (...) { CASPAR_LOG(error) << L" ### Invalid configuration file. ###"; throw; } media = ensure_trailing_slash(resolve_or_create(media)); ftemplate = ensure_trailing_slash(resolve_or_create(ftemplate)); data = ensure_trailing_slash(resolve_or_create(data)); if (log_enabled) { log = ensure_trailing_slash(resolve_or_create(log)); ensure_writable(log); } ensure_writable(ftemplate); ensure_writable(data); } const std::wstring& initial_folder() { check_is_configured(); return initial; } const std::wstring& media_folder() { check_is_configured(); return media; } const std::wstring& log_folder() { check_is_configured(); return log; } bool log_to_file() { check_is_configured(); return log_enabled; } const std::wstring& template_folder() { check_is_configured(); return ftemplate; } const std::wstring& data_folder() { check_is_configured(); return data; } #define QUOTE(str) #str #define EXPAND_AND_QUOTE(str) QUOTE(str) const std::wstring& version() { static std::wstring ver = u16(EXPAND_AND_QUOTE(CASPAR_GEN) "." EXPAND_AND_QUOTE(CASPAR_MAJOR) "." 
EXPAND_AND_QUOTE( CASPAR_MINOR) " " CASPAR_HASH " " CASPAR_TAG); return ver; } const boost::property_tree::wptree& properties() { check_is_configured(); return pt; } void log_configuration_warnings() { if (pt.empty()) return; } }} // namespace caspar::env ================================================ FILE: src/common/env.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include namespace caspar { namespace env { void configure(const std::wstring& filename); const std::wstring& initial_folder(); const std::wstring& media_folder(); const std::wstring& log_folder(); const std::wstring& template_folder(); const std::wstring& data_folder(); const std::wstring& version(); bool log_to_file(); const boost::property_tree::wptree& properties(); void log_configuration_warnings(); }} // namespace caspar::env ================================================ FILE: src/common/except.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "utf.h" #include "log.h" #include #include #include #ifndef NOMINMAX #define NOMINMAX #endif #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #include namespace caspar { using arg_name_info_t = boost::error_info; using arg_value_info_t = boost::error_info; using msg_info_t = boost::error_info; using error_info_t = boost::error_info; using source_info_t = boost::error_info; using file_name_info_t = boost::error_info; using stacktrace_info_t = boost::error_info; template arg_name_info_t arg_name_info(const T& str) { return arg_name_info_t(u8(str)); } template arg_value_info_t arg_value_info(const T& str) { return arg_value_info_t(u8(str)); } template msg_info_t msg_info(const T& str) { return msg_info_t(u8(str)); } template error_info_t error_info(const T& str) { return error_info_t(u8(str)); } template source_info_t source_info(const T& str) { return source_info_t(u8(str)); } template file_name_info_t file_name_info(const T& str) { return file_name_info_t(u8(str)); } inline stacktrace_info_t stacktrace_info() { return stacktrace_info_t(boost::stacktrace::stacktrace()); } using line_info = boost::error_info; using nested_exception = boost::error_info; struct caspar_exception : virtual boost::exception , virtual std::exception { caspar_exception() {} const char* what() const throw() override { return 
boost::diagnostic_information_what(*this); } }; struct io_error : virtual caspar_exception { }; struct directory_not_found : virtual io_error { }; struct file_not_found : virtual io_error { }; struct file_read_error : virtual io_error { }; struct file_write_error : virtual io_error { }; struct invalid_argument : virtual caspar_exception { }; struct null_argument : virtual invalid_argument { }; struct out_of_range : virtual invalid_argument { }; struct programming_error : virtual caspar_exception { }; struct bad_alloc : virtual caspar_exception { }; struct invalid_operation : virtual caspar_exception { }; struct operation_failed : virtual caspar_exception { }; struct timed_out : virtual caspar_exception { }; struct not_implemented : virtual caspar_exception { }; struct user_error : virtual caspar_exception { }; struct expected_user_error : virtual user_error { }; struct not_supported : virtual user_error { }; #define CASPAR_THROW_EXCEPTION(x) \ ::boost::throw_exception(::boost::enable_error_info(x) \ << ::boost::throw_function(BOOST_CURRENT_FUNCTION) << ::boost::throw_file(__FILE__) \ << ::boost::throw_line((int)__LINE__) << stacktrace_info()) } // namespace caspar ================================================ FILE: src/common/executor.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. 
If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "except.h" #include "log.h" #include "os/thread.h" #include #include #include #include namespace caspar { class executor final { executor(const executor&); executor& operator=(const executor&); using task_t = std::function; using queue_t = tbb::concurrent_bounded_queue; std::wstring name_; std::atomic is_running_{true}; queue_t queue_; std::thread thread_; public: executor(const std::wstring& name) : name_(name) , thread_(std::thread([this] { run(); })) { } ~executor() { stop_and_wait(); } template auto begin_invoke(Func&& func) { if (!is_running_) { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("executor not running.")); } using result_type = decltype(func()); auto task = std::make_shared>(std::forward(func)); queue_.push([=]() mutable { (*task)(); }); return task->get_future(); } template auto invoke(Func&& func) { if (is_current()) { // Avoids potential deadlock. return func(); } return begin_invoke(std::forward(func)).get(); } template typename std::enable_if())>::value, void>::type invoke(Func&& func) { if (is_current()) { // Avoids potential deadlock. 
func(); return; } begin_invoke(std::forward(func)).wait(); } void set_capacity(queue_t::size_type capacity) { queue_.set_capacity(capacity); } queue_t::size_type capacity() const { return queue_.capacity(); } void clear() { queue_.clear(); } void stop() { if (!is_running_) { return; } is_running_ = false; queue_.push(nullptr); } void stop_and_wait() { stop(); if (thread_.joinable()) thread_.join(); } void wait() { invoke([] {}); } queue_t::size_type size() const { return queue_.size(); } bool is_running() const { return is_running_; } bool is_current() const { return std::this_thread::get_id() == thread_.get_id(); } const std::wstring& name() const { return name_; } private: void run() { set_thread_name(name_); task_t task; while (true) { try { queue_.pop(task); do { if (!task) { return; } task(); } while (queue_.try_pop(task)); } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); } } } }; } // namespace caspar ================================================ FILE: src/common/filesystem.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Helge Norberg, helge.norberg@svt.se */ #include "except.h" #include "./os/filesystem.h" #include "filesystem.h" #include #include #include #include namespace caspar { std::optional probe_path(const boost::filesystem::path& full_path, const std::function& is_valid_file) { auto parent = find_case_insensitive(full_path.parent_path().wstring()); if (!parent) return {}; auto dir = boost::filesystem::path(*parent); auto loc = std::locale(""); // Use system locale auto leaf_name = full_path.filename().stem().wstring(); auto has_extension = !full_path.filename().extension().wstring().empty(); auto leaf_filename = full_path.filename().wstring(); for (auto it = boost::filesystem::directory_iterator(dir); it != boost::filesystem::directory_iterator(); ++it) { if (has_extension) { auto it_path = it->path().filename().wstring(); if (boost::iequals(it_path, leaf_filename, loc) && is_valid_file(it->path().wstring())) return it->path(); } if (boost::iequals(it->path().stem().wstring(), leaf_filename, loc) && is_valid_file(it->path().wstring())) return it->path(); } return {}; } std::optional find_file_within_dir_or_absolute(const std::wstring& parent_dir, const std::wstring& filename, const std::function& is_valid_file) { // Try it assuming an absolute path was given auto file_path = boost::filesystem::path(filename); auto file_path_match = probe_path(file_path, is_valid_file); if (file_path_match) { return file_path_match; } // Try and find within the default parent directory auto full_path = boost::filesystem::path(parent_dir) / boost::filesystem::path(filename); return probe_path(full_path, is_valid_file); } boost::filesystem::path get_relative(const boost::filesystem::path& file, const boost::filesystem::path& relative_to) { auto result = file.filename(); auto current_path = file; if (boost::filesystem::equivalent(current_path, relative_to)) return L""; while (true) { current_path = current_path.parent_path(); if (boost::filesystem::equivalent(current_path, 
relative_to)) break; if (current_path.empty()) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("File " + file.string() + " not relative to folder " + relative_to.string())); result = current_path.filename() / result; } return result; } boost::filesystem::path get_relative_without_extension(const boost::filesystem::path& file, const boost::filesystem::path& relative_to) { return get_relative(file.parent_path() / file.stem(), relative_to); } } // namespace caspar ================================================ FILE: src/common/filesystem.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Helge Norberg, helge.norberg@svt.se */ #pragma once #include #include namespace caspar { std::optional find_file_within_dir_or_absolute(const std::wstring& parent_dir, const std::wstring& filename, const std::function& is_valid_file); boost::filesystem::path get_relative(const boost::filesystem::path& file, const boost::filesystem::path& relative_to); boost::filesystem::path get_relative_without_extension(const boost::filesystem::path& file, const boost::filesystem::path& relative_to); } // namespace caspar ================================================ FILE: src/common/future.h ================================================ #pragma once #include #include #include namespace caspar { template auto flatten(F&& f) { return std::async(std::launch::deferred, [f = std::forward(f)]() mutable { return f.get().get(); }); } template bool is_ready(const F& future) { return future.wait_for(std::chrono::seconds(0)) == std::future_status::ready; } template auto make_ready_future(T&& value) { std::promise::type> p; p.set_value(std::forward(value)); return p.get_future(); } static std::future make_ready_future() { std::promise p; p.set_value(); return p.get_future(); } } // namespace caspar ================================================ FILE: src/common/gl/gl_check.cpp ================================================ /////////////////////////// // // SFML - Simple and Fast Multimedia Library // Copyright (C) 2007-2009 Laurent Gomila (laurent.gom@gmail.com) // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it freely, // subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; // you must not claim that you wrote the original software. 
// If you use this software in a product, an acknowledgment // in the product documentation would be appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, // and must not be misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. // /////////////////////////// #include "gl_check.h" #include "../except.h" #include "../log.h" #include namespace caspar { namespace gl { void SMFL_GLCheckError(const std::string& /*unused*/, const char* func, const char* file, unsigned int line) { // Get the last error GLenum LastErrorCode = GL_NO_ERROR; for (GLenum ErrorCode = glGetError(); ErrorCode != GL_NO_ERROR; ErrorCode = glGetError()) { std::string str(reinterpret_cast(glewGetErrorString(ErrorCode))); CASPAR_LOG(error) << "OpenGL Error: " << ErrorCode << L" " << str; LastErrorCode = ErrorCode; } if (LastErrorCode != GL_NO_ERROR) { // Decode the error code switch (LastErrorCode) { case GL_INVALID_ENUM: CASPAR_THROW_EXCEPTION(ogl_invalid_enum() << msg_info( "an unacceptable value has been specified for an enumerated argument") << error_info("GL_INVALID_ENUM")); case GL_INVALID_VALUE: CASPAR_THROW_EXCEPTION(ogl_invalid_value() << msg_info("a numeric argument is out of range") << error_info("GL_INVALID_VALUE")); case GL_INVALID_OPERATION: CASPAR_THROW_EXCEPTION(ogl_invalid_operation() << msg_info("the specified operation is not allowed in the current state") << error_info("GL_INVALID_OPERATION")); case GL_STACK_OVERFLOW: CASPAR_THROW_EXCEPTION(ogl_stack_overflow() << msg_info("this command would cause a stack overflow") << error_info("GL_STACK_OVERFLOW")); case GL_STACK_UNDERFLOW: CASPAR_THROW_EXCEPTION(ogl_stack_underflow() << msg_info("this command would cause a stack underflow") << error_info("GL_STACK_UNDERFLOW")); case GL_OUT_OF_MEMORY: CASPAR_THROW_EXCEPTION(ogl_out_of_memory() << msg_info("there is not enough memory left to execute the command") << 
error_info("GL_OUT_OF_MEMORY")); case GL_INVALID_FRAMEBUFFER_OPERATION_EXT: CASPAR_THROW_EXCEPTION( ogl_invalid_framebuffer_operation_ext() << msg_info("the object bound to FRAMEBUFFER_BINDING_EXT is not \"framebuffer complete\"") << error_info("GL_INVALID_FRAMEBUFFER_OPERATION_EXT")); } } } }} // namespace caspar::gl ================================================ FILE: src/common/gl/gl_check.h ================================================ /////////////////////////// // // SFML - Simple and Fast Multimedia Library // Copyright (C) 2007-2009 Laurent Gomila (laurent.gom@gmail.com) // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it freely, // subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; // you must not claim that you wrote the original software. // If you use this software in a product, an acknowledgment // in the product documentation would be appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, // and must not be misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. 
// /////////////////////////// #pragma once #include "../except.h" namespace caspar { namespace gl { struct ogl_exception : virtual caspar_exception { }; struct ogl_invalid_enum : virtual ogl_exception { }; struct ogl_invalid_value : virtual ogl_exception { }; struct ogl_invalid_operation : virtual ogl_exception { }; struct ogl_stack_overflow : virtual ogl_exception { }; struct ogl_stack_underflow : virtual ogl_exception { }; struct ogl_out_of_memory : virtual ogl_exception { }; struct ogl_invalid_framebuffer_operation_ext : virtual ogl_exception { }; void SMFL_GLCheckError(const std::string& expr, const char* func, const char* file, unsigned int line); // #ifdef _DEBUG #define CASPAR_GL_EXPR_STR(expr) #expr #define GL(expr) \ if (false) { \ } else { \ (expr); \ caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FUNCTION__, __FILE__, __LINE__); \ } // TODO: decltype version does not play well with gcc #define GL2(expr) \ [&]() { \ auto ret = (expr); \ caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FUNCTION__, __FILE__, __LINE__); \ return ret; \ }() /*#define GL2(expr) \ [&]() -> decltype(expr) \ { \ auto ret = (expr); \ caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FILE__, __LINE__); \ return ret; \ }()*/ // #define GL2(expr) [&]() -> decltype(expr) { auto ret = (expr); // caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), //__FILE__, __LINE__); return ret; }() #else #define GL(expr) expr #endif }} // namespace caspar::gl ================================================ FILE: src/common/log.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "log.h" #include "except.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace logging = boost::log; namespace src = boost::log::sources; namespace sinks = boost::log::sinks; namespace keywords = boost::log::keywords; using namespace boost::placeholders; namespace caspar { namespace log { logging_config current_config; std::string current_exception_diagnostic_information() { { auto e = boost::current_exception_cast(); if (e != nullptr) { return std::string("[char *] = ") + *e + "\n"; } } { auto e = boost::current_exception_cast(); if (e != nullptr) { return std::string("[char *] = ") + u8(*e) + "\n"; } } { auto e = boost::current_exception_cast(); if (e != nullptr) { return std::string("[char *] = ") + *e + "\n"; } } { auto e = boost::current_exception_cast(); if (e != nullptr) { return std::string("[char *] = ") + u8(*e) + "\n"; } } return boost::current_exception_diagnostic_information(true); } template void append_timestamp(Stream& stream, boost::posix_time::ptime timestamp) { auto date = timestamp.date(); auto time = timestamp.time_of_day(); auto milliseconds = time.fractional_seconds() / 1000; // microseconds to milliseconds std::wstringstream buffer; buffer << std::setfill(L'0') << L"[" << std::setw(4) << date.year() << L"-" << std::setw(2) << date.month().as_number() << "-" << std::setw(2) << date.day().as_number() << L" " << std::setw(2) << time.hours() << L":" << std::setw(2) << time.minutes() << L":" << 
std::setw(2) << time.seconds() << L"." << std::setw(3) << milliseconds << L"] "; stream << buffer.str(); } class column_writer { std::atomic column_width_; public: explicit column_writer(int initial_width = 0) : column_width_(initial_width) { } template void write(Stream& out, const Val& value) { std::wstring to_string = boost::lexical_cast(value); if (!current_config.align_columns) { out << L"[" << to_string << L"] "; } else { int length = static_cast(to_string.size()); int read_width; while (true) { read_width = column_width_; if (read_width >= length || column_width_.compare_exchange_strong(length, read_width)) { break; } } read_width = column_width_; out << L"[" << to_string << L"] "; for (int n = 0; n < read_width - length; ++n) { out << L" "; } } } }; template void my_formatter(bool print_all_characters, const boost::log::record_view& rec, Stream& strm) { // static column_writer thread_id_column; static column_writer severity_column(7); namespace expr = boost::log::expressions; std::wstringstream pre_message_stream; append_timestamp(pre_message_stream, boost::log::extract("TimestampMillis", rec).get()); // thread_id_column.write(pre_message_stream, boost::log::extract("NativeThreadId", rec)); severity_column.write(pre_message_stream, boost::log::extract("Severity", rec)); auto pre_message = pre_message_stream.str(); strm << pre_message; auto line_break_replacement = L"\n" + pre_message; if (print_all_characters) { strm << boost::replace_all_copy(rec[expr::message].get(), "\n", line_break_replacement); } else { strm << boost::replace_all_copy( replace_nonprintable_copy(rec[expr::message].get(), L'?'), L"\n", line_break_replacement); } } void add_file_sink(const std::wstring& file) { using file_sink_type = boost::log::sinks::synchronous_sink; try { if (!boost::filesystem::is_directory(boost::filesystem::path(file).parent_path())) { CASPAR_THROW_EXCEPTION(directory_not_found()); } auto file_sink = boost::make_shared( boost::log::keywords::file_name = file + 
L"_%Y-%m-%d.log", boost::log::keywords::time_based_rotation = boost::log::sinks::file::rotation_at_time_point(0, 0, 0), boost::log::keywords::auto_flush = true, boost::log::keywords::open_mode = std::ios::app); file_sink->set_formatter(boost::bind(&my_formatter, true, _1, _2)); boost::log::core::get()->add_sink(file_sink); } catch (...) { std::wcerr << L"Failed to Setup File Logging Sink" << std::endl << std::endl; } } void add_cout_sink() { boost::log::add_common_attributes(); // boost::log::core::get()->add_global_attribute("NativeThreadId", // boost::log::attributes::make_function(&std::this_thread::get_id)); boost::log::core::get()->add_global_attribute("TimestampMillis", boost::log::attributes::make_function([] { return boost::posix_time::microsec_clock::local_time(); })); using stream_sink_type = sinks::asynchronous_sink; auto stream_backend = boost::make_shared(); stream_backend->add_stream(boost::shared_ptr(&std::wcout, boost::null_deleter())); stream_backend->auto_flush(true); auto stream_sink = boost::make_shared(stream_backend); stream_sink->set_formatter(boost::bind(&my_formatter, false, _1, _2)); logging::core::get()->add_sink(stream_sink); } bool set_log_level(const std::wstring& lvl) { if (boost::iequals(lvl, L"trace")) logging::core::get()->set_filter(logging::trivial::severity >= boost::log::trivial::trace); else if (boost::iequals(lvl, L"debug")) logging::core::get()->set_filter(logging::trivial::severity >= boost::log::trivial::debug); else if (boost::iequals(lvl, L"info")) logging::core::get()->set_filter(logging::trivial::severity >= boost::log::trivial::info); else if (boost::iequals(lvl, L"warning")) logging::core::get()->set_filter(logging::trivial::severity >= boost::log::trivial::warning); else if (boost::iequals(lvl, L"error")) logging::core::get()->set_filter(logging::trivial::severity >= boost::log::trivial::error); else if (boost::iequals(lvl, L"fatal")) logging::core::get()->set_filter(logging::trivial::severity >= 
boost::log::trivial::fatal); else return false; // TODO is this a race condition? current_config.current_level = lvl; return true; } std::wstring& get_log_level() { return current_config.current_level; } void set_log_column_alignment(bool align_columns) { current_config.align_columns = align_columns; } }} // namespace caspar::log ================================================ FILE: src/common/log.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "utf.h" #include #include #include #include #ifndef NOMINMAX #define NOMINMAX #endif #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #include #include #include namespace caspar { namespace log { template void replace_nonprintable(std::basic_string, std::allocator>& str, T with) { std::locale loc(""); // Use system locale std::replace_if( str.begin(), str.end(), [&](T c) -> bool { return (!std::isprint(c, loc) && c != '\r' && c != '\n'); }, with); } template std::basic_string replace_nonprintable_copy(std::basic_string, std::allocator> str, T with) { replace_nonprintable(str, with); return str; } std::string current_exception_diagnostic_information(); using caspar_logger = boost::log::sources::wseverity_logger; BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(logger, caspar_logger) #define CASPAR_LOG(lvl) BOOST_LOG_SEV(::caspar::log::logger::get(), boost::log::trivial::severity_level::lvl) struct logging_config { std::atomic align_columns = {false}; std::wstring current_level; }; void add_file_sink(const std::wstring& file); void add_cout_sink(); bool set_log_level(const std::wstring& lvl); std::wstring& get_log_level(); void set_log_column_alignment(bool align_columns); inline std::wstring get_stack_trace() { auto bt = boost::stacktrace::stacktrace(); if (bt) { return caspar::u16(boost::stacktrace::detail::to_string(&bt.as_vector()[0], bt.size())); } return L""; } #define CASPAR_LOG_CURRENT_EXCEPTION() \ try { \ CASPAR_LOG(error) << L"Exception: " << caspar::u16(::caspar::log::current_exception_diagnostic_information()) \ << L"\r\n" \ << caspar::log::get_stack_trace(); \ } catch (...) 
{ \ } #define CASPAR_LOG_CURRENT_CALL_STACK() // TODO (fix) }} // namespace caspar::log ================================================ FILE: src/common/memory.h ================================================ /* * Copyright (c) 2011,2018 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNu General public: License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOuT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PuRPOSE. See the * GNU General public: License for more details. * * You should have received a copy of the GNU General public: License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include namespace caspar { namespace spl { // unique_ptr /** * A wrapper around std::unique_ptr ensuring that the pointer is never null * except in the case of a moved from instance. * * The default constructor will point the wrapped pointer to a default * constructed instance of T. * * Use the make_unique overloads for perfectly forwarding the constructor * arguments of T and creating a unique_ptr to the created T instance. 
*/ template > class unique_ptr { unique_ptr(const unique_ptr&); unique_ptr& operator=(const unique_ptr&); template friend class unique_ptr; template friend class shared_ptr; public: using element_type = T; using deleter_type = D; unique_ptr() : p_(new T()) { } template unique_ptr(unique_ptr&& p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(p.p_.release(), p.p_.get_deleter()) { } template explicit unique_ptr(std::unique_ptr&& p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(std::move(p)) { if (!p_) throw std::invalid_argument("p"); } template explicit unique_ptr(T2* p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(p) { if (!p_) throw std::invalid_argument("p"); } template explicit unique_ptr(T2* p, typename std::remove_reference /*unused*/::type&& d, typename std::enable_if::value, void*>::type = 0) : p_(p, d) { if (!p_) throw std::invalid_argument("p"); } unique_ptr& operator=(unique_ptr&& other) { other.swap(*this); return *this; } T& operator*() const { return *p_.get(); } T* operator->() const { return p_.get(); } T* get() const { return p_.get(); } void swap(unique_ptr& other) { p_.swap(other.p_); } D& get_deleter() { return p_.get_deleter(); } private: T* release() { return p_.release(); } std::unique_ptr p_; }; template bool operator==(const unique_ptr& a, const unique_ptr& b) { return a.get() == b.get(); } template bool operator==(const std::unique_ptr& a, const unique_ptr& b) { return a.get() == b.get(); } template bool operator==(const unique_ptr& a, const std::unique_ptr& b) { return a.get() == b.get(); } template bool operator!=(const unique_ptr& a, const unique_ptr& b) { return a.get() != b.get(); } template bool operator!=(const std::unique_ptr& a, const unique_ptr& b) { return a.get() != b.get(); } template bool operator!=(const unique_ptr& a, const std::unique_ptr& b) { return a.get() != b.get(); } template bool operator<(const unique_ptr& a, const unique_ptr& b) { return a.get() < b.get(); 
} template bool operator<(const std::unique_ptr& a, const unique_ptr& b) { return a.get() < b.get(); } template bool operator<(const unique_ptr& a, const std::unique_ptr& b) { return a.get() < b.get(); } template bool operator>(const unique_ptr& a, const unique_ptr& b) { return a.get() > b.get(); } template bool operator>(const std::unique_ptr& a, const unique_ptr& b) { return a.get() > b.get(); } template bool operator>(const unique_ptr& a, const std::unique_ptr& b) { return a.get() > b.get(); } template bool operator>=(const unique_ptr& a, const unique_ptr& b) { return a.get() >= b.get(); } template bool operator>=(const std::unique_ptr& a, const unique_ptr& b) { return a.get() >= b.get(); } template bool operator>=(const unique_ptr& a, const std::unique_ptr& b) { return a.get() >= b.get(); } template bool operator<=(const unique_ptr& a, const unique_ptr& b) { return a.get() <= b.get(); } template bool operator<=(const std::unique_ptr& a, const unique_ptr& b) { return a.get() <= b.get(); } template bool operator<=(const unique_ptr& a, const std::unique_ptr& b) { return a.get() <= b.get(); } template std::basic_ostream& operator<<(std::basic_ostream& oT2t, const unique_ptr& p) { return oT2t << p.get(); } template void swap(unique_ptr& a, unique_ptr& b) { a.swap(b); } template T* get_pointer(unique_ptr const& p) { return p.get(); } template unique_ptr static_pointer_cast(const unique_ptr& p) { return unique_ptr(std::static_pointer_cast(std::unique_ptr(p))); } template unique_ptr const_pointer_cast(const unique_ptr& p) { return unique_ptr(std::const_pointer_cast(std::unique_ptr(p))); } template unique_ptr dynamic_pointer_cast(const unique_ptr& p) { auto temp = std::dynamic_pointer_cast(std::unique_ptr(p)); if (!temp) throw std::bad_cast(); return unique_ptr(std::move(temp)); } template unique_ptr make_unique_ptr(std::unique_ptr&& ptr) { return unique_ptr(std::move(ptr)); } template unique_ptr make_unique() { return unique_ptr(new T()); } template unique_ptr 
make_unique(P0&& p0) { return unique_ptr(new T(std::forward(p0))); } template unique_ptr make_unique(P0&& p0, P1&& p1) { return unique_ptr(new T(std::forward(p0), std::forward(p1))); } template unique_ptr make_unique(P0&& p0, P1&& p1, P2&& p2) { return unique_ptr(new T(std::forward(p0), std::forward(p1), std::forward(p2))); } template unique_ptr make_unique(P0&& p0, P1&& p1, P2&& p2, P3&& p3) { return unique_ptr(new T(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3))); } template unique_ptr make_unique(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4) { return unique_ptr(new T( std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4))); } template unique_ptr make_unique(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5) { return unique_ptr(new T(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5))); } // shared_ptr /** * A wrapper around std::shared_ptr ensuring that it never points to a null * pointer except in the case of a moved from instance. * * A default constructed shared_ptr will point to a default constructed T. * * Use the make_shared overloads for perfect forwarding of the constructor * arguments of T which will return a shared_ptr pointing to the constructed T. 
*/ template class shared_ptr { template friend class shared_ptr; public: using element_type = T; shared_ptr(); // will constrT2ct new T object T2sing make_shared() template shared_ptr(shared_ptr other, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(std::move(other.p_)) { } template explicit shared_ptr(std::unique_ptr&& p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(std::move(p)) { if (!p_) throw std::invalid_argument("p"); } template explicit shared_ptr(spl::unique_ptr&& p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(p.release(), p.get_deleter()) { if (!p_) throw std::invalid_argument("p"); } template explicit shared_ptr(std::shared_ptr p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(std::move(p)) { if (!p_) throw std::invalid_argument("p"); } template explicit shared_ptr(T2* p, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(p) { if (!p_) throw std::invalid_argument("p"); } template explicit shared_ptr(T2* p, D d, typename std::enable_if /*unused*/::value, void*>::type = 0) : p_(p, d) { if (!p_) throw std::invalid_argument("p"); } shared_ptr operator=(shared_ptr other) { other.swap(*this); return *this; } T& operator*() const { return *p_.get(); } T* operator->() const { return p_.get(); } T* get() const { return p_.get(); } long use_count() const { return p_.use_count(); } void swap(shared_ptr& other) { p_.swap(other.p_); } template operator std::shared_ptr() const { return p_; } template operator std::weak_ptr() const { return std::weak_ptr(p_); } template bool owner_before(const shared_ptr& ptr) { return p_.owner_before(ptr.p_); } template bool owner_before(const std::shared_ptr& ptr) { return p_.owner_before(ptr); } private: std::shared_ptr p_; }; template D* get_deleter(shared_ptr const& ptr) { return ptr.get_deleter(); } template bool operator==(const shared_ptr& a, const shared_ptr& b) { return a.get() == b.get(); } template bool operator==(const 
std::shared_ptr& a, const shared_ptr& b) { return a.get() == b.get(); } template bool operator==(const shared_ptr& a, const std::shared_ptr& b) { return a.get() == b.get(); } template bool operator!=(const shared_ptr& a, const shared_ptr& b) { return a.get() != b.get(); } template bool operator!=(const std::shared_ptr& a, const shared_ptr& b) { return a.get() != b.get(); } template bool operator!=(const shared_ptr& a, const std::shared_ptr& b) { return a.get() != b.get(); } template bool operator<(const shared_ptr& a, const shared_ptr& b) { return a.get() < b.get(); } template bool operator<(const std::shared_ptr& a, const shared_ptr& b) { return a.get() < b.get(); } template bool operator<(const shared_ptr& a, const std::shared_ptr& b) { return a.get() < b.get(); } template bool operator>(const shared_ptr& a, const shared_ptr& b) { return a.get() > b.get(); } template bool operator>(const std::shared_ptr& a, const shared_ptr& b) { return a.get() > b.get(); } template bool operator>(const shared_ptr& a, const std::shared_ptr& b) { return a.get() > b.get(); } template bool operator>=(const shared_ptr& a, const shared_ptr& b) { return a.get() >= b.get(); } template bool operator>=(const std::shared_ptr& a, const shared_ptr& b) { return a.get() >= b.get(); } template bool operator>=(const shared_ptr& a, const std::shared_ptr& b) { return a.get() >= b.get(); } template bool operator<=(const shared_ptr& a, const shared_ptr& b) { return a.get() <= b.get(); } template bool operator<=(const std::shared_ptr& a, const shared_ptr& b) { return a.get() <= b.get(); } template bool operator<=(const shared_ptr& a, const std::shared_ptr& b) { return a.get() <= b.get(); } template std::basic_ostream& operator<<(std::basic_ostream& oT2t, const shared_ptr& p) { return oT2t << p.get(); } template void swap(shared_ptr& a, shared_ptr& b) { a.swap(b); } template T* get_pointer(shared_ptr const& p) { return p.get(); } template shared_ptr static_pointer_cast(const shared_ptr& p) { return 
shared_ptr(std::static_pointer_cast(std::shared_ptr(p))); } template shared_ptr const_pointer_cast(const shared_ptr& p) { return shared_ptr(std::const_pointer_cast(std::shared_ptr(p))); } template shared_ptr dynamic_pointer_cast(const shared_ptr& p) { auto temp = std::dynamic_pointer_cast(std::shared_ptr(p)); if (!temp) throw std::bad_cast(); return shared_ptr(std::move(temp)); } template bool instance_of(const shared_ptr& p) { auto temp = std::dynamic_pointer_cast(std::shared_ptr(p)); return temp != nullptr; } // // enable_safe_this // // A shared_ptr version of enable_shared_from_this. // So that an object may get shared_ptr objects to itself. // template class enable_shared_from_this : public std::enable_shared_from_this { public: shared_ptr shared_from_this() { return shared_ptr(std::enable_shared_from_this::shared_from_this()); } shared_ptr shared_from_this() const { return shared_ptr(std::enable_shared_from_this::shared_from_this()); } protected: enable_shared_from_this() {} enable_shared_from_this(const enable_shared_from_this& /*unused*/) {} enable_shared_from_this& operator=(const enable_shared_from_this& /*unused*/) { return *this; } ~enable_shared_from_this() {} }; // // make_shared // // shared_ptr eqT2ivalents to make_shared // template shared_ptr make_shared_ptr(std::unique_ptr&& ptr) { return shared_ptr(std::move(ptr)); } template shared_ptr make_shared_ptr(std::shared_ptr ptr) { return shared_ptr(std::move(ptr)); } template shared_ptr make_shared() { return shared_ptr(std::make_shared()); } template shared_ptr make_shared() { return shared_ptr(std::make_shared()); } template shared_ptr make_shared(P0&& p0) { return shared_ptr(std::make_shared(std::forward(p0))); } template shared_ptr make_shared(P0&& p0, P1&& p1) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1))); } template shared_ptr make_shared(P0&& p0) { return shared_ptr(std::make_shared(std::forward(p0))); } template shared_ptr make_shared(P0&& p0, P1&& p1) { return 
shared_ptr(std::make_shared(std::forward(p0), std::forward(p1))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3) { return shared_ptr( std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4) { return shared_ptr(std::make_shared( std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5), std::forward(p6))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5), std::forward(p6), std::forward(p7))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5), std::forward(p6), std::forward(p7), std::forward(p8))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5), std::forward(p6), 
std::forward(p7), std::forward(p8), std::forward(p9))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9, P10&& p10) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5), std::forward(p6), std::forward(p7), std::forward(p8), std::forward(p9), std::forward(p10))); } template shared_ptr make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9, P10&& p10, P11&& p11) { return shared_ptr(std::make_shared(std::forward(p0), std::forward(p1), std::forward(p2), std::forward(p3), std::forward(p4), std::forward(p5), std::forward(p6), std::forward(p7), std::forward(p8), std::forward(p9), std::forward(p10), std::forward(p11))); } template shared_ptr::shared_ptr() : p_(make_shared()) { } }} // namespace caspar::spl ================================================ FILE: src/common/memshfl.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #ifdef USE_SIMDE #define SIMDE_ENABLE_NATIVE_ALIASES #include #else #ifdef _MSC_VER #include #include #else #include #endif #endif namespace caspar { #ifdef _MSC_VER static std::shared_ptr create_aligned_buffer(size_t size, size_t alignment = 64) { return std::shared_ptr(scalable_aligned_malloc(size, alignment), scalable_aligned_free); } #else static std::shared_ptr create_aligned_buffer(size_t size, size_t alignment = 64) { return std::shared_ptr(aligned_alloc(alignment, size), free); } #endif static void* aligned_memshfl(void* dest, const void* source, size_t count, int m1, int m2, int m3, int m4) { __m128i* dest128 = reinterpret_cast<__m128i*>(dest); const __m128i* source128 = reinterpret_cast(source); count /= 16; // 128 bit const __m128i mask128 = _mm_set_epi32(m1, m2, m3, m4); for (size_t n = 0; n < count / 4; ++n) { __m128i xmm0 = _mm_load_si128(source128++); __m128i xmm1 = _mm_load_si128(source128++); __m128i xmm2 = _mm_load_si128(source128++); __m128i xmm3 = _mm_load_si128(source128++); _mm_stream_si128(dest128++, _mm_shuffle_epi8(xmm0, mask128)); _mm_stream_si128(dest128++, _mm_shuffle_epi8(xmm1, mask128)); _mm_stream_si128(dest128++, _mm_shuffle_epi8(xmm2, mask128)); _mm_stream_si128(dest128++, _mm_shuffle_epi8(xmm3, mask128)); } return dest; } } // namespace caspar ================================================ FILE: src/common/os/filesystem.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Helge Norberg, helge.norberg@svt.se */ #pragma once #include #include namespace caspar { std::optional find_case_insensitive(const std::wstring& case_insensitive); std::wstring clean_path(std::wstring path); std::wstring ensure_trailing_slash(std::wstring folder); } // namespace caspar ================================================ FILE: src/common/os/linux/filesystem.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Helge Norberg, helge.norberg@svt.se */ #include "../../stdafx.h" #include "../filesystem.h" #include #include using namespace boost::filesystem; namespace caspar { std::optional find_case_insensitive(const std::wstring& case_insensitive) { path p(case_insensitive); if (exists(p)) return case_insensitive; p = absolute(p); path result; auto loc = std::locale(""); // Use system locale for (auto part : p) { auto concatenated = result / part; if (exists(concatenated)) { result = concatenated; } else { bool found = false; for (auto it = directory_iterator(absolute(result)); it != directory_iterator(); ++it) { auto leaf = it->path().filename(); if (boost::algorithm::iequals(part.wstring(), leaf.wstring(), loc)) { result = result / leaf; found = true; break; } } if (!found) return {}; } } return result.wstring(); } std::wstring clean_path(std::wstring path) { boost::replace_all(path, L"\\\\", L"/"); boost::replace_all(path, L"\\", L"/"); return path; } std::wstring ensure_trailing_slash(std::wstring folder) { auto last_char = folder.at(folder.length() - 1); if (last_char != L'/') folder.append(L"/"); return folder; } } // namespace caspar ================================================ FILE: src/common/os/linux/prec_timer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "../../stdafx.h" #include "../../prec_timer.h" #include #include using namespace std::chrono; namespace caspar { prec_timer::prec_timer() : time_(0) { } void prec_timer::tick_nanos(int64_t ticks_to_wait) { auto t = duration_cast(high_resolution_clock::now().time_since_epoch()).count(); if (time_ != 0) { bool done = 0; do { auto ticks_passed = t - time_; auto ticks_left = ticks_to_wait - ticks_passed; if (t < time_) // time wrap done = 1; if (ticks_passed >= ticks_to_wait) done = 1; if (!done) { timespec spec; if (ticks_left > 2000000) { spec.tv_sec = ticks_left / 1000000000; spec.tv_nsec = 1000000; } else { spec.tv_sec = 0; spec.tv_nsec = ticks_left / 100; } nanosleep(&spec, nullptr); } t = duration_cast(high_resolution_clock::now().time_since_epoch()).count(); } while (!done); } time_ = t; } } // namespace caspar ================================================ FILE: src/common/os/linux/thread.cpp ================================================ #include "../thread.h" #include "../../utf.h" #include #include namespace caspar { void set_thread_name(const std::wstring& name) { pthread_setname_np(pthread_self(), u8(name).c_str()); } void set_thread_realtime_priority() { pthread_t handle = pthread_self(); int policy; struct sched_param param; if (pthread_getschedparam(handle, &policy, ¶m) != 0) return; param.sched_priority = 2; pthread_setschedparam(handle, SCHED_FIFO, ¶m); } } // namespace caspar ================================================ FILE: src/common/os/thread.h ================================================ #pragma once #include namespace caspar { void set_thread_name(const std::wstring& name); void set_thread_realtime_priority(); } // namespace caspar ================================================ FILE: src/common/os/windows/filesystem.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Helge Norberg, helge.norberg@svt.se */ #include "../../stdafx.h" #include "../filesystem.h" #include #include namespace caspar { std::optional find_case_insensitive(const std::wstring& case_insensitive) { if (boost::filesystem::exists(case_insensitive)) return case_insensitive; return {}; } std::wstring clean_path(std::wstring path) { return path; } std::wstring ensure_trailing_slash(std::wstring folder) { auto last_char = folder.at(folder.length() - 1); if (last_char != L'/' && last_char != L'\\') folder.append(L"/"); return folder; } } // namespace caspar ================================================ FILE: src/common/os/windows/prec_timer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. 
If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "../../stdafx.h" #include "../../prec_timer.h" #include using namespace std::chrono; namespace caspar { prec_timer::prec_timer() : time_(0) { } void prec_timer::tick_nanos(int64_t ticks_to_wait) { auto t = duration_cast(high_resolution_clock::now().time_since_epoch()).count(); if (time_ != 0) { bool done = false; do { auto ticks_passed = t - time_; auto ticks_left = ticks_to_wait - ticks_passed; if (t < time_) // time wrap done = true; if (ticks_passed >= ticks_to_wait) done = true; if (!done) { // if > 0.002s left, do Sleep(1), which will actually sleep some // steady amount, probably 1-2 ms, // and do so in a nice way (cpu meter drops; laptop battery spared). // otherwise, do a few Sleep(0)'s, which just give up the timeslice, // but don't really save cpu or battery, but do pass a tiny // amount of time. if (ticks_left > 2000000) Sleep(1); else for (int i = 0; i < 10; ++i) Sleep(0); // causes thread to give up its timeslice } t = duration_cast(high_resolution_clock::now().time_since_epoch()).count(); } while (!done); } time_ = t; } } // namespace caspar ================================================ FILE: src/common/os/windows/thread.cpp ================================================ #include "../thread.h" #include #include #include "../../utf.h" namespace caspar { typedef struct tagTHREADNAME_INFO { DWORD dwType; // must be 0x1000 LPCSTR szName; // pointer to name (in user addr space) DWORD dwThreadID; // thread ID (-1=caller thread) DWORD dwFlags; // reserved for future use, must be zero } THREADNAME_INFO; inline void SetThreadName(DWORD dwThreadID, LPCSTR szThreadName) { THREADNAME_INFO info; { info.dwType = 0x1000; info.szName = szThreadName; info.dwThreadID = dwThreadID; info.dwFlags = 0; } __try { RaiseException(0x406D1388, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info); } __except (EXCEPTION_CONTINUE_EXECUTION) { } } void set_thread_name(const std::wstring& name) { 
SetThreadName(GetCurrentThreadId(), u8(name).c_str()); } void set_thread_realtime_priority() { SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL); } } // namespace caspar ================================================ FILE: src/common/os/windows/windows.h ================================================ #pragma once #undef _UNICODE #define _UNICODE #undef UNICODE #define UNICODE #undef NOMINMAX #define NOMINMAX #undef NOSERVICE #define NOSERVICE #undef NOMCX #define NOMCX #ifdef _MSC_VER #include #endif #ifndef _WIN32_WINNT #define _WIN32_WINNT _WIN32_WINNT_WIN7 #endif #undef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #include ================================================ FILE: src/common/param.h ================================================ #pragma once #include "except.h" #include #include #include #include namespace caspar { class param_comparer { const std::wstring& lhs; public: explicit param_comparer(const std::wstring& p) : lhs(p) { } bool operator()(const std::wstring& rhs) { return boost::iequals(lhs, rhs); } }; template bool contains_param(const std::wstring& name, C&& params) { return std::find_if(params.begin(), params.end(), param_comparer(name)) != params.end(); } template bool get_and_consume_flag(const std::wstring& flag_param, C& params) { auto flag_it = std::find_if(params.begin(), params.end(), param_comparer(flag_param)); bool flag = false; if (flag_it != params.end()) { flag = true; params.erase(flag_it); } return flag; } template void replace_placeholders(const std::wstring& placeholder, const std::wstring& replacement, C&& params) { for (auto& param : params) boost::ireplace_all(param, placeholder, replacement); } static std::pair protocol_split(const std::wstring& s) { size_t pos; if ((pos = s.find(L"://")) != std::wstring::npos) { return std::make_pair(s.substr(0, pos), s.substr(pos + 3)); } return std::make_pair(L"", s); } template typename std::enable_if::value, typename std::decay::type>::type 
get_param(const std::wstring& name, C&& params, T fail_value = T()) { auto it = std::find_if(std::begin(params), std::end(params), param_comparer(name)); if (it == params.end()) return fail_value; try { if (++it == params.end()) throw std::out_of_range(""); return boost::lexical_cast::type>(*it); } catch (...) { CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Failed to parse param " + name) << nested_exception(std::current_exception())); } } template std::wstring get_param(const std::wstring& name, C&& params, const std::wstring& fail_value = L"") { auto it = std::find_if(std::begin(params), std::end(params), param_comparer(name)); if (it == params.end()) return fail_value; try { if (++it == params.end()) throw std::out_of_range(""); return *it; } catch (...) { CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Failed to parse param " + name) << nested_exception(std::current_exception())); } } } // namespace caspar ================================================ FILE: src/common/prec_timer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include namespace caspar { class prec_timer { public: prec_timer(); void tick(double interval) { tick_nanos(static_cast(interval * 1000000000.0)); } void tick_millis(int64_t interval) { tick_nanos(interval * 1000000); } // Author: Ryan M. Geiss // http://www.geisswerks.com/ryan/FAQS/timing.html void tick_nanos(int64_t interval); private: int64_t time_; }; } // namespace caspar ================================================ FILE: src/common/ptree.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Helge Norberg, helge.norberg@svt.se */ #pragma once #include "except.h" #include #include #include #include #include #include namespace caspar { struct ptree_exception : virtual user_error { }; static std::string to_xpath(std::string path) { path.insert(path.begin(), '/'); boost::replace_all(path, ".", "@"); boost::replace_all(path, ".", "/"); return path; } template T ptree_get(const Ptree& ptree, const typename Ptree::key_type& path) { try { return ptree.template get(path); } catch (boost::property_tree::ptree_bad_path&) { CASPAR_THROW_EXCEPTION(ptree_exception() << msg_info("No such element: " + to_xpath(u8(path)))); } catch (const boost::property_tree::ptree_bad_data& e) { CASPAR_THROW_EXCEPTION(ptree_exception() << msg_info(e.what())); } } template T ptree_get_value(const Ptree& ptree) { try { return ptree.template get_value(); } catch (const boost::property_tree::ptree_bad_data& e) { CASPAR_THROW_EXCEPTION(ptree_exception() << msg_info(e.what())); } } template const Ptree& ptree_get_child(const Ptree& ptree, const typename Ptree::key_type& path) { try { return ptree.get_child(path); } catch (boost::property_tree::ptree_bad_path&) { CASPAR_THROW_EXCEPTION(ptree_exception() << msg_info("No such element: " + to_xpath(u8(path)))); } } template class scope_aware_ptree_child_range { const Ptree& child_; using type = std::pair; public: class scoped_const_iterator : public boost::iterator_facade { typename Ptree::const_iterator wrapped_; public: explicit scoped_const_iterator(typename Ptree::const_iterator it) : wrapped_(std::move(it)) { } void increment() { ++wrapped_; } bool equal(const scoped_const_iterator& other) const { return wrapped_ == other.wrapped_; } const type& dereference() const { return *wrapped_; } }; using iterator = scoped_const_iterator; using const_iterator = scoped_const_iterator; scope_aware_ptree_child_range(const Ptree& parent, const typename Ptree::key_type& path) : child_(ptree_get_child(parent, path)) { } scoped_const_iterator 
begin() const { return scoped_const_iterator(child_.begin()); }
scoped_const_iterator end() const { return scoped_const_iterator(child_.end()); }
};
/* NOTE(review): throughout this span every <...> template-parameter and
   template-argument list was stripped by the extraction tool. Only comments
   are added here; the code tokens are untouched. The lists must be restored
   from upstream before this header can compile. */
/* Tag carrying the child-path key consumed by the operator| overload below. */
template struct iterate_children_tag {
    Key val;
    explicit iterate_children_tag(Key val_) : val(std::move(val_)) { }
};
/* Wide- and narrow-string flavours of the tag. */
using witerate_children = iterate_children_tag;
using iterate_children = iterate_children_tag;
/* ptree | iterate_children(path) -> iterable range over that node's children. */
template scope_aware_ptree_child_range operator|(const Ptree& ptree, iterate_children_tag path) { return scope_aware_ptree_child_range(ptree, path.val); }
/* Pair-identity transform used by element_context_iteration below; the
   by_name cache member appears unused in the visible code -- presumably kept
   for diagnostic context. TODO confirm. */
template struct basic_scoped_element_translator {
    mutable std::map by_name;
    using result_type = const std::pair&;
    result_type operator()(result_type pair) const { return pair; }
};
template struct element_context_iteration_tag { };
/* Wide- and narrow-string tag instances for the transformed-range adaptor. */
static element_context_iteration_tag welement_context_iteration;
static element_context_iteration_tag element_context_iteration;
/* range | element_context_iteration -> boost transformed range applying the
   translator above. */
template auto operator|(const Range& rng, element_context_iteration_tag tag) -> decltype(rng | boost::adaptors::transformed(basic_scoped_element_translator())) { return rng | boost::adaptors::transformed(basic_scoped_element_translator()); }
/* Throws ptree_exception unless `elem` is named `expected`. */
template void ptree_verify_element_name(const std::pair& elem, const Str& expected) { if (elem.first != expected) CASPAR_THROW_EXCEPTION(ptree_exception() << msg_info("Expected element named " + u8(expected) + ". 
Was " + u8(elem.first))); } } // namespace caspar ================================================ FILE: src/common/scope_exit.h ================================================ #pragma once #include "except.h" namespace caspar { namespace detail { template class scope_exit { scope_exit(const scope_exit&); scope_exit& operator=(const scope_exit&); public: template explicit scope_exit(T2&& func) : func_(std::forward(func)) , valid_(true) { } scope_exit(scope_exit&& other) : func_(std::move(other.func_)) , valid_(std::move(other.valid_)) { other.valid_ = false; } scope_exit& operator=(scope_exit&& other) { func_ = std::move(other.func_); valid_ = std::move(other.valid_); other.valid_ = false; return *this; } ~scope_exit() { try { if (valid_) func_(); } catch (...) { if (!std::uncaught_exceptions()) #pragma warning(push) #pragma warning(disable : 4297) throw; #pragma warning(pop) else CASPAR_LOG_CURRENT_EXCEPTION(); } } private: T func_; bool valid_; }; class scope_exit_helper { }; template scope_exit::type> operator+(scope_exit_helper /*unused*/, T&& exitScope) { return scope_exit::type>(std::forward(exitScope)); } } // namespace detail #define _CASPAR_EXIT_SCOPE_LINENAME_CAT(name, line) name##line #define _CASPAR_EXIT_SCOPE_LINENAME(name, line) _CASPAR_EXIT_SCOPE_LINENAME_CAT(name, line) #define CASPAR_SCOPE_EXIT \ auto _CASPAR_EXIT_SCOPE_LINENAME(EXIT, __LINE__) = ::caspar::detail::scope_exit_helper() + [&]() mutable } // namespace caspar ================================================ FILE: src/common/stdafx.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "stdafx.h" ================================================ FILE: src/common/stdafx.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include #include // #include // #include // #include // #include // #include // #include // #include // #include #include #include #include #include #include #include #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include // #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ================================================ FILE: src/common/timer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Helge Norberg, helge.norberg@svt.se */ #pragma once #include namespace caspar { class timer { std::int_least64_t start_time_; public: timer() { start_time_ = now(); } void restart() { start_time_ = now(); } double elapsed() const { return static_cast(now() - start_time_) / 1000.0; } private: static std::int_least64_t now() { using namespace std::chrono; return duration_cast(high_resolution_clock::now().time_since_epoch()).count(); } }; } // namespace caspar ================================================ FILE: src/common/tweener.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ // The following code is based on Tweener for actionscript, http://code.google.com/p/tweener/ // // Disclaimer for Robert Penner's Easing Equations license: // // TERMS OF USE - EASING EQUATIONS // // Open source under the BSD License. // // Copyright � 2001 Robert Penner // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // * Neither the name of the author nor the names of contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "tweener.h"

#include "except.h"

// NOTE(review): seven <...> include targets were stripped by extraction;
// restored from usage below. TODO confirm against upstream.
#include <algorithm>
#include <cmath>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

#include <boost/regex.hpp>

namespace caspar {

using tweener_t = std::function<double(double, double, double, double)>;

static const double PI   = std::atan(1.0) * 4.0;
static const double H_PI = std::atan(1.0) * 2.0; // NOTE(review): currently unused; kept for parity.

// Robert Penner easing functions. Common signature:
//   t: current time, b: start value, c: value delta, d: total duration,
//   params: optional extra parameters (period/amplitude/overshoot).

double ease_none(double t, double b, double c, double d, const std::vector<double>& params) { return c * t / d + b; }

double ease_in_quad(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    return c * t * t + b;
}

double ease_out_quad(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    return -c * t * (t - 2) + b;
}

double ease_in_out_quad(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d / 2;
    if (t < 1)
        return c / 2 * t * t + b;
    return -c / 2 * ((t - 1) * (t - 3) - 1) + b;
}

double ease_out_in_quad(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_quad(t * 2, b, c / 2, d, params);
    return ease_in_quad(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_in_cubic(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    return c * t * t * t + b;
}

double ease_out_cubic(double t, double b, double c, double d, const std::vector<double>& params)
{
    t = t / d - 1;
    return c * (t * t * t + 1) + b;
}

double ease_in_out_cubic(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d / 2;
    if (t < 1)
        return c / 2 * t * t * t + b;
    t -= 2;
    return c / 2 * (t * t * t + 2) + b;
}

double ease_out_in_cubic(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_cubic(t * 2, b, c / 2, d, params);
    return ease_in_cubic(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_in_quart(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    return c * t * t * t * t + b;
}

double ease_out_quart(double t, double b, double c, double d, const std::vector<double>& params)
{
    t = t / d - 1;
    return -c * (t * t * t * t - 1) + b;
}

double ease_in_out_quart(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d / 2;
    if (t < 1)
        return c / 2 * t * t * t * t + b;
    t -= 2;
    return -c / 2 * (t * t * t * t - 2) + b;
}

double ease_out_in_quart(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_quart(t * 2, b, c / 2, d, params);
    return ease_in_quart(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_in_quint(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    return c * t * t * t * t * t + b;
}

double ease_out_quint(double t, double b, double c, double d, const std::vector<double>& params)
{
    t = t / d - 1;
    return c * (t * t * t * t * t + 1) + b;
}

double ease_in_out_quint(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d / 2;
    if (t < 1)
        return c / 2 * t * t * t * t * t + b;
    t -= 2;
    return c / 2 * (t * t * t * t * t + 2) + b;
}

double ease_out_in_quint(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_quint(t * 2, b, c / 2, d, params);
    return ease_in_quint(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_in_sine(double t, double b, double c, double d, const std::vector<double>& params)
{
    return -c * std::cos(t / d * (PI / 2)) + c + b;
}

double ease_out_sine(double t, double b, double c, double d, const std::vector<double>& params)
{
    return c * std::sin(t / d * (PI / 2)) + b;
}

double ease_in_out_sine(double t, double b, double c, double d, const std::vector<double>& params)
{
    return -c / 2 * (std::cos(PI * t / d) - 1) + b;
}

double ease_out_in_sine(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_sine(t * 2, b, c / 2, d, params);
    return ease_in_sine(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_in_expo(double t, double b, double c, double d, const std::vector<double>& params)
{
    return t == 0 ? b : c * std::pow(2, 10 * (t / d - 1)) + b - c * 0.001;
}

double ease_out_expo(double t, double b, double c, double d, const std::vector<double>& params)
{
    return t == d ? b + c : c * 1.001 * (-std::pow(2, -10 * t / d) + 1) + b;
}

double ease_in_out_expo(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t == 0)
        return b;
    if (t == d)
        return b + c;
    t /= d / 2;
    if (t < 1)
        return c / 2 * std::pow(2, 10 * (t - 1)) + b - c * 0.0005;
    return c / 2 * 1.0005 * (-std::pow(2, -10 * (t - 1)) + 2) + b;
}

double ease_out_in_expo(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_expo(t * 2, b, c / 2, d, params);
    return ease_in_expo(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_in_circ(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    return -c * (std::sqrt(1 - t * t) - 1) + b;
}

double ease_out_circ(double t, double b, double c, double d, const std::vector<double>& params)
{
    t = t / d - 1;
    return c * std::sqrt(1 - t * t) + b;
}

double ease_in_out_circ(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d / 2;
    if (t < 1)
        return -c / 2 * (std::sqrt(1 - t * t) - 1) + b;
    t -= 2;
    return c / 2 * (std::sqrt(1 - t * t) + 1) + b;
}

double ease_out_in_circ(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_circ(t * 2, b, c / 2, d, params);
    return ease_in_circ(t * 2 - d, b + c / 2, c / 2, d, params);
}

// Elastic tweens: params[0] = period, params[1] = amplitude (both optional).
double ease_in_elastic(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t == 0)
        return b;
    t /= d;
    if (t == 1)
        return b + c;
    // var p:Number = !Boolean(p_params) || isNaN(p_params.period) ? d*.3 : p_params.period;
    // var s:Number;
    // var a:Number = !Boolean(p_params) || isNaN(p_params.amplitude) ? 0 : p_params.amplitude;
    double p = !params.empty() ? params[0] : d * 0.3;
    double s;
    double a = params.size() > 1 ? params[1] : 0.0;
    if (a == 0.0 || a < std::abs(c)) {
        a = c;
        s = p / 4;
    } else
        s = p / (2 * PI) * std::asin(c / a);
    t--;
    return -(a * std::pow(2, 10 * t) * std::sin((t * d - s) * (2 * PI) / p)) + b;
}

double ease_out_elastic(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t == 0)
        return b;
    t /= d;
    if (t == 1)
        return b + c;
    double p = !params.empty() ? params[0] : d * 0.3;
    double s;
    double a = params.size() > 1 ? params[1] : 0.0;
    if (a == 0.0 || a < std::abs(c)) {
        a = c;
        s = p / 4;
    } else
        s = p / (2 * PI) * std::asin(c / a);
    return a * std::pow(2, -10 * t) * std::sin((t * d - s) * (2 * PI) / p) + c + b;
}

double ease_in_out_elastic(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t == 0)
        return b;
    t /= d / 2;
    if (t == 2)
        return b + c;
    double p = !params.empty() ? params[0] : d * 0.3 * 1.5;
    double s;
    double a = params.size() > 1 ? params[1] : 0.0;
    if (a == 0.0 || a < std::abs(c)) {
        a = c;
        s = p / 4;
    } else
        s = p / (2 * PI) * std::asin(c / a);
    if (t-- < 1) {
        return -.5 * (a * std::pow(2, 10 * t) * std::sin((t * d - s) * (2 * PI) / p)) + b;
    }
    return a * std::pow(2, -10 * t) * std::sin((t * d - s) * (2 * PI) / p) * .5 + c + b;
}

double ease_out_in_elastic(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_elastic(t * 2, b, c / 2, d, params);
    return ease_in_elastic(t * 2 - d, b + c / 2, c / 2, d, params);
}

// Back tweens: params[0] = overshoot (optional, default 1.70158).
double ease_in_back(double t, double b, double c, double d, const std::vector<double>& params)
{
    // var s:Number = !Boolean(p_params) || isNaN(p_params.overshoot) ? 1.70158 : p_params.overshoot;
    double s = !params.empty() ? params[0] : 1.70158;
    t /= d;
    return c * t * t * ((s + 1) * t - s) + b;
}

double ease_out_back(double t, double b, double c, double d, const std::vector<double>& params)
{
    double s = !params.empty() ? params[0] : 1.70158;
    t        = t / d - 1;
    return c * (t * t * ((s + 1) * t + s) + 1) + b;
}

double ease_in_out_back(double t, double b, double c, double d, const std::vector<double>& params)
{
    double s = !params.empty() ? params[0] : 1.70158;
    t /= d / 2;
    s *= 1.525;
    if (t < 1)
        return c / 2 * (t * t * ((s + 1) * t - s)) + b;
    t -= 2;
    return c / 2 * (t * t * ((s + 1) * t + s) + 2) + b;
}

// NOTE(review): "int" (not "in") matches the original name and the
// "easeoutintback" key in the tween table below; kept for compatibility.
double ease_out_int_back(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_back(t * 2, b, c / 2, d, params);
    return ease_in_back(t * 2 - d, b + c / 2, c / 2, d, params);
}

double ease_out_bounce(double t, double b, double c, double d, const std::vector<double>& params)
{
    t /= d;
    if (t < 1 / 2.75)
        return c * (7.5625 * t * t) + b;
    if (t < 2 / 2.75) {
        t -= 1.5 / 2.75;
        return c * (7.5625 * t * t + .75) + b;
    }
    if (t < 2.5 / 2.75) {
        t -= 2.25 / 2.75;
        return c * (7.5625 * t * t + .9375) + b;
    }
    t -= 2.625 / 2.75;
    return c * (7.5625 * t * t + .984375) + b;
}

double ease_in_bounce(double t, double b, double c, double d, const std::vector<double>& params)
{
    return c - ease_out_bounce(d - t, 0, c, d, params) + b;
}

double ease_in_out_bounce(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_in_bounce(t * 2, 0, c, d, params) * .5 + b;
    return ease_out_bounce(t * 2 - d, 0, c, d, params) * .5 + c * .5 + b;
}

double ease_out_in_bounce(double t, double b, double c, double d, const std::vector<double>& params)
{
    if (t < d / 2)
        return ease_out_bounce(t * 2, b, c / 2, d, params);
    return ease_in_bounce(t * 2 - d, b + c / 2, c / 2, d, params);
}

using tween_t = std::function<double(double, double, double, double, const std::vector<double>&)>;

// Name -> easing function lookup table (names are lower-case, colon-free).
const std::unordered_map<std::wstring, tween_t>& get_tweens()
{
    static const std::unordered_map<std::wstring, tween_t> tweens = {{L"", ease_none},
                                                                     {L"linear", ease_none},
                                                                     {L"easenone", ease_none},
                                                                     {L"easeinquad", ease_in_quad},
                                                                     {L"easeoutquad", ease_out_quad},
                                                                     {L"easeinoutquad", ease_in_out_quad},
                                                                     {L"easeoutinquad", ease_out_in_quad},
                                                                     {L"easeincubic", ease_in_cubic},
                                                                     {L"easeoutcubic", ease_out_cubic},
                                                                     {L"easeinoutcubic", ease_in_out_cubic},
                                                                     {L"easeoutincubic", ease_out_in_cubic},
                                                                     {L"easeinquart", ease_in_quart},
                                                                     {L"easeoutquart", ease_out_quart},
                                                                     {L"easeinoutquart", ease_in_out_quart},
                                                                     {L"easeoutinquart", ease_out_in_quart},
                                                                     {L"easeinquint", ease_in_quint},
                                                                     {L"easeoutquint", ease_out_quint},
                                                                     {L"easeinoutquint", ease_in_out_quint},
                                                                     {L"easeoutinquint", ease_out_in_quint},
                                                                     {L"easeinsine", ease_in_sine},
                                                                     {L"easeoutsine", ease_out_sine},
                                                                     {L"easeinoutsine", ease_in_out_sine},
                                                                     {L"easeoutinsine", ease_out_in_sine},
                                                                     {L"easeinexpo", ease_in_expo},
                                                                     {L"easeoutexpo", ease_out_expo},
                                                                     {L"easeinoutexpo", ease_in_out_expo},
                                                                     {L"easeoutinexpo", ease_out_in_expo},
                                                                     {L"easeincirc", ease_in_circ},
                                                                     {L"easeoutcirc", ease_out_circ},
                                                                     {L"easeinoutcirc", ease_in_out_circ},
                                                                     {L"easeoutincirc", ease_out_in_circ},
                                                                     {L"easeinelastic", ease_in_elastic},
                                                                     {L"easeoutelastic", ease_out_elastic},
                                                                     {L"easeinoutelastic", ease_in_out_elastic},
                                                                     {L"easeoutinelastic", ease_out_in_elastic},
                                                                     {L"easeinback", ease_in_back},
                                                                     {L"easeoutback", ease_out_back},
                                                                     {L"easeinoutback", ease_in_out_back},
                                                                     {L"easeoutintback", ease_out_int_back},
                                                                     {L"easeoutbounce", ease_out_bounce},
                                                                     {L"easeinbounce", ease_in_bounce},
                                                                     {L"easeinoutbounce", ease_in_out_bounce},
                                                                     {L"easeoutinbounce", ease_out_in_bounce}};
    return tweens;
}

// Parses "name[:v0[:v1]]" (case-insensitive) and returns the bound tween.
// Throws user_error for unknown tween names.
// NOTE(review): the regex named groups (?<NAME>...), (?<V0>...), (?<V1>...)
// were stripped by extraction and are restored here -- required by the
// what["NAME"]/what["V0"]/what["V1"] lookups below.
tweener_t get_tweener(std::wstring name)
{
    std::transform(name.begin(), name.end(), name.begin(), std::towlower);

    if (name == L"linear")
        return [](double t, double b, double c, double d) { return ease_none(t, b, c, d, std::vector<double>()); };

    std::vector<double> params;

    static const boost::wregex expr(
        LR"((?<NAME>\w*)(:(?<V0>\d+\.?\d?))?(:(?<V1>\d+\.?\d?))?)"); // boost::regex has no repeated captures?
    boost::wsmatch what;
    if (boost::regex_match(name, what, expr)) {
        name = what["NAME"].str();
        if (what["V0"].matched)
            params.push_back(std::stod(what["V0"].str()));
        if (what["V1"].matched)
            params.push_back(std::stod(what["V1"].str()));
    }

    auto it = get_tweens().find(name);
    if (it == get_tweens().end())
        CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Could not find tween " + name));

    auto tween = it->second;
    return [=](double t, double b, double c, double d) { return tween(t, b, c, d, params); };
}

tweener::tweener(const std::wstring& name)
    : func_(get_tweener(name))
    , name_(name)
{
}

double tweener::operator()(double t, double b, double c, double d) const { return func_(t, b, c, d); }

bool tweener::operator==(const tweener& other) const { return name_ == other.name_; }

bool tweener::operator!=(const tweener& other) const { return !(*this == other); }

const std::vector<std::wstring>& tweener::names()
{
    static auto result = [] {
        std::vector<std::wstring> tweens;
        for (auto& tween : get_tweens())
            tweens.push_back(tween.first);
        return tweens;
    }();
    return result;
}

} // namespace caspar

// ================================================
// FILE: src/common/tweener.h
// ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include namespace caspar { /** * A tweener can be used for creating any kind of (image position, image fade * in/out, audio volume etc) transition, by invoking it for each temporal * timepoint when a tweened value is needed. * * For audio the smoothest transitions will be generated by using the samplerate * as temporal resolution, but using the video frame/field rate is probably fine * most of the times and much less time consuming. */ class tweener { public: /** * Constructor. * * @param name The name of the tween function to use. */ tweener(const std::wstring& name = L"linear"); /** * @return The possible tween function names. Some of them may also support * additional parameters appended to the name. */ static const std::vector& names(); /** * Calculate a tweened value given a timepoint within the total duration * and the starting value and the destination delta value. * * Usually b, c and d remains constant during a transition, while t changes * for each temporal tweened value. * * @param t The timepoint within the total duration (0 <= n <= d). * @param b The starting value. * @param c The destination value delta from the starting value * (absolute destination value - b). * @param d The total duration (when t = d, the destination value should * have been reached). * * @return The tweened value for the given timepoint. Can sometimes be less * than b or greater than b + c for some tweener functions. */ double operator()(double t, double b, double c, double d) const; bool operator==(const tweener& other) const; bool operator!=(const tweener& other) const; private: std::function func_; std::wstring name_; }; } // namespace caspar ================================================ FILE: src/common/utf.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "utf.h" #include namespace caspar { std::wstring u16(const std::string& str) { return boost::locale::conv::utf_to_utf(str); // std::wstring(str.begin(), str.end()); } std::wstring u16(const std::wstring& str) { return str; } std::string u8(const std::wstring& str) { return boost::locale::conv::utf_to_utf(str); // std::string(str.begin(), str.end()); } std::string u8(const std::string& str) { return str; } } // namespace caspar ================================================ FILE: src/common/utf.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include namespace caspar { std::wstring u16(const std::string& str); std::wstring u16(const std::wstring& str); std::string u8(const std::wstring& str); std::string u8(const std::string& str); } // namespace caspar ================================================ FILE: src/core/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 3.28) project (core) set(SOURCES consumer/frame_consumer.cpp consumer/frame_consumer_registry.cpp consumer/output.cpp diagnostics/call_context.cpp diagnostics/osd_graph.cpp frame/draw_frame.cpp frame/frame.cpp frame/frame_transform.cpp frame/geometry.cpp mixer/audio/audio_mixer.cpp mixer/image/blend_modes.cpp mixer/mixer.cpp producer/color/color_producer.cpp producer/separated/separated_producer.cpp producer/transition/transition_producer.cpp producer/transition/sting_producer.cpp producer/route/route_producer.cpp producer/cg_proxy.cpp producer/frame_producer.cpp producer/frame_producer_registry.cpp producer/layer.cpp producer/stage.cpp video_channel.cpp video_format.cpp ) set(HEADERS consumer/frame_consumer.h consumer/frame_consumer_registry.h consumer/output.h diagnostics/call_context.h diagnostics/osd_graph.h frame/draw_frame.h frame/frame.h frame/frame_factory.h frame/frame_transform.h frame/frame_visitor.h frame/geometry.h frame/pixel_format.h mixer/audio/audio_mixer.h mixer/image/blend_modes.h mixer/image/image_mixer.h mixer/mixer.h monitor/monitor.h producer/color/color_producer.h producer/separated/separated_producer.h producer/transition/transition_producer.h producer/transition/sting_producer.h producer/route/route_producer.h producer/cg_proxy.h producer/frame_producer.h producer/frame_producer_registry.h producer/layer.h producer/stage.h fwd.h module_dependencies.h StdAfx.h video_channel.h video_format.h consumer/channel_info.h ) casparcg_add_library(core SOURCES ${SOURCES} ${HEADERS}) target_include_directories(core PRIVATE 
..) target_precompile_headers(core PRIVATE StdAfx.h) target_link_libraries(core PRIVATE common GLEW::glew sfml-graphics) source_group(sources ./*) source_group(sources\\consumer consumer/*) source_group(sources\\diagnostics diagnostics/*) source_group(sources\\producer producer/*) source_group(sources\\monitor monitor/*) source_group(sources\\frame frame/*) source_group(sources\\mixer mixer/*) source_group(sources\\mixer\\audio mixer/audio/*) source_group(sources\\mixer\\image mixer/image/*) source_group(sources\\producer\\color producer/color/*) source_group(sources\\producer\\route producer/route/*) source_group(sources\\producer\\transition producer/transition/*) source_group(sources\\producer\\separated producer/separated/*) ================================================ FILE: src/core/StdAfx.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ================================================ FILE: src/core/consumer/channel_info.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Julian Waller, julian@superfly.tv */ #pragma once #include "../frame/pixel_format.h" #include "common/bit_depth.h" namespace caspar::core { struct channel_info { channel_info(int channel_index, common::bit_depth depth, color_space color_space) : index(channel_index) , depth(depth) , default_color_space(color_space) { } int index; common::bit_depth depth; color_space default_color_space; }; } // namespace caspar::core ================================================ FILE: src/core/consumer/frame_consumer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "../StdAfx.h" #include "frame_consumer.h" #include #include #include #include namespace caspar { namespace core { const spl::shared_ptr& frame_consumer::empty() { class empty_frame_consumer : public frame_consumer { public: std::future send(const core::video_field field, const_frame) override { return make_ready_future(false); } void initialize(const video_format_desc&, const core::channel_info&, int port_index) override {} std::wstring print() const override { return L"empty"; } std::wstring name() const override { return L"empty"; } bool has_synchronization_clock() const override { return false; } int index() const override { return -1; } core::monitor::state state() const override { static const monitor::state empty; return empty; } }; static spl::shared_ptr consumer = spl::make_shared(); return consumer; } }} // namespace caspar::core ================================================ FILE: src/core/consumer/frame_consumer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "../fwd.h" #include "../monitor/monitor.h" #include #include #include #include #include #include #include #include #include namespace caspar { namespace core { class frame_consumer { frame_consumer(const frame_consumer&); frame_consumer& operator=(const frame_consumer&); public: static const spl::shared_ptr& empty(); frame_consumer() {} virtual ~frame_consumer() {} virtual std::future send(const core::video_field field, const_frame frame) = 0; virtual void initialize(const video_format_desc& format_desc, const core::channel_info& channel_info, int port_index) = 0; virtual std::future call(const std::vector& params) { return caspar::make_ready_future(false); } virtual core::monitor::state state() const = 0; virtual std::wstring print() const = 0; virtual std::wstring name() const = 0; virtual bool has_synchronization_clock() const { return false; } virtual int index() const = 0; }; }} // namespace caspar::core ================================================ FILE: src/core/consumer/frame_consumer_registry.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "../StdAfx.h" #include "frame_consumer.h" #include "frame_consumer_registry.h" #include #include #include #include #include #include namespace caspar { namespace core { std::atomic& destroy_consumers_in_separate_thread() { static std::atomic state; return state; } void destroy_consumers_synchronously() { destroy_consumers_in_separate_thread() = false; } class destroy_consumer_proxy : public frame_consumer { std::shared_ptr consumer_; public: destroy_consumer_proxy(spl::shared_ptr&& consumer) : consumer_(std::move(consumer)) { destroy_consumers_in_separate_thread() = true; } ~destroy_consumer_proxy() { static std::atomic counter; static std::once_flag counter_init_once; std::call_once(counter_init_once, [] { counter = 0; }); if (!destroy_consumers_in_separate_thread()) return; ++counter; CASPAR_VERIFY(counter < 8); auto consumer = new std::shared_ptr(std::move(consumer_)); std::thread([=] { std::unique_ptr> pointer_guard(consumer); auto str = (*consumer)->print(); try { if (consumer->use_count() != 1) CASPAR_LOG(debug) << str << L" Not destroyed on asynchronous destruction thread: " << consumer->use_count(); else CASPAR_LOG(debug) << str << L" Destroying on asynchronous destruction thread."; } catch (...) 
{ } pointer_guard.reset(); counter--; }).detach(); } std::future send(const core::video_field field, const_frame frame) override { return consumer_->send(field, std::move(frame)); } void initialize(const video_format_desc& format_desc, const core::channel_info& channel_info, int port_index) override { return consumer_->initialize(format_desc, channel_info, port_index); } std::future call(const std::vector& params) override { return consumer_->call(params); } std::wstring print() const override { return consumer_->print(); } std::wstring name() const override { return consumer_->name(); } bool has_synchronization_clock() const override { return consumer_->has_synchronization_clock(); } int index() const override { return consumer_->index(); } core::monitor::state state() const override { return consumer_->state(); } }; class print_consumer_proxy : public frame_consumer { std::shared_ptr consumer_; public: print_consumer_proxy(spl::shared_ptr&& consumer) : consumer_(std::move(consumer)) { } ~print_consumer_proxy() { auto str = consumer_->print(); CASPAR_LOG(debug) << str << L" Uninitializing."; consumer_.reset(); CASPAR_LOG(info) << str << L" Uninitialized."; } std::future send(const core::video_field field, const_frame frame) override { return consumer_->send(field, std::move(frame)); } void initialize(const video_format_desc& format_desc, const core::channel_info& channel_info, int port_index) override { consumer_->initialize(format_desc, channel_info, port_index); CASPAR_LOG(info) << consumer_->print() << L" Initialized."; } std::future call(const std::vector& params) override { return consumer_->call(params); } std::wstring print() const override { return consumer_->print(); } std::wstring name() const override { return consumer_->name(); } bool has_synchronization_clock() const override { return consumer_->has_synchronization_clock(); } int index() const override { return consumer_->index(); } core::monitor::state state() const override { return 
consumer_->state(); } }; frame_consumer_registry::frame_consumer_registry() {} void frame_consumer_registry::register_consumer_factory(const std::wstring& name, const consumer_factory_t& factory) { consumer_factories_.push_back(factory); } void frame_consumer_registry::register_preconfigured_consumer_factory(const std::wstring& element_name, const preconfigured_consumer_factory_t& factory) { preconfigured_consumer_factories_.insert(std::make_pair(element_name, factory)); } spl::shared_ptr frame_consumer_registry::create_consumer(const std::vector& params, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) const { if (params.empty()) CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("params cannot be empty")); auto consumer = frame_consumer::empty(); auto& consumer_factories = consumer_factories_; if (!std::any_of( consumer_factories.begin(), consumer_factories.end(), [&](const consumer_factory_t& factory) -> bool { try { consumer = factory(params, format_repository, channels, channel_info); } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); } return consumer != frame_consumer::empty(); })) { CASPAR_THROW_EXCEPTION(file_not_found() << msg_info("No match found for supplied commands. 
Check syntax.")); } return spl::make_shared(spl::make_shared(std::move(consumer))); } spl::shared_ptr frame_consumer_registry::create_consumer(const std::wstring& element_name, const boost::property_tree::wptree& element, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) const { auto& preconfigured_consumer_factories = preconfigured_consumer_factories_; auto found = preconfigured_consumer_factories.find(element_name); if (found == preconfigured_consumer_factories.end()) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"No consumer factory registered for element name " + element_name)); return spl::make_shared( spl::make_shared(found->second(element, format_repository, channels, channel_info))); } }} // namespace caspar::core ================================================ FILE: src/core/consumer/frame_consumer_registry.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "../fwd.h" #include "../monitor/monitor.h" #include "channel_info.h" #include #include #include #include #include #include #include #include #include namespace caspar { namespace core { void destroy_consumers_synchronously(); using consumer_factory_t = std::function(const std::vector& params, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info)>; using preconfigured_consumer_factory_t = std::function(const boost::property_tree::wptree& element, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info)>; class frame_consumer_registry { public: frame_consumer_registry(); void register_consumer_factory(const std::wstring& name, const consumer_factory_t& factory); void register_preconfigured_consumer_factory(const std::wstring& element_name, const preconfigured_consumer_factory_t& factory); spl::shared_ptr create_consumer(const std::vector& params, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) const; spl::shared_ptr create_consumer(const std::wstring& element_name, const boost::property_tree::wptree& element, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) const; private: std::vector consumer_factories_; std::map preconfigured_consumer_factories_; frame_consumer_registry(const frame_consumer_registry&) = delete; frame_consumer_registry& operator=(const frame_consumer_registry&) = delete; }; }} // namespace caspar::core ================================================ FILE: src/core/consumer/output.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "output.h" #include "channel_info.h" #include "frame_consumer.h" #include "../frame/frame.h" #include "../frame/pixel_format.h" #include #include #include #include #include #include #include #include #include namespace caspar { namespace core { using time_point_t = decltype(std::chrono::high_resolution_clock::now()); struct output::impl { monitor::state state_; spl::shared_ptr graph_; const channel_info channel_info_; video_format_desc format_desc_; std::mutex consumers_mutex_; std::map> consumers_; std::optional time_; public: impl(const spl::shared_ptr& graph, const video_format_desc& format_desc, const core::channel_info& channel_info) : graph_(graph) , channel_info_(channel_info) , format_desc_(format_desc) { } void add(int index, spl::shared_ptr consumer) { remove(index); consumer->initialize(format_desc_, channel_info_, index); std::lock_guard lock(consumers_mutex_); consumers_.emplace(index, std::move(consumer)); } void add(const spl::shared_ptr& consumer) { add(consumer->index(), consumer); } bool remove(int index) { std::lock_guard lock(consumers_mutex_); auto count = consumers_.erase(index); return count > 0; } bool remove(const spl::shared_ptr& consumer) { return remove(consumer->index()); } std::future call(int index, const std::vector& params) { std::lock_guard lock(consumers_mutex_); auto it = consumers_.find(index); 
if (it != consumers_.end()) { try { return it->second->call(params); } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); } } else { CASPAR_LOG(warning) << print() << L" No consumer found for index " << index << L"."; } return caspar::make_ready_future(false); } size_t consumer_count() { std::lock_guard lock(consumers_mutex_); return consumers_.size(); } void operator()(const const_frame& input_frame1, const const_frame& input_frame2, const core::video_format_desc& format_desc) { auto time = std::move(time_); if (format_desc_ != format_desc) { std::lock_guard lock(consumers_mutex_); for (auto it = consumers_.begin(); it != consumers_.end();) { try { it->second->initialize(format_desc, channel_info_, it->first); ++it; } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); it = consumers_.erase(it); } } format_desc_ = format_desc; time_ = {}; return; } // If no frame is provided, this should only happen when the channel has no consumers. // Take a shortcut and perform the sleep to let the channel tick correctly. if (!input_frame1) { if (!time) { time = std::chrono::high_resolution_clock::now(); } else { std::this_thread::sleep_until(*time); } time_ = *time + std::chrono::microseconds(static_cast(1e6 / format_desc_.hz)); return; } const auto bytesPerComponent1 = input_frame1.pixel_format_desc().planes.at(0).depth == common::bit_depth::bit8 ? 1 : 2; if (input_frame1.size() != format_desc_.size * bytesPerComponent1) { CASPAR_LOG(warning) << print() << L" Invalid input frame size."; return; } if (input_frame2) { const auto bytesPerComponent2 = input_frame2.pixel_format_desc().planes.at(0).depth == common::bit_depth::bit8 ? 
1 : 2; if (input_frame2.size() != format_desc_.size * bytesPerComponent2) { CASPAR_LOG(warning) << print() << L" Invalid input frame size."; return; } } decltype(consumers_) consumers; { std::lock_guard lock(consumers_mutex_); consumers = consumers_; } auto do_send = [this, &consumers](core::video_field field, const core::const_frame& frame) { std::map> futures; for (auto it = consumers.begin(); it != consumers.end();) { try { futures.emplace(it->first, it->second->send(field, frame)); ++it; } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); auto index = it->first; it = consumers.erase(it); std::lock_guard lock(consumers_mutex_); consumers_.erase(index); } } for (auto& p : futures) { try { if (!p.second.get()) { consumers.erase(p.first); std::lock_guard lock(consumers_mutex_); consumers_.erase(p.first); } } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); consumers.erase(p.first); std::lock_guard lock(consumers_mutex_); consumers_.erase(p.first); } } }; if (format_desc_.field_count == 2) { do_send(core::video_field::a, input_frame1); do_send(core::video_field::b, input_frame2); } else { do_send(core::video_field::progressive, input_frame1); } monitor::state state; for (auto& p : consumers) { state["port"][p.first] = p.second->state(); state["port"][p.first]["consumer"] = p.second->name(); } state_ = std::move(state); const auto needs_sync = std::all_of( consumers.begin(), consumers.end(), [](auto& p) { return !p.second->has_synchronization_clock(); }); if (needs_sync) { if (!time) { time = std::chrono::high_resolution_clock::now(); } else { std::this_thread::sleep_until(*time); } time_ = *time + std::chrono::microseconds(static_cast(1e6 / format_desc_.hz)); } else { time_.reset(); } } std::wstring print() const { return L"output[" + std::to_wstring(channel_info_.index) + L"]"; } }; output::output(const spl::shared_ptr& graph, const video_format_desc& format_desc, const core::channel_info& channel_info) : impl_(new impl(graph, format_desc, channel_info)) { } 
output::~output() {} void output::add(int index, const spl::shared_ptr& consumer) { impl_->add(index, consumer); } void output::add(const spl::shared_ptr& consumer) { impl_->add(consumer); } bool output::remove(int index) { return impl_->remove(index); } bool output::remove(const spl::shared_ptr& consumer) { return impl_->remove(consumer); } std::future output::call(int index, const std::vector& params) { return impl_->call(index, params); } size_t output::consumer_count() const { return impl_->consumer_count(); } void output::operator()(const const_frame& frame, const const_frame& frame2, const video_format_desc& format_desc) { return (*impl_)(frame, frame2, format_desc); } core::monitor::state output::state() const { return impl_->state_; } }} // namespace caspar::core ================================================ FILE: src/core/consumer/output.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "../fwd.h" #include "../monitor/monitor.h" #include #include #include namespace caspar::diagnostics { class graph; } namespace caspar { namespace core { class output final { public: explicit output(const spl::shared_ptr& graph, const video_format_desc& format_desc, const core::channel_info& channel_info); output(const output&) = delete; output& operator=(const output&) = delete; ~output(); // Send a frame to the output. If running an interlaced channel, two frames will be provided void operator()(const const_frame& frame, const const_frame& frame2, const video_format_desc& format_desc); void add(const spl::shared_ptr& consumer); void add(int index, const spl::shared_ptr& consumer); bool remove(const spl::shared_ptr& consumer); bool remove(int index); std::future call(int index, const std::vector& params); size_t consumer_count() const; core::monitor::state state() const; private: struct impl; std::unique_ptr impl_; }; }} // namespace caspar::core ================================================ FILE: src/core/diagnostics/call_context.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
 *
 * Author: Helge Norberg, helge.norberg@svt.se
 */

#include "../StdAfx.h"

#include "call_context.h"

namespace caspar { namespace core { namespace diagnostics {

// One call_context instance per thread; for_thread() hands out a mutable
// reference so callers can tag the current thread with channel/layer info.
thread_local call_context context;

call_context& call_context::for_thread() { return context; }

// Renders as "[]", "[ch=N]" or "[ch=N; layer=M]" depending on which of the two
// fields are set (-1 means unset, per the defaults in call_context.h).
std::wstring call_context::to_string() const
{
    if (video_channel == -1)
        return L"[]";

    if (layer == -1)
        return L"[ch=" + std::to_wstring(video_channel) + L"]";

    return L"[ch=" + std::to_wstring(video_channel) + L"; layer=" + std::to_wstring(layer) + L"]";
}

}}} // namespace caspar::core::diagnostics

================================================
FILE: src/core/diagnostics/call_context.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Helge Norberg, helge.norberg@svt.se
 */

#pragma once

// NOTE(review): #include target lost in extraction (needs the header declaring
// std::wstring).
#include

namespace caspar { namespace core { namespace diagnostics {

// Per-thread diagnostic tag identifying which channel/layer the current code
// path is operating on. -1 means "not set" for both fields.
struct call_context
{
    int video_channel = -1;
    int layer         = -1;

    // Access the calling thread's context (thread_local storage; see .cpp).
    static call_context& for_thread();

    std::wstring to_string() const;
};

// RAII guard: snapshots the current thread's call_context on construction and
// restores it on destruction, so callees may freely overwrite it in between.
class scoped_call_context
{
    call_context saved_;

    scoped_call_context(const scoped_call_context&)            = delete;
    scoped_call_context& operator=(const scoped_call_context&) = delete;

  public:
    scoped_call_context() { saved_ = call_context::for_thread(); }
    ~scoped_call_context() { call_context::for_thread() = saved_; }
};

}}} // namespace caspar::core::diagnostics

================================================
FILE: src/core/diagnostics/osd_graph.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "../StdAfx.h"

#include "osd_graph.h"

#include "call_context.h"

// NOTE(review): the following #include targets were lost in extraction
// (SFML/GL/std headers) — restore from the original file before compiling.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace fs = std::filesystem;

namespace caspar { namespace core { namespace diagnostics { namespace osd {

#if SFML_VERSION_MAJOR >= 3
// The SFML 2 based on-screen diagnostics below have not been ported to SFML 3;
// stub the public API out so the rest of the server still links.
void register_sink() {}
void show_graphs(bool value) {}
void shutdown() {}
#else

// Layout constants: the window renders PREFERRED_VERTICAL_GRAPHS graph strips
// stacked vertically, each RENDERING_WIDTH x RENDERING_HEIGHT pixels.
static const int PREFERRED_VERTICAL_GRAPHS = 8;
static const int RENDERING_WIDTH           = 1024;
static const int RENDERING_HEIGHT          = RENDERING_WIDTH / PREFERRED_VERTICAL_GRAPHS;

// Unpack a 0xRRGGBBAA packed int into an sf::Color.
sf::Color get_sfml_color(int color)
{
    // NOTE(review): `c` is computed but never used.
    auto c = caspar::diagnostics::color(color);

    return {static_cast(color >> 24 & 255),
            static_cast(color >> 16 & 255),
            static_cast(color >> 8 & 255),
            static_cast(color >> 0 & 255)};
}

// Lazily load the diagnostics font once (function-local static). On Linux,
// falls back to fc-match resolution when DIAG_FONT_PATH does not exist.
auto& get_default_font()
{
    static sf::Font DEFAULT_FONT = []() {
        fs::path path{DIAG_FONT_PATH};
#ifdef __linux__
        if (!fs::exists(path)) {
            auto cmd = "fc-match --format=%{file} " + path.string();
            // NOTE(review): the pipe returned by popen() is never pclose()d.
            if (auto pipe = popen(cmd.data(), "r")) {
                char buf[128];
                path.clear();
                while (fgets(buf, sizeof(buf), pipe))
                    path += buf;
            }
        }
#endif
        sf::Font font;
        if (!font.loadFromFile(path.string()))
            CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(DIAG_FONT_PATH " not found"));
        return font;
    }();
    return DEFAULT_FONT;
}

// Base for everything the OSD window draws: combines SFML's Drawable with a
// Transformable position, routing const draw() to the mutable render() hook.
struct drawable
    : public sf::Drawable
    , public sf::Transformable
{
    virtual ~drawable() {}
    virtual void render(sf::RenderTarget& target, sf::RenderStates states) = 0;
    void draw(sf::RenderTarget& target, sf::RenderStates states) const override
    {
        states.transform *= getTransform();
        const_cast(this)->render(target, states);
    }
};

// Singleton owning the diagnostics window. All window/state access happens on
// the dedicated "diagnostics" executor thread; the public statics only enqueue
// work onto it.
class context : public drawable
{
    std::unique_ptr window_;
    sf::View view_;
    std::list> drawables_;   // weak: graphs vanish when their owners do
    caspar::timer display_time_;
    bool calculate_view_ = true;     // recompute scroll/view on next tick
    int scroll_position_ = 0;        // <= 0; pixels scrolled from the top
    bool dragging_ = false;
    int last_mouse_y_ = 0;
    executor executor_{L"diagnostics"};

  public:
    static void register_drawable(const std::shared_ptr& drawable)
    {
        if (!drawable)
            return;

        get_instance()->executor_.begin_invoke([=] { get_instance()->do_register_drawable(drawable); });
    }

    static void show(bool value)
    {
        get_instance()->executor_.begin_invoke([=] { get_instance()->do_show(value); });
    }

    static void shutdown() { get_instance().reset(); }

  private:
    context() {}

    // Open (and start ticking) or close the window on the executor thread.
    void do_show(bool value)
    {
        if (value) {
            if (!window_) {
                // NOTE(review): height argument is RENDERING_WIDTH, not a height
                // constant — confirm whether a square window is intended.
                window_.reset(
                    new sf::RenderWindow(sf::VideoMode(RENDERING_WIDTH, RENDERING_WIDTH), "CasparCG Diagnostics"));
                window_->setPosition(sf::Vector2i(0, 0));
                window_->setActive();
                window_->setVerticalSyncEnabled(true);
                calculate_view_ = true;
                glEnable(GL_BLEND);
                glEnable(GL_LINE_SMOOTH);
                glHint(GL_LINE_SMOOTH_HINT, GL_NICEST);
                glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
                tick();
            }
        } else
            window_.reset();
    }

    // One frame of the render loop: pump events (close/resize/drag/wheel
    // scrolling), clamp and apply the scroll view, draw all graphs, then
    // re-enqueue itself on the executor (~25 fps via the 40 ms sleep).
    void tick()
    {
        if (!window_)
            return;

        sf::Event e;
        while (window_->pollEvent(e)) {
            switch (e.type) {
                case sf::Event::Closed:
                    window_.reset();
                    return;
                case sf::Event::Resized:
                    calculate_view_ = true;
                    break;
                case sf::Event::MouseButtonPressed:
                    dragging_     = true;
                    last_mouse_y_ = e.mouseButton.y;
                    break;
                case sf::Event::MouseButtonReleased:
                    dragging_ = false;
                    break;
                case sf::Event::MouseMoved:
                    if (dragging_) {
                        auto delta_y = e.mouseMove.y - last_mouse_y_;
                        scroll_position_ += delta_y;
                        last_mouse_y_   = e.mouseMove.y;
                        calculate_view_ = true;
                    }
                    break;
                case sf::Event::MouseWheelMoved:
                    scroll_position_ += e.mouseWheel.delta * 15;
                    calculate_view_ = true;
                    break;
                default:
                    break;
            }
        }

        window_->clear();

        if (calculate_view_) {
            // Clamp scroll so content cannot be dragged past either end.
            int content_height      = static_cast(RENDERING_HEIGHT * drawables_.size());
            int window_height       = static_cast(window_->getSize().y);
            int not_visible         = std::max(0, content_height - window_height);
            int min_scroll_position = -not_visible;
            int max_scroll_position = 0;

            scroll_position_ = std::min(max_scroll_position, std::max(min_scroll_position, scroll_position_));
            view_.setViewport(sf::FloatRect(0, 0, 1.0, 1.0));
            view_.setSize(RENDERING_WIDTH, window_height);
            view_.setCenter(RENDERING_WIDTH / 2, window_height / 2 - scroll_position_);
            window_->setView(view_);

            calculate_view_ = false;
        }

        CASPAR_LOG(trace) << "osd_graph::tick()";
        window_->draw(*this);
        window_->display();
        display_time_.restart();
        if (executor_.is_running()) {
            executor_.begin_invoke([this] { tick(); });
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(40));
    }

    // Stack live graphs vertically; expired weak_ptrs are pruned in-place.
    void render(sf::RenderTarget& target, sf::RenderStates states) override
    {
        int n = 0;

        for (auto it = drawables_.begin(); it != drawables_.end(); ++n) {
            auto drawable = it->lock();

            if (drawable) {
                float target_y = n * RENDERING_HEIGHT;
                drawable->setPosition(0.0f, target_y);
                target.draw(*drawable, states);
                ++it;
            } else
                it = drawables_.erase(it);
        }
    }

    // Append a graph and garbage-collect expired entries.
    void do_register_drawable(const std::shared_ptr& drawable)
    {
        drawables_.push_back(drawable);
        auto it = drawables_.begin();
        while (it != drawables_.end()) {
            if (it->lock())
                ++it;
            else
                it = drawables_.erase(it);
        }
    }

    static std::unique_ptr& get_instance()
    {
        static auto impl = std::unique_ptr(new context);
        return impl;
    }
};

// One scrolling trace within a graph strip. Writers publish the latest sample
// via atomics (set_value/set_tag/set_color); the render thread consumes them.
class line : public drawable
{
    size_t res_{1024};                          // number of samples kept
    boost::circular_buffer line_data_{res_};    // trace vertices
    boost::circular_buffer> line_tags_{res_};   // optional vertical event markers

    std::atomic tick_data_;  // latest sample; < -0.5 means "no data yet"
    std::atomic tick_tag_;   // event marker pending for this tick
    std::atomic color_;      // packed 0xRRGGBBAA

    double x_delta_ = 1.0 / (static_cast(res_) - 1.0);  // x-advance per sample

  public:
    line()
        : tick_data_(-1.0f)
        , tick_tag_(false)
        , color_(0xFFFFFFFF)
    {
    }

    // Copy ctor must be explicit because std::atomic is not copyable.
    line(const line& other)
        : res_(other.res_)
        , line_data_(other.line_data_)
        , line_tags_(other.line_tags_)
        , tick_data_(other.tick_data_.load())
        , tick_tag_(other.tick_tag_.load())
        , color_(other.color_.load())
        , x_delta_(other.x_delta_)
    {
    }

    void set_value(float value) { tick_data_ = value; }
    void set_tag() { tick_tag_ = true; }
    void set_color(int color) { color_ = color; }
    int get_color() { return color_; }

    void render(sf::RenderTarget& target, sf::RenderStates states) override
    {
        /*states.transform.translate(x_pos_, 0.f);
        if (line_data_.size() == res_)
            x_pos_ = -get_insertion_xcoord() + 1.0 - x_delta_; // Otherwise the graph will drift because of floating point precision
        else
            x_pos_ -= x_delta_;*/

        // Scroll the whole trace (and its event markers) one sample left.
        for (auto& vertex : line_data_)
            vertex.position.x -= x_delta_;

        for (auto& tag : line_tags_) {
            if (tag) {
                (*tag)[0].position.x -= x_delta_;
                (*tag)[1].position.x -= x_delta_;
            }
        }

        auto color = get_sfml_color(color_);
        color.a    = 255 * 0.8;

        // Append the new sample, mapped into [0.1, 0.9] of the strip height
        // (inverted: larger values plot nearer the top).
        line_data_.push_back(sf::Vertex(
            sf::Vector2f(get_insertion_xcoord(), std::max(0.1f, std::min(0.9f, (1.0f - tick_data_) * 0.8f + 0.1f))),
            color));

        if (tick_tag_) {
            sf::VertexArray vertical_dash(sf::LinesStrip);
            vertical_dash.append(sf::Vertex(sf::Vector2f(get_insertion_xcoord() - x_delta_, 0.f), color));
            vertical_dash.append(sf::Vertex(sf::Vector2f(get_insertion_xcoord() - x_delta_, 1.f), color));
            line_tags_.push_back(vertical_dash);
        } else
            line_tags_.push_back({});

        tick_tag_ = false;

        if (tick_data_ > -0.5) {
            auto array_one = line_data_.array_one();
            auto array_two = line_data_.array_two();

            // since boost::circular_buffer guarantees two contiguous views of the buffer we can provide raw access to
            // SFML, which can use glDrawArrays.
            target.draw(array_one.first, static_cast(array_one.second), sf::LinesStrip, states);
            target.draw(array_two.first, static_cast(array_two.second), sf::LinesStrip, states);

            if (array_one.second > 0 && array_two.second > 0) {
                // Connect the gap between the arrays
                sf::VertexArray connecting_line(sf::LinesStrip);
                connecting_line.append(*(array_one.first + array_one.second - 1));
                connecting_line.append(*array_two.first);
                target.draw(connecting_line, states);
            }
        } else {
            // No samples yet: draw only the event markers, stippled.
            glEnable(GL_LINE_STIPPLE);
            glLineStipple(3, 0xAAAA);

            for (size_t n = 0; n < line_tags_.size(); ++n) {
                if (line_tags_[n]) {
                    target.draw(*line_tags_[n], states);
                }
            }

            glDisable(GL_LINE_STIPPLE);
        }
    }

  private:
    // X coordinate where the next sample is appended; anchored at 1.0 so the
    // trace scrolls in from the right edge.
    double get_insertion_xcoord() const { return line_data_.empty() ? 1.0 : line_data_.back().position.x + x_delta_; }
};

// One graph strip: a legend, an optional channel/layer badge (captured from
// the creating thread's call_context), guide lines, and a set of named traces.
// Implements the diagnostics SPI sink so producers can feed it values.
struct graph
    : public drawable
    , public caspar::diagnostics::spi::graph_sink
    , public std::enable_shared_from_this
{
    call_context context_ = call_context::for_thread();
    tbb::concurrent_unordered_map lines_;  // name -> trace
    std::mutex mutex_;                     // guards text_ / auto_reset_
    std::wstring text_;
    bool auto_reset_ = false;              // zero every trace after each draw

    graph() {}

    void activate() override { context::register_drawable(shared_from_this()); }

    void set_text(const std::wstring& value) override
    {
        auto temp = value;

        std::lock_guard lock(mutex_);
        text_ = std::move(temp);
    }

    void set_value(const std::string& name, double value) override
    {
        lines_[name].set_value(static_cast(value));
    }

    void set_tag(caspar::diagnostics::tag_severity /*severity*/, const std::string& name) override
    {
        lines_[name].set_tag();
    }

    void set_color(const std::string& name, int color) override { lines_[name].set_color(color); }

    void auto_reset() override
    {
        std::lock_guard lock(mutex_);
        auto_reset_ = true;
    }

  private:
    void render(sf::RenderTarget& target, sf::RenderStates states) override
    {
        const size_t text_size   = 15;
        const size_t text_margin = 2;
        const size_t text_offset = (text_size + text_margin * 2) * 2;

        std::wstring text_str;
        bool auto_reset;

        {
            // Copy shared state under the lock; draw without holding it.
            std::lock_guard lock(mutex_);
            text_str   = text_;
            auto_reset = auto_reset_;
        }

        // Title (top-left).
        sf::Text text(text_str.c_str(), get_default_font(), text_size);
        text.setStyle(sf::Text::Italic);
        text.move(text_margin, text_margin);
        target.draw(text, states);

        // Channel[-layer] badge (top-right), if the creating thread had one.
        if (context_.video_channel != -1) {
            auto ctx_str = std::to_string(context_.video_channel);

            if (context_.layer != -1)
                ctx_str += "-" + std::to_string(context_.layer);

            sf::Text context_text(ctx_str, get_default_font(), text_size);
            context_text.setStyle(sf::Text::Italic);
            context_text.move(RENDERING_WIDTH - text_margin - 5 - context_text.getLocalBounds().width, text_margin);
            target.draw(context_text, states);
        }

        // Legend: one colored label per trace, laid out left to right.
        float x_offset = text_margin;

        for (auto it = lines_.begin(); it != lines_.end(); ++it) {
            sf::Text line_text(it->first, get_default_font(), text_size);
            line_text.setPosition(x_offset, text_margin + text_offset / 2);
            line_text.setColor(get_sfml_color(it->second.get_color()));
            target.draw(line_text, states);
            x_offset += line_text.getLocalBounds().width + text_margin * 2;
        }

        // Translucent strip background (built once).
        static const auto rect = []() {
            sf::RectangleShape r(sf::Vector2f(RENDERING_WIDTH, RENDERING_HEIGHT - 2));
            r.setFillColor(sf::Color(255, 255, 255, 51));
            r.setOutlineThickness(0.00f);
            r.move(0, 1);
            return r;
        }();
        target.draw(rect, states);

        // Map the unit-square trace space below the text into strip pixels.
        states.transform.translate(0, text_offset)
            .scale(RENDERING_WIDTH,
                   RENDERING_HEIGHT *
                       (static_cast(RENDERING_HEIGHT - text_offset) / static_cast(RENDERING_HEIGHT)));

        // Horizontal guides at 10% / 50% / 90% of the trace range.
        static const sf::Color guide_color(255, 255, 255, 127);
        static const sf::VertexArray middle_guide = []() {
            sf::VertexArray result(sf::LinesStrip);
            result.append(sf::Vertex(sf::Vector2f(0.0f, 0.5f), guide_color));
            result.append(sf::Vertex(sf::Vector2f(1.0f, 0.5f), guide_color));
            return result;
        }();
        static const sf::VertexArray bottom_guide = []() {
            sf::VertexArray result(sf::LinesStrip);
            result.append(sf::Vertex(sf::Vector2f(0.0f, 0.9f), guide_color));
            result.append(sf::Vertex(sf::Vector2f(1.0f, 0.9f), guide_color));
            return result;
        }();
        static const sf::VertexArray top_guide = []() {
            sf::VertexArray result(sf::LinesStrip);
            result.append(sf::Vertex(sf::Vector2f(0.0f, 0.1f), guide_color));
            result.append(sf::Vertex(sf::Vector2f(1.0f, 0.1f), guide_color));
            return result;
        }();

        glEnable(GL_LINE_STIPPLE);
        glLineStipple(3, 0xAAAA);
        target.draw(middle_guide, states);
        target.draw(bottom_guide, states);
        target.draw(top_guide, states);
        glDisable(GL_LINE_STIPPLE);

        for (auto it = lines_.begin(); it != lines_.end(); ++it) {
            target.draw(it->second, states);

            if (auto_reset)
                it->second.set_value(0.0f);
        }
    }
};

// Install a factory producing OSD graph sinks into the diagnostics SPI.
void register_sink()
{
    caspar::diagnostics::spi::register_sink_factory([] { return spl::make_shared(); });
}

void show_graphs(bool value) { context::show(value); }

void shutdown() { context::shutdown(); }

#endif

}}}} // namespace caspar::core::diagnostics::osd
================================================
FILE: src/core/diagnostics/osd_graph.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

namespace caspar { namespace core { namespace diagnostics { namespace osd {

// Register the on-screen-display graph sink with the diagnostics SPI.
void register_sink();

// Show or hide the diagnostics window.
void show_graphs(bool value);

// Tear down the OSD diagnostics context.
void shutdown();

}}}} // namespace caspar::core::diagnostics::osd

================================================
FILE: src/core/frame/draw_frame.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "draw_frame.h"

#include "frame.h"
#include "frame_transform.h"
#include "frame_visitor.h"

// NOTE(review): #include target lost in extraction (boost.variant header).
#include

namespace caspar { namespace core {

// A draw_frame's payload is empty (blank), a leaf const_frame, or a list of
// child draw_frames (an interior node of the composition tree).
// NOTE(review): template arguments of this boost::variant were lost in
// extraction.
using frame_t = boost::variant>;

struct draw_frame::impl
{
    frame_t         frame_;
    frame_transform transform_;  // applied to this node and all descendants

  public:
    impl() {}

    impl(frame_t frame)
        : frame_(std::move(frame))
    {
    }

    // Depth-first traversal: push this node's transform, visit the payload
    // (leaf) or recurse into the children, then pop.
    void accept(frame_visitor& visitor) const
    {
        struct accept_visitor : public boost::static_visitor
        {
            frame_visitor& visitor;

            accept_visitor(frame_visitor& visitor)
                : visitor(visitor)
            {
            }

            void operator()(boost::blank /*unused*/) const {}

            void operator()(const const_frame& frame) const { visitor.visit(frame); }

            void operator()(const std::vector& frames) const
            {
                for (auto& frame : frames) {
                    frame.accept(visitor);
                }
            }
        };
        visitor.push(transform_);
        boost::apply_visitor(accept_visitor{visitor}, frame_);
        visitor.pop();
    }

    bool operator==(const impl& other) const { return frame_ == other.frame_ && transform_ == other.transform_; }
};

draw_frame::draw_frame()
    : impl_(new impl())
{
}
// Deep copy of the node (payload + transform); children are shared via the
// variant's own copy semantics.
draw_frame::draw_frame(const draw_frame& other)
    : impl_(new impl())
{
    impl_->frame_     = other.impl_->frame_;
    impl_->transform_ = other.impl_->transform_;
}
draw_frame::draw_frame(draw_frame&& other)
    : impl_(std::move(other.impl_))
{
}
draw_frame::draw_frame(const_frame frame)
    : impl_(new impl(std::move(frame)))
{
}
draw_frame::draw_frame(mutable_frame&& frame)
    : impl_(new impl(std::move(frame)))
{
}
draw_frame::draw_frame(std::vector frames)
    : impl_(new impl(std::move(frames)))
{
}
draw_frame::~draw_frame() {}
// Unified copy/move assignment (copy-and-swap style via by-value parameter).
draw_frame& draw_frame::operator=(draw_frame other)
{
    impl_ = std::move(other.impl_);
    return *this;
}
void draw_frame::swap(draw_frame& other) { impl_.swap(other.impl_); }

const frame_transform& draw_frame::transform() const { return impl_->transform_; }
frame_transform& draw_frame::transform() { return impl_->transform_; }

void draw_frame::accept(frame_visitor& visitor) const { impl_->accept(visitor); }

bool draw_frame::operator==(const draw_frame& other) const { return impl_ && *impl_ == *other.impl_; }
bool draw_frame::operator!=(const draw_frame& other) const { return !(*this == other); }

// Composite frame2 beneath frame1; empty result only if both are empty.
draw_frame draw_frame::over(draw_frame frame1, draw_frame frame2)
{
    if (!frame1 && !frame2) {
        return draw_frame{};
    }
    std::vector frames;
    frames.push_back(std::move(frame1));
    frames.push_back(std::move(frame2));
    return draw_frame(std::move(frames));
}

// Key/fill pair: `key` is marked is_key and drawn before `fill`.
draw_frame draw_frame::mask(draw_frame fill, draw_frame key)
{
    if (!fill || !key) {
        return draw_frame{};
    }
    std::vector frames;
    key.transform().image_transform.is_key = true;
    frames.push_back(std::move(key));
    frames.push_back(std::move(fill));
    return draw_frame(std::move(frames));
}

// Wrap `frame` in a new parent node (adds a transform scope).
draw_frame draw_frame::push(draw_frame frame)
{
    std::vector frames;
    frames.push_back(std::move(frame));
    return draw_frame(std::move(frames));
}

draw_frame draw_frame::push(draw_frame frame, const frame_transform& transform)
{
    std::vector frames;
    frames.push_back(std::move(frame));
    auto result = draw_frame(std::move(frames));
    // NOTE(review): std::move on a const& degrades to a copy here.
    result.transform() = std::move(transform);
    return result;
}

// Strip the node's transform, keeping only its payload.
draw_frame draw_frame::pop(const draw_frame& frame)
{
    draw_frame result;
    result.impl_->frame_ = frame.impl_->frame_;
    return result;
}

// Freeze-frame helper: keep video, silence audio immediately.
draw_frame draw_frame::still(draw_frame frame)
{
    frame.transform().audio_transform.volume           = 0.0;
    frame.transform().audio_transform.immediate_volume = true;
    return frame;
}

draw_frame draw_frame::empty() { return draw_frame(std::vector{}); }

// True when a payload is present (variant index 0 is boost::blank).
draw_frame::operator bool() const { return impl_ && impl_->frame_.which() != 0; }

}} // namespace caspar::core

================================================
FILE: src/core/frame/draw_frame.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

// NOTE(review): #include targets lost in extraction.
#include
#include

namespace caspar { namespace core {

// Value type describing a node in the frame-composition tree: a payload
// (leaf frame or child list, see draw_frame.cpp) plus a frame_transform.
// Traversed via accept()/frame_visitor.
class draw_frame final
{
  public:
    // Composition helpers; see draw_frame.cpp for exact semantics.
    static draw_frame over(draw_frame frame1, draw_frame frame2);
    static draw_frame mask(draw_frame fill, draw_frame key);
    static draw_frame still(draw_frame frame);
    static draw_frame push(draw_frame frame);
    static draw_frame push(draw_frame frame, const struct frame_transform& transform);
    static draw_frame pop(const draw_frame& frame);
    static draw_frame empty();

    draw_frame();
    draw_frame(const draw_frame& other);
    draw_frame(draw_frame&& other);
    draw_frame(class const_frame frame);
    draw_frame(class mutable_frame&& frame);
    draw_frame(std::vector frames);
    ~draw_frame();

    draw_frame& operator=(draw_frame other);

    void swap(draw_frame& other);

    void accept(class frame_visitor& visitor) const;

    bool operator==(const draw_frame& other) const;
    bool operator!=(const draw_frame& other) const;

    const struct frame_transform& transform() const;
    struct frame_transform& transform();

    // True when this frame carries a payload (not blank).
    explicit operator bool() const;

  private:
    struct impl;
    std::unique_ptr impl_;
};

}} // namespace caspar::core

================================================
FILE: src/core/frame/frame.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "frame.h"

#include "geometry.h"
#include "pixel_format.h"

// NOTE(review): #include targets lost in extraction.
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Writable frame under construction: per-plane image buffers, audio samples,
// the pixel-format description, the producing stream's tag, and an optional
// commit callback invoked when converted to a const_frame.
struct mutable_frame::impl
{
    std::vector> image_data_;
    array audio_data_;
    const core::pixel_format_desc desc_;
    const void* tag_;
    frame_geometry geometry_ = frame_geometry::get_default();
    mutable_frame::commit_t commit_;

    impl(const impl&) = delete;
    impl& operator=(const impl&) = delete;

    impl(const void* tag,
         std::vector> image_data,
         array audio_data,
         const core::pixel_format_desc& desc,
         commit_t commit)
        : image_data_(std::move(image_data))
        , audio_data_(std::move(audio_data))
        , desc_(desc)
        , tag_(tag)
        , commit_(std::move(commit))
    {
    }
};

mutable_frame::mutable_frame(const void* tag,
                             std::vector> image_data,
                             array audio_data,
                             const core::pixel_format_desc& desc,
                             commit_t commit)
    : impl_(new impl(tag, std::move(image_data), std::move(audio_data), desc, std::move(commit)))
{
}
mutable_frame::mutable_frame(mutable_frame&& other) noexcept
    : impl_(std::move(other.impl_))
{
}
mutable_frame::~mutable_frame() {}
mutable_frame& mutable_frame::operator=(mutable_frame&& other)
{
    impl_ = std::move(other.impl_);
    return *this;
}
void mutable_frame::swap(mutable_frame& other) { impl_.swap(other.impl_); }
const core::pixel_format_desc& mutable_frame::pixel_format_desc() const { return impl_->desc_; }
// Per-plane image access; .at() throws on an out-of-range plane index.
const array& mutable_frame::image_data(std::size_t index) const { return impl_->image_data_.at(index); }
const array& mutable_frame::audio_data() const { return impl_->audio_data_; }
array& mutable_frame::image_data(std::size_t index) { return impl_->image_data_.at(index); }
array& mutable_frame::audio_data() { return impl_->audio_data_; }
// Dimensions are taken from plane 0 of the pixel-format description.
std::size_t mutable_frame::width() const { return impl_->desc_.planes.at(0).width; }
std::size_t mutable_frame::height() const { return impl_->desc_.planes.at(0).height; }
const void* mutable_frame::stream_tag() const { return impl_->tag_; }
const frame_geometry& mutable_frame::geometry() const { return impl_->geometry_; }
frame_geometry& mutable_frame::geometry() { return impl_->geometry_; }

// Immutable, cheaply-copyable frame (shared impl). May carry an accelerator
// texture and an opaque value produced by the mutable frame's commit callback.
struct const_frame::impl
{
    std::vector> image_data_;
    array audio_data_;
    core::pixel_format_desc desc_ = core::pixel_format_desc(pixel_format::invalid);
    const void* tag_;
    frame_geometry geometry_ = frame_geometry::get_default();
    std::any opaque_;
    std::shared_ptr texture_;

    impl(const void* tag,
         std::vector> image_data,
         array audio_data,
         const core::pixel_format_desc& desc,
         std::shared_ptr texture)
        : image_data_(std::move(image_data))
        , audio_data_(std::move(audio_data))
        , desc_(desc)
        , tag_(tag)
        , texture_(texture)
    {
        // Every described plane must have a matching buffer.
        if (desc_.planes.size() != image_data_.size()) {
            CASPAR_THROW_EXCEPTION(invalid_argument());
        }
    }

    // Freeze a mutable_frame: steal its buffers, then run its commit callback
    // (if any), keeping the callback's result alive in opaque_.
    // NOTE(review): texture_/geometry_ appear here in reverse declaration
    // order — members still initialize in declaration order (-Wreorder).
    impl(mutable_frame&& other)
        : image_data_(std::make_move_iterator(other.impl_->image_data_.begin()),
                      std::make_move_iterator(other.impl_->image_data_.end()))
        , audio_data_(std::move(other.impl_->audio_data_))
        , desc_(std::move(other.impl_->desc_))
        , tag_(other.stream_tag())
        , texture_(nullptr)
        , geometry_(std::move(other.impl_->geometry_))
    {
        // Plane/buffer mismatch is tolerated only when a commit callback will
        // finalize the buffers itself.
        if (desc_.planes.size() != image_data_.size() && !other.impl_->commit_) {
            CASPAR_THROW_EXCEPTION(invalid_argument());
        }

        if (other.impl_->commit_) {
            opaque_ = other.impl_->commit_(image_data_);
        }
    }

    const array& image_data(std::size_t index) const { return image_data_.at(index); }

    std::shared_ptr texture() { return texture_; }

    std::size_t width() const { return desc_.planes.at(0).width; }

    std::size_t height() const { return desc_.planes.at(0).height; }

    std::size_t size() const { return desc_.planes.at(0).size; }
};

// Default-constructed const_frame has no impl_ and converts to false.
const_frame::const_frame() {}
const_frame::const_frame(const void* tag,
                         std::vector> image_data,
                         array audio_data,
                         const core::pixel_format_desc& desc,
                         std::shared_ptr texture)
    : impl_(new impl(tag, std::move(image_data), std::move(audio_data), desc, texture))
{
}
const_frame::const_frame(mutable_frame&& other)
    : impl_(new impl(std::move(other)))
{
}
const_frame::const_frame(const const_frame& other)
    : impl_(other.impl_)
{
}
const_frame::~const_frame() {}
const_frame& const_frame::operator=(const const_frame& other)
{
    impl_ = other.impl_;
    return *this;
}
// Identity comparisons: frames are equal iff they share the same impl.
bool const_frame::operator==(const const_frame& other) const { return impl_ == other.impl_; }
bool const_frame::operator!=(const const_frame& other) const { return !(*this == other); }
bool const_frame::operator<(const const_frame& other) const { return impl_ < other.impl_; }
bool const_frame::operator>(const const_frame& other) const { return impl_ > other.impl_; }
const pixel_format_desc& const_frame::pixel_format_desc() const { return impl_->desc_; }
const array& const_frame::image_data(std::size_t index) const { return impl_->image_data(index); }
const array& const_frame::audio_data() const { return impl_->audio_data_; }
std::shared_ptr const_frame::texture() const { return impl_->texture(); }
std::size_t const_frame::width() const { return impl_->width(); }
std::size_t const_frame::height() const { return impl_->height(); }
std::size_t const_frame::size() const { return impl_->size(); }
const void* const_frame::stream_tag() const { return impl_->tag_; }
// Clone this frame under a different stream tag; buffers/audio/texture are
// shared, geometry and opaque payload are carried over.
const_frame const_frame::with_tag(const void* new_tag) const
{
    if (!impl_) {
        return const_frame();
    }

    std::vector> image_data_copy = impl_->image_data_;

    auto new_frame =
        const_frame(new_tag, std::move(image_data_copy), impl_->audio_data_, impl_->desc_, impl_->texture_);
    new_frame.impl_->geometry_ = impl_->geometry_;
    if (impl_->opaque_.has_value()) {
        new_frame.impl_->opaque_ = impl_->opaque_;
    }

    return new_frame;
}
const frame_geometry& const_frame::geometry() const { return impl_->geometry_; }
const std::any& const_frame::opaque() const { return impl_->opaque_; }
// Valid only when an impl exists and its pixel format is not `invalid`.
const_frame::operator bool() const
{
    return impl_ != nullptr && impl_->desc_.format != core::pixel_format::invalid;
}
}} // namespace caspar::core

================================================
FILE: src/core/frame/frame.h
================================================
#pragma once

// NOTE(review): #include targets lost in extraction.
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Abstract accelerator texture handle carried by const_frame.
class texture
{
  public:
    virtual ~texture() {}
    virtual void bind(int index) = 0;
    virtual void unbind() = 0;
};

// Writable, move-only frame under construction (see frame.cpp for semantics).
class mutable_frame final
{
    friend class const_frame;

  public:
    // Callback run when the frame is frozen into a const_frame; its result is
    // retained as the const_frame's opaque payload.
    using commit_t = std::function>)>;

    explicit mutable_frame(const void* tag,
                           std::vector> image_data,
                           array audio_data,
                           const struct pixel_format_desc& desc,
                           commit_t commit = nullptr);
    mutable_frame(const mutable_frame&) = delete;
    mutable_frame(mutable_frame&& other) noexcept;
    ~mutable_frame();

    mutable_frame& operator=(const mutable_frame&) = delete;
    mutable_frame& operator=(mutable_frame&& other);

    void swap(mutable_frame& other);

    const struct pixel_format_desc& pixel_format_desc() const;

    array& image_data(std::size_t index);
    const array& image_data(std::size_t index) const;

    array& audio_data();
    const array& audio_data() const;

    std::size_t width() const;
    std::size_t height() const;

    const void* stream_tag() const;

    class frame_geometry& geometry();
    const class frame_geometry& geometry() const;

  private:
    struct impl;
    std::unique_ptr impl_;
};

// Immutable frame with shared-ownership copy semantics; comparisons are by
// impl identity (see frame.cpp).
class const_frame final
{
  public:
    const_frame();
    explicit const_frame(const void* tag,
                         std::vector> image_data,
                         array audio_data,
                         const struct pixel_format_desc& desc,
                         std::shared_ptr texture = nullptr);
    const_frame(const const_frame& other);
    const_frame(mutable_frame&& other);
    ~const_frame();

    const_frame& operator=(const const_frame& other);

    const struct pixel_format_desc& pixel_format_desc() const;

    const array& image_data(std::size_t index) const;
    const array& audio_data() const;

    std::shared_ptr texture() const;

    std::size_t width() const;
    std::size_t height() const;
    std::size_t size() const;

    const void* stream_tag() const;
    // Shallow clone under a different stream tag.
    const_frame with_tag(const void* new_tag) const;

    const std::any& opaque() const;

    const class frame_geometry& geometry() const;

    bool operator==(const const_frame& other) const;
    bool operator!=(const const_frame& other) const;
    bool operator<(const const_frame& other) const;
    bool operator>(const const_frame& other) const;

    explicit operator bool() const;

  private:
    struct impl;
    std::shared_ptr impl_;
};

}} // namespace caspar::core

================================================
FILE: src/core/frame/frame_factory.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

// NOTE(review): #include targets lost in extraction.
#include

#ifdef WIN32
#include
#include

namespace caspar::accelerator::d3d {
class d3d_texture2d;
}
#endif

namespace caspar { namespace core {

// Abstract factory through which producers obtain frames backed by the active
// accelerator. Non-copyable interface; implementations live elsewhere.
class frame_factory
{
  public:
    frame_factory() = default;
    frame_factory& operator=(const frame_factory&) = delete;
    virtual ~frame_factory() = default;
    frame_factory(const frame_factory&) = delete;

    // Create a writable frame tagged with the producing stream.
    virtual class mutable_frame create_frame(const void* video_stream_tag, const struct pixel_format_desc& desc) = 0;

    // Overload allowing an explicit bit depth.
    virtual class mutable_frame
    create_frame(const void* video_stream_tag, const struct pixel_format_desc& desc, common::bit_depth depth) = 0;

#ifdef WIN32
    // Wrap an existing Direct3D texture as an immutable frame (Windows only).
    virtual class const_frame import_d3d_texture(const void* video_stream_tag,
                                                 const std::shared_ptr& d3d_texture,
                                                 core::pixel_format format,
                                                 common::bit_depth depth) = 0;
#endif
};

}} // namespace caspar::core

================================================
FILE: src/core/frame/frame_transform.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
* * Author: Robert Nagy, ronag89@gmail.com */ #include "frame_transform.h" #include #include #include #include namespace caspar { namespace core { double do_tween(double time, double source, double dest, double duration, const tweener& tween) { return tween(time, source, dest - source, duration); } template void do_tween_rectangle(const Rect& source, const Rect& dest, Rect& out, double time, double duration, const tweener& tweener) { out.ul[0] = do_tween(time, source.ul[0], dest.ul[0], duration, tweener); out.ul[1] = do_tween(time, source.ul[1], dest.ul[1], duration, tweener); out.lr[0] = do_tween(time, source.lr[0], dest.lr[0], duration, tweener); out.lr[1] = do_tween(time, source.lr[1], dest.lr[1], duration, tweener); } void do_tween_corners(const corners& source, const corners& dest, corners& out, double time, double duration, const tweener& tweener) { do_tween_rectangle(source, dest, out, time, duration, tweener); out.ur[0] = do_tween(time, source.ur[0], dest.ur[0], duration, tweener); out.ur[1] = do_tween(time, source.ur[1], dest.ur[1], duration, tweener); out.ll[0] = do_tween(time, source.ll[0], dest.ll[0], duration, tweener); out.ll[1] = do_tween(time, source.ll[1], dest.ll[1], duration, tweener); } image_transform image_transform::tween(double time, const image_transform& source, const image_transform& dest, double duration, const tweener& tween) { image_transform result; result.brightness = do_tween(time, source.brightness, dest.brightness, duration, tween); result.contrast = do_tween(time, source.contrast, dest.contrast, duration, tween); result.saturation = do_tween(time, source.saturation, dest.saturation, duration, tween); result.opacity = do_tween(time, source.opacity, dest.opacity, duration, tween); result.anchor[0] = do_tween(time, source.anchor[0], dest.anchor[0], duration, tween); result.anchor[1] = do_tween(time, source.anchor[1], dest.anchor[1], duration, tween); result.fill_translation[0] = do_tween(time, source.fill_translation[0], 
dest.fill_translation[0], duration, tween); result.fill_translation[1] = do_tween(time, source.fill_translation[1], dest.fill_translation[1], duration, tween); result.fill_scale[0] = do_tween(time, source.fill_scale[0], dest.fill_scale[0], duration, tween); result.fill_scale[1] = do_tween(time, source.fill_scale[1], dest.fill_scale[1], duration, tween); result.clip_translation[0] = do_tween(time, source.clip_translation[0], dest.clip_translation[0], duration, tween); result.clip_translation[1] = do_tween(time, source.clip_translation[1], dest.clip_translation[1], duration, tween); result.clip_scale[0] = do_tween(time, source.clip_scale[0], dest.clip_scale[0], duration, tween); result.clip_scale[1] = do_tween(time, source.clip_scale[1], dest.clip_scale[1], duration, tween); result.angle = do_tween(time, source.angle, dest.angle, duration, tween); result.levels.max_input = do_tween(time, source.levels.max_input, dest.levels.max_input, duration, tween); result.levels.min_input = do_tween(time, source.levels.min_input, dest.levels.min_input, duration, tween); result.levels.max_output = do_tween(time, source.levels.max_output, dest.levels.max_output, duration, tween); result.levels.min_output = do_tween(time, source.levels.min_output, dest.levels.min_output, duration, tween); result.levels.gamma = do_tween(time, source.levels.gamma, dest.levels.gamma, duration, tween); result.chroma.target_hue = do_tween(time, source.chroma.target_hue, dest.chroma.target_hue, duration, tween); result.chroma.hue_width = do_tween(time, source.chroma.hue_width, dest.chroma.hue_width, duration, tween); result.chroma.min_saturation = do_tween(time, source.chroma.min_saturation, dest.chroma.min_saturation, duration, tween); result.chroma.min_brightness = do_tween(time, source.chroma.min_brightness, dest.chroma.min_brightness, duration, tween); result.chroma.softness = do_tween(time, source.chroma.softness, dest.chroma.softness, duration, tween); result.chroma.spill_suppress = do_tween(time, 
source.chroma.spill_suppress, dest.chroma.spill_suppress, duration, tween); result.chroma.spill_suppress_saturation = do_tween(time, source.chroma.spill_suppress_saturation, dest.chroma.spill_suppress_saturation, duration, tween); result.chroma.enable = dest.chroma.enable; result.chroma.show_mask = dest.chroma.show_mask; result.is_key = source.is_key || dest.is_key; result.invert = source.invert || dest.invert; result.is_mix = source.is_mix || dest.is_mix; result.blend_mode = std::max(source.blend_mode, dest.blend_mode); result.layer_depth = dest.layer_depth; do_tween_rectangle(source.crop, dest.crop, result.crop, time, duration, tween); do_tween_corners(source.perspective, dest.perspective, result.perspective, time, duration, tween); return result; } bool eq(double lhs, double rhs) { return std::abs(lhs - rhs) < 5e-8; } bool operator==(const corners& lhs, const corners& rhs) { return boost::range::equal(lhs.ul, rhs.ul, eq) && boost::range::equal(lhs.ur, rhs.ur, eq) && boost::range::equal(lhs.lr, rhs.lr, eq) && boost::range::equal(lhs.ll, rhs.ll, eq); } bool operator==(const rectangle& lhs, const rectangle& rhs) { return boost::range::equal(lhs.ul, rhs.ul, eq) && boost::range::equal(lhs.lr, rhs.lr, eq); } bool operator==(const image_transform& lhs, const image_transform& rhs) { return eq(lhs.opacity, rhs.opacity) && eq(lhs.contrast, rhs.contrast) && eq(lhs.brightness, rhs.brightness) && eq(lhs.saturation, rhs.saturation) && boost::range::equal(lhs.anchor, rhs.anchor, eq) && boost::range::equal(lhs.fill_translation, rhs.fill_translation, eq) && boost::range::equal(lhs.fill_scale, rhs.fill_scale, eq) && boost::range::equal(lhs.clip_translation, rhs.clip_translation, eq) && boost::range::equal(lhs.clip_scale, rhs.clip_scale, eq) && eq(lhs.angle, rhs.angle) && lhs.is_key == rhs.is_key && lhs.invert == rhs.invert && lhs.is_mix == rhs.is_mix && lhs.blend_mode == rhs.blend_mode && lhs.layer_depth == rhs.layer_depth && lhs.chroma.enable == rhs.chroma.enable && 
lhs.chroma.show_mask == rhs.chroma.show_mask && eq(lhs.chroma.target_hue, rhs.chroma.target_hue) && eq(lhs.chroma.hue_width, rhs.chroma.hue_width) && eq(lhs.chroma.min_saturation, rhs.chroma.min_saturation) && eq(lhs.chroma.min_brightness, rhs.chroma.min_brightness) && eq(lhs.chroma.softness, rhs.chroma.softness) && eq(lhs.chroma.spill_suppress, rhs.chroma.spill_suppress) && eq(lhs.chroma.spill_suppress_saturation, rhs.chroma.spill_suppress_saturation) && lhs.crop == rhs.crop && lhs.perspective == rhs.perspective || lhs.enable_geometry_modifiers == rhs.enable_geometry_modifiers; } bool operator!=(const image_transform& lhs, const image_transform& rhs) { return !(lhs == rhs); } // audio_transform audio_transform& audio_transform::operator*=(const audio_transform& other) { volume *= other.volume; return *this; } audio_transform audio_transform::operator*(const audio_transform& other) const { return audio_transform(*this) *= other; } audio_transform audio_transform::tween(double time, const audio_transform& source, const audio_transform& dest, double duration, const tweener& tween) { audio_transform result; result.volume = do_tween(time, source.volume, dest.volume, duration, tween); return result; } bool operator==(const audio_transform& lhs, const audio_transform& rhs) { return eq(lhs.volume, rhs.volume); } bool operator!=(const audio_transform& lhs, const audio_transform& rhs) { return !(lhs == rhs); } // frame_transform frame_transform::frame_transform() = default; frame_transform frame_transform::tween(double time, const frame_transform& source, const frame_transform& dest, double duration, const tweener& tween) { frame_transform result; result.image_transform = image_transform::tween(time, source.image_transform, dest.image_transform, duration, tween); result.audio_transform = audio_transform::tween(time, source.audio_transform, dest.audio_transform, duration, tween); return result; } bool operator==(const frame_transform& lhs, const frame_transform& rhs) { 
return lhs.image_transform == rhs.image_transform && lhs.audio_transform == rhs.audio_transform; } bool operator!=(const frame_transform& lhs, const frame_transform& rhs) { return !(lhs == rhs); } tweened_transform::tweened_transform(const frame_transform& source, const frame_transform& dest, int duration, tweener tween) : source_(source) , dest_(dest) , duration_(duration) , tweener_(std::move(tween)) { } const frame_transform& tweened_transform::dest() const { return dest_; } frame_transform tweened_transform::fetch() { return time_ == duration_ ? dest_ : frame_transform::tween( static_cast(time_), source_, dest_, static_cast(duration_), tweener_); } void tweened_transform::tick(int num) { time_ = std::min(time_ + num, duration_); } std::optional get_chroma_mode(const std::wstring& str) { if (boost::iequals(str, L"none")) { return chroma::legacy_type::none; } if (boost::iequals(str, L"green")) { return chroma::legacy_type::green; } if (boost::iequals(str, L"blue")) { return chroma::legacy_type::blue; } else { return {}; } } }} // namespace caspar::core ================================================ FILE: src/core/frame/frame_transform.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include namespace caspar { namespace core { struct chroma { enum class legacy_type { none, green, blue }; bool enable = false; bool show_mask = false; double target_hue = 0.0; double hue_width = 0.0; double min_saturation = 0.0; double min_brightness = 0.0; double softness = 0.0; double spill_suppress = 0.0; double spill_suppress_saturation = 1.0; }; struct levels final { double min_input = 0.0; double max_input = 1.0; double gamma = 1.0; double min_output = 0.0; double max_output = 1.0; }; struct corners final { std::array ul = {0.0, 0.0}; std::array ur = {1.0, 0.0}; std::array lr = {1.0, 1.0}; std::array ll = {0.0, 1.0}; }; struct rectangle final { std::array ul = {0.0, 0.0}; std::array lr = {1.0, 1.0}; }; struct image_transform final { double opacity = 1.0; double contrast = 1.0; double brightness = 1.0; double saturation = 1.0; /** * This enables the clip/crop/perspective fields. * It is often desirable to have this disabled, to avoid cropping/clipping unnecessarily */ bool enable_geometry_modifiers = false; std::array anchor = {0.0, 0.0}; std::array fill_translation = {0.0, 0.0}; std::array fill_scale = {1.0, 1.0}; std::array clip_translation = {0.0, 0.0}; std::array clip_scale = {1.0, 1.0}; double angle = 0.0; rectangle crop; corners perspective; core::levels levels; core::chroma chroma; bool is_key = false; bool invert = false; bool is_mix = false; core::blend_mode blend_mode = blend_mode::normal; int layer_depth = 0; static image_transform tween(double time, const image_transform& source, const image_transform& dest, double duration, const tweener& tween); }; bool operator==(const image_transform& lhs, const image_transform& rhs); bool operator!=(const image_transform& lhs, const image_transform& rhs); struct audio_transform final { double volume = 1.0; bool immediate_volume = false; // When false, intra-frame samples are ramped from previous volume in audio mixer 
audio_transform& operator*=(const audio_transform& other); audio_transform operator*(const audio_transform& other) const; static audio_transform tween(double time, const audio_transform& source, const audio_transform& dest, double duration, const tweener& tween); }; bool operator==(const audio_transform& lhs, const audio_transform& rhs); bool operator!=(const audio_transform& lhs, const audio_transform& rhs); struct frame_transform final { public: frame_transform(); core::image_transform image_transform; core::audio_transform audio_transform; static frame_transform tween(double time, const frame_transform& source, const frame_transform& dest, double duration, const tweener& tween); }; bool operator==(const frame_transform& lhs, const frame_transform& rhs); bool operator!=(const frame_transform& lhs, const frame_transform& rhs); class tweened_transform { frame_transform source_; frame_transform dest_; int duration_ = 0; int time_ = 0; tweener tweener_; public: tweened_transform() = default; tweened_transform(const frame_transform& source, const frame_transform& dest, int duration, tweener tween); const frame_transform& dest() const; frame_transform fetch(); void tick(int num); }; std::optional get_chroma_mode(const std::wstring& str); }} // namespace caspar::core ================================================ FILE: src/core/frame/frame_visitor.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/

#pragma once

namespace caspar { namespace core {

// Visitor over a tree of draw frames. The stage/mixer walks the frame tree and
// calls push() with the accumulated transform before descending into a subtree,
// visit() for each leaf frame, and pop() when leaving the subtree — so push/pop
// calls are strictly balanced. Implementations (e.g. the audio and image mixers)
// typically keep a transform stack mirroring these calls. Non-copyable.
class frame_visitor
{
  public:
    frame_visitor()                     = default;
    frame_visitor(const frame_visitor&) = delete;
    virtual ~frame_visitor()            = default;
    frame_visitor& operator=(const frame_visitor&) = delete;

    // Enter a subtree whose frames are subject to `transform` (combined with any enclosing transforms).
    virtual void push(const struct frame_transform& transform) = 0;
    // Process one leaf frame under the currently pushed transforms.
    virtual void visit(const class const_frame& frame) = 0;
    // Leave the most recently pushed subtree.
    virtual void pop() = 0;
};

}} // namespace caspar::core

================================================
FILE: src/core/frame/geometry.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
* * Author: Niklas P Andersson, niklas.p.andersson@svt.se */ #include "geometry.h" #include namespace caspar { namespace core { frame_geometry::coord::coord(double vertex_x, double vertex_y, double texture_x, double texture_y) : vertex_x(vertex_x) , vertex_y(vertex_y) , texture_x(texture_x) , texture_y(texture_y) { } bool frame_geometry::coord::operator==(const frame_geometry::coord& other) const { return vertex_x == other.vertex_x && vertex_y == other.vertex_y && texture_x == other.texture_x && texture_y == other.texture_y && texture_r == other.texture_r && texture_q == other.texture_q; } struct frame_geometry::impl { impl(frame_geometry::geometry_type type, frame_geometry::scale_mode mode, std::vector data) : type_{type} , mode_{mode} { if (type == geometry_type::quad && data.size() != 4) CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("The number of coordinates needs to be 4")); data_ = std::move(data); } frame_geometry::geometry_type type_; frame_geometry::scale_mode mode_; std::vector data_; }; frame_geometry::frame_geometry(geometry_type type, scale_mode mode, std::vector data) : impl_{new impl{type, mode, std::move(data)}} { } frame_geometry::geometry_type frame_geometry::type() const { return impl_->type_; } frame_geometry::scale_mode frame_geometry::mode() const { return impl_->mode_; } const std::vector& frame_geometry::data() const { return impl_->data_; } const frame_geometry frame_geometry::get_default(scale_mode mode) { std::vector data{ // vertex texture {0.0, 0.0, 0.0, 0.0}, // upper left {1.0, 0.0, 1.0, 0.0}, // upper right {1.0, 1.0, 1.0, 1.0}, // lower right {0.0, 1.0, 0.0, 1.0} // lower left }; return frame_geometry{frame_geometry::geometry_type::quad, mode, std::move(data)}; } const frame_geometry frame_geometry::get_default_vflip(scale_mode mode) { std::vector data{ // vertex texture {0.0, 0.0, 0.0, 1.0}, // upper left {1.0, 0.0, 1.0, 1.0}, // upper right {1.0, 1.0, 1.0, 0.0}, // lower right {0.0, 1.0, 0.0, 0.0} // lower left }; return 
frame_geometry{frame_geometry::geometry_type::quad, mode, std::move(data)}; } frame_geometry::scale_mode scale_mode_from_string(const std::wstring& str) { auto str2 = boost::to_lower_copy(str); if (str2 == L"fit") { return frame_geometry::scale_mode::fit; } else if (str2 == L"fill") { return frame_geometry::scale_mode::fill; } else if (str2 == L"original") { return frame_geometry::scale_mode::original; } else if (str2 == L"hfill") { return frame_geometry::scale_mode::hfill; } else if (str2 == L"vfill") { return frame_geometry::scale_mode::vfill; } else { return frame_geometry::scale_mode::stretch; } } std::wstring scale_mode_to_string(frame_geometry::scale_mode mode) { switch (mode) { case frame_geometry::scale_mode::fit: return L"FIT"; case frame_geometry::scale_mode::fill: return L"FILL"; case frame_geometry::scale_mode::original: return L"ORIGINAL"; case frame_geometry::scale_mode::hfill: return L"HFILL"; case frame_geometry::scale_mode::vfill: return L"VFILL"; default: return L"STRETCH"; } } }} // namespace caspar::core ================================================ FILE: src/core/frame/geometry.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Niklas P Andersson, niklas.p.andersson@svt.se */ #pragma once #include #include namespace caspar { namespace core { class frame_geometry { public: enum class geometry_type { quad }; enum class scale_mode { stretch, // default fit, fill, original, hfill, vfill, }; struct coord { double vertex_x = 0.0; double vertex_y = 0.0; double texture_x = 0.0; double texture_y = 0.0; double texture_r = 0.0; double texture_q = 1.0; coord() = default; coord(double vertex_x, double vertex_y, double texture_x, double texture_y); bool operator==(const coord& other) const; }; frame_geometry(geometry_type type, scale_mode, std::vector data); geometry_type type() const; scale_mode mode() const; const std::vector& data() const; static const frame_geometry get_default(scale_mode = scale_mode::stretch); static const frame_geometry get_default_vflip(scale_mode = scale_mode::stretch); private: struct impl; spl::shared_ptr impl_; }; frame_geometry::scale_mode scale_mode_from_string(const std::wstring&); std::wstring scale_mode_to_string(frame_geometry::scale_mode); }} // namespace caspar::core ================================================ FILE: src/core/frame/pixel_format.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include namespace caspar { namespace core { enum class pixel_format { gray = 0, bgra, rgba, argb, abgr, ycbcr, ycbcra, luma, bgr, rgb, uyvy, gbrp, // planar gbrap, // planar count, invalid, }; enum class color_space { bt601, bt709, bt2020, }; struct pixel_format_desc final { struct plane { int linesize = 0; int width = 0; int height = 0; int size = 0; int stride = 0; common::bit_depth depth = common::bit_depth::bit8; plane() = default; plane(int width, int height, int stride, common::bit_depth depth = common::bit_depth::bit8) : linesize(width * stride * (depth == common::bit_depth::bit8 ? 1 : 2)) , width(width) , height(height) , size(width * height * stride * (depth == common::bit_depth::bit8 ? 1 : 2)) , stride(stride) , depth(depth) { } }; pixel_format_desc() = default; explicit pixel_format_desc(pixel_format format, core::color_space color_space = core::color_space::bt709) : format(format) , color_space(color_space) { } pixel_format format = pixel_format::invalid; bool is_straight_alpha = false; std::vector planes; core::color_space color_space = core::color_space::bt709; }; }} // namespace caspar::core ================================================ FILE: src/core/fwd.h ================================================ /* * Copyright 2013 Sveriges Television AB http://casparcg.com/ * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/

#pragma once

// Forward declarations for the core module — include this instead of the full
// headers when only pointers/references to these types are needed.

namespace caspar::accelerator {
class accelerator;
}
namespace caspar::accelerator::ogl {
class device;
}

namespace caspar::core {
class stage;
class mixer;
class output;
class image_mixer;
struct video_format_desc;
class frame_factory;
class frame_producer;
class frame_consumer;
class draw_frame;
class mutable_frame;
class const_frame;
class video_channel;
struct pixel_format_desc;
struct frame_transform;
struct frame_producer_dependencies;
struct module_dependencies;
class cg_producer_registry;
class frame_producer_registry;
class frame_consumer_registry;
class video_format_repository;
struct channel_info;
} // namespace caspar::core

================================================
FILE: src/core/mixer/audio/audio_mixer.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/

#include "../../StdAfx.h"

#include "audio_mixer.h"

// NOTE(review): the targets of the following includes were lost in extraction —
// restore from upstream (expected: boost/container, boost/range, diagnostics/graph,
// frame/const_frame, frame/frame_transform, video_format, plus standard headers).
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

using namespace boost::container;

// One visited frame's audio contribution: the source's stream tag, the accumulated
// transform in effect when it was visited, and its sample buffer.
// NOTE(review): the `array` template argument was lost in extraction — confirm
// element type against upstream.
struct audio_item
{
    const void*     tag = nullptr;
    audio_transform transform;
    array           samples;
};

// NOTE(review): template argument lost in extraction — confirm against upstream.
using audio_buffer_ps = std::vector;

struct audio_mixer::impl
{
    monitor::state             state_;
    std::stack                 transform_stack_;   // accumulated transforms during tree traversal
    std::vector                items_;             // frames collected by visit() for the next mix()
    std::map>                  audio_streams_;     // For audio cadence handling
    std::map                   previous_volumes_;  // For audio transitions
    video_format_desc          format_desc_;
    std::atomic                master_volume_{1.0f};
    spl::shared_ptr            graph_;
    size_t                     max_expected_cadence_samples_{0};
    size_t                     max_buffer_size_{0};      // cap on per-tag carry-over samples
    bool                       has_variable_cadence_{false};
    std::vector                silence_buffer_;          // one sample of silence per channel
    int                        channels_{0};

    impl(spl::shared_ptr graph)
        : graph_(std::move(graph))
    {
        graph_->set_color("volume", diagnostics::color(1.0f, 0.8f, 0.1f));
        graph_->set_color("audio-clipping", diagnostics::color(0.3f, 0.6f, 0.3f));
        graph_->set_color("audio-buffer-overflow", diagnostics::color(0.6f, 0.3f, 0.3f));
        transform_stack_.push(core::audio_transform());
    }

    impl(const impl&) = delete;
    impl& operator=(const impl&) = delete;

    // frame_visitor: combine the subtree's transform with the enclosing one.
    void push(const frame_transform& transform)
    {
        transform_stack_.push(transform_stack_.top() * transform.audio_transform);
    }

    // frame_visitor: collect audible frames; near-silent (< 0.002) or audio-less frames are skipped.
    void visit(const const_frame& frame)
    {
        if (transform_stack_.top().volume < 0.002 || !frame.audio_data())
            return;

        items_.push_back(std::move(audio_item{frame.stream_tag(), transform_stack_.top(), frame.audio_data()}));
    }

    void pop() { transform_stack_.pop(); }

    void set_master_volume(float volume) { master_volume_ = volume; }

    float get_master_volume() { return master_volume_; }

    // Mix all collected items into one interleaved buffer of nb_samples per channel.
    // Handles format changes, per-tag cadence carry-over, per-sample volume ramping,
    // master volume, clipping detection and level metering.
    array mix(const video_format_desc& format_desc, int nb_samples)
    {
        // Reset all per-format state when the channel's video format changes.
        if (format_desc_ != format_desc) {
            audio_streams_.clear();
            previous_volumes_.clear();
            format_desc_ = format_desc;
            channels_    = format_desc.audio_channels;

            // Calculate these values only when format changes
            max_expected_cadence_samples_ = 0;
            if (!format_desc.audio_cadence.empty()) {
                max_expected_cadence_samples_ =
                    *std::max_element(format_desc.audio_cadence.begin(), format_desc.audio_cadence.end());
            }

            // Pre-calculate max buffer size based on max cadence (2 frames worth)
            max_buffer_size_ = channels_;
            if (max_expected_cadence_samples_ > 0) {
                max_buffer_size_ *= 2 * max_expected_cadence_samples_;
            } else {
                max_buffer_size_ *= 4000; // Fallback: 2 frames × ~2000 samples
            }

            // A cadence with more than one entry means frames carry varying sample counts
            // (e.g. NTSC), so leftover samples must be buffered between calls.
            has_variable_cadence_ = format_desc.audio_cadence.size() > 1;
            if (has_variable_cadence_) {
                silence_buffer_.resize(channels_, 0);
            } else {
                silence_buffer_.clear();
            }
        }

        auto items  = std::move(items_);
        auto result = std::vector(size_t(nb_samples) * channels_, 0);
        auto mixed  = std::vector(size_t(nb_samples) * channels_, 0.0f);

        std::map> next_audio_streams;
        std::map next_volumes;

        for (auto& item : items) {
            auto ptr       = item.samples.data();
            auto item_size = item.samples.size();
            auto dst_size  = result.size();

            // Locate any samples carried over from the previous mix for this tag.
            size_t last_size = 0;
            const int32_t* last_ptr = nullptr;
            if (has_variable_cadence_) {
                auto audio_stream = audio_streams_.find(item.tag);
                if (audio_stream != audio_streams_.end()) {
                    last_size = audio_stream->second.size();
                    last_ptr  = audio_stream->second.data();
                } else if (nullptr != item.tag) {
                    // Insert a sample of silence at startup
                    // Covers the startup case where there may be a cadence mismatch
                    // The sample of silence will be output before any valid audio data from the source
                    last_size = channels_;
                    last_ptr  = silence_buffer_.data();
                }
            }

            // Get previous volume for this tag, defaulting to current volume
            double previous_volume = item.transform.volume;
            auto prev_vol_it = previous_volumes_.find(item.tag);
            if (prev_vol_it != previous_volumes_.end()) {
                previous_volume = prev_vol_it->second;
            }

            // Store current volume for next frame
            next_volumes[item.tag] = item.transform.volume;

            // Sample collection and volume application loop
            for (auto n = 0; n < dst_size; ++n) {
                // Drain carried-over samples first, then the item's own samples.
                double sample_value = 0.0;
                if (last_ptr && n < last_size) {
                    sample_value = static_cast(last_ptr[n]);
                } else if (n < last_size + item_size) {
                    sample_value = static_cast(ptr[n - last_size]);
                } else {
                    // If we run out of samples, hold the last sample value per channel
                    int channel_pos = n % channels_;
                    int offset      = int(item_size) - (channels_ - channel_pos);
                    if (offset < 0) {
                        offset = channel_pos;
                    }
                    sample_value = static_cast(ptr[offset]);
                }

                double applied_volume = item.transform.volume;

                // Apply sample-level volume ramping if not immediate volume and there's a volume change
                if (!item.transform.immediate_volume && std::abs(item.transform.volume - previous_volume) > 0.001) {
                    size_t sample_index  = n / channels_;
                    size_t total_samples = dst_size / channels_;

                    // Calculate linear interpolation position (0.0 to 1.0)
                    double position = total_samples > 1
                                          ? static_cast(sample_index) / static_cast(total_samples - 1)
                                          : 1.0;
                    position = std::min(1.0, std::max(0.0, position)); // Clamp between 0 and 1

                    // Linear interpolation between previous and current volume
                    applied_volume = previous_volume + (item.transform.volume - previous_volume) * position;
                }

                mixed[n] += sample_value * applied_volume;
            }

            // Save any unconsumed samples for the next mix (variable-cadence formats only).
            if (has_variable_cadence_ && item.tag) {
                if (item_size + last_size > dst_size) {
                    // Calculate remaining samples after mixing the current frame
                    auto remaining_samples = item_size + last_size - dst_size;

                    // Apply the most restrictive limit and log if needed
                    if (remaining_samples > max_buffer_size_ || remaining_samples > item_size) {
                        graph_->set_tag(diagnostics::tag_severity::WARNING, "audio-buffer-overflow");
                        // Apply the most restrictive limit
                        remaining_samples = (max_buffer_size_ < item_size) ? max_buffer_size_ : item_size;
                    }

                    std::vector buf(remaining_samples);

                    // Calculate the correct offset in the source buffer
                    size_t offset = (dst_size > last_size) ? (dst_size - last_size) : 0;
                    if (offset < item_size) {
                        std::memcpy(buf.data(), ptr + offset, remaining_samples * sizeof(int32_t));
                        next_audio_streams[item.tag] = std::move(buf);
                    } else {
                        next_audio_streams[item.tag] = std::vector();
                    }
                } else {
                    next_audio_streams[item.tag] = std::vector();
                }
            }
        }

        // Tags not seen this frame drop out of both maps automatically.
        previous_volumes_ = std::move(next_volumes);
        audio_streams_    = std::move(next_audio_streams);

        // Apply master volume and clamp each sample into the int32 range.
        auto master_volume = master_volume_.load();
        for (auto n = 0; n < mixed.size(); ++n) {
            auto sample = mixed[n] * master_volume;
            if (sample > std::numeric_limits::max()) {
                result[n] = std::numeric_limits::max();
            } else if (sample < std::numeric_limits::min()) {
                result[n] = std::numeric_limits::min();
            } else {
                result[n] = static_cast(sample);
            }
        }

        // Per-channel peak metering; a full-scale peak flags clipping on the graph.
        auto max = std::vector(channels_, std::numeric_limits::min());
        for (size_t n = 0; n < result.size(); n += channels_) {
            for (int ch = 0; ch < channels_; ++ch) {
                max[ch] = std::max(max[ch], std::abs(result[n + ch]));
            }
        }

        if (boost::range::count_if(max, [](auto val) { return val >= std::numeric_limits::max(); }) > 0) {
            graph_->set_tag(diagnostics::tag_severity::WARNING, "audio-clipping");
        }

        state_["volume"] = std::move(max);
        graph_->set_value("volume", static_cast(*boost::max_element(max)) / std::numeric_limits::max());

        return std::move(result);
    }
};

audio_mixer::audio_mixer(spl::shared_ptr graph)
    : impl_(new impl(std::move(graph)))
{
}

// Thin forwarding wrappers to the pimpl.
void  audio_mixer::push(const frame_transform& transform) { impl_->push(transform); }
void  audio_mixer::visit(const const_frame& frame) { impl_->visit(frame); }
void  audio_mixer::pop() { impl_->pop(); }
void  audio_mixer::set_master_volume(float volume) { impl_->set_master_volume(volume); }
float audio_mixer::get_master_volume() { return impl_->get_master_volume(); }

array audio_mixer::operator()(const video_format_desc& format_desc, int nb_samples)
{
    return impl_->mix(format_desc, nb_samples);
}

core::monitor::state audio_mixer::state() const { return impl_->state_; }

}} // namespace caspar::core
================================================ FILE: src/core/mixer/audio/audio_mixer.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

// NOTE(review): the six include targets below were lost in text extraction
// (the "<...>" payloads are missing) -- restore from the upstream header.
#include
#include
#include
#include
#include
#include

namespace caspar::diagnostics {
class graph;
}

namespace caspar { namespace core {

// Mixes the audio of all frames visited during one frame-visitor pass into a
// single interleaved sample buffer for the tick. The implementation (see
// audio_mixer.cpp earlier in this dump) applies per-item and master volume
// and publishes per-channel peak levels under the "volume" key of its
// monitor state.
class audio_mixer final : public frame_visitor
{
    // non-copyable
    audio_mixer(const audio_mixer&);
    audio_mixer& operator=(const audio_mixer&);

  public:
    audio_mixer(spl::shared_ptr<::caspar::diagnostics::graph> graph);

    // Produces the mixed samples for one video frame of `format_desc`,
    // `nb_samples` samples per channel.
    // NOTE(review): the array element type was lost in extraction.
    array operator()(const struct video_format_desc& format_desc, int nb_samples);

    // Master gain applied after all per-item volumes (atomic in the impl).
    void set_master_volume(float volume);
    float get_master_volume();

    core::monitor::state state() const;

    // frame_visitor interface -- called while traversing the draw_frame tree.
    void push(const struct frame_transform& transform) override;
    void visit(const class const_frame& frame) override;
    void pop() override;

  private:
    struct impl;
    spl::shared_ptr impl_;
};

}} // namespace caspar::core

================================================ FILE: src/core/mixer/audio/audio_util.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include namespace caspar { namespace core { template std::vector audio_32_to_24(const T& audio_data) { auto size = std::distance(std::begin(audio_data), std::end(audio_data)); auto input8 = reinterpret_cast(&(*std::begin(audio_data))); auto output8 = std::vector(); output8.reserve(size * 3); for (int n = 0; n < size; ++n) { output8.push_back(input8[n * 4 + 1]); output8.push_back(input8[n * 4 + 2]); output8.push_back(input8[n * 4 + 3]); } return output8; } template std::vector audio_32_to_16(const T& audio_data) { auto size = std::distance(std::begin(audio_data), std::end(audio_data)); auto input32 = &(*std::begin(audio_data)); auto output16 = std::vector(); output16.reserve(size); for (int n = 0; n < size; ++n) output16.push_back((input32[n] >> 16) & 0xFFFF); return output16; } }} // namespace caspar::core ================================================ FILE: src/core/mixer/image/blend_modes.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#include "../../StdAfx.h"

#include "blend_modes.h"

// NOTE(review): this include target was lost in extraction; boost::iequals
// below needs the Boost string-algorithms header -- restore from upstream.
#include

namespace caspar { namespace core {

// Case-insensitive parse of a blend-mode name. Unknown names fall back to
// blend_mode::normal. NOTE(review): the enum's `mix` value has no string
// form here -- presumably internal-only; confirm before adding one.
blend_mode get_blend_mode(const std::wstring& str)
{
    if (boost::iequals(str, L"normal")) return blend_mode::normal;
    if (boost::iequals(str, L"lighten")) return blend_mode::lighten;
    if (boost::iequals(str, L"darken")) return blend_mode::darken;
    if (boost::iequals(str, L"multiply")) return blend_mode::multiply;
    if (boost::iequals(str, L"average")) return blend_mode::average;
    if (boost::iequals(str, L"add")) return blend_mode::add;
    if (boost::iequals(str, L"subtract")) return blend_mode::subtract;
    if (boost::iequals(str, L"difference")) return blend_mode::difference;
    if (boost::iequals(str, L"negation")) return blend_mode::negation;
    if (boost::iequals(str, L"exclusion")) return blend_mode::exclusion;
    if (boost::iequals(str, L"screen")) return blend_mode::screen;
    if (boost::iequals(str, L"overlay")) return blend_mode::overlay;
    if (boost::iequals(str, L"soft_light")) return blend_mode::soft_light;
    if (boost::iequals(str, L"hard_light")) return blend_mode::hard_light;
    if (boost::iequals(str, L"color_dodge")) return blend_mode::color_dodge;
    if (boost::iequals(str, L"color_burn")) return blend_mode::color_burn;
    if (boost::iequals(str, L"linear_dodge")) return blend_mode::linear_dodge;
    if (boost::iequals(str, L"linear_burn")) return blend_mode::linear_burn;
    if (boost::iequals(str, L"linear_light")) return blend_mode::linear_light;
    if (boost::iequals(str, L"vivid_light")) return blend_mode::vivid_light;
    if (boost::iequals(str, L"pin_light")) return blend_mode::pin_light;
    if (boost::iequals(str, L"hard_mix")) return blend_mode::hard_mix;
    if (boost::iequals(str, L"reflect")) return blend_mode::reflect;
    if (boost::iequals(str, L"glow")) return blend_mode::glow;
    if (boost::iequals(str, L"phoenix")) return blend_mode::phoenix;
    if (boost::iequals(str, L"contrast")) return blend_mode::contrast;
    if (boost::iequals(str, L"saturation")) return blend_mode::saturation;
    if (boost::iequals(str, L"color")) return blend_mode::color;
    if (boost::iequals(str, L"luminosity")) return blend_mode::luminosity;

    return blend_mode::normal;
}

// Inverse of the parse above; unmapped values (mix, blend_mode_count) come
// back as "normal" via the default case.
std::wstring get_blend_mode(blend_mode mode)
{
    switch (mode) {
        case blend_mode::normal: return L"normal";
        case blend_mode::lighten: return L"lighten";
        case blend_mode::darken: return L"darken";
        case blend_mode::multiply: return L"multiply";
        case blend_mode::average: return L"average";
        case blend_mode::add: return L"add";
        case blend_mode::subtract: return L"subtract";
        case blend_mode::difference: return L"difference";
        case blend_mode::negation: return L"negation";
        case blend_mode::exclusion: return L"exclusion";
        case blend_mode::screen: return L"screen";
        case blend_mode::overlay: return L"overlay";
        case blend_mode::soft_light: return L"soft_light";
        case blend_mode::hard_light: return L"hard_light";
        case blend_mode::color_dodge: return L"color_dodge";
        case blend_mode::color_burn: return L"color_burn";
        case blend_mode::linear_dodge: return L"linear_dodge";
        case blend_mode::linear_burn: return L"linear_burn";
        case blend_mode::linear_light: return L"linear_light";
        case blend_mode::vivid_light: return L"vivid_light";
        case blend_mode::pin_light: return L"pin_light";
        case blend_mode::hard_mix: return L"hard_mix";
        case blend_mode::reflect: return L"reflect";
        case blend_mode::glow: return L"glow";
        case blend_mode::phoenix: return L"phoenix";
        case blend_mode::contrast: return L"contrast";
        case blend_mode::saturation: return L"saturation";
        case blend_mode::color: return L"color";
        case blend_mode::luminosity: return L"luminosity";
        default: return L"normal";
    }
}

}} // namespace caspar::core

================================================ FILE: src/core/mixer/image/blend_modes.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

// NOTE(review): include target lost in extraction (std::wstring needs <string>).
#include

namespace caspar { namespace core {

// Photoshop-style layer blend modes implemented by the image kernel.
// `mix` has no string mapping in blend_modes.cpp; `blend_mode_count` is a
// sentinel, not a real mode.
enum class blend_mode
{
    normal = 0,
    lighten,
    darken,
    multiply,
    average,
    add,
    subtract,
    difference,
    negation,
    exclusion,
    screen,
    overlay,
    soft_light,
    hard_light,
    color_dodge,
    color_burn,
    linear_dodge,
    linear_burn,
    linear_light,
    vivid_light,
    pin_light,
    hard_mix,
    reflect,
    glow,
    phoenix,
    contrast,
    saturation,
    color,
    luminosity,
    mix,
    blend_mode_count
};

blend_mode get_blend_mode(const std::wstring& str);
std::wstring get_blend_mode(blend_mode mode);

}} // namespace caspar::core

================================================ FILE: src/core/mixer/image/image_mixer.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
* CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

// NOTE(review): six include targets lost in extraction -- restore upstream.
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Abstract video compositor interface. Frames are fed in through the
// frame_visitor pass (push/visit/pop) and composited by render(); the class
// doubles as the frame_factory producers allocate their frames from (so the
// accelerator backend controls frame memory).
class image_mixer
    : public frame_visitor
    , public frame_factory
{
    // non-copyable
    image_mixer(const image_mixer&);
    image_mixer& operator=(const image_mixer&);

  public:
    image_mixer() {}
    virtual ~image_mixer() {}

    // frame_visitor pass, in traversal order of the draw_frame tree.
    void push(const struct frame_transform& frame) override = 0;
    void visit(const class const_frame& frame) override = 0;
    void pop() override = 0;

    virtual void update_aspect_ratio(double aspect_ratio) = 0;

    // Asynchronously composites everything visited since the last render.
    // NOTE(review): the future's payload type was lost in extraction; per
    // mixer.cpp it is a tuple of (image buffer, opaque handle) -- confirm.
    virtual std::future, std::shared_ptr>> render(const struct video_format_desc& format_desc) = 0;

    // frame_factory interface.
    class mutable_frame create_frame(const void* tag, const struct pixel_format_desc& desc) override = 0;
    class mutable_frame
    create_frame(const void* video_stream_tag, const struct pixel_format_desc& desc, common::bit_depth depth) override = 0;

#ifdef WIN32
    // Windows-only: wraps an existing Direct3D texture as a const_frame.
    // NOTE(review): the shared_ptr argument type was lost in extraction.
    class const_frame import_d3d_texture(const void* tag,
                                         const std::shared_ptr& d3d_texture,
                                         core::pixel_format format,
                                         common::bit_depth depth) override = 0;
#endif

    virtual common::bit_depth depth() const = 0;
};

}} // namespace caspar::core

================================================ FILE: src/core/mixer/mixer.cpp ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
* CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#include "../StdAfx.h"

#include "mixer.h"

#include "../frame/frame.h"

#include "audio/audio_mixer.h"
#include "image/image_mixer.h"

// NOTE(review): eight include targets lost in extraction -- restore upstream.
#include
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

struct mixer::impl
{
    monitor::state state_;          // published via mixer::state(); "audio" subtree refreshed per tick
    int channel_index_;
    spl::shared_ptr graph_;         // NOTE(review): template arguments of these members were lost in extraction
    audio_mixer audio_mixer_{graph_};
    spl::shared_ptr image_mixer_;

    // Deferred render results; up to field_count entries are kept in flight
    // (see the check in operator()) so GPU work can overlap ticks.
    std::queue> buffer_;

    impl(const impl&) = delete;
    impl& operator=(const impl&) = delete;

    impl(int channel_index, spl::shared_ptr graph, spl::shared_ptr image_mixer)
        : channel_index_(channel_index)
        , graph_(std::move(graph))
        , image_mixer_(std::move(image_mixer))
    {
    }

    // Mixes one tick's worth of frames: audio through audio_mixer_, video
    // through image_mixer_, then combines both into a single const_frame.
    const_frame operator()(std::vector frames, const video_format_desc& format_desc, int nb_samples)
    {
        image_mixer_->update_aspect_ratio(static_cast(format_desc.square_width) /
                                          static_cast(format_desc.square_height));

        // Each frame is visited twice: once by the audio mixer, once by the
        // image mixer (with layer_depth forced to 1 in between).
        for (auto& frame : frames) {
            frame.accept(audio_mixer_);
            frame.transform().image_transform.layer_depth = 1;
            frame.accept(*image_mixer_);
        }

        auto result = image_mixer_->render(format_desc);
        auto audio  = audio_mixer_(format_desc, nb_samples);

        state_["audio"] = audio_mixer_.state();

        auto depth = image_mixer_->depth();

        // Deferred task: the GPU result is only waited on when popped below.
        buffer_.push(std::async(
            std::launch::deferred,
            [result = std::move(result), audio = std::move(audio), graph = graph_, depth, format_desc, tag = this]() mutable {
                auto desc = pixel_format_desc(pixel_format::bgra);
                desc.planes.push_back(pixel_format_desc::plane(format_desc.width, format_desc.height, 4, depth));

                std::vector> image_data;
                auto tuple = std::move(result.get());
                image_data.emplace_back(std::move(std::get<0>(tuple)));

                return const_frame(tag, std::move(image_data), std::move(audio), desc, std::move(std::get<1>(tuple)));
            }));

        // Warm-up: emit empty frames until the pipeline is primed.
        if (buffer_.size() <= format_desc.field_count) {
            return const_frame{};
        }

        auto frame = std::move(buffer_.front().get());
        buffer_.pop();
        return frame;
    }

    void set_master_volume(float volume) { audio_mixer_.set_master_volume(volume); }

    float get_master_volume() { return audio_mixer_.get_master_volume(); }
};

mixer::mixer(int channel_index, spl::shared_ptr graph, spl::shared_ptr image_mixer)
    : impl_(new impl(channel_index, std::move(graph), std::move(image_mixer)))
{
}

void mixer::set_master_volume(float volume) { impl_->set_master_volume(volume); }

float mixer::get_master_volume() { return impl_->get_master_volume(); }

const_frame mixer::operator()(std::vector frames, const video_format_desc& format_desc, int nb_samples)
{
    return (*impl_)(std::move(frames), format_desc, nb_samples);
}

mutable_frame mixer::create_frame(const void* tag, const pixel_format_desc& desc)
{
    return impl_->image_mixer_->create_frame(tag, desc);
}

core::monitor::state mixer::state() const { return impl_->state_; }

common::bit_depth mixer::depth() const { return impl_->image_mixer_->depth(); }

}} // namespace caspar::core

================================================ FILE: src/core/mixer/mixer.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

// NOTE(review): four include targets lost in extraction -- restore upstream.
#include
#include
#include
#include

namespace caspar::diagnostics {
class graph;
}

namespace caspar { namespace core {

// Facade combining a channel's image_mixer and audio_mixer behind one
// call-per-tick interface (pimpl; see mixer.cpp).
class mixer final
{
    // non-copyable
    mixer(const mixer&);
    mixer& operator=(const mixer&);

  public:
    // NOTE(review): shared_ptr template arguments lost in extraction.
    explicit mixer(int channel_index, spl::shared_ptr graph, spl::shared_ptr image_mixer);

    // Mixes one tick of frames into the channel's output frame.
    const_frame operator()(std::vector frames, const video_format_desc& format_desc, int nb_samples);

    void set_master_volume(float volume);
    float get_master_volume();

    mutable_frame create_frame(const void* tag, const pixel_format_desc& desc);

    core::monitor::state state() const;

    common::bit_depth depth() const;

  private:
    struct impl;
    spl::shared_ptr impl_;
};

}} // namespace caspar::core

================================================ FILE: src/core/module_dependencies.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
 * Author: Helge Norberg, helge.norberg@svt.se
 */
#pragma once

// NOTE(review): one include target lost in extraction.
#include

#include "consumer/frame_consumer_registry.h"
#include "producer/cg_proxy.h"
#include "producer/frame_producer_registry.h"

namespace caspar::protocol::amcp {
class amcp_command_repository_wrapper;
}

namespace caspar::core {

// Bundle of the registries handed to each module at initialization time.
// All members are const and set once by the constructor.
// NOTE(review): the shared_ptr template arguments were lost in extraction;
// member names indicate cg/producer/consumer registries plus the AMCP
// command repository wrapper forward-declared above.
struct module_dependencies
{
    const spl::shared_ptr cg_registry;
    const spl::shared_ptr producer_registry;
    const spl::shared_ptr consumer_registry;
    const std::shared_ptr command_repository;

    module_dependencies(const spl::shared_ptr& cg_registry,
                        const spl::shared_ptr& producer_registry,
                        const spl::shared_ptr& consumer_registry,
                        const std::shared_ptr& command_repository)
        : cg_registry(cg_registry)
        , producer_registry(producer_registry)
        , consumer_registry(consumer_registry)
        , command_repository(command_repository)
    {
    }
};

} // namespace caspar::core

================================================ FILE: src/core/monitor/monitor.h ================================================
/*
 * Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

// NOTE(review): seven include targets lost in extraction (Boost variant and
// container headers among them, per the using-declarations below).
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core { namespace monitor {

// NOTE(review): the template-argument lists of all three aliases were lost
// in extraction; data_t is a boost::variant of the scalar types published to
// the monitor/OSC layer -- restore from upstream.
using data_t = boost::variant;
using vector_t = boost::container::small_vector;
using data_map_t = boost::container::flat_map;

// Flat hierarchical key/value store: keys are "a/b/c" paths built up by
// chained operator[] calls, values are data_t scalars or vector_t lists.
class state
{
    data_map_t data_;

    // Proxy returned by operator[]; assignment through it stores under the
    // accumulated path key, and nesting another operator[] extends the path.
    class state_proxy
    {
        std::string key_;
        data_map_t& data_;

      public:
        state_proxy(const std::string& key, data_map_t& data)
            : key_(key)
            , data_(data)
        {
        }

        // Store a single scalar (wrapped in a one-element vector_t).
        state_proxy& operator=(data_t data)
        {
            data_[key_] = {std::move(data)};
            return *this;
        }

        state_proxy& operator=(vector_t data)
        {
            data_[key_] = std::move(data);
            return *this;
        }

        // Extend the path: state["a"]["b"] stores under "a/b".
        // NOTE(review): template parameter lists below were lost in extraction.
        template state_proxy operator[](const T& key) { return state_proxy(key_ + "/" + boost::lexical_cast(key), data_); }

        template state_proxy& operator=(const std::vector& data)
        {
            data_[key_] = vector_t(data.begin(), data.end());
            return *this;
        }

        state_proxy& operator=(std::initializer_list data)
        {
            data_[key_] = vector_t(std::move(data));
            return *this;
        }

        // Grafts every entry of another state under this key prefix.
        state_proxy& operator=(const state& other)
        {
            for (auto& p : other) {
                data_[key_ + "/" + p.first] = std::move(p.second);
            }
            return *this;
        }
    };

  public:
    state() = default;

    state(const state& other)
        : data_(other.data_)
    {
    }

    state(data_map_t data)
        : data_(std::move(data))
    {
    }

    state& operator=(const state& other)
    {
        data_ = other.data_;
        return *this;
    }

    template state_proxy operator[](const T& key) { return state_proxy(boost::lexical_cast(key), data_); }

    data_map_t::const_iterator begin() const { return data_.begin(); }
    data_map_t::const_iterator end() const { return data_.end(); }
};

}}} // namespace caspar::core::monitor

================================================ FILE: src/core/producer/cg_proxy.cpp ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
* CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Helge Norberg, helge.norberg@svt.se
 */
#include "../StdAfx.h"

#include "cg_proxy.h"

#include "../diagnostics/call_context.h"
#include "../video_channel.h"
#include "frame_producer.h"
#include "stage.h"

// NOTE(review): seven include targets lost in extraction -- restore upstream.
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Null-object cg_proxy: every control call is a no-op and invoke() returns
// the empty string. A single shared instance is handed out.
const spl::shared_ptr& cg_proxy::empty()
{
    class empty_proxy : public cg_proxy
    {
        void add(int, const std::wstring&, bool, const std::wstring&, const std::wstring&) override {}
        void remove(int) override {}
        void play(int) override {}
        void stop(int) override {}
        void next(int) override {}
        void update(int, const std::wstring&) override {}
        std::wstring invoke(int, const std::wstring&) override { return L""; }
    };
    static spl::shared_ptr instance = spl::make_shared();
    return instance;
}

struct cg_producer_registry::impl
{
  private:
    // One registered CG backend (flash, html, ...) keyed by file extension.
    struct record
    {
        std::wstring name;
        cg_proxy_factory proxy_factory;
        cg_producer_factory producer_factory;
        bool reusable_producer_instance; // if true, an existing producer of the same name is reused
    };

    mutable std::mutex mutex_; // guards records_by_extension_
    // NOTE(review): the map's key/value types were lost in extraction
    // (extension string -> record, per the usage below).
    std::map records_by_extension_;

  public:
    // Registers one backend under each of its file extensions. The record is
    // copied per extension.
    void register_cg_producer(std::wstring cg_producer_name,
                              std::set file_extensions,
                              cg_proxy_factory proxy_factory,
                              cg_producer_factory producer_factory,
                              bool reusable_producer_instance)
    {
        std::lock_guard lock(mutex_);

        record rec{std::move(cg_producer_name), std::move(proxy_factory), std::move(producer_factory), reusable_producer_instance};

        for (auto& extension : file_extensions) {
            records_by_extension_.insert(std::make_pair(extension, rec));
        }
    }

    // Creates a producer for `filename` via the matching backend, or the
    // empty producer when no backend claims the file.
    spl::shared_ptr create_producer(const frame_producer_dependencies& dependencies, const std::wstring& filename) const
    {
        auto found = find_record(filename);
        if (!found)
            return frame_producer::empty();

        return found->producer_factory(dependencies, filename);
    }

    // Finds the proxy for an existing producer by matching the producer's
    // name against the registered backend names.
    spl::shared_ptr get_proxy(const spl::shared_ptr& producer) const
    {
        auto producer_name = producer->name();

        std::lock_guard lock(mutex_);

        for (auto& elem : records_by_extension_) {
            if (elem.second.name == producer_name)
                return elem.second.proxy_factory(producer);
        }

        return cg_proxy::empty();
    }

    // Convenience overload: looks up the foreground producer on the layer.
    spl::shared_ptr get_proxy(const spl::shared_ptr& video_channel, int render_layer) const
    {
        auto producer = spl::make_shared_ptr(video_channel->stage()->foreground(render_layer).get());

        return get_proxy(producer);
    }

    // Returns a proxy for the layer's current producer, creating and playing
    // a new producer when the current one does not match the template's
    // backend (or the backend forbids reuse).
    spl::shared_ptr get_or_create_proxy(const spl::shared_ptr& video_channel,
                                        const frame_producer_dependencies& dependencies,
                                        int render_layer,
                                        const std::wstring& filename) const
    {
        using namespace boost::filesystem;

        auto found = find_record(filename);
        if (!found)
            return cg_proxy::empty();

        auto producer = spl::make_shared_ptr(video_channel->stage()->foreground(render_layer).get());
        auto current_producer_name = producer->name();
        bool create_new = current_producer_name != found->name || !found->reusable_producer_instance;

        if (create_new) {
            diagnostics::scoped_call_context save;
            diagnostics::call_context::for_thread().video_channel = video_channel->index();
            diagnostics::call_context::for_thread().layer = render_layer;

            producer = found->producer_factory(dependencies, filename);

            if (producer == core::frame_producer::empty())
                return cg_proxy::empty();

            video_channel->stage()->load(render_layer, producer);
            video_channel->stage()->play(render_layer);
        }

        return found->proxy_factory(producer);
    }

    bool is_cg_extension(const std::wstring& extension) const
    {
        std::lock_guard lock(mutex_);

        return records_by_extension_.find(extension) != records_by_extension_.end();
    }

    // Throws when `filename` matches no registered backend.
    std::wstring get_cg_producer_name(const std::wstring& filename) const
    {
        auto record = find_record(filename);

        if (!record)
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(filename + L" is not a cg template."));

        return record->name;
    }

  private:
    // Resolves `filename` to a backend record by, in order: a case-insensitive
    // file probe under the template folder, the extension of protocol-style
    // names, and finally a blanket ".html" fallback (see TODO below).
    std::optional find_record(const std::wstring& filename) const
    {
        using namespace boost::filesystem;

        auto basepath = path(env::template_folder()) / path(filename);

        std::lock_guard lock(mutex_);

        for (auto& rec : records_by_extension_) {
            auto p = path(basepath.wstring() + rec.first);

            if (find_case_insensitive(p.wstring()))
                return rec.second;
        }

        auto protocol = caspar::protocol_split(filename).first;
        if (!protocol.empty()) {
            auto ext = path(filename).extension().wstring();

            for (auto& rec : records_by_extension_) {
                if (rec.first == ext)
                    return rec.second;
            }
        }

        // TODO (fix): This is a hack to allow query params.
        for (auto& rec : records_by_extension_) {
            if (rec.first == L".html")
                return rec.second;
        }

        return {};
    }
};

cg_producer_registry::cg_producer_registry()
    : impl_(new impl)
{
}

void cg_producer_registry::register_cg_producer(std::wstring cg_producer_name,
                                                std::set file_extensions,
                                                cg_proxy_factory proxy_factory,
                                                cg_producer_factory producer_factory,
                                                bool reusable_producer_instance)
{
    impl_->register_cg_producer(std::move(cg_producer_name),
                                std::move(file_extensions),
                                std::move(proxy_factory),
                                std::move(producer_factory),
                                reusable_producer_instance);
}

spl::shared_ptr cg_producer_registry::create_producer(const frame_producer_dependencies& dependencies,
                                                      const std::wstring& filename) const
{
    return impl_->create_producer(dependencies, filename);
}

spl::shared_ptr cg_producer_registry::get_proxy(const spl::shared_ptr& producer) const
{
    return impl_->get_proxy(producer);
}

spl::shared_ptr cg_producer_registry::get_proxy(const spl::shared_ptr& video_channel, int render_layer) const
{
    return impl_->get_proxy(video_channel, render_layer);
}

spl::shared_ptr
cg_producer_registry::get_or_create_proxy(const spl::shared_ptr& video_channel,
                                          const frame_producer_dependencies& dependencies,
                                          int render_layer,
                                          const std::wstring& filename) const
{
    return impl_->get_or_create_proxy(video_channel, dependencies, render_layer, filename);
}

bool cg_producer_registry::is_cg_extension(const std::wstring& extension) const
{
    return impl_->is_cg_extension(extension);
}

std::wstring cg_producer_registry::get_cg_producer_name(const std::wstring& filename) const
{
    return impl_->get_cg_producer_name(filename);
}

}} // namespace caspar::core

================================================ FILE: src/core/producer/cg_proxy.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

#include "frame_producer.h"

// NOTE(review): four include targets lost in extraction -- restore upstream.
#include
#include
#include
#include

namespace caspar { namespace core {

// Control surface for a template ("CG") producer: add/play/stop/update
// template layers hosted inside a single producer instance.
class cg_proxy
{
  public:
    static const unsigned int DEFAULT_LAYER = 9999;

    virtual ~cg_proxy() {}

    virtual void add(int layer,
                     const std::wstring& template_name,
                     bool play_on_load,
                     const std::wstring& start_from_label = L"",
                     const std::wstring& data = L"") = 0;
    virtual void remove(int layer) = 0;
    virtual void play(int layer) = 0;
    virtual void stop(int layer) = 0;
    virtual void next(int layer) = 0;
    virtual void update(int layer, const std::wstring& data) = 0;
    virtual std::wstring invoke(int layer, const std::wstring& label) = 0;

    // Shared no-op instance (see cg_proxy.cpp).
    static const spl::shared_ptr& empty();
};

// NOTE(review): the std::function signatures' template-argument lists were
// lost in extraction; parameter lists below show the intended call shapes.
using cg_proxy_factory = std::function(const spl::shared_ptr& producer)>;
using cg_producer_factory = std::function(const frame_producer_dependencies& dependencies, const std::wstring& filename)>;

// Registry mapping file extensions to CG backends; see cg_proxy.cpp for the
// lookup rules (pimpl).
class cg_producer_registry
{
  public:
    cg_producer_registry();

    void register_cg_producer(std::wstring cg_producer_name,
                              std::set file_extensions,
                              cg_proxy_factory proxy_factory,
                              cg_producer_factory producer_factory,
                              bool reusable_producer_instance);

    spl::shared_ptr create_producer(const frame_producer_dependencies& dependencies, const std::wstring& filename) const;

    spl::shared_ptr get_proxy(const spl::shared_ptr& producer) const;
    spl::shared_ptr get_proxy(const spl::shared_ptr& video_channel, int render_layer) const;
    spl::shared_ptr get_or_create_proxy(const spl::shared_ptr& video_channel,
                                        const frame_producer_dependencies& dependencies,
                                        int render_layer,
                                        const std::wstring& filename) const;

    bool is_cg_extension(const std::wstring& extension) const;
    std::wstring get_cg_producer_name(const std::wstring& filename) const;

  private:
    struct impl;
    spl::shared_ptr impl_;

    cg_producer_registry(const cg_producer_registry&) = delete;
    cg_producer_registry& operator=(const cg_producer_registry&) = delete;
};

}} // namespace caspar::core
================================================ FILE: src/core/producer/color/color_producer.cpp ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#include "../../StdAfx.h"

#include "color_producer.h"

// NOTE(review): eleven include targets lost in extraction -- restore upstream.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Builds a 1-pixel-high BGRA frame with one pixel per colour value; the GPU
// stretches it to cover the layer.
// NOTE(review): the template-argument lists of several casts/containers in
// this file were lost in extraction.
draw_frame create_color_frame(void* tag, const spl::shared_ptr& frame_factory, const std::vector& values)
{
    core::pixel_format_desc desc(pixel_format::bgra);
    desc.planes.push_back(core::pixel_format_desc::plane(static_cast(values.size()), 1, 4));
    auto frame = frame_factory->create_frame(tag, desc);

    // One 32-bit BGRA pixel per requested colour.
    for (int i = 0; i < values.size(); ++i)
        *reinterpret_cast(frame.image_data(0).begin() + i * 4) = values.at(i);

    return core::draw_frame(std::move(frame));
}

// Single-colour convenience overload.
draw_frame create_color_frame(void* tag, const spl::shared_ptr& frame_factory, uint32_t value)
{
    std::vector values = {value};
    return create_color_frame(tag, frame_factory, values);
}

// Parses each colour string first; throws user_error on any invalid colour.
draw_frame create_color_frame(void* tag, const spl::shared_ptr& frame_factory, const std::vector& strs)
{
    std::vector values(strs.size());

    for (int i = 0; i < values.size(); ++i) {
        if (!try_get_color(strs.at(i), values.at(i)))
            CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Invalid color: " + strs.at(i)));
    }

    return create_color_frame(tag, frame_factory, values);
}

// Producer that emits the same pre-built colour frame every tick.
class color_producer : public frame_producer
{
    monitor::state state_;
    const std::wstring color_str_;
    draw_frame frame_;

  public:
    color_producer(const spl::shared_ptr& frame_factory, uint32_t value)
        : frame_(create_color_frame(this, frame_factory, value))
    {
        CASPAR_LOG(info) << print() << L" Initialized";
    }

    color_producer(const spl::shared_ptr& frame_factory, const std::vector& colors)
        : color_str_(boost::join(colors, L", "))
        , frame_(create_color_frame(this, frame_factory, colors))
    {
        state_["color"] = color_str_;
        CASPAR_LOG(info) << print() << L" Initialized";
    }

    // frame_producer
    draw_frame receive_impl(const core::video_field field, int nb_samples) override { return frame_; }

    std::wstring print() const override { return L"color[" + color_str_ + L"]"; }

    std::wstring name() const override { return L"color"; }

    core::monitor::state state() const override { return state_; }

    bool is_ready() override { return true; }
};

// Normalizes a colour name or "#RRGGBB" string to "#AARRGGBB"; unknown
// inputs are returned unchanged (try_get_color then rejects them).
std::wstring get_hex_color(const std::wstring& str)
{
    if (str.size() == 0)
        return str;

    // "#RRGGBB" gains an opaque alpha; "#AARRGGBB" passes through.
    if (str.at(0) == '#')
        return str.length() == 7 ? L"#FF" + str.substr(1) : str;

    std::wstring col_str = boost::to_upper_copy(str);

    if (col_str == L"EMPTY")
        return L"#00000000";

    if (col_str == L"BLACK")
        return L"#FF000000";

    if (col_str == L"WHITE")
        return L"#FFFFFFFF";

    if (col_str == L"RED")
        return L"#FFFF0000";

    if (col_str == L"GREEN")
        return L"#FF00FF00";

    if (col_str == L"BLUE")
        return L"#FF0000FF";

    if (col_str == L"ORANGE")
        return L"#FFFFA500";

    if (col_str == L"YELLOW")
        return L"#FFFFFF00";

    if (col_str == L"BROWN")
        return L"#FFA52A2A";

    if (col_str == L"GRAY")
        return L"#FF808080";

    if (col_str == L"TEAL")
        return L"#FF008080";

    return str;
}

// Parses a colour string into a 0xAARRGGBB value; returns false when the
// normalized string is not exactly "#" + 8 hex digits.
bool try_get_color(const std::wstring& str, uint32_t& value)
{
    auto color_str = get_hex_color(str);
    if (color_str.length() != 9 || color_str[0] != '#')
        return false;

    std::wstringstream ss(color_str.substr(1));
    if (!(ss >> std::hex >> value) || !ss.eof())
        return false;

    return true;
}

spl::shared_ptr create_color_producer(const spl::shared_ptr& frame_factory, uint32_t value)
{
    return spl::make_shared(frame_factory, value);
}

// Builds a colour producer from a parameter list; consumes the leading run
// of valid colours and requires the first parameter to be one.
spl::shared_ptr create_color_producer(const spl::shared_ptr& frame_factory, const std::vector& params)
{
    if (params.size() < 1)
        return core::frame_producer::empty();

    uint32_t value = 0;
    if (!try_get_color(params.at(0), value))
        return core::frame_producer::empty();

    std::vector colors;
    for (auto& param : params) {
        if (try_get_color(param, value)) {
            colors.push_back(param);
        } else {
            // Stop after something not a colour, as that probably means the transition definition
            break;
        }
    }

    return spl::make_shared(frame_factory, colors);
}

}} // namespace caspar::core

================================================ FILE: src/core/producer/color/color_producer.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include #include namespace caspar { namespace core { bool try_get_color(const std::wstring& str, uint32_t& value); spl::shared_ptr create_color_producer(const spl::shared_ptr& frame_factory, uint32_t value); spl::shared_ptr create_color_producer(const spl::shared_ptr& frame_factory, const std::vector& params); draw_frame create_color_frame(void* tag, const spl::shared_ptr& frame_factory, uint32_t value); draw_frame create_color_frame(void* tag, const spl::shared_ptr& frame_factory, const std::wstring& color); }} // namespace caspar::core ================================================ FILE: src/core/producer/frame_producer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "../StdAfx.h" #include "cg_proxy.h" #include "frame_producer.h" #include #include #include #include namespace caspar { namespace core { frame_producer_dependencies::frame_producer_dependencies( const spl::shared_ptr& frame_factory, const std::vector>& channels, const video_format_repository& format_repository, const video_format_desc& format_desc, const spl::shared_ptr& producer_registry, const spl::shared_ptr& cg_registry) : frame_factory(frame_factory) , channels(channels) , format_repository(format_repository) , format_desc(format_desc) , producer_registry(producer_registry) , cg_registry(cg_registry) { } const spl::shared_ptr& frame_producer::empty() { class empty_frame_producer : public frame_producer { public: empty_frame_producer() {} draw_frame receive_impl(const core::video_field field, int nb_samples) override { return draw_frame{}; } uint32_t nb_frames() const override { return 0; } std::wstring print() const override { return L"empty"; } std::wstring name() const override { return L"empty"; } uint32_t frame_number() const override { return 0; } std::future call(const std::vector& params) override { CASPAR_LOG(warning) << L" Cannot call on empty frame_producer"; return make_ready_future(std::wstring()); } draw_frame last_frame(const core::video_field field) override { return draw_frame{}; } draw_frame first_frame(const core::video_field field) override { return draw_frame{}; } core::monitor::state state() const override { static const monitor::state empty; return empty; } bool is_ready() override { return true; } }; static spl::shared_ptr producer = spl::make_shared(); return producer; } }} // namespace caspar::core ================================================ FILE: src/core/producer/frame_producer.h ================================================ /* * Copyright (c) 2011 Sveriges Television 
AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "../fwd.h" #include "../monitor/monitor.h" #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace core { class frame_producer { frame_producer(const frame_producer&); frame_producer& operator=(const frame_producer&); uint32_t frame_number_ = 0; core::draw_frame last_frame_; core::draw_frame first_frame_; bool is_ready_ = false; public: static const spl::shared_ptr& empty(); frame_producer() = default; virtual ~frame_producer() = default; draw_frame receive(const video_field field, int nb_samples) { if (frame_number_ == 0 && first_frame_) { frame_number_ += 1; return first_frame_; } auto frame = receive_impl(field, nb_samples); if (frame) { frame_number_ += 1; last_frame_ = frame; if (!first_frame_) { first_frame_ = frame; } } return frame; } virtual draw_frame receive_impl(const video_field field, int nb_samples) = 0; virtual std::future call(const std::vector& params) { CASPAR_THROW_EXCEPTION(not_implemented()); } virtual core::monitor::state state() const = 0; virtual std::wstring print() const = 0; virtual std::wstring name() const = 0; virtual uint32_t frame_number() const { return frame_number_; } virtual uint32_t nb_frames() const { return std::numeric_limits::max(); } virtual 
draw_frame last_frame(const video_field field) { if (!last_frame_) { last_frame_ = receive_impl(field, 0); } return core::draw_frame::still(last_frame_); } virtual draw_frame first_frame(const video_field field) { if (!first_frame_) { first_frame_ = receive_impl(field, 0); } return core::draw_frame::still(first_frame_); } virtual void leading_producer(const spl::shared_ptr&) {} virtual spl::shared_ptr following_producer() const { return core::frame_producer::empty(); } virtual std::optional auto_play_delta() const { return {}; } /** * Some producers take a couple of frames before they produce frames. * While this returns false, the previous producer will be left running for a limited number of frames. */ virtual bool is_ready() = 0; }; class const_producer : public core::frame_producer { const core::draw_frame frame1_; const core::draw_frame frame2_; public: const_producer(core::draw_frame frame1, core::draw_frame frame2) : frame1_(std::move(frame1)) , frame2_(std::move(frame2)) { } // frame_producer core::draw_frame last_frame(const core::video_field field) override { if (field == core::video_field::b) return frame2_; else return frame1_; } core::draw_frame first_frame(const core::video_field field) override { return last_frame(field); } core::draw_frame receive_impl(const video_field field, int nb_samples) override { return last_frame(field); } std::wstring name() const override { return L"const_producer"; } std::wstring print() const override { return L"const_producer"; } core::monitor::state state() const override { static const monitor::state empty; return empty; } bool is_ready() override { return true; } }; class frame_producer_registry; struct frame_producer_dependencies { spl::shared_ptr frame_factory; std::vector> channels; video_format_repository format_repository; video_format_desc format_desc; spl::shared_ptr producer_registry; spl::shared_ptr cg_registry; frame_producer_dependencies(const spl::shared_ptr& frame_factory, const std::vector>& channels, 
const video_format_repository& format_repository, const video_format_desc& format_desc, const spl::shared_ptr& producer_registry, const spl::shared_ptr& cg_registry); }; }} // namespace caspar::core ================================================ FILE: src/core/producer/frame_producer_registry.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Julian Waller, julian@superfly.tv */ #include "../StdAfx.h" #include "cg_proxy.h" #include "frame_producer.h" #include "frame_producer_registry.h" #include "color/color_producer.h" #include "route/route_producer.h" #include "separated/separated_producer.h" #include namespace caspar { namespace core { frame_producer_registry::frame_producer_registry() {} void frame_producer_registry::register_producer_factory(std::wstring name, const producer_factory_t& factory) { producer_factories_.push_back(factory); } std::shared_ptr& producer_destroyer() { static auto destroyer = [] { auto result = std::make_shared(L"Producer destroyer"); result->set_capacity(std::numeric_limits::max()); return result; }(); return destroyer; } std::atomic& destroy_producers_in_separate_thread() { static std::atomic state; return state; } void destroy_producers_synchronously() { destroy_producers_in_separate_thread() = false; // Join destroyer, executing rest of producers in queue synchronously. producer_destroyer().reset(); } class destroy_producer_proxy : public frame_producer { std::shared_ptr producer_; public: destroy_producer_proxy(spl::shared_ptr&& producer) : producer_(std::move(producer)) { destroy_producers_in_separate_thread() = true; } virtual ~destroy_producer_proxy() { if (producer_ == core::frame_producer::empty() || !destroy_producers_in_separate_thread()) return; auto destroyer = producer_destroyer(); if (!destroyer) return; CASPAR_VERIFY(destroyer->size() < 8); auto producer = new spl::shared_ptr(std::move(producer_)); destroyer->begin_invoke([=] { std::unique_ptr> pointer_guard(producer); auto str = (*producer)->print(); try { if (producer->use_count() != 1) CASPAR_LOG(debug) << str << L" Not destroyed on asynchronous destruction thread: " << producer->use_count(); else CASPAR_LOG(debug) << str << L" Destroying on asynchronous destruction thread."; } catch (...) { } try { pointer_guard.reset(); CASPAR_LOG(info) << str << L" Destroyed."; } catch (...) 
{ CASPAR_LOG_CURRENT_EXCEPTION(); } }); } draw_frame receive_impl(const core::video_field field, int nb_samples) override { return producer_->receive(field, nb_samples); } std::wstring print() const override { return producer_->print(); } std::wstring name() const override { return producer_->name(); } std::future call(const std::vector& params) override { return producer_->call(params); } void leading_producer(const spl::shared_ptr& producer) override { return producer_->leading_producer(producer); } uint32_t frame_number() const override { return producer_->frame_number(); } uint32_t nb_frames() const override { return producer_->nb_frames(); } draw_frame last_frame(const core::video_field field) override { return producer_->last_frame(field); } draw_frame first_frame(const core::video_field field) override { return producer_->first_frame(field); } core::monitor::state state() const override { return producer_->state(); } bool is_ready() override { return producer_->is_ready(); } }; spl::shared_ptr do_create_producer(const frame_producer_dependencies& dependencies, const std::vector& params, const std::vector& factories, bool throw_on_fail = false) { if (params.empty()) { CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("params cannot be empty")); } auto producer = create_color_producer(dependencies.frame_factory, params); if (producer != frame_producer::empty()) { return producer; } producer = create_route_producer(dependencies, params); if (producer != frame_producer::empty()) { return producer; } if (std::any_of(factories.begin(), factories.end(), [&](const producer_factory_t& factory) -> bool { try { producer = factory(dependencies, params); } catch (user_error&) { throw; } catch (...) 
{ if (throw_on_fail) throw; else CASPAR_LOG_CURRENT_EXCEPTION(); } return producer != frame_producer::empty(); })) { return producer; } return frame_producer::empty(); } spl::shared_ptr frame_producer_registry::create_producer(const frame_producer_dependencies& dependencies, const std::vector& params) const { auto& producer_factories = producer_factories_; auto producer = do_create_producer(dependencies, params, producer_factories); auto key_producer = frame_producer::empty(); if (!params.empty() && !boost::contains(params.at(0), L"://")) { try // to find a key file. { auto params_copy = params; params_copy[0] += L"_A"; key_producer = do_create_producer(dependencies, params_copy, producer_factories); if (key_producer == frame_producer::empty()) { params_copy[0] += L"LPHA"; key_producer = do_create_producer(dependencies, params_copy, producer_factories); } } catch (...) { } } if (producer != frame_producer::empty() && key_producer != frame_producer::empty()) return create_separated_producer(producer, key_producer); if (producer == frame_producer::empty()) { std::wstring str; for (auto& param : params) str += param + L" "; CASPAR_THROW_EXCEPTION(file_not_found() << msg_info("No match found for supplied commands. 
Check syntax.") << arg_value_info(u8(str))); } // Skip destroy_producer_proxy for route_producer, as it needs to be able to perform this cast // This isn't a nice approach, but it keeps it simple and ensures every other producer has the // destroy_producer_proxy wrapping if (spl::instance_of(producer)) { return std::move(producer); } return spl::make_shared(std::move(producer)); } spl::shared_ptr frame_producer_registry::create_producer(const frame_producer_dependencies& dependencies, const std::wstring& params) const { std::wstringstream iss(params); std::vector tokens; using iterator = std::istream_iterator>; std::copy(iterator(iss), iterator(), std::back_inserter(tokens)); return create_producer(dependencies, tokens); } }} // namespace caspar::core ================================================ FILE: src/core/producer/frame_producer_registry.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Julian Waller, julian@superfly.tv */ #pragma once #include "../fwd.h" #include "../monitor/monitor.h" #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace core { using producer_factory_t = std::function(const frame_producer_dependencies&, const std::vector&)>; class frame_producer_registry { public: frame_producer_registry(); void register_producer_factory(std::wstring name, const producer_factory_t& factoryr); // Not thread-safe. spl::shared_ptr create_producer(const frame_producer_dependencies&, const std::vector& params) const; spl::shared_ptr create_producer(const frame_producer_dependencies&, const std::wstring& params) const; private: std::vector producer_factories_; frame_producer_registry(const frame_producer_registry&) = delete; frame_producer_registry& operator=(const frame_producer_registry&) = delete; }; void destroy_producers_synchronously(); }} // namespace caspar::core ================================================ FILE: src/core/producer/layer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "../StdAfx.h" #include "layer.h" #include "frame_producer.h" #include "../frame/draw_frame.h" #include "../video_format.h" namespace caspar { namespace core { struct layer::impl { monitor::state state_; const core::video_format_desc format_desc_; spl::shared_ptr foreground_ = frame_producer::empty(); spl::shared_ptr background_ = frame_producer::empty(); bool auto_play_ = false; bool paused_ = false; public: impl(const core::video_format_desc format_desc) : format_desc_(format_desc) { } void pause() { paused_ = true; } void resume() { paused_ = false; } void load(spl::shared_ptr producer, bool preview_producer, bool auto_play) { background_ = std::move(producer); auto_play_ = auto_play; if (auto_play_ && foreground_ == frame_producer::empty()) { play(); } else if (preview_producer) { preview(true); } } void preview(bool force) { if (force || background_ != frame_producer::empty()) { play(); paused_ = true; } } void play() { if (background_ != frame_producer::empty()) { if (!paused_) { background_->leading_producer(foreground_); } else { if (format_desc_.field_count == 2) { auto frame1 = foreground_->last_frame(core::video_field::a); auto frame2 = foreground_->last_frame(core::video_field::b); background_->leading_producer( spl::make_shared(std::move(frame1), std::move(frame2))); } else { auto frame = foreground_->last_frame(core::video_field::progressive); background_->leading_producer(spl::make_shared(frame, frame)); } } foreground_ = std::move(background_); background_ = frame_producer::empty(); auto_play_ = false; } paused_ = false; } void stop() { foreground_ = frame_producer::empty(); auto_play_ = false; } draw_frame receive(const video_field field, int nb_samples) { try { if (foreground_->following_producer() != core::frame_producer::empty() && field != video_field::b) { foreground_ = foreground_->following_producer(); } int64_t frames_left = 0; if (auto_play_) { auto auto_play_delta = 
background_->auto_play_delta(); if (auto_play_delta) { auto time = static_cast(foreground_->frame_number()); auto duration = static_cast(foreground_->nb_frames()); frames_left = duration - time - *auto_play_delta; if (frames_left < 1 && field != video_field::b) { play(); } } } auto frame = paused_ ? core::draw_frame{} : foreground_->receive(field, nb_samples); if (!frame) { frame = foreground_->last_frame(field); } state_ = {}; state_["foreground"] = foreground_->state(); state_["foreground"]["producer"] = foreground_->name(); state_["foreground"]["paused"] = paused_; if (frames_left > 0) { state_["foreground"]["frames_left"] = frames_left; } state_["background"] = background_->state(); state_["background"]["producer"] = background_->name(); return frame; } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); stop(); return draw_frame{}; } } draw_frame receive_background(const video_field field, int nb_samples) { try { return background_->first_frame(field); } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); background_ = frame_producer::empty(); return draw_frame{}; } } }; layer::layer(const core::video_format_desc format_desc) : impl_(new impl(format_desc)) { } layer::layer(layer&& other) : impl_(std::move(other.impl_)) { } layer& layer::operator=(layer&& other) { impl_ = std::move(other.impl_); return *this; } void layer::swap(layer& other) { impl_.swap(other.impl_); } void layer::load(spl::shared_ptr frame_producer, bool preview, bool auto_play) { return impl_->load(std::move(frame_producer), preview, auto_play); } void layer::play() { impl_->play(); } void layer::preview() { impl_->preview(false); } void layer::pause() { impl_->pause(); } void layer::resume() { impl_->resume(); } void layer::stop() { impl_->stop(); } draw_frame layer::receive(const video_field field, int nb_samples) { return impl_->receive(field, nb_samples); } draw_frame layer::receive_background(const video_field field, int nb_samples) { return impl_->receive_background(field, nb_samples); } 
spl::shared_ptr layer::foreground() const { return impl_->foreground_; } spl::shared_ptr layer::background() const { return impl_->background_; } bool layer::has_background() const { return impl_->background_ != frame_producer::empty(); } core::monitor::state layer::state() const { return impl_->state_; } }} // namespace caspar::core ================================================ FILE: src/core/producer/layer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "frame_producer.h" #include "../fwd.h" #include "../monitor/monitor.h" #include namespace caspar { namespace core { class layer final { layer(const layer&); layer& operator=(const layer&); public: explicit layer(const core::video_format_desc format_desc); layer(layer&& other); layer& operator=(layer&& other); void swap(layer& other); void load(spl::shared_ptr producer, bool preview, bool auto_play = false); void play(); void preview(); void pause(); void resume(); void stop(); draw_frame receive(const video_field field, int nb_samples); draw_frame receive_background(const video_field field, int nb_samples); core::monitor::state state() const; spl::shared_ptr foreground() const; spl::shared_ptr background() const; bool has_background() const; private: struct impl; spl::shared_ptr impl_; }; }} // namespace caspar::core ================================================ FILE: src/core/producer/route/route_producer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "route_producer.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace core { class fix_stream_tag : public frame_visitor { const void* route_producer_ptr_; std::stack>> frames_stack_; std::optional upd_frame_; fix_stream_tag(const fix_stream_tag&); fix_stream_tag& operator=(const fix_stream_tag&); public: fix_stream_tag(void* stream_tag) : route_producer_ptr_(stream_tag) { frames_stack_ = std::stack>>(); frames_stack_.emplace(frame_transform{}, std::vector()); } void push(const frame_transform& transform) { frames_stack_.emplace(transform, std::vector()); } void visit(const const_frame& frame) { // Get original tag from the frame const void* source_tag = frame.stream_tag(); // Calculate a unique but stable tag for this source // This calculation will always produce the same result for the same inputs intptr_t base_addr = reinterpret_cast(route_producer_ptr_); intptr_t source_addr = reinterpret_cast(source_tag); // Use XOR to create a unique value that combines route producer and source identities intptr_t unique_value = base_addr ^ source_addr ^ 0xDEADBEEF; // Constant helps avoid collisions const void* unique_tag = reinterpret_cast(unique_value); // Apply the tag to the frame upd_frame_ = frame.with_tag(unique_tag); } void pop() { auto popped = frames_stack_.top(); frames_stack_.pop(); if (upd_frame_ != std::nullopt) { auto new_frame = draw_frame(std::move(*upd_frame_)); upd_frame_ = std::nullopt; new_frame.transform() = popped.first; frames_stack_.top().second.push_back(std::move(new_frame)); } else { auto new_frame = draw_frame(std::move(popped.second)); new_frame.transform() = popped.first; frames_stack_.top().second.push_back(new_frame); } } draw_frame operator()(draw_frame frame) { frame.accept(*this); auto popped = frames_stack_.top(); frames_stack_.pop(); draw_frame result = 
draw_frame(std::move(popped.second)); frames_stack_ = std::stack>>(); frames_stack_.emplace(frame_transform{}, std::vector()); return result; } }; class route_producer : public frame_producer , public route_control , public std::enable_shared_from_this { spl::shared_ptr graph_; tbb::concurrent_bounded_queue> buffer_; caspar::timer produce_timer_; caspar::timer consume_timer_; std::shared_ptr route_; const video_format_desc format_desc_; std::optional> frame_; int source_channel_; int source_layer_; fix_stream_tag tag_fix_; core::video_format_desc source_format_; bool is_cross_channel_ = false; boost::signals2::scoped_connection connection_; int get_source_channel() const override { return source_channel_; } int get_source_layer() const override { return source_layer_; } // set the buffer depth to 2 for cross-channel routes, 1 otherwise void set_cross_channel(bool cross) override { is_cross_channel_ = cross; if (cross) { buffer_.set_capacity(2); source_format_ = route_->format_desc; } else { buffer_.set_capacity(1); source_format_ = core::video_format_desc(); } } public: route_producer(std::shared_ptr route, video_format_desc format_desc, int buffer, int source_channel, int source_layer) : route_(route) , format_desc_(format_desc) , source_channel_(source_channel) , source_layer_(source_layer) , tag_fix_(this) { graph_ = spl::make_shared(); buffer_.set_capacity(buffer > 0 ? 
          buffer : 1);
        // Diagnostics graph: colours for the timing/drop indicators rendered for this route.
        graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
        graph_->set_color("produce-time", caspar::diagnostics::color(0.0f, 1.0f, 0.0f));
        graph_->set_color("consume-time", caspar::diagnostics::color(1.0f, 0.4f, 0.0f, 0.8f));
        graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
        graph_->set_text(print());
        diagnostics::register_graph(graph_);

        CASPAR_LOG(debug) << print() << L" Initialized";
    }

    // Subscribes this producer to the source route's frame signal. Kept out of the
    // constructor because weak_from_this() only works once the object is owned by a
    // shared_ptr; the slot captures a weak reference so the producer can be destroyed
    // while the source channel keeps emitting frames.
    void connect_slot()
    {
        auto weak_self = weak_from_this();
        connection_ =
            route_->signal.connect([weak_self](const core::draw_frame& frame1, const core::draw_frame& frame2) {
                if (auto self = weak_self.lock()) {
                    auto frame1b = frame1;
                    if (!frame1b) {
                        // We got a frame, so ensure it is a real frame (otherwise the layer gets confused)
                        frame1b = core::draw_frame::push(frame1);
                    }

                    // Update the tag in the frame to allow the audio mixer to distinguish between the source frame and
                    // the routed frame
                    frame1b = self->tag_fix_(frame1b);

                    auto frame2b = frame2;
                    if (!frame2b) {
                        // Ensure that any interlaced channel will repeat frames instead of showing black.
                        frame2b = frame1b;
                    } else {
                        // For interlaced formats, ensure field B gets the proper tag as well
                        frame2b = self->tag_fix_(frame2b);
                    }

                    // A full buffer means the consuming channel is not keeping up; drop the pair.
                    if (!self->buffer_.try_push(std::make_pair(frame1b, frame2b))) {
                        self->graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
                    }

                    self->graph_->set_value("produce-time",
                                            self->produce_timer_.elapsed() * self->route_->format_desc.fps * 0.5);
                    self->produce_timer_.restart();
                }
            });
    }

    // Returns a "still" copy of the most recently consumed frame pair, pulling one from
    // the buffer first if nothing has been consumed yet.
    draw_frame last_frame(const core::video_field field) override
    {
        if (!frame_) {
            std::pair frame;
            if (buffer_.try_pop(frame)) {
                frame_ = frame;
            }
        }

        if (!frame_) {
            return core::draw_frame{};
        }

        if (field == core::video_field::b) {
            return core::draw_frame::still(frame_->second);
        } else {
            return core::draw_frame::still(frame_->first);
        }
    }

    // Tracks which field to emit next when converting an interlaced source to progressive output.
    core::video_field next_field_ = core::video_field::a;

    draw_frame receive_impl(core::video_field field, int nb_samples) override
    {
        // If going i -> p, alternate between the fields
        // Note: this doesn't fix the audio if going 50i -> 25p
        if (field == core::video_field::progressive && source_format_.field_count != 1 &&
            format_desc_.fps >= source_format_.fps) {
            field       = next_field_;
            next_field_ = (next_field_ == core::video_field::a) ? core::video_field::b : core::video_field::a;
        }

        // Only pop a new pair at the start of a frame (field A / progressive); field B
        // reuses the pair fetched for field A.
        if (field == core::video_field::a || field == core::video_field::progressive) {
            std::pair frame;
            if (!buffer_.try_pop(frame)) {
                graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
            } else {
                frame_ = frame;
            }
        }

        graph_->set_value("consume-time", consume_timer_.elapsed() * route_->format_desc.fps * 0.5);
        consume_timer_.restart();

        if (!frame_) {
            return core::draw_frame{};
        }

        if (field == core::video_field::b) {
            return frame_->second;
        } else {
            return frame_->first;
        }
    }

    bool is_ready() override { return true; }

    std::wstring print() const override { return L"route[" + route_->name + L"]"; }

    std::wstring name() const override { return L"route"; }

    // Exposes the route source for the monitor/OSC state tree.
    core::monitor::state state() const override
    {
        core::monitor::state state;
        state["route/channel"] = source_channel_;
        if (source_layer_ > -1) {
            state["route/layer"] = source_layer_;
        }
        return state;
    }
};

// Parses "route://<channel>[-<layer>]" plus the optional BACKGROUND/NEXT/BUFFER
// parameters and builds a route producer attached to the requested channel/layer.
// Returns frame_producer::empty() when params do not match the route:// scheme.
spl::shared_ptr create_route_producer(const core::frame_producer_dependencies& dependencies,
                                      const std::vector& params)
{
    static boost::wregex expr(L"route://(?\\d+)(-(?\\d+))?", boost::regex::icase);
    boost::wsmatch       what;
    if (params.empty() || !boost::regex_match(params.at(0), what, expr)) {
        return core::frame_producer::empty();
    }

    auto channel = std::stoi(what["CHANNEL"].str());
    auto layer   = what["LAYER"].matched ? std::stoi(what["LAYER"].str()) : -1;

    // Mode selection only applies when routing from a specific layer.
    auto mode = core::route_mode::foreground;
    if (layer >= 0) {
        if (contains_param(L"BACKGROUND", params))
            mode = core::route_mode::background;
        else if (contains_param(L"NEXT", params))
            mode = core::route_mode::next;
    }

    auto channel_it = std::find_if(dependencies.channels.begin(),
                                   dependencies.channels.end(),
                                   [=](const spl::shared_ptr& ch) { return ch->index() == channel; });
    if (channel_it == dependencies.channels.end()) {
        CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"No channel with id " + std::to_wstring(channel)));
    }

    auto buffer = get_param(L"BUFFER", params, 0);

    auto rp = spl::make_shared(
        (*channel_it)->route(layer, mode), dependencies.format_desc, buffer, channel, layer);
    // Must be called after construction - see connect_slot() for why.
    rp->connect_slot();
    return rp;
}

}} // namespace caspar::core

================================================
FILE: src/core/producer/route/route_producer.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

#include
#include
#include

namespace caspar { namespace core {

// Interface implemented by route producers so the stage can discover where a routed
// layer pulls its frames from and tell the producer whether the route crosses channels.
class route_control
{
  public:
    virtual ~route_control() {}

    // Index of the channel this route is sourced from.
    virtual int get_source_channel() const = 0;
    // Index of the source layer, or -1 when routing a whole channel.
    virtual int get_source_layer() const = 0;
    // Called by the stage with true when source and destination are different channels.
    virtual void set_cross_channel(bool cross) = 0;
};

spl::shared_ptr create_route_producer(const core::frame_producer_dependencies& dependencies,
                                      const std::vector& params);

}} // namespace caspar::core

================================================
FILE: src/core/producer/separated/separated_producer.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "../../StdAfx.h"

#include "separated_producer.h"

#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Caches one frame per field (field1 doubles as the progressive frame).
struct frame_pair
{
    draw_frame field1;
    draw_frame field2;

    draw_frame get(video_field field)
    {
        if (field == video_field::b)
            return field2;
        else
            return field1;
    }

    void set(video_field field, const draw_frame& frame)
    {
        if (field == video_field::b)
            field2 = frame;
        else
            field1 = frame;
    }
};

// Combines a fill producer and a key (alpha) producer into a single masked frame,
// holding back output until both sides have delivered a frame for the field.
class separated_producer : public frame_producer
{
    monitor::state state_;

    spl::shared_ptr fill_producer_;
    spl::shared_ptr key_producer_;
    frame_pair      fill_;
    frame_pair      key_;

  public:
    explicit separated_producer(const spl::shared_ptr& fill, const spl::shared_ptr& key)
        : fill_producer_(fill)
        , key_producer_(key)
    {
        CASPAR_LOG(debug) << print() << L" Initialized";
    }

    // frame_producer

    draw_frame last_frame(const core::video_field field) override
    {
        return draw_frame::mask(fill_producer_->last_frame(field), key_producer_->last_frame(field));
    }

    draw_frame first_frame(const core::video_field field) override
    {
        return draw_frame::mask(fill_producer_->first_frame(field), key_producer_->first_frame(field));
    }

    draw_frame receive_impl(const core::video_field field, int nb_samples) override
    {
        // Publish combined state (fill state plus key under "keyer") on every exit path.
        CASPAR_SCOPE_EXIT
        {
            state_          = fill_producer_->state();
            state_["keyer"] = key_producer_->state();
        };

        auto fill = fill_.get(field);
        auto key  = key_.get(field);

        if (!fill) {
            fill = fill_producer_->receive(field, nb_samples);
        }

        if (!key) {
            key = key_producer_->receive(field, nb_samples);
        }

        // If only one side produced a frame, cache it and emit nothing until the
        // other side catches up.
        if (!fill || !key) {
            fill_.set(field, fill);
            key_.set(field, key);
            return core::draw_frame{};
        }

        auto frame = draw_frame::mask(fill, key);

        // Both frames consumed - reset the per-field cache.
        fill_.set(field, draw_frame{});
        key_.set(field, draw_frame{});

        return frame;
    }

    uint32_t frame_number() const override { return fill_producer_->frame_number(); }

    uint32_t nb_frames() const override { return std::min(fill_producer_->nb_frames(), key_producer_->nb_frames()); }

    std::wstring print() const override
    {
        return L"separated[fill:" + fill_producer_->print() + L"|key[" + key_producer_->print() + L"]]";
    }

    // Forwards the call to both producers; the fill producer's result is returned.
    std::future call(const std::vector& params) override
    {
        key_producer_->call(params);
        return fill_producer_->call(params);
    }

    std::wstring name() const override { return L"separated"; }

    core::monitor::state state() const override { return state_; }

    bool is_ready() override { return key_producer_->is_ready() && fill_producer_->is_ready(); }
};

spl::shared_ptr create_separated_producer(const spl::shared_ptr& fill, const spl::shared_ptr& key)
{
    return spl::make_shared(fill, key);
}

}} // namespace caspar::core

================================================
FILE: src/core/producer/separated/separated_producer.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

#include

namespace caspar { namespace core {

spl::shared_ptr create_separated_producer(const spl::shared_ptr& fill, const spl::shared_ptr& key);

}} // namespace caspar::core

================================================
FILE: src/core/producer/stage.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "../StdAfx.h"

#include "stage.h"

#include "layer.h"

#include "../frame/draw_frame.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Implementation of the stage: owns the layers of one channel and ticks them on a
// dedicated executor thread.
struct stage::impl : public std::enable_shared_from_this
{
    int channel_index_;
    spl::shared_ptr graph_;
    monitor::state state_;
    std::map layers_;
    std::map tweens_;
    // Scratch set used by orderSourceLayers to detect circular routes.
    std::set routeSources;

    mutable std::mutex format_desc_mutex_;
    core::video_format_desc format_desc_;

    executor executor_{L"stage " + std::to_wstring(channel_index_)};
    std::mutex lock_;

  private:
    // Depth-first ordering of layers so that a layer acting as a route source is pulled
    // before the layer(s) routing from it. The bool in each pair records whether the
    // layer is safe to tick (false when a circular route was detected).
    void orderSourceLayers(std::vector>& layerVec,
                           const std::map>& routed_layers,
                           int l,
                           int depth)
    {
        if (0 == depth)
            routeSources.clear();

        // Already ordered - nothing to do.
        if (std::find_if(layerVec.begin(), layerVec.end(), [l](std::pair p) { return p.first == l; }) !=
            layerVec.end()) {
            return;
        }

        auto routeIt = routed_layers.find(l);
        if (routed_layers.end() == routeIt) {
            // Not a routed layer - order as-is.
            layerVec.push_back(std::make_pair(l, true));
            return;
        }

        std::pair routeSrc(routeIt->second);
        if (channel_index_ != routeSrc.first) {
            // Cross-channel route - the source is ticked by the other channel.
            layerVec.push_back(std::make_pair(l, true));
            return;
        }

        // check for circular route setup - skip recursion if found
        routeSources.emplace(routeSrc.second);
        bool layerOK = true;
        if (routeSources.find(l) == routeSources.end()) {
            orderSourceLayers(layerVec, routed_layers, routeSrc.second, ++depth);
        } else {
            layerOK = false;
        }

        if (std::find_if(layerVec.begin(), layerVec.end(), [l](std::pair p) { return p.first == l; }) ==
            layerVec.end()) {
            layerVec.push_back(std::make_pair(l, layerOK));
        }
    }

    // Returns the layer at index, creating it on demand.
    layer& get_layer(int index)
    {
        auto it = layers_.find(index);
        if (it == std::end(layers_)) {
            it = layers_.emplace(index, layer(video_format_desc())).first;
        }
        return it->second;
    }

  public:
    impl(int channel_index, spl::shared_ptr graph, const core::video_format_desc& format_desc)
        : channel_index_(channel_index)
        , graph_(std::move(graph))
        , format_desc_(format_desc)
    {
    }

    // Ticks every layer once and collects the produced frames for the mixer.
    // fetch_background lists layers whose background should also be pulled (background
    // routes); routesCb is invoked per layer (and once with -1 for the whole channel)
    // so route producers can pick up the frames.
    const stage_frames operator()(uint64_t frame_number, std::vector& fetch_background, std::function routesCb)
    {
        return executor_.invoke([=, this] {
            std::map frames;

            stage_frames result = {};
            result.format_desc  = video_format_desc();
            result.nb_samples   = result.format_desc.audio_cadence[frame_number % result.format_desc.audio_cadence.size()];

            auto is_interlaced = format_desc_.field_count == 2;
            auto field1        = is_interlaced ? video_field::a : video_field::progressive;

            try {
                for (auto& t : tweens_)
                    t.second.tick(1);

                // build a map of layers that are sourced from route producers
                std::map> routed_layers;
                for (auto& p : layers_) {
                    auto producer = std::move(p.second.foreground());
                    if (0 == producer->name().compare(L"route")) {
                        try {
                            auto rc       = spl::dynamic_pointer_cast(producer);
                            auto srcChan  = rc->get_source_channel();
                            auto srcLayer = rc->get_source_layer();
                            routed_layers.emplace(p.first, std::make_pair(srcChan, srcLayer));
                            rc->set_cross_channel(channel_index_ != srcChan);
                        } catch (std::bad_cast) {
                            // NOTE(review): catches by value; `const std::bad_cast&` would be idiomatic.
                            CASPAR_LOG(error) << "Failed to cast route producer";
                        }
                    }
                }

                // sort layer order so that sources get pulled before routes
                std::vector> layerVec;
                for (auto& p : layers_)
                    orderSourceLayers(layerVec, routed_layers, p.first, 0);

                // when running interlaced, both fields are be pulled at once.
                // This will risk some stutter for freshly created producers, but it lets us tick at 25hz and avoids
                // amcp changes starting on the second field
                for (auto& l : layerVec) {
                    auto p = layers_.find(l.first);
                    if (p == layers_.end())
                        continue;

                    auto& layer = p->second;
                    auto& tween = tweens_[p->first];
                    auto  has_background_route =
                        std::find(fetch_background.begin(), fetch_background.end(), p->first) != fetch_background.end();

                    layer_frame res = {};

                    // l.second is false for layers caught in a circular route; skip ticking those.
                    if (l.second) {
                        res.foreground1 = draw_frame::push(layer.receive(field1, result.nb_samples), tween.fetch());
                        res.foreground1.transform().image_transform.enable_geometry_modifiers = true;
                    }
                    res.has_background = layer.has_background();

                    if (has_background_route)
                        res.background1 = layer.receive_background(field1, result.nb_samples);

                    if (is_interlaced) {
                        res.is_interlaced = true;
                        if (l.second) {
                            res.foreground2 = draw_frame::push(layer.receive(video_field::b, result.nb_samples), tween.fetch());
                            res.foreground2.transform().image_transform.enable_geometry_modifiers = true;
                        }
                        if (has_background_route)
                            res.background2 = layer.receive_background(video_field::b, result.nb_samples);
                    }

                    frames[p->first] = res;

                    // push received foreground frame to any configured route producer
                    routesCb(p->first, res);
                }

                for (auto& p : frames) {
                    result.frames.push_back(p.second.foreground1);
                    if (is_interlaced)
                        result.frames2.push_back(p.second.foreground2);
                }

                {
                    // push stage_frames to support any channel routes that have been set
                    layer_frame chan_lf   = {};
                    chan_lf.is_interlaced = is_interlaced;
                    chan_lf.foreground1   = wrap_layer_frames_for_route(result.frames);
                    if (is_interlaced)
                        chan_lf.foreground2 = wrap_layer_frames_for_route(result.frames2);
                    routesCb(-1, chan_lf);
                }

                monitor::state state;
                for (auto& p : layers_) {
                    state["layer"][p.first] = p.second.state();
                }
                state_ = std::move(state);
            } catch (...) {
                // A failure mid-tick leaves the stage in an unknown state; drop all layers.
                layers_.clear();
                CASPAR_LOG_CURRENT_EXCEPTION();
            }

            return result;
        });
    }

    // Wraps the per-layer frames into a single frame for whole-channel routes.
    core::draw_frame wrap_layer_frames_for_route(std::vector frames)
    {
        // Note: this must not mutate the vector used for the layer
        for (auto& frame : frames) {
            // Tell the compositor that these are layers, matching what normal rendering does
            frame.transform().image_transform.layer_depth = 1;
        }
        return core::draw_frame(frames);
    }

    std::future apply_transforms(const std::vector>& transforms)
    {
        return executor_.begin_invoke([=, this] {
            for (auto& transform : transforms) {
                auto& tween = tweens_[std::get<0>(transform)];
                auto  src   = tween.fetch();
                auto  dst   = std::get<1>(transform)(tween.dest());

                tweens_[std::get<0>(transform)] =
                    tweened_transform(src, dst, std::get<2>(transform), std::get<3>(transform));
            }
        });
    }

    std::future apply_transform(int index, const stage::transform_func_t& transform, unsigned int mix_duration, const tweener& tween)
    {
        return executor_.begin_invoke([=, this] {
            auto src = tweens_[index].fetch();
            auto dst = transform(src);

            tweens_[index] = tweened_transform(src, dst, mix_duration, tween);
        });
    }

    std::future clear_transforms(int index)
    {
        return executor_.begin_invoke([=, this] { tweens_.erase(index); });
    }

    std::future clear_transforms()
    {
        return executor_.begin_invoke([=, this] { tweens_.clear(); });
    }

    std::future get_current_transform(int index)
    {
        return executor_.begin_invoke([=, this] { return tweens_[index].fetch(); });
    }

    std::future load(int index, const spl::shared_ptr& producer, bool preview, bool auto_play)
    {
        return executor_.begin_invoke([=, this] { get_layer(index).load(producer, preview, auto_play); });
    }

    std::future preview(int index)
    {
        return executor_.begin_invoke([=, this] { get_layer(index).preview(); });
    }

    std::future pause(int index)
    {
        return executor_.begin_invoke([=, this] { get_layer(index).pause(); });
    }

    std::future resume(int index)
    {
        return executor_.begin_invoke([=, this] { get_layer(index).resume(); });
    }

    std::future play(int index)
    {
        return executor_.begin_invoke([=, this] { get_layer(index).play(); });
    }

    std::future stop(int index)
    {
        return executor_.begin_invoke([=, this] { get_layer(index).stop(); });
    }

    std::future clear(int index)
    {
        return executor_.begin_invoke([=, this] { layers_.erase(index); });
    }

    std::future clear()
    {
        return executor_.begin_invoke([=, this] { layers_.clear(); });
    }

    std::future swap_layers(const std::shared_ptr& other, bool swap_transforms)
    {
        auto other_impl = other->impl_;

        // Swapping with ourselves is a no-op.
        if (other_impl.get() == this) {
            return make_ready_future();
        }

        auto func = [=, this] {
            auto layers       = layers_ | boost::adaptors::map_values;
            auto other_layers = other_impl->layers_ | boost::adaptors::map_values;

            std::swap(layers_, other_impl->layers_);

            if (swap_transforms)
                std::swap(tweens_, other_impl->tweens_);
        };

        return invoke_both(other, func);
    }

    std::future swap_layer(int index, int other_index, bool swap_transforms)
    {
        return executor_.begin_invoke([=, this] {
            std::swap(get_layer(index), get_layer(other_index));

            if (swap_transforms)
                std::swap(tweens_[index], tweens_[other_index]);
        });
    }

    std::future swap_layer(int index, int other_index, const std::shared_ptr& other, bool swap_transforms)
    {
        auto other_impl = other->impl_;

        if (other_impl.get() == this)
            return swap_layer(index, other_index, swap_transforms);

        auto func = [=, this] {
            auto& my_layer    = get_layer(index);
            auto& other_layer = other_impl->get_layer(other_index);

            std::swap(my_layer, other_layer);

            if (swap_transforms) {
                auto& my_tween    = tweens_[index];
                auto& other_tween = other_impl->tweens_[other_index];
                std::swap(my_tween, other_tween);
            }
        };

        return invoke_both(other, func);
    }

    // Runs func with both stages' executors held, always entering the lower-indexed
    // channel's executor first - presumably to give cross-channel operations a
    // consistent nesting order (TODO confirm: deadlock avoidance).
    std::future invoke_both(const std::shared_ptr& other, std::function func)
    {
        auto other_impl = other->impl_;

        if (other_impl->channel_index_ < channel_index_) {
            return other_impl->executor_.begin_invoke([=, this] { executor_.invoke(func); });
        }

        return executor_.begin_invoke([=, this] { other_impl->executor_.invoke(func); });
    }

    std::future> foreground(int index)
    {
        return executor_.begin_invoke(
            [=, this]() -> std::shared_ptr { return get_layer(index).foreground(); });
    }

    std::future> background(int index)
    {
        return executor_.begin_invoke(
            [=, this]() -> std::shared_ptr { return get_layer(index).background(); });
    }

    std::future call(int index, const std::vector& params)
    {
        return flatten(executor_.begin_invoke([=, this] { return get_layer(index).foreground()->call(params).share(); }));
    }

    std::future callbg(int index, const std::vector& params)
    {
        return flatten(executor_.begin_invoke([=, this] { return get_layer(index).background()->call(params).share(); }));
    }

    std::unique_lock get_lock() { return std::move(std::unique_lock(lock_)); }

    core::video_format_desc video_format_desc() const
    {
        std::lock_guard lock(format_desc_mutex_);
        return format_desc_;
    }

    // Changing the video format clears all layers.
    std::future video_format_desc(const core::video_format_desc& format_desc)
    {
        return executor_.begin_invoke([=, this] {
            {
                std::lock_guard lock(format_desc_mutex_);
                format_desc_ = format_desc;
            }
            layers_.clear();
        });
    }
};

stage::stage(int channel_index, spl::shared_ptr graph, const core::video_format_desc& format_desc)
    : impl_(new impl(channel_index, std::move(graph), format_desc))
{
}

// The public stage methods below simply forward to the impl.
std::future stage::call(int index, const std::vector& params) { return impl_->call(index, params); }
std::future stage::callbg(int index, const std::vector& params) { return impl_->callbg(index, params); }
std::future stage::apply_transforms(const std::vector& transforms)
{
    return impl_->apply_transforms(transforms);
}
std::future stage::apply_transform(int index, const std::function& transform, unsigned int mix_duration, const tweener& tween)
{
    return impl_->apply_transform(index, transform, mix_duration, tween);
}
std::future stage::clear_transforms(int index) { return impl_->clear_transforms(index); }
std::future stage::clear_transforms() { return impl_->clear_transforms(); }
std::future stage::get_current_transform(int index) { return impl_->get_current_transform(index); }
std::future stage::load(int index, const spl::shared_ptr& producer, bool preview, bool auto_play)
{
    return impl_->load(index, producer, preview, auto_play);
}
std::future stage::preview(int index) { return impl_->preview(index); }
std::future stage::pause(int index) { return impl_->pause(index); }
std::future stage::resume(int index) { return impl_->resume(index); }
std::future stage::play(int index) { return impl_->play(index); }
std::future stage::stop(int index) { return impl_->stop(index); }
std::future stage::clear(int index) { return impl_->clear(index); }
std::future stage::clear() { return impl_->clear(); }
std::future stage::swap_layers(const std::shared_ptr& other, bool swap_transforms)
{
    const auto other2 = std::static_pointer_cast(other);
    return impl_->swap_layers(other2, swap_transforms);
}
std::future stage::swap_layer(int index, int other_index, bool swap_transforms)
{
    return impl_->swap_layer(index, other_index, swap_transforms);
}
std::future stage::swap_layer(int index, int other_index, const std::shared_ptr& other, bool swap_transforms)
{
    const auto other2 = std::static_pointer_cast(other);
    return impl_->swap_layer(index, other_index, other2, swap_transforms);
}
std::future> stage::foreground(int index) { return impl_->foreground(index); }
std::future> stage::background(int index) { return impl_->background(index); }
const stage_frames stage::operator()(uint64_t frame_number, std::vector& fetch_background, std::function routesCb)
{
    return (*impl_)(frame_number, fetch_background, routesCb);
}
core::monitor::state stage::state() const { return impl_->state_; }
core::video_format_desc stage::video_format_desc() const { return impl_->video_format_desc(); }
std::future stage::video_format_desc(const core::video_format_desc& format_desc)
{
    return impl_->video_format_desc(format_desc);
}
std::unique_lock stage::get_lock() const { return impl_->get_lock(); }
// Note: executes func immediately on the calling thread, unlike stage_delayed::execute.
std::future stage::execute(std::function func)
{
    func();
    return make_ready_future();
}

// STAGE DELAYED (For batching operations)
// Wraps a stage and queues every operation on its own executor; nothing runs on the
// real stage until release() unblocks the executor.
stage_delayed::stage_delayed(std::shared_ptr& st, int index)
    : executor_{L"batch stage " + boost::lexical_cast(index)}
    , stage_(st)
{
    // Start the executor blocked on a future that will complete when we are ready for it to execute
    executor_.begin_invoke([=, this]() -> void { waiter_.get_future().get(); });
}

// Each method below enqueues the corresponding stage operation and blocks the batch
// executor on its completion, preserving submission order once released.
std::future stage_delayed::call(int index, const std::vector& params)
{
    return executor_.begin_invoke([=, this]() -> std::wstring { return stage_->call(index, params).get(); });
}
std::future stage_delayed::callbg(int index, const std::vector& params)
{
    return executor_.begin_invoke([=, this]() -> std::wstring { return stage_->callbg(index, params).get(); });
}
std::future stage_delayed::apply_transforms(const std::vector& transforms)
{
    return executor_.begin_invoke([=, this]() { return stage_->apply_transforms(transforms).get(); });
}
std::future stage_delayed::apply_transform(int index, const std::function& transform, unsigned int mix_duration, const tweener& tween)
{
    return executor_.begin_invoke(
        [=, this]() { return stage_->apply_transform(index, transform, mix_duration, tween).get(); });
}
std::future stage_delayed::clear_transforms(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->clear_transforms(index).get(); });
}
std::future stage_delayed::clear_transforms()
{
    return executor_.begin_invoke([=, this]() { return stage_->clear_transforms().get(); });
}
std::future stage_delayed::get_current_transform(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->get_current_transform(index).get(); });
}
std::future stage_delayed::load(int index, const spl::shared_ptr& producer, bool preview, bool auto_play)
{
    return executor_.begin_invoke([=, this]() { return stage_->load(index, producer, preview, auto_play).get(); });
}
std::future stage_delayed::preview(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->preview(index).get(); });
}
std::future stage_delayed::pause(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->pause(index).get(); });
}
std::future stage_delayed::resume(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->resume(index).get(); });
}
std::future stage_delayed::play(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->play(index).get(); });
}
std::future stage_delayed::stop(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->stop(index).get(); });
}
std::future stage_delayed::clear(int index)
{
    return executor_.begin_invoke([=, this]() { return stage_->clear(index).get(); });
}
std::future stage_delayed::clear()
{
    return executor_.begin_invoke([=, this]() { return stage_->clear().get(); });
}
std::future stage_delayed::swap_layers(const std::shared_ptr& other, bool swap_transforms)
{
    const auto other2 = std::static_pointer_cast(other);
    return executor_.begin_invoke([=, this]() { return stage_->swap_layers(other2->stage_, swap_transforms).get(); });
}
std::future stage_delayed::swap_layer(int index, int other_index, bool swap_transforms)
{
    return executor_.begin_invoke(
        [=, this]() { return stage_->swap_layer(index, other_index, swap_transforms).get(); });
}
std::future stage_delayed::swap_layer(int index, int other_index, const std::shared_ptr& other, bool swap_transforms)
{
    const auto other2 = std::static_pointer_cast(other);

    // Something so that we know to lock the channel
    other2->executor_.begin_invoke([]() {});

    return executor_.begin_invoke(
        [=, this]() { return stage_->swap_layer(index, other_index, other2->stage_, swap_transforms).get(); });
}
std::future> stage_delayed::foreground(int index)
{
    return executor_.begin_invoke(
        [=, this]() -> std::shared_ptr { return stage_->foreground(index).get(); });
}
std::future> stage_delayed::background(int index)
{
    return executor_.begin_invoke(
        [=, this]() -> std::shared_ptr { return stage_->background(index).get(); });
}
std::future stage_delayed::execute(std::function func)
{
    return executor_.begin_invoke([=, this]() { return stage_->execute(func).get(); });
}

}} // namespace caspar::core

================================================
FILE: src/core/producer/stage.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

#include "../fwd.h"
#include "../monitor/monitor.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace caspar::diagnostics {
class graph;
}

namespace caspar { namespace core {

// The frames produced by one layer during a single stage tick. The *2 members are
// only populated when is_interlaced is set (field B of an interlaced format).
struct layer_frame
{
    bool is_interlaced;
    draw_frame foreground1;
    draw_frame background1;
    draw_frame foreground2;
    draw_frame background2;
    bool has_background;
};

// Everything a stage tick produces: the active format, the audio sample count for
// this tick, and the ordered per-layer frames (frames2 holds field B).
struct stage_frames
{
    core::video_format_desc format_desc;
    int nb_samples;
    std::vector frames;
    std::vector frames2;
};

/**
 * Base class for the stage.
   Should be used when either stage or stage_delayed may be used */
class stage_base
{
  public:
    using transform_func_t  = std::function;
    using transform_tuple_t = std::tuple;

    virtual ~stage_base() {}

    // Methods
    virtual std::future apply_transforms(const std::vector& transforms) = 0;
    virtual std::future apply_transform(int index, const transform_func_t& transform, unsigned int mix_duration, const tweener& tween) = 0;
    virtual std::future clear_transforms(int index) = 0;
    virtual std::future clear_transforms() = 0;
    virtual std::future get_current_transform(int index) = 0;
    virtual std::future load(int index, const spl::shared_ptr& producer, bool preview = false, bool auto_play = false) = 0;
    virtual std::future preview(int index) = 0;
    virtual std::future pause(int index) = 0;
    virtual std::future resume(int index) = 0;
    virtual std::future play(int index) = 0;
    virtual std::future stop(int index) = 0;
    virtual std::future call(int index, const std::vector& params) = 0;
    virtual std::future callbg(int index, const std::vector& params) = 0;
    virtual std::future clear(int index) = 0;
    virtual std::future clear() = 0;
    virtual std::future swap_layers(const std::shared_ptr& other, bool swap_transforms) = 0;
    virtual std::future swap_layer(int index, int other_index, bool swap_transforms) = 0;
    virtual std::future swap_layer(int index, int other_index, const std::shared_ptr& other, bool swap_transforms) = 0;
    virtual std::future execute(std::function k) = 0;

    // Properties
    virtual std::future> foreground(int index) = 0;
    virtual std::future> background(int index) = 0;
};

/**
 * The normal stage implementation.
 */
class stage final : public stage_base
{
    // Non-copyable.
    stage(const stage&);
    stage& operator=(const stage&);

  public:
    explicit stage(int channel_index, spl::shared_ptr graph, const core::video_format_desc& format_desc);

    // Ticks all layers once and returns the produced frames; see stage.cpp for details.
    const stage_frames operator()(uint64_t frame_number, std::vector& fetch_background, std::function routesCb);

    std::future apply_transforms(const std::vector& transforms) override;
    std::future apply_transform(int index, const transform_func_t& transform, unsigned int mix_duration, const tweener& tween) override;
    std::future clear_transforms(int index) override;
    std::future clear_transforms() override;
    std::future get_current_transform(int index) override;
    std::future load(int index, const spl::shared_ptr& producer, bool preview = false, bool auto_play = false) override;
    std::future preview(int index) override;
    std::future pause(int index) override;
    std::future resume(int index) override;
    std::future play(int index) override;
    std::future stop(int index) override;
    std::future call(int index, const std::vector& params) override;
    std::future callbg(int index, const std::vector& params) override;
    std::future clear(int index) override;
    std::future clear() override;
    std::future swap_layers(const std::shared_ptr& other, bool swap_transforms) override;
    std::future swap_layer(int index, int other_index, bool swap_transforms) override;
    std::future swap_layer(int index, int other_index, const std::shared_ptr& other, bool swap_transforms) override;

    core::monitor::state state() const;

    std::future> foreground(int index) override;
    std::future> background(int index) override;

    std::future execute(std::function k) override;

    std::unique_lock get_lock() const;

    core::video_format_desc video_format_desc() const;
    std::future video_format_desc(const core::video_format_desc& format_desc);

  private:
    struct impl;
    spl::shared_ptr impl_;
};

/**
 * A stage wrapper, that queues up stage operations until release() is called.
 * This is useful for batching commands.
 */
class stage_delayed final : public stage_base
{
  public:
    stage_delayed(std::shared_ptr& st, int index);

    // Number of operations queued so far.
    int64_t count_queued() const { return executor_.size(); }
    // Unblocks the executor so the queued operations run against the real stage.
    void release() { waiter_.set_value(); }
    // Discards all queued operations.
    void abort() { executor_.clear(); }
    // Blocks until the queue has drained.
    void wait() { executor_.stop_and_wait(); }

    std::future apply_transforms(const std::vector& transforms) override;
    std::future apply_transform(int index, const transform_func_t& transform, unsigned int mix_duration, const tweener& tween) override;
    std::future clear_transforms(int index) override;
    std::future clear_transforms() override;
    std::future get_current_transform(int index) override;
    std::future load(int index, const spl::shared_ptr& producer, bool preview = false, bool auto_play = false) override;
    std::future preview(int index) override;
    std::future pause(int index) override;
    std::future resume(int index) override;
    std::future play(int index) override;
    std::future stop(int index) override;
    std::future call(int index, const std::vector& params) override;
    std::future callbg(int index, const std::vector& params) override;
    std::future clear(int index) override;
    std::future clear() override;
    std::future swap_layers(const std::shared_ptr& other, bool swap_transforms) override;
    std::future swap_layer(int index, int other_index, bool swap_transforms) override;
    std::future swap_layer(int index, int other_index, const std::shared_ptr& other, bool swap_transforms) override;

    // Properties

    std::future> foreground(int index) override;
    std::future> background(int index) override;

    std::future execute(std::function k) override;

    std::unique_lock get_lock() const { return stage_->get_lock(); }

  private:
    std::promise waiter_;
    std::shared_ptr& stage_;
    executor executor_;
};

}} // namespace caspar::core

================================================
FILE: src/core/producer/transition/sting_producer.cpp
================================================
/*
 * Copyright (c) 2018 Norsk rikskringkasting AS
 *
 * This file is part of CasparCG
(www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Julian Waller, julian@superfly.tv
 */
#include "../../StdAfx.h"

#include "sting_producer.h"

#include "../../frame/draw_frame.h"
#include "../../frame/frame_transform.h"
#include "../../monitor/monitor.h"
#include "../frame_producer.h"
#include "../frame_producer_registry.h"

// NOTE(review): the targets of the four system includes below (and all template
// argument lists in this file, e.g. spl::shared_ptr<frame_producer>,
// static_cast<uint32_t>) were stripped by text extraction; tokens are preserved
// exactly as found — restore against the upstream repository before compiling.
#include
#include
#include
#include

namespace caspar { namespace core {

// Holds one cached frame per interlaced field (field1 = first/a, field2 = b).
struct frame_pair
{
    draw_frame field1;
    draw_frame field2;

    // Return the cached frame for the requested field.
    draw_frame get(video_field field)
    {
        if (field == video_field::b)
            return field2;
        else
            return field1;
    }

    // Store a frame for the requested field.
    void set(video_field field, const draw_frame& frame)
    {
        if (field == video_field::b)
            field2 = frame;
        else
            field1 = frame;
    }
};

// Transition producer that crosses from a source to a destination producer
// using a mask clip (sting) plus optional overlay, with an optional audio
// cross-fade. A mask filename of "empty" (case-insensitive) selects "cut
// mode": a hard cut at trigger_point with only the overlay/audio fade applied.
class sting_producer : public frame_producer
{
    monitor::state  state_;
    uint32_t        current_frame_ = 0;         // frames consumed since the transition started
    caspar::tweener audio_tweener_{L"linear"};  // shape of the audio cross-fade

    // Per-field caches so both fields of an interlaced frame pull consistently.
    frame_pair dst_;
    frame_pair src_;
    frame_pair mask_;
    frame_pair overlay_;

    const sting_info info_;
    const bool       is_cut_mode_;  // true when mask filename is "empty"

    spl::shared_ptr dst_producer_     = frame_producer::empty();
    spl::shared_ptr src_producer_     = frame_producer::empty();
    spl::shared_ptr mask_producer_    = frame_producer::empty();
    spl::shared_ptr overlay_producer_ = frame_producer::empty();

  public:
    sting_producer(const spl::shared_ptr& dest, const sting_info& info, const spl::shared_ptr& mask, const spl::shared_ptr& overlay)
        : info_(info)
        , is_cut_mode_(boost::iequals(info.mask_filename, L"empty"))
        , dst_producer_(dest)
        , mask_producer_(mask)
        , overlay_producer_(overlay)
    {
    }

    // frame_producer

    void leading_producer(const spl::shared_ptr& producer) override { src_producer_ = producer; }

    // The producer the stage should replace us with once the transition ends.
    spl::shared_ptr following_producer() const override
    {
        if (is_cut_mode_) {
            uint32_t overlay_duration = 0;
            if (overlay_producer_ != frame_producer::empty()) {
                overlay_duration = overlay_producer_->nb_frames();
                // Clamp ONLY infinite duration overlays to prevent infinite transitions
                if (overlay_duration == UINT32_MAX) {
                    overlay_duration = 0;
                }
            }
            uint32_t transition_end = std::max(info_.trigger_point, overlay_duration);
            if (current_frame_ >= transition_end) {
                // CASPAR_LOG(debug) << L"[sting] Cut mode transition complete at frame " << current_frame_ << L",
                // returning dst_producer_";
                return dst_producer_;
            }
            return core::frame_producer::empty();
        }
        auto duration = target_duration();
        if (info_.audio_fade_duration < UINT32_MAX) {
            // An explicit audio fade caps the transition: hand over once it ends.
            uint32_t audio_end = info_.audio_fade_start + info_.audio_fade_duration;
            if (current_frame_ >= audio_end) {
                return dst_producer_;
            }
        }
        return duration && current_frame_ >= *duration ? dst_producer_ : core::frame_producer::empty();
    }

    // How many frames before the source ends the stage should start us
    // (AUTO playback). Empty when the mask length gives no usable answer.
    std::optional auto_play_delta() const override
    {
        if (is_cut_mode_) {
            uint32_t overlay_duration = 0;
            if (overlay_producer_ != frame_producer::empty()) {
                overlay_duration = overlay_producer_->nb_frames();
                // Clamp ONLY infinite duration overlays to prevent infinite transitions
                if (overlay_duration == UINT32_MAX) {
                    overlay_duration = 0;
                }
            }
            return static_cast(std::max(info_.trigger_point, overlay_duration));
        }
        auto duration = static_cast(mask_producer_->nb_frames());
        if (duration > -1) {
            return std::optional(duration);
        }
        return {};
    }

    // Total transition length in frames, or empty when it cannot be derived.
    std::optional target_duration() const
    {
        if (is_cut_mode_) {
            uint32_t overlay_duration = 0;
            if (overlay_producer_ != frame_producer::empty()) {
                overlay_duration = overlay_producer_->nb_frames();
                // Clamp ONLY infinite duration overlays to prevent infinite transitions
                if (overlay_duration == UINT32_MAX) {
                    overlay_duration = 0;
                }
            }
            return std::max(info_.trigger_point, overlay_duration);
        }
        // Sting mode logic
        auto autoplay = auto_play_delta();
        if (!autoplay) {
            return {};
        }
        auto autoplay2 = static_cast(*autoplay);
        // If mask is infinite, rely on audio fade if specified
        if (autoplay2 == UINT32_MAX) {
            if (info_.audio_fade_duration < UINT32_MAX) {
                return info_.audio_fade_start + info_.audio_fade_duration;
            } else {
                // Infinite mask and no audio fade: use a default duration (e.g., 10 seconds / 600 frames)
                return 600;
            }
        }
        // Finite mask: Use mask duration, potentially extended by audio fade
        if (info_.audio_fade_duration < UINT32_MAX) {
            return std::max(autoplay2, info_.audio_fade_duration + info_.audio_fade_start);
        }
        return autoplay2;
    }

    // Produce one field of the transition. Cut mode shows src until
    // trigger_point then dst, cross-fading audio; sting mode composes
    // src/dst through the mask (see compose()).
    draw_frame receive_impl(const core::video_field field, int nb_samples) override
    {
        auto duration = target_duration();
        CASPAR_SCOPE_EXIT
        {
            state_ = dst_producer_->state();
            state_["transition/type"] = is_cut_mode_ ? std::string("cut") : std::string("sting");
            if (duration)
                state_["transition/frame"] = {static_cast(current_frame_), static_cast(*duration)};
        };

        if (duration && current_frame_ >= *duration) {
            return dst_producer_->receive(field, nb_samples);
        }

        if (is_cut_mode_) {
            // NOTE(review): overlay_duration is computed here but never used in
            // this branch — looks like dead code; confirm against upstream.
            uint32_t overlay_duration = 0;
            if (overlay_producer_ != frame_producer::empty()) {
                overlay_duration = overlay_producer_->nb_frames();
                if (overlay_duration == 0 || overlay_duration == UINT32_MAX) {
                    overlay_duration = 0;
                }
            }
            auto src = src_.get(field);
            if (!src) {
                src = src_producer_->receive(field, nb_samples);
                src_.set(field, src);
                if (!src)
                    src = src_producer_->last_frame(field);
            }
            // The destination is only pulled once the trigger point is reached.
            auto dst = dst_.get(field);
            if (!dst && current_frame_ >= info_.trigger_point) {
                dst = dst_producer_->receive(field, nb_samples);
                dst_.set(field, dst);
                if (!dst)
                    dst = dst_producer_->last_frame(field);
            }
            draw_frame result = (current_frame_ < info_.trigger_point ? src : dst);
            // Audio cross-fades even though video cuts hard.
            double audio_delta = get_audio_delta();
            if (src)
                src.transform().audio_transform.volume = 1.0 - audio_delta;
            if (dst)
                dst.transform().audio_transform.volume = audio_delta;
            bool has_overlay = overlay_producer_ != core::frame_producer::empty();
            auto overlay = overlay_.get(field);
            if (has_overlay && !overlay) {
                overlay = overlay_producer_->receive(field, nb_samples);
                overlay_.set(field, overlay);
                if (!overlay)
                    overlay = overlay_producer_->last_frame(field);
            }
            // Clear the per-field caches and advance to the next frame.
            src_.set(field, draw_frame{});
            dst_.set(field, draw_frame{});
            overlay_.set(field, draw_frame{});
            current_frame_++;
            if (overlay && result) {
                return draw_frame::over(result, overlay);
            }
            return result;
        }

        // --- Sting (mask) mode ---
        auto src = src_.get(field);
        if (!src) {
            src = src_producer_->receive(field, nb_samples);
            src_.set(field, src);
            if (!src) {
                src = src_producer_->last_frame(field);
            }
        }
        bool started_dst = current_frame_ >= info_.trigger_point;
        auto dst = dst_.get(field);
        if (!dst && started_dst) {
            dst = dst_producer_->receive(field, nb_samples);
            dst_.set(field, dst);
            if (!dst) {
                dst = dst_producer_->last_frame(field);
            }
            if (!dst) {
                // Destination not ready yet: show the source unchanged.
                src_.set(field, draw_frame{});
                return src;
            }
        }
        auto mask = mask_.get(field);
        if (!mask) {
            mask = mask_producer_->receive(field, nb_samples);
            mask_.set(field, mask);
        }
        bool expecting_overlay = overlay_producer_ != core::frame_producer::empty();
        auto overlay = overlay_.get(field);
        if (expecting_overlay && !overlay) {
            overlay = overlay_producer_->receive(field, nb_samples);
            overlay_.set(field, overlay);
        }
        // Ensure all the producers are producing frames before we start the transition.
        bool mask_and_overlay_valid = mask && (!expecting_overlay || overlay);
        if (current_frame_ == 0 && !mask_and_overlay_valid) {
            src_.set(field, draw_frame{});
            return src;
        }
        if (!mask_and_overlay_valid) {
            // Mid-transition dropout: reuse the previous mask/overlay frames
            // and do not advance the frame counter.
            mask = mask_producer_->last_frame(field);
            overlay = overlay_producer_->last_frame(field);
        }
        auto res = compose(dst, src, mask, overlay);
        dst_.set(field, draw_frame{});
        src_.set(field, draw_frame{});
        if (mask_and_overlay_valid) {
            mask_.set(field, draw_frame{});
            overlay_.set(field, draw_frame{});
            current_frame_ += 1;
        }
        return res;
    }

    core::draw_frame first_frame(const core::video_field field) override { return dst_producer_->first_frame(field); }

    uint32_t nb_frames() const override { return dst_producer_->nb_frames(); }

    uint32_t frame_number() const override { return dst_producer_->frame_number(); }

    std::wstring print() const override { return L"transition[" + src_producer_->print() + L"=>" + dst_producer_->print() + L"]"; }

    std::wstring name() const override { return L"transition"; }

    std::future call(const std::vector& params) override { return dst_producer_->call(params); }

    // Audio cross-fade position in [0, 1]: 0 = all source, 1 = all destination.
    double get_audio_delta() const
    {
        if (info_.audio_fade_duration < UINT32_MAX) {
            if (current_frame_ < info_.audio_fade_start) {
                return 0;
            }
            auto total_duration = target_duration();
            if (!total_duration) {
                return 0;
            }
            uint32_t frame_number = current_frame_ - info_.audio_fade_start;
            // The fade cannot extend past the end of the transition.
            uint32_t duration = std::min(*total_duration - info_.audio_fade_start, info_.audio_fade_duration);
            if (frame_number > duration) {
                return 1.0;
            }
            return audio_tweener_(frame_number, 0.0, 1.0, static_cast(duration));
        }
        // No explicit fade: tween across the whole transition.
        auto duration = target_duration();
        if (!duration) {
            return 0;
        }
        return audio_tweener_(current_frame_, 0.0, 1.0, static_cast(*duration));
    }

    // Layer the frames: inverted-key mask over src, mask over dst, optional
    // overlay on top; audio volumes follow get_audio_delta().
    draw_frame compose(draw_frame dst_frame, draw_frame src_frame, draw_frame mask_frame, draw_frame overlay_frame) const
    {
        const double delta = get_audio_delta();
        src_frame.transform().audio_transform.volume = 1.0 - delta;
        dst_frame.transform().audio_transform.volume = delta;

        draw_frame mask_frame2 = mask_frame;
        mask_frame.transform().image_transform.is_key = true;
        mask_frame2.transform().image_transform.is_key = true;
        mask_frame2.transform().image_transform.invert = true;

        std::vector frames;
        frames.push_back(std::move(mask_frame2));  // inverted key punching out src
        frames.push_back(std::move(src_frame));
        frames.push_back(std::move(mask_frame));   // key revealing dst
        frames.push_back(std::move(dst_frame));
        if (overlay_frame != draw_frame::empty())
            frames.push_back(std::move(overlay_frame));
        return draw_frame(std::move(frames));
    }

    monitor::state state() const override { return state_; }

    bool is_ready() override { return dst_producer_->is_ready(); }
};

// Factory: resolves the mask (and optional overlay) clips through the producer
// registry and wraps them with the destination in a sting_producer.
spl::shared_ptr create_sting_producer(const frame_producer_dependencies& dependencies, const spl::shared_ptr& destination, sting_info& info)
{
    auto mask_producer = dependencies.producer_registry->create_producer(dependencies, info.mask_filename);
    auto overlay_producer = frame_producer::empty();
    if (!info.overlay_filename.empty()) {
        overlay_producer = dependencies.producer_registry->create_producer(dependencies, info.overlay_filename);
    }
    return spl::make_shared(destination, info, mask_producer, overlay_producer);
}

}} // namespace caspar::core
================================================ FILE: src/core/producer/transition/sting_producer.h ================================================
/*
 * Copyright (c) 2018 Norsk rikskringkasting AS
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Julian Waller, julian@superfly.tv
 */
#pragma once

#include "../../fwd.h"
#include "../../video_format.h"

// NOTE(review): the targets of the two system includes below (and the template
// argument lists in this header) were stripped by text extraction; tokens are
// preserved exactly as found.
#include
#include

namespace caspar { namespace core {

// Parameters for a sting transition (see sting_producer.cpp).
struct sting_info
{
    std::wstring mask_filename    = L"";  // mask clip; L"empty" selects cut mode
    std::wstring overlay_filename = L"";  // optional overlay clip drawn on top
    uint32_t     trigger_point    = 0;    // frame at which the destination starts being pulled
    uint32_t     audio_fade_start = 0;    // frame at which the audio cross-fade begins
    // UINT32_MAX means "no explicit audio fade": audio tweens over the whole transition.
    uint32_t     audio_fade_duration = UINT32_MAX;
};

spl::shared_ptr create_sting_producer(const frame_producer_dependencies& dependencies, const spl::shared_ptr& destination, sting_info& info);

}} // namespace caspar::core
================================================ FILE: src/core/producer/transition/transition_producer.cpp ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#include "../../StdAfx.h"

#include "transition_producer.h"

#include "../../frame/draw_frame.h"
#include "../../frame/frame_transform.h"
#include "../../monitor/monitor.h"
#include "../frame_producer.h"

// NOTE(review): the targets of the three system includes below (and all
// template argument lists in this file, e.g. spl::shared_ptr<frame_producer>,
// std::optional<int64_t>, static_cast<double>) were stripped by text
// extraction; tokens are preserved exactly as found — restore against the
// upstream repository before compiling.
#include
#include
#include

namespace caspar::core {

// Producer that renders a timed transition (CUT/MIX/PUSH/SLIDE/WIPE/FADECUT/
// CUTFADE/VFADE) from a leading "source" producer to a destination producer
// over info_.duration frames.
class transition_producer : public frame_producer
{
    monitor::state state_;
    int            current_frame_ = 0;  // frames of the transition already emitted

    const transition_info info_;

    spl::shared_ptr dst_producer_ = frame_producer::empty();
    spl::shared_ptr src_producer_ = frame_producer::empty();

    bool dst_is_ready_ = false;  // transition is held until the destination can produce

  public:
    transition_producer(const spl::shared_ptr& dest, transition_info info)
        : info_(std::move(info))
        , dst_producer_(dest)
    {
        dst_is_ready_ = dst_producer_->is_ready();
        update_state();
    }

    // frame_producer

    // Re-poll destination readiness; first_frame() nudges it to attempt a frame.
    void update_is_ready(const core::video_field field)
    {
        // Ensure a frame has been attempted
        dst_producer_->first_frame(field);
        dst_is_ready_ = dst_producer_->is_ready();
    }

    core::draw_frame last_frame(const core::video_field field) override
    {
        CASPAR_SCOPE_EXIT { update_state(); };
        update_is_ready(field);
        if (!dst_is_ready_) {
            return src_producer_->last_frame(field);
        }
        auto src = src_producer_->last_frame(field);
        auto dst = dst_producer_->last_frame(field);
        if (dst && current_frame_ >= info_.duration) {
            return dst;
        } else {
            return src;
        }
    }

    core::draw_frame first_frame(const core::video_field field) override { return dst_producer_->first_frame(field); }

    void leading_producer(const spl::shared_ptr& producer) override { src_producer_ = producer; }

    // Hand over to the destination once the transition has completed.
    [[nodiscard]] spl::shared_ptr following_producer() const override
    {
        return current_frame_ >= info_.duration && dst_is_ready_ ? dst_producer_ : core::frame_producer::empty();
    }

    [[nodiscard]] std::optional auto_play_delta() const override
    {
        // Return how many frames before the end to start the transition
        // Must never return 0 to avoid duplicate fields in AUTO playback
        int64_t delta;
        switch (info_.type) {
            case transition_type::cut:
                // CUT: Need full duration to show all frames
                delta = info_.duration;
                break;
            case transition_type::cutfade:
                // CUTFADE: Source is removed on first frame
                delta = 1;
                break;
            case transition_type::vfade:
                // VFADE: Source fades out by midpoint
                delta = static_cast(std::floor(info_.duration / 2.0));
                break;
            default:
                // Standard transitions (MIX, PUSH, SLIDE, WIPE)
                // Need full duration to complete properly and avoid duplicated fields
                delta = info_.duration;
                break;
        }
        // Ensure we never return less than 1
        return std::max(delta, int64_t(1));
    }

    // Publish transition progress/type/direction into the monitor state.
    void update_state()
    {
        state_ = dst_producer_->state();
        state_["transition/producer"] = dst_producer_->name();
        state_["transition/frame"] = {current_frame_, info_.duration};
        state_["transition/type"] = [&]() -> std::string {
            switch (info_.type) {
                case transition_type::mix:
                    return "mix";
                case transition_type::wipe:
                    return "wipe";
                case transition_type::slide:
                    return "slide";
                case transition_type::push:
                    return "push";
                case transition_type::cut:
                    return "cut";
                case transition_type::fadecut:
                    return "fadecut";
                case transition_type::cutfade:
                    return "cutfade";
                case transition_type::vfade:
                    return "vfade";
                default:
                    return "n/a";
            }
        }();
        state_["transition/direction"] = [&]() -> std::string {
            switch (info_.direction) {
                case transition_direction::from_left:
                    return "from_left";
                case transition_direction::from_right:
                    return "from_right";
                case transition_direction::from_top:
                    return "from_top";
                case transition_direction::from_bottom:
                    return "from_bottom";
                default:
                    return "n/a";
            }
        }();
    }

    draw_frame receive_impl(const core::video_field field, int nb_samples) override
    {
        CASPAR_SCOPE_EXIT { update_state(); };
        update_is_ready(field);
        // If destination is not ready, always use source (don't start any transition)
        if (!dst_is_ready_) {
            return src_producer_->receive(field, nb_samples);
        }
        // If transition is complete, return destination
        if (current_frame_ >= info_.duration) {
            auto dst = dst_producer_->receive(field, nb_samples);
            if (!dst)
                dst = dst_producer_->last_frame(field);
            return dst;
        }
        // For CUT transitions, handle based on duration
        if (info_.type == transition_type::cut) {
            // NOTE(review): this inner completion check is unreachable — the
            // identical condition returned above; only the else branch runs.
            if (current_frame_ >= info_.duration) {
                // Cut now - return destination
                auto dst = dst_producer_->receive(field, nb_samples);
                if (!dst)
                    dst = dst_producer_->last_frame(field);
                current_frame_++; // Increment after processing
                return dst;
            } else {
                // Not time to cut yet - return source
                auto src = src_producer_->receive(field, nb_samples);
                if (!src)
                    src = src_producer_->last_frame(field);
                current_frame_++; // Increment after processing
                return src;
            }
        }
        // Compose and progress the transition
        auto result = compose(field, nb_samples);
        current_frame_++;
        return result;
    }

    [[nodiscard]] uint32_t nb_frames() const override { return dst_producer_->nb_frames(); }

    [[nodiscard]] uint32_t frame_number() const override { return dst_producer_->frame_number(); }

    [[nodiscard]] std::wstring print() const override { return L"transition[" + src_producer_->print() + L"=>" + dst_producer_->print() + L"]"; }

    [[nodiscard]] std::wstring name() const override { return L"transition"; }

    [[nodiscard]] std::future call(const std::vector& params) override { return dst_producer_->call(params); }

    // Build the composited frame for the current transition position.
    [[nodiscard]] draw_frame compose(const core::video_field field, int nb_samples) const
    {
        // Helper lambdas to get wrapped frames
        auto get_src_frame = [&]() {
            auto f = src_producer_->receive(field, nb_samples);
            if (!f)
                f = src_producer_->last_frame(field);
            return draw_frame::push(std::move(f));
        };
        auto get_dst_frame = [&]() {
            auto f = dst_producer_->receive(field, nb_samples);
            if (!f)
                f = dst_producer_->last_frame(field);
            return draw_frame::push(std::move(f));
        };

        if (info_.type == transition_type::cut) {
            return get_dst_frame();
        } else if (info_.type == transition_type::fadecut) {
            // Only use source during transition
            auto   src = get_src_frame();
            double delta = info_.tweener(current_frame_, 0.0, 1.0, static_cast(info_.duration - 1));
            double opacity = info_.duration > 1 ? 1.0 - delta : 0.0;
            src.transform().image_transform.opacity = opacity;
            src.transform().audio_transform.volume = opacity;
            src.transform().audio_transform.immediate_volume = current_frame_ == 0;
            return src;
        } else if (info_.type == transition_type::vfade) {
            // V-fade: source fades to black by the midpoint, destination fades
            // in from black after it. Even durations insert explicit black
            // frames either side of the midpoint.
            double delta = info_.tweener(current_frame_, 0.0, 1.0, static_cast(info_.duration - 1));
            bool   is_even_duration = (info_.duration % 2 == 0);
            if (is_even_duration) {
                double half_step = 0.5 / (info_.duration - 1);
                if (delta >= (0.5 - half_step) && delta < 0.5) {
                    auto src = get_src_frame();
                    src.transform().image_transform.opacity = 0.0;
                    src.transform().audio_transform.volume = 0.0;
                    return src;
                }
                if (delta >= 0.5 && delta < (0.5 + half_step)) {
                    auto dst = get_dst_frame();
                    dst.transform().image_transform.opacity = 0.0;
                    dst.transform().audio_transform.volume = 0.0;
                    dst.transform().audio_transform.immediate_volume = true;
                    return dst;
                }
            }
            if (!is_even_duration && delta == 0.5) {
                // Odd duration: the exact middle frame is fully black.
                auto dst = get_dst_frame();
                dst.transform().image_transform.opacity = 0.0;
                dst.transform().audio_transform.volume = 0.0;
                dst.transform().audio_transform.immediate_volume = true;
                return dst;
            } else if (delta < 0.5) {
                auto   src = get_src_frame();
                double fade_out = 1.0 - (delta * 2.0);
                src.transform().image_transform.opacity = fade_out;
                src.transform().audio_transform.volume = fade_out;
                return src;
            } else {
                auto   dst = get_dst_frame();
                double fade_in = (delta - 0.5) * 2.0;
                dst.transform().image_transform.opacity = fade_in;
                dst.transform().audio_transform.volume = fade_in;
                dst.transform().audio_transform.immediate_volume = is_even_duration ? (delta < 0.5 + (1.0 / (info_.duration - 1))) : (delta <= 0.5 + (1.0 / (info_.duration - 1)));
                return dst;
            }
        }

        // For all other transitions, we need both frames
        auto src_frame = get_src_frame();
        auto dst_frame = get_dst_frame();

        const double delta = info_.tweener(current_frame_, 0.0, 1.0, static_cast(info_.duration - 1));

        // Get horizontal or vertical direction based on transition direction
        const bool is_horizontal = info_.direction == transition_direction::from_left || info_.direction == transition_direction::from_right;
        const double h_dir = info_.direction == transition_direction::from_left ? 1.0 : -1.0;
        const double v_dir = info_.direction == transition_direction::from_top ? 1.0 : -1.0;

        src_frame.transform().audio_transform.volume = 1.0 - delta;
        dst_frame.transform().audio_transform.volume = delta;

        if (info_.type == transition_type::cutfade) {
            // Source is dropped immediately; destination fades in linearly.
            double mix;
            if (current_frame_ == 0) {
                mix = 0.0;
            } else {
                mix = static_cast(current_frame_) / static_cast(info_.duration - 1);
            }
            if (info_.duration <= 1) {
                mix = 1.0;
            }
            dst_frame.transform().image_transform.opacity = mix;
            dst_frame.transform().audio_transform.volume = mix;
            dst_frame.transform().audio_transform.immediate_volume = current_frame_ == 0;
            return dst_frame;
        } else if (info_.type == transition_type::mix) {
            dst_frame.transform().image_transform.opacity = delta;
            dst_frame.transform().image_transform.is_mix = true;
            src_frame.transform().image_transform.opacity = 1.0 - delta;
            src_frame.transform().image_transform.is_mix = true;
        } else if (info_.type == transition_type::slide) {
            // Destination slides in over a stationary source.
            if (is_horizontal) {
                dst_frame.transform().image_transform.fill_translation[0] = (-1.0 + delta) * h_dir;
            } else {
                dst_frame.transform().image_transform.fill_translation[1] = (-1.0 + delta) * v_dir;
            }
        } else if (info_.type == transition_type::push) {
            // Destination pushes the source out of frame.
            if (is_horizontal) {
                dst_frame.transform().image_transform.fill_translation[0] = (-1.0 + delta) * h_dir;
                src_frame.transform().image_transform.fill_translation[0] = (0.0 + delta) * h_dir;
            } else {
                dst_frame.transform().image_transform.fill_translation[1] = (-1.0 + delta) * v_dir;
                src_frame.transform().image_transform.fill_translation[1] = (0.0 + delta) * v_dir;
            }
        } else if (info_.type == transition_type::wipe) {
            // Destination is revealed behind a moving clip edge.
            if (is_horizontal) {
                if (info_.direction == transition_direction::from_right) {
                    dst_frame.transform().image_transform.clip_scale[0] = delta;
                } else {
                    dst_frame.transform().image_transform.clip_translation[0] = (1.0 - delta);
                }
            } else {
                if (info_.direction == transition_direction::from_bottom) {
                    dst_frame.transform().image_transform.clip_scale[1] = delta;
                } else {
                    dst_frame.transform().image_transform.clip_translation[1] = (1.0 - delta);
                }
            }
        }
        return draw_frame::over(src_frame, dst_frame);
    }

    [[nodiscard]] core::monitor::state state() const override { return state_; }

    bool is_ready() override { return dst_producer_->is_ready(); }
};

spl::shared_ptr create_transition_producer(const spl::shared_ptr& destination, const transition_info& info)
{
    return spl::make_shared(destination, info);
}

// Parse an AMCP-style transition suffix (e.g. "... MIX 25 LINEAR FROMLEFT")
// into transitionInfo. Returns false when no transition (or a zero duration)
// is found; on false, transitionInfo may be partially written.
bool try_match_transition(const std::wstring& message, transition_info& transitionInfo)
{
    // Using word boundaries to ensure we match complete transition names
    // NOTE(review): the named-group syntax in this regex literal appears
    // garbled by text extraction — "(?" should presumably read
    // "(?<TRANSITION>" etc. to match the what["TRANSITION"] lookups below;
    // the literal is runtime text and is preserved byte-for-byte here.
    static const boost::wregex expr(
        LR"(.*\b(?VFADE|FADECUT|CUTFADE|CUT|PUSH|SLIDE|WIPE|MIX)\b\s+(?\d+)\s*(?(LINEAR)|(EASE[^\s]*))?\s*(?FROMLEFT|FROMRIGHT|FROMTOP|FROMBOTTOM|LEFT|RIGHT|UP|DOWN)?.*)");
    boost::wsmatch what;
    if (!boost::regex_match(message, what, expr)) {
        return false;
    }

    transitionInfo.duration = std::stoi(what["DURATION"].str());
    if (transitionInfo.duration == 0) {
        return false;
    }
    auto transition = what["TRANSITION"].str();
    auto direction = what["DIRECTION"].matched ? what["DIRECTION"].str() : L"";
    auto tween = what["TWEEN"].matched ? what["TWEEN"].str() : L"";
    transitionInfo.tweener = tween;

    if (transition == L"CUT")
        transitionInfo.type = transition_type::cut;
    else if (transition == L"MIX")
        transitionInfo.type = transition_type::mix;
    else if (transition == L"PUSH")
        transitionInfo.type = transition_type::push;
    else if (transition == L"SLIDE")
        transitionInfo.type = transition_type::slide;
    else if (transition == L"WIPE")
        transitionInfo.type = transition_type::wipe;
    else if (transition == L"FADECUT")
        transitionInfo.type = transition_type::fadecut;
    else if (transition == L"CUTFADE")
        transitionInfo.type = transition_type::cutfade;
    else if (transition == L"VFADE")
        transitionInfo.type = transition_type::vfade;

    // LEFT/RIGHT/UP/DOWN are legacy aliases describing the source's exit
    // direction, hence the inverted mapping.
    if (direction == L"FROMLEFT" || direction == L"RIGHT")
        transitionInfo.direction = transition_direction::from_left;
    else if (direction == L"FROMRIGHT" || direction == L"LEFT")
        transitionInfo.direction = transition_direction::from_right;
    else if (direction == L"FROMTOP" || direction == L"DOWN")
        transitionInfo.direction = transition_direction::from_top;
    else if (direction == L"FROMBOTTOM" || direction == L"UP")
        transitionInfo.direction = transition_direction::from_bottom;

    return true;
}

} // namespace caspar::core
================================================ FILE: src/core/producer/transition/transition_producer.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#pragma once

#include "../../fwd.h"

// NOTE(review): the targets of the three system includes below (and the
// template argument lists in this header) were stripped by text extraction;
// tokens are preserved exactly as found.
#include
#include
#include

namespace caspar { namespace core {

// Available transition styles; `count` is a sentinel, not a real type.
enum class transition_type
{
    cut,
    mix,
    push,
    slide,
    wipe,
    fadecut,
    cutfade,
    vfade,
    count
};

// Side of the frame the destination enters from; `count` is a sentinel.
enum class transition_direction
{
    from_left,
    from_right,
    from_top,
    from_bottom,
    count
};

// Parsed transition parameters (see try_match_transition).
struct transition_info
{
    int                  duration  = 0;  // transition length in frames
    transition_direction direction = transition_direction::from_left;
    transition_type      type      = transition_type::cut;
    caspar::tweener      tweener{L"linear"};  // easing curve for the progress delta
};

bool try_match_transition(const std::wstring& message, transition_info& transitionInfo);

spl::shared_ptr create_transition_producer(const spl::shared_ptr& destination, const transition_info& info);

}} // namespace caspar::core
================================================ FILE: src/core/video_channel.cpp ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
#include "StdAfx.h"

#include "common/os/thread.h"
#include "video_channel.h"
#include "video_format.h"

#include "consumer/channel_info.h"
#include "consumer/output.h"
#include "frame/draw_frame.h"
#include "frame/frame.h"
#include "frame/frame_factory.h"
#include "mixer/mixer.h"
#include "producer/stage.h"

// NOTE(review): the targets of the nine system includes below (and all
// template argument lists in this file, e.g. std::shared_ptr<core::stage>,
// std::map<route_id, std::weak_ptr<core::route>>) were stripped by text
// extraction; tokens are preserved exactly as found — restore against the
// upstream repository before compiling.
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace caspar { namespace core {

// Strict weak ordering so route_id can key the routes_ map: packs index into
// the high bits (index << 2) and mode into the low bits.
// NOTE(review): index == -1 (whole-channel route) makes the packed key
// negative; appears intentional so channel routes sort before layer routes —
// confirm against upstream.
bool operator<(const route_id& a, const route_id& b) { return a.mode + (a.index << 2) < b.mode + (b.index << 2); }

// One video channel: owns the stage (producers), mixer, and output
// (consumers), and drives them from a dedicated realtime tick thread.
struct video_channel::impl final
{
    monitor::state state_;

    const channel_info channel_info_;

    // Diagnostics graph, created with this channel's index bound to the
    // diagnostics call context.
    const spl::shared_ptr graph_ = [](int index) {
        core::diagnostics::scoped_call_context save;
        core::diagnostics::call_context::for_thread().video_channel = index;
        return spl::make_shared();
    }(channel_info_.index);

    caspar::core::output output_;
    spl::shared_ptr image_mixer_;
    caspar::core::mixer mixer_;
    std::shared_ptr stage_;

    uint64_t frame_counter_ = 0;  // monotonically increasing tick number

    std::function tick_;  // per-tick callback (publishes OSC/monitor state)

    std::map> routes_;  // weak: routes die with their consumers
    std::mutex routes_mutex_;           // guards routes_ (tick thread vs. route())

    std::atomic abort_request_{false};  // set by ~impl() to stop the tick thread
    std::thread thread_;

    // Called by the stage for every produced layer: forwards the layer's
    // frames to any route that sources from it.
    std::function routesCb = [&](int layer, const layer_frame& layer_frame) {
        std::lock_guard lock(routes_mutex_);
        for (auto& r : routes_) {
            // if this layer is the source for this route, push the frame to the route producers
            if (layer == r.first.index) {
                auto route = r.second.lock();
                if (!route)
                    continue;
                if (r.first.index == -1) {
                    // Whole-channel route: frames are already composited.
                    route->signal(layer_frame.foreground1, layer_frame.foreground2);
                } else if (r.first.mode == route_mode::background || (r.first.mode == route_mode::next && layer_frame.has_background)) {
                    route->signal(draw_frame::pop(layer_frame.background1), draw_frame::pop(layer_frame.background2));
                } else {
                    route->signal(draw_frame::pop(layer_frame.foreground1), draw_frame::pop(layer_frame.foreground2));
                }
            }
        }
    };

  public:
    impl(int index, const core::video_format_desc& format_desc, color_space default_color_space, std::unique_ptr image_mixer, std::function tick)
        : channel_info_(index, image_mixer->depth(), default_color_space)
        , output_(graph_, format_desc, channel_info_)
        , image_mixer_(std::move(image_mixer))
        , mixer_(index, graph_, image_mixer_)
        , stage_(std::make_shared(index, graph_, format_desc))
        , tick_(std::move(tick))
    {
        graph_->set_color("produce-time", caspar::diagnostics::color(0.0f, 1.0f, 0.0f));
        graph_->set_color("mix-time", caspar::diagnostics::color(1.0f, 0.0f, 0.9f, 0.8f));
        graph_->set_color("consume-time", caspar::diagnostics::color(1.0f, 0.4f, 0.0f, 0.8f));
        graph_->set_color("frame-time", caspar::diagnostics::color(1.0f, 0.4f, 0.4f, 0.8f));
        graph_->set_color("osc-time", caspar::diagnostics::color(0.3f, 0.4f, 0.0f, 0.8f));
        graph_->set_text(print());
        caspar::diagnostics::register_graph(graph_);

        CASPAR_LOG(info) << print() << " Successfully Initialized.";

        // The channel tick loop: produce -> mix -> consume, publishing timing
        // diagnostics and monitor state each iteration. Pacing is provided by
        // the blocking output_() call, not by an explicit sleep.
        thread_ = std::thread([this] {
            set_thread_realtime_priority();
            set_thread_name(L"channel-" + std::to_wstring(channel_info_.index));

            while (!abort_request_) {
                try {
                    graph_->set_text(print());

                    frame_counter_ += 1;

                    caspar::timer frame_timer;

                    // Determine all layers that need a frame from the background producer
                    std::vector background_routes = {};
                    {
                        std::lock_guard lock(routes_mutex_);
                        for (auto& r : routes_) {
                            // Ensure pointer is still valid
                            if (!r.second.lock())
                                continue;

                            if (r.first.mode != route_mode::foreground) {
                                background_routes.push_back(r.first.index);
                            }
                        }
                    }

                    // Produce
                    caspar::timer produce_timer;
                    auto stage_frames = (*stage_)(frame_counter_, background_routes, routesCb);
                    graph_->set_value("produce-time", produce_timer.elapsed() * stage_frames.format_desc.hz * 0.5);

                    // This is a little race prone, but at worst a new consumer will start with a frame of black
                    bool has_consumers = output_.consumer_count() > 0;

                    // Mix (second field only for interlaced formats)
                    caspar::timer mix_timer;
                    auto mixed_frame = has_consumers ? mixer_(stage_frames.frames, stage_frames.format_desc, stage_frames.nb_samples) : const_frame{};
                    auto mixed_frame2 = has_consumers && stage_frames.format_desc.field_count == 2 ? mixer_(stage_frames.frames2, stage_frames.format_desc, stage_frames.nb_samples) : const_frame{};
                    graph_->set_value("mix-time", mix_timer.elapsed() * stage_frames.format_desc.hz * 0.5);

                    // Consume
                    caspar::timer consume_timer;
                    output_(mixed_frame, mixed_frame2, stage_frames.format_desc);
                    graph_->set_value("consume-time", consume_timer.elapsed() * stage_frames.format_desc.hz * 0.5);

                    graph_->set_value("frame-time", frame_timer.elapsed() * stage_frames.format_desc.hz * 0.5);

                    monitor::state state = {};
                    state["stage"] = stage_->state();
                    state["mixer"] = mixer_.state();
                    state["output"] = output_.state();
                    state["framerate"] = {stage_frames.format_desc.framerate.numerator() * stage_frames.format_desc.field_count, stage_frames.format_desc.framerate.denominator()};
                    state["format"] = stage_frames.format_desc.name;
                    state_ = state;

                    caspar::timer osc_timer;
                    tick_(state_);
                    graph_->set_value("osc-time", osc_timer.elapsed() * stage_frames.format_desc.hz * 0.5);
                } catch (...) {
                    // Keep the channel alive: log and continue with the next tick.
                    CASPAR_LOG_CURRENT_EXCEPTION();
                }
            }
        });
    }

    ~impl()
    {
        CASPAR_LOG(info) << print() << " Uninitializing.";
        abort_request_ = true;
        thread_.join();
    }

    // Get (or lazily create) the route for the given layer index and mode;
    // index -1 addresses the whole channel.
    std::shared_ptr route(int index = -1, route_mode mode = route_mode::foreground)
    {
        std::lock_guard lock(routes_mutex_);

        route_id id = {};
        id.index = index;
        id.mode = mode;

        auto route = routes_[id].lock();
        if (!route) {
            route = std::make_shared();
            route->format_desc = stage_->video_format_desc(); // TODO this needs updating whenever the videomode changes
            route->name = std::to_wstring(channel_info_.index);
            if (index != -1) {
                route->name += L"/" + std::to_wstring(index);
            }
            if (mode == route_mode::background) {
                route->name += L"/background";
            } else if (mode == route_mode::next) {
                route->name += L"/next";
            }
            routes_[id] = route;
        }

        return route;
    }

    std::wstring print() const { return L"video_channel[" + std::to_wstring(channel_info_.index) + L"|" + stage_->video_format_desc().name + L"]"; }

    int index() const { return channel_info_.index; }

    channel_info get_consumer_channel_info() const { return channel_info_; }
};

// Public pimpl wrappers: forward everything to impl.
video_channel::video_channel(int index, const core::video_format_desc& format_desc, color_space default_color_space, std::unique_ptr image_mixer, std::function tick)
    : impl_(new impl(index, format_desc, default_color_space, std::move(image_mixer), std::move(tick)))
{
}
video_channel::~video_channel() {}
const std::shared_ptr& video_channel::stage() const { return impl_->stage_; }
std::shared_ptr& video_channel::stage() { return impl_->stage_; }
const mixer& video_channel::mixer() const { return impl_->mixer_; }
mixer& video_channel::mixer() { return impl_->mixer_; }
const output& video_channel::output() const { return impl_->output_; }
output& video_channel::output() { return impl_->output_; }
spl::shared_ptr video_channel::frame_factory() { return impl_->image_mixer_; }
int video_channel::index() const { return impl_->index(); }
channel_info video_channel::get_consumer_channel_info() const { return impl_->get_consumer_channel_info(); };
core::monitor::state video_channel::state() const { return impl_->state_; }
std::shared_ptr video_channel::route(int index, route_mode mode) { return impl_->route(index, mode); }

}} // namespace caspar::core
================================================ FILE: src/core/video_channel.h ================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
* * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include "fwd.h" #include "video_format.h" #include "frame/pixel_format.h" #include "monitor/monitor.h" #include #include #include namespace caspar { namespace core { enum route_mode { foreground, background, next, // background if any, otherwise foreground }; struct route_id { int index; route_mode mode; bool const operator==(const route_id& o) { return index == o.index && mode == o.mode; } }; struct route { route() = default; route(const route&) = delete; route(route&&) = default; route& operator=(const route&) = delete; route& operator=(route&&) = default; boost::signals2::signal signal; video_format_desc format_desc; std::wstring name; }; class video_channel final { video_channel(const video_channel&); video_channel& operator=(const video_channel&); public: explicit video_channel(int index, const video_format_desc& format_desc, color_space default_color_space, std::unique_ptr image_mixer, std::function on_tick); ~video_channel(); core::monitor::state state() const; const std::shared_ptr& stage() const; std::shared_ptr& stage(); const core::mixer& mixer() const; core::mixer& mixer(); const core::output& output() const; core::output& output(); spl::shared_ptr frame_factory(); int index() const; [[nodiscard]] channel_info get_consumer_channel_info() const; std::shared_ptr route(int index = -1, route_mode mode = route_mode::foreground); private: struct impl; spl::unique_ptr impl_; }; }} // namespace caspar::core ================================================ FILE: src/core/video_format.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #include "StdAfx.h" #include "video_format.h" #include #include #include #include namespace caspar { namespace core { struct video_format_repository::impl { private: std::map formats_; public: impl() : formats_() { const std::vector default_formats = { {video_format::pal, 2, 720, 576, 1024, 576, 25000, 1000, L"PAL", {1920 / 2}}, {video_format::ntsc, 2, 720, 486, 720, 540, 30000, 1001, L"NTSC", {801, 801, 801, 800, 801, 801, 801, 800, 801, 801}}, {video_format::x576p2500, 1, 720, 576, 1024, 576, 25000, 1000, L"576p2500", {1920}}, {video_format::x720p2398, 1, 1280, 720, 1280, 720, 24000, 1001, L"720p2398", {2002}}, {video_format::x720p2400, 1, 1280, 720, 1280, 720, 24000, 1000, L"720p2400", {2000}}, {video_format::x720p2500, 1, 1280, 720, 1280, 720, 25000, 1000, L"720p2500", {1920}}, {video_format::x720p2997, 1, 1280, 720, 1280, 720, 30000, 1001, L"720p2997", {1602, 1601, 1602, 1601, 1602}}, {video_format::x720p3000, 1, 1280, 720, 1280, 720, 30000, 1000, L"720p3000", {1600}}, {video_format::x720p5000, 1, 1280, 720, 1280, 720, 50000, 1000, L"720p5000", {960}}, {video_format::x720p5994, 1, 1280, 720, 1280, 720, 60000, 1001, L"720p5994", {801, 800, 801, 801, 801}}, {video_format::x720p6000, 1, 1280, 720, 1280, 720, 60000, 1000, L"720p6000", {800}}, {video_format::x1080i5000, 2, 1920, 1080, 1920, 1080, 25000, 1000, L"1080i5000", {1920 / 2}}, {video_format::x1080i5994, 2, 1920, 1080, 1920, 1080, 30000, 1001, L"1080i5994", {801, 801, 801, 800, 801, 801, 801, 800, 801, 801}}, {video_format::x1080i6000, 2, 1920, 1080, 1920, 1080, 30000, 1000, L"1080i6000", {1600 / 2}}, 
{video_format::x1080p2398, 1, 1920, 1080, 1920, 1080, 24000, 1001, L"1080p2398", {2002}}, {video_format::x1080p2400, 1, 1920, 1080, 1920, 1080, 24000, 1000, L"1080p2400", {2000}}, {video_format::x1080p2500, 1, 1920, 1080, 1920, 1080, 25000, 1000, L"1080p2500", {1920}}, {video_format::x1080p2997, 1, 1920, 1080, 1920, 1080, 30000, 1001, L"1080p2997", {1602, 1601, 1602, 1601, 1602}}, {video_format::x1080p3000, 1, 1920, 1080, 1920, 1080, 30000, 1000, L"1080p3000", {1600}}, {video_format::x1080p5000, 1, 1920, 1080, 1920, 1080, 50000, 1000, L"1080p5000", {960}}, {video_format::x1080p5994, 1, 1920, 1080, 1920, 1080, 60000, 1001, L"1080p5994", {801, 800, 801, 801, 801}}, {video_format::x1080p6000, 1, 1920, 1080, 1920, 1080, 60000, 1000, L"1080p6000", {800}}, {video_format::x1556p2398, 1, 2048, 1556, 2048, 1556, 24000, 1001, L"1556p2398", {2002}}, {video_format::x1556p2400, 1, 2048, 1556, 2048, 1556, 24000, 1000, L"1556p2400", {2000}}, {video_format::x1556p2500, 1, 2048, 1556, 2048, 1556, 25000, 1000, L"1556p2500", {1920}}, {video_format::x2160p2398, 1, 3840, 2160, 3840, 2160, 24000, 1001, L"2160p2398", {2002}}, {video_format::x2160p2400, 1, 3840, 2160, 3840, 2160, 24000, 1000, L"2160p2400", {2000}}, {video_format::x2160p2500, 1, 3840, 2160, 3840, 2160, 25000, 1000, L"2160p2500", {1920}}, {video_format::x2160p2997, 1, 3840, 2160, 3840, 2160, 30000, 1001, L"2160p2997", {1602, 1601, 1602, 1601, 1602}}, {video_format::x2160p3000, 1, 3840, 2160, 3840, 2160, 30000, 1000, L"2160p3000", {1600}}, {video_format::x2160p5000, 1, 3840, 2160, 3840, 2160, 50000, 1000, L"2160p5000", {960}}, {video_format::x2160p5994, 1, 3840, 2160, 3840, 2160, 60000, 1001, L"2160p5994", {801, 800, 801, 801, 801}}, {video_format::x2160p6000, 1, 3840, 2160, 3840, 2160, 60000, 1000, L"2160p6000", {800}}, {video_format::x4kDCIp2398, 1, 4096, 2160, 4096, 2160, 24000, 1001, L"4kDCIp2398", {2002}}, {video_format::x4kDCIp2400, 1, 4096, 2160, 4096, 2160, 24000, 1000, L"4kDCIp2400", {2000}}, 
{video_format::x4kDCIp2500, 1, 4096, 2160, 4096, 2160, 25000, 1000, L"4kDCIp2500", {1920}}, {video_format::x4kDCIp2997, 1, 4096, 2160, 4096, 2160, 30000, 1001, L"4kDCIp2997", {1602, 1601, 1602, 1601, 1602}}, {video_format::x4kDCIp3000, 1, 4096, 2160, 4096, 2160, 30000, 1000, L"4kDCIp3000", {1600}}, {video_format::x4kDCIp5000, 1, 4096, 2160, 4096, 2160, 50000, 1000, L"4kDCIp5000", {960}}, {video_format::x4kDCIp5994, 1, 4096, 2160, 4096, 2160, 60000, 1001, L"4kDCIp5994", {801, 800, 801, 801, 801}}, {video_format::x4kDCIp6000, 1, 4096, 2160, 4096, 2160, 60000, 1000, L"4kDCIp6000", {800}}, }; for (auto& f : default_formats) formats_.insert({(boost::to_lower_copy(f.name)), f}); } video_format_desc find(const std::wstring& name) const { const std::wstring lower = boost::to_lower_copy(name); const auto res = formats_.find(lower); if (res != formats_.end()) return res->second; return invalid(); } video_format_desc find_format(const video_format& id) const { for (auto& f : formats_) { if (f.second.format == id) return f.second; } return invalid(); } void store(const video_format_desc& format) { const std::wstring lower = boost::to_lower_copy(format.name); formats_.insert({lower, format}); } std::size_t get_max_video_format_size() const { size_t max = 0; for (auto& f : formats_) { if (f.second.size > max) max = f.second.size; } const size_t MaxBytesPerColor = 2; return max * MaxBytesPerColor; } }; video_format_repository::video_format_repository() : impl_(new impl()) { } video_format_desc video_format_repository::invalid() { return video_format_desc(video_format::invalid, 1, 0, 0, 0, 0, 1, 1, L"invalid", {1}); }; video_format_desc video_format_repository::find(const std::wstring& name) const { return impl_->find(name); } video_format_desc video_format_repository::find_format(const video_format& format) const { return impl_->find_format(format); } void video_format_repository::store(const video_format_desc& format) { impl_->store(format); } std::size_t 
video_format_repository::get_max_video_format_size() const { return impl_->get_max_video_format_size(); } video_format_desc::video_format_desc(const video_format format, const int field_count, const int width, const int height, const int square_width, const int square_height, const int time_scale, const int duration, const std::wstring name, const std::vector audio_cadence) : format(format) , width(width) , height(height) , square_width(square_width) , square_height(square_height) , field_count(field_count) , hz(static_cast(time_scale) / static_cast(duration)) , fps(hz * field_count) , framerate(time_scale, duration) , time_scale(time_scale) , duration(duration) , size(width * height * 4) , name(std::move(name)) , audio_sample_rate(48000) , audio_cadence(std::move(audio_cadence)) { } video_format_desc::video_format_desc() : format(video_format::invalid) { *this = video_format_repository::invalid(); } bool operator==(const video_format_desc& lhs, const video_format_desc& rhs) { if (lhs.format == video_format::custom || rhs.format == video_format::custom) { if (lhs.format != rhs.format) { // If one is custom, and the other isnt, then they dont match return false; } // TODO - expand on this if (lhs.width != rhs.width || lhs.height != rhs.height || lhs.framerate != rhs.framerate) { return false; } return true; } else { // If neither are custom, look just at the format return lhs.format == rhs.format; } } bool operator!=(const video_format_desc& lhs, const video_format_desc& rhs) { return !(lhs == rhs); } std::wostream& operator<<(std::wostream& out, const video_format_desc& format_desc) { out << format_desc.name.c_str(); return out; } }} // namespace caspar::core ================================================ FILE: src/core/video_format.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include #include namespace caspar { namespace core { enum class video_field { progressive, a, b, }; enum class video_format { pal, ntsc, x576p2500, x720p2398, x720p2400, x720p2500, x720p2997, x720p3000, x720p5000, x720p5994, x720p6000, x1080i5000, x1080i5994, x1080i6000, x1080p2398, x1080p2400, x1080p2500, x1080p2997, x1080p3000, x1080p5000, x1080p5994, x1080p6000, x1556p2398, x1556p2400, x1556p2500, x2160p2398, x2160p2400, x2160p2500, x2160p2997, x2160p3000, x2160p5000, x2160p5994, x2160p6000, x4kDCIp2398, x4kDCIp2400, x4kDCIp2500, x4kDCIp2997, x4kDCIp3000, x4kDCIp5000, x4kDCIp5994, x4kDCIp6000, invalid, custom, count }; struct video_format_desc final { video_format format{video_format::invalid}; int width; int height; int square_width; int square_height; int field_count; double hz; // actual tickrate of the channel, e.g. i50 = 25 hz, p50 = 50 hz double fps; // actual fieldrate, e.g. 
i50 = 50 fps, p50 = 50 fps boost::rational framerate; int time_scale; int duration; std::size_t size; // frame size in bytes std::wstring name; // name of output format int audio_channels = 16; int audio_sample_rate; std::vector audio_cadence; // rotating optimal number of samples per frame video_format_desc(video_format format, int field_count, int width, int height, int square_width, int square_height, int time_scale, int duration, std::wstring name, std::vector audio_cadence); video_format_desc(); }; bool operator==(const video_format_desc& rhs, const video_format_desc& lhs); bool operator!=(const video_format_desc& rhs, const video_format_desc& lhs); std::wostream& operator<<(std::wostream& out, const video_format_desc& format_desc); class video_format_repository { public: explicit video_format_repository(); video_format_desc find(const std::wstring& name) const; video_format_desc find_format(const video_format& format) const; void store(const video_format_desc& format); std::size_t get_max_video_format_size() const; static video_format_desc invalid(); private: struct impl; spl::shared_ptr impl_; }; }} // namespace caspar::core ================================================ FILE: src/modules/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 3.28) project("modules") add_subdirectory(image) add_subdirectory(ffmpeg) add_subdirectory(oal) add_subdirectory(decklink) add_subdirectory(screen) add_subdirectory(newtek) add_subdirectory(artnet) if (ENABLE_HTML) add_subdirectory(html) endif () if (MSVC) add_subdirectory(flash) add_subdirectory(bluefish) endif() ================================================ FILE: src/modules/artnet/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 3.28) project (artnet) set(SOURCES consumer/artnet_consumer.cpp consumer/artnet_consumer.h util/fixture_calculation.cpp util/fixture_calculation.h artnet.cpp artnet.h ) 
casparcg_add_module_project(artnet SOURCES ${SOURCES} INIT_FUNCTION "artnet::init" ) set_target_properties(artnet PROPERTIES FOLDER modules) source_group(sources\\consumer consumer/*) source_group(sources\\util util/*) source_group(sources ./*) ================================================ FILE: src/modules/artnet/artnet.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Eliyah Sundström eliyah@sundstroem.com */ #include "artnet.h" #define WIN32_LEAN_AND_MEAN #include "consumer/artnet_consumer.h" #include #include namespace caspar { namespace artnet { void init(const core::module_dependencies& dependencies) { dependencies.consumer_registry->register_preconfigured_consumer_factory(L"artnet", create_preconfigured_consumer); } }} // namespace caspar::artnet ================================================ FILE: src/modules/artnet/artnet.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Eliyah Sundström eliyah@sundstroem.com */ #pragma once #include namespace caspar { namespace artnet { void init(const core::module_dependencies& dependencies); }} // namespace caspar::artnet ================================================ FILE: src/modules/artnet/consumer/artnet_consumer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Eliyah Sundström eliyah@sundstroem.com */ #include "artnet_consumer.h" #undef NOMINMAX // ^^ This is needed to avoid a conflict between boost asio and other header files defining NOMINMAX #include #include #include #include #include #include #include #include #include #include using namespace boost::asio; using namespace boost::asio::ip; namespace caspar { namespace artnet { struct configuration { int universe = 0; std::wstring host = L"127.0.0.1"; unsigned short port = 6454; int refreshRate = 10; std::vector fixtures; }; struct artnet_consumer : public core::frame_consumer { const configuration config; std::vector computed_fixtures; public: // frame_consumer explicit artnet_consumer(configuration config) : config(std::move(config)) , io_context_() , socket(io_context_) { socket.open(udp::v4()); std::string host_ = u8(this->config.host); remote_endpoint = boost::asio::ip::udp::endpoint(boost::asio::ip::make_address(host_), this->config.port); compute_fixtures(); } void initialize(const core::video_format_desc& /*format_desc*/, const core::channel_info& channel_info, int port_index) override { thread_ = std::thread([this] { long long time = 1000 / config.refreshRate; auto last_send = std::chrono::system_clock::now(); while (!abort_request_) { try { auto now = std::chrono::system_clock::now(); std::chrono::duration elapsed_seconds = now - last_send; long long elapsed_ms = std::chrono::duration_cast(elapsed_seconds).count(); long long sleep_time = time - elapsed_ms * 1000; if (sleep_time > 0) std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time)); last_send = now; frame_mutex_.lock(); auto frame = last_frame_; frame_mutex_.unlock(); if (!frame) continue; // No frame available uint8_t dmx_data[512]; memset(dmx_data, 0, 512); for (auto computed_fixture : computed_fixtures) { auto color = average_color(frame, computed_fixture.rectangle); uint8_t* ptr = dmx_data + computed_fixture.address; switch (computed_fixture.type) { case FixtureType::DIMMER: 
ptr[0] = (uint8_t)(0.279 * color.r + 0.547 * color.g + 0.106 * color.b); break; case FixtureType::RGB: ptr[0] = color.r; ptr[1] = color.g; ptr[2] = color.b; break; case FixtureType::RGBW: uint8_t w = std::min(std::min(color.r, color.g), color.b); ptr[0] = color.r - w; ptr[1] = color.g - w; ptr[2] = color.b - w; ptr[3] = w; break; } } send_dmx_data(dmx_data, 512); } catch (...) { CASPAR_LOG_CURRENT_EXCEPTION(); } } }); } ~artnet_consumer() { abort_request_ = true; if (thread_.joinable()) thread_.join(); } std::future send(core::video_field field, core::const_frame frame) override { std::lock_guard lock(frame_mutex_); last_frame_ = frame; return make_ready_future(true); } std::wstring print() const override { return L"artnet[]"; } std::wstring name() const override { return L"artnet"; } int index() const override { return 1337; } core::monitor::state state() const override { core::monitor::state state; state["artnet/computed-fixtures"] = computed_fixtures.size(); state["artnet/fixtures"] = config.fixtures.size(); state["artnet/universe"] = config.universe; state["artnet/host"] = config.host; state["artnet/port"] = config.port; state["artnet/refresh-rate"] = config.refreshRate; return state; } private: core::const_frame last_frame_; std::mutex frame_mutex_; std::thread thread_; std::atomic abort_request_{false}; io_context io_context_; udp::socket socket; udp::endpoint remote_endpoint; void compute_fixtures() { computed_fixtures.clear(); for (auto fixture : config.fixtures) { for (unsigned short i = 0; i < fixture.fixtureCount; i++) { computed_fixture computed_fixture{}; computed_fixture.type = fixture.type; computed_fixture.address = fixture.startAddress + i * fixture.fixtureChannels; computed_fixture.rectangle = compute_rect(fixture.fixtureBox, i, fixture.fixtureCount); computed_fixtures.push_back(computed_fixture); } } } void send_dmx_data(const std::uint8_t* data, std::size_t length) { int universe = this->config.universe; std::uint8_t hUni = (universe >> 8) & 
0xff; std::uint8_t lUni = universe & 0xff; std::uint8_t hLen = (length >> 8) & 0xff; std::uint8_t lLen = (length & 0xff); std::uint8_t header[] = {65, 114, 116, 45, 78, 101, 116, 0, 0, 80, 0, 14, 0, 0, lUni, hUni, hLen, lLen}; std::uint8_t buffer[18 + 512]; for (int i = 0; i < 18 + 512; i++) { if (i < 18) { buffer[i] = header[i]; continue; } if (i - 18 < length) { buffer[i] = data[i - 18]; continue; } buffer[i] = 0; } boost::system::error_code err; socket.send_to(boost::asio::buffer(buffer), remote_endpoint, 0, err); if (err) CASPAR_THROW_EXCEPTION(io_error() << msg_info(err.message())); } }; std::vector get_fixtures_ptree(const boost::property_tree::wptree& ptree) { std::vector fixtures; using boost::property_tree::wptree; for (auto& xml_channel : ptree | witerate_children(L"fixtures") | welement_context_iteration) { ptree_verify_element_name(xml_channel, L"fixture"); fixture f{}; int startAddress = xml_channel.second.get(L"start-address", 0); if (startAddress < 1) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Fixture start address must be specified")); f.startAddress = (unsigned short)startAddress - 1; int fixtureCount = xml_channel.second.get(L"fixture-count", -1); if (fixtureCount < 1) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Fixture count must be specified")); f.fixtureCount = (unsigned short)fixtureCount; std::wstring type = xml_channel.second.get(L"type", L""); if (type.empty()) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Fixture type must be specified")); if (boost::iequals(type, L"DIMMER")) { f.type = FixtureType::DIMMER; } else if (boost::iequals(type, L"RGB")) { f.type = FixtureType::RGB; } else if (boost::iequals(type, L"RGBW")) { f.type = FixtureType::RGBW; } else { CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Unknown fixture type")); } int fixtureChannels = xml_channel.second.get(L"fixture-channels", -1); if (fixtureChannels < 0) fixtureChannels = f.type; if (fixtureChannels < f.type) CASPAR_THROW_EXCEPTION( user_error() << 
msg_info( L"Fixture channel count must be at least enough channels for current color mode")); f.fixtureChannels = (unsigned short)fixtureChannels; box b{}; auto x = xml_channel.second.get(L"x", 0.0f); auto y = xml_channel.second.get(L"y", 0.0f); b.x = x; b.y = y; auto width = xml_channel.second.get(L"width", 0.0f); auto height = xml_channel.second.get(L"height", 0.0f); b.width = width; b.height = height; auto rotation = xml_channel.second.get(L"rotation", 0.0f); b.rotation = rotation; f.fixtureBox = b; fixtures.push_back(f); } return fixtures; } spl::shared_ptr create_preconfigured_consumer(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) { configuration config; if (channel_info.depth != common::bit_depth::bit8) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Artnet consumer only supports 8-bit color depth.")); config.universe = ptree.get(L"universe", config.universe); config.host = ptree.get(L"host", config.host); config.port = ptree.get(L"port", config.port); config.refreshRate = ptree.get(L"refresh-rate", config.refreshRate); if (config.refreshRate < 1) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Refresh rate must be at least 1")); config.fixtures = get_fixtures_ptree(ptree); return spl::make_shared(config); } }} // namespace caspar::artnet ================================================ FILE: src/modules/artnet/consumer/artnet_consumer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Eliyah Sundström eliyah@sundstroem.com */ #pragma once #include "../util/fixture_calculation.h" #include #include #include #include #include namespace caspar { namespace artnet { spl::shared_ptr create_preconfigured_consumer(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info); }} // namespace caspar::artnet ================================================ FILE: src/modules/artnet/util/fixture_calculation.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Eliyah Sundström eliyah@sundstroem.com */ #include "fixture_calculation.h" #define M_PI 3.14159265358979323846 /* pi */ namespace caspar { namespace artnet { rect compute_rect(box fixtureBox, int index, int count) { // Calculates the corners of a rectangle that is part of a fixture // The count represents how many fixtures exist in the box and the index which one to calculate auto f_count = (float)count; auto f_index = (float)index; float x = fixtureBox.x; float y = fixtureBox.y; float width = fixtureBox.width; float height = fixtureBox.height; float rotation = fixtureBox.rotation; double angle = M_PI * rotation / 180.0f; double sin_ = sin(angle); double cos_ = cos(angle); // Half width and height of the rectangle for this fixture float hx = width / (2 * f_count); float hy = height / 2.0f; // Offset distance from the center of the box to the center of the fixture float od = (2 * f_index - f_count + 1) * hx; // Center of the fixture double ox = x + od * cos_; double oy = y + od * sin_; // Calculate the corners of the rectangle, by offsetting the center with the half width and height // in the direction of the corners and the box's rotation point p1{ static_cast(ox + -hx * cos_ + -hy * -sin_), static_cast(oy + -hx * sin_ + -hy * cos_), }; point p2{ static_cast(ox + hx * cos_ + -hy * -sin_), static_cast(oy + hx * sin_ + -hy * cos_), }; point p3{ static_cast(ox + hx * cos_ + hy * -sin_), static_cast(oy + hx * sin_ + hy * cos_), }; point p4{ static_cast(ox + -hx * cos_ + hy * -sin_), static_cast(oy + -hx * sin_ + hy * cos_), }; rect rectangle{p1, p2, p3, p4}; return rectangle; } color average_color(const core::const_frame& frame, rect& rectangle) { int width = (int)frame.width(); int height = (int)frame.height(); float x_values[] = {rectangle.p1.x, rectangle.p2.x, rectangle.p3.x, rectangle.p4.x}; float y_values[] = {rectangle.p1.y, rectangle.p2.y, rectangle.p3.y, rectangle.p4.y}; // Sort the points by y value, then by x value if y is equal for (int i = 0; i 
< 3; i++) { for (int j = 3; j > i; j--) { if (y_values[j] > y_values[j - 1]) continue; if (y_values[j] < y_values[j - 1]) { float x = x_values[j]; float y = y_values[j]; x_values[j] = x_values[j - 1]; y_values[j] = y_values[j - 1]; x_values[j - 1] = x; y_values[j - 1] = y; continue; } if (x_values[j] < x_values[j - 1]) { float x = x_values[j]; x_values[j] = x_values[j - 1]; x_values[j - 1] = x; } } } // Below is a rasterization algorithm that goes through the pixels in rectangle // and calculates the average color // Which lines to use for the rasterization // in the format [a, b, c, d] => a -> b, c -> d // the numbers are indices into the x_values, y_values arrays const int indices[3][4] = { {0, 1, 0, 2}, // Line 1, Line 2 {0, 2, 1, 3}, // Line 2, Line 3 {1, 3, 2, 3}, // Line 3, Line 4 }; // The y values of the top and bottom of the rectangle, clamped to the image size int y_min = std::max(0, std::min(height - 1, (int)y_values[0])); int y_max = std::max(0, std::min(height - 1, (int)y_values[3])); const array& values = frame.image_data(0); const std::uint8_t* value_ptr = values.data(); // Total color values, as well as the number of pixels in the rectangle // used to calculate the average without loss of precision unsigned long long tr = 0; unsigned long long tg = 0; unsigned long long tb = 0; unsigned long long count = 0; // Go through the vertical lines of the rectangle, and then through the pixels in the line // that are inside the rectangle for (int y = y_min; y <= y_max; y++) { // Determine which lines to use for the rasterization, if one line has passed we should use the next one int index = 0; if (y >= (int)y_values[1]) index = 1; if (y >= (int)y_values[2]) index = 2; // The x and y values of the first line float ax1 = x_values[indices[index][0]]; float ay1 = y_values[indices[index][0]]; float bx1 = x_values[indices[index][1]]; float by1 = y_values[indices[index][1]]; // The x and y values of the second line float ax2 = x_values[indices[index][2]]; float ay2 
= y_values[indices[index][2]]; float bx2 = x_values[indices[index][3]]; float by2 = y_values[indices[index][3]]; int x1 = 0; int x2 = width - 1; // If the lines are horizontal, we can skip the calculations // This only happens if the box is oriented in 90 degree increments if (by2 != ay2) { // The slopes of the lines float d1 = (bx1 - ax1) / (by1 - ay1); float d2 = (bx2 - ax2) / (by2 - ay2); // The x values of the lines at the current y value auto x1_ = (int)(ax1 + ((float)y - ay1) * d1); auto x2_ = (int)(ax2 + ((float)y - ay2) * d2); // The clamped x values x1 = std::max(0, std::min(width - 1, x1_)); x2 = std::max(0, std::min(width - 1, x2_)); } // The left and right x values int min_x = std::min(x1, x2); int max_x = std::max(x1, x2); // Go through the pixels in the line for (int x = min_x; x <= max_x; x++) { int pos = y * width + x; const std::uint8_t* base_ptr = value_ptr + pos * 4; float a = (float)base_ptr[3] / 255.0f; float r = (float)base_ptr[2] * a; float g = (float)base_ptr[1] * a; float b = (float)base_ptr[0] * a; tr += (unsigned long long)(r); tg += (unsigned long long)(g); tb += (unsigned long long)(b); count++; } } color c{(std::uint8_t)(tr / count), (std::uint8_t)(tg / count), (std::uint8_t)(tb / count)}; return c; } }} // namespace caspar::artnet ================================================ FILE: src/modules/artnet/util/fixture_calculation.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Eliyah Sundström eliyah@sundstroem.com
 */

#pragma once

// NOTE(review): the targets of the three includes below were lost in extraction
// (angle-bracket contents stripped). They must at least provide std::uint8_t
// (<cstdint>) and core::const_frame — restore from the repository before compiling.
#include
#include
#include

namespace caspar { namespace artnet {

// DMX personality of a fixture; the enumerator value is the number of
// colour channels the fixture type consumes.
enum FixtureType
{
    DIMMER = 1,
    RGB    = 3,
    RGBW   = 4,
};

// A 2D point in frame pixel coordinates.
struct point
{
    float x;
    float y;
};

// An arbitrarily oriented quadrilateral (four corners) describing the
// frame region a fixture samples.
struct rect
{
    point p1;
    point p2;
    point p3;
    point p4;
};

// A fixture after layout computation: its type, DMX start address and the
// frame region it averages its colour from.
struct computed_fixture
{
    FixtureType type;

    unsigned short address;
    rect           rectangle;
};

// 8-bit RGB colour as averaged from the frame.
struct color
{
    std::uint8_t r;
    std::uint_8t g; // NOTE(review): see below — this line is byte-faithful std::uint8_t in SOURCE
    std::uint8_t b;
};

// Axis-aligned box plus rotation describing where a fixture chain sits on
// the frame.
struct box
{
    float x;
    float y;

    float width;
    float height;

    float rotation; // degrees
};

struct fixture
{
    FixtureType type;

    unsigned short startAddress;    // DMX address of the first channel in the fixture
    unsigned short fixtureCount;    // number of fixtures in the chain, dividing along the width
    unsigned short fixtureChannels; // number of channels per fixture

    box fixtureBox;
};

// Computes the quadrilateral of the index'th fixture out of count inside
// fixtureBox (defined in fixture_calculation.cpp).
rect compute_rect(box fixtureBox, int index, int count);

// Averages the frame pixels covered by rectangle (defined in
// fixture_calculation.cpp).
color average_color(const core::const_frame& frame, rect& rectangle);

}} // namespace caspar::artnet

================================================
FILE: src/modules/bluefish/CMakeLists.txt
================================================

cmake_minimum_required (VERSION 3.28)
project (bluefish)

set(SOURCES
    consumer/bluefish_consumer.cpp
    consumer/bluefish_consumer.h

    producer/bluefish_producer.cpp
    producer/bluefish_producer.h

    util/blue_velvet.cpp
    util/blue_velvet.h
    util/memory.h

    bluefish.cpp
    bluefish.h

    StdAfx.h
)

casparcg_add_module_project(bluefish
    SOURCES ${SOURCES}
    INIT_FUNCTION "bluefish::init"
)

target_include_directories(bluefish PRIVATE
    ..
${FFMPEG_INCLUDE_PATH} ) target_precompile_headers(bluefish PRIVATE "StdAfx.h") set_target_properties(bluefish PROPERTIES FOLDER modules) source_group(sources ./*) source_group(sources\\consumer consumer/*) source_group(sources\\producer producer/*) source_group(sources\\util util/*) source_group(sources\\interop interop/*) target_link_libraries(bluefish PRIVATE ffmpeg) ================================================ FILE: src/modules/bluefish/StdAfx.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #if defined _DEBUG && defined _MSC_VER #include #endif #define NOMINMAX #include #include #include #include #include #include #include #include #include #include #include #include // #include #include #include #include ================================================ FILE: src/modules/bluefish/bluefish.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
*
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 *         James Wise, james.wise@bluefish444.com
 */

#include "StdAfx.h"

#include "bluefish.h"
#include "consumer/bluefish_consumer.h"
#include "producer/bluefish_producer.h"
#include "util/blue_velvet.h"

// NOTE(review): the targets of the two includes below were lost in extraction
// (angle-bracket contents stripped) — restore from the repository. One of them
// must provide u16() (common/utf).
#include
#include

namespace caspar { namespace bluefish {

// Returns the installed Bluefish SDK/driver version string, or L"Not found"
// when the SDK is absent (bvc_wrapper construction throws in that case).
// NOTE(review): bluefish.h declares get_version() but this file defines
// version() — one of the two names looks stale; confirm which callers use.
std::wstring version()
{
    try {
        bvc_wrapper blue;
        return u16(blue.get_version());
    } catch (...) {
        return L"Not found";
    }
}

// Enumerates the installed Bluefish cards and describes each one as
// "<model> [<index>] <n>i<m>o" (SDI input/output counts). Best effort: any
// SDK failure yields whatever was collected so far (possibly nothing).
// NOTE(review): the vector's element type was stripped by extraction; the
// pushed values are std::wstring, so this is presumably
// std::vector<std::wstring> — confirm against the repository.
std::vector device_list()
{
    std::vector devices;

    try {
        bvc_wrapper blue;
        int numCards = 0;
        blue.enumerate(&numCards);

        // Bluefish device indices are 1-based, hence [1, numCards].
        for (int n = 1; n < numCards + 1; n++) {
            blue.attach(n);
            devices.push_back(std::wstring(get_card_desc(blue, n)) + L" [" + std::to_wstring(n) + L"] " +
                              get_sdi_inputs(blue) + L"i" + get_sdi_outputs(blue) + L"o");
            blue.detach();
        }
    } catch (...) {
    }

    return devices;
}

// Module entry point: probes for the SDK (failures are deliberately swallowed
// so the server still starts without Bluefish hardware installed), then
// registers the consumer and producer factories with the server registries.
void init(const core::module_dependencies& dependencies)
{
    try {
        bvc_wrapper blue;
        int num_cards = 0;
        blue.enumerate(&num_cards);
    } catch (...) {
    }

    dependencies.consumer_registry->register_consumer_factory(L"Bluefish Consumer", create_consumer);
    dependencies.consumer_registry->register_preconfigured_consumer_factory(L"bluefish",
                                                                            create_preconfigured_consumer);
    dependencies.producer_registry->register_producer_factory(L"Bluefish Producer", create_producer);
}

}} // namespace caspar::bluefish

================================================
FILE: src/modules/bluefish/bluefish.h
================================================

/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
*
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

// NOTE(review): the include target was lost in extraction (angle-bracket
// contents stripped); it must provide core::module_dependencies.
#include

namespace caspar { namespace bluefish {

// Returns the installed Bluefish SDK version string.
// NOTE(review): declared here, but bluefish.cpp defines version(), not
// get_version() — one of the two names looks stale; confirm which one
// callers actually link against.
std::wstring get_version();

// Module entry point: registers the Bluefish consumer/producer factories
// (implemented in bluefish.cpp).
void init(const core::module_dependencies& dependencies);

}} // namespace caspar::bluefish

================================================
FILE: src/modules/bluefish/consumer/bluefish_consumer.cpp
================================================

/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
* * Author: Robert Nagy, ronag89@gmail.com * James Wise, james.wise@bluefish444.com */ #include "../StdAfx.h" #include "../util/blue_velvet.h" #include "../util/memory.h" #include #include #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace bluefish { #define BLUEFISH_MAX_SOFTWARE_BUFFERS 4 #define SIZE_TEMP_AUDIO_BUFFER \ (2048 * 16) // max 2002 samples across 16 channels, use 2048 for safety cos sometimes caspar gives us too many... enum class hardware_downstream_keyer_mode { disable = 0, external = 1, internal = 2, // Bluefish dedicated HW keyer - only available on some models. }; enum class hardware_downstream_keyer_audio_source { SDIVideoInput = 1, VideoOutputChannel = 2 }; enum class bluefish_hardware_output_channel { channel_1 = 1, channel_2 = 2, channel_3 = 3, channel_4 = 4, channel_5 = 5, channel_6 = 6, channel_7 = 7, channel_8 = 8, }; enum class uhd_output_option { disable_BVC_MultiLink = 0, auto_uhd = 1, force_2si = 2, force_square_division = 3, }; struct configuration { unsigned int device_index = 1; bluefish_hardware_output_channel device_stream = bluefish_hardware_output_channel::channel_1; bool embedded_audio = true; hardware_downstream_keyer_mode hardware_keyer_value = hardware_downstream_keyer_mode::disable; hardware_downstream_keyer_audio_source keyer_audio_source = hardware_downstream_keyer_audio_source::VideoOutputChannel; unsigned int watchdog_timeout = 2; uhd_output_option uhd_mode = uhd_output_option::disable_BVC_MultiLink; }; bool get_videooutput_channel_routing_info_from_streamid(bluefish_hardware_output_channel streamid, EEpochRoutingElements& channelSrcElement, EEpochRoutingElements& sdioutputDstElement) { switch (streamid) { case bluefish_hardware_output_channel::channel_1: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH1; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_1; break; case bluefish_hardware_output_channel::channel_2: channelSrcElement = 
EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH2; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_2; break; case bluefish_hardware_output_channel::channel_3: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH3; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_3; break; case bluefish_hardware_output_channel::channel_4: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH4; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_4; break; case bluefish_hardware_output_channel::channel_5: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH5; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_5; break; case bluefish_hardware_output_channel::channel_6: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH6; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_6; break; case bluefish_hardware_output_channel::channel_7: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH7; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_7; break; case bluefish_hardware_output_channel::channel_8: channelSrcElement = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH8; sdioutputDstElement = EPOCH_DEST_SDI_OUTPUT_8; break; default: return false; } return true; } EBlueVideoChannel get_bluesdk_videochannel_from_streamid(bluefish_hardware_output_channel streamid) { /*This function would return the corresponding EBlueVideoChannel from the device output channel*/ switch (streamid) { case bluefish_hardware_output_channel::channel_1: return BLUE_VIDEO_OUTPUT_CHANNEL_1; case bluefish_hardware_output_channel::channel_2: return BLUE_VIDEO_OUTPUT_CHANNEL_2; case bluefish_hardware_output_channel::channel_3: return BLUE_VIDEO_OUTPUT_CHANNEL_3; case bluefish_hardware_output_channel::channel_4: return BLUE_VIDEO_OUTPUT_CHANNEL_4; case bluefish_hardware_output_channel::channel_5: return BLUE_VIDEO_OUTPUT_CHANNEL_5; case bluefish_hardware_output_channel::channel_6: return BLUE_VIDEO_OUTPUT_CHANNEL_6; case bluefish_hardware_output_channel::channel_7: return BLUE_VIDEO_OUTPUT_CHANNEL_7; case bluefish_hardware_output_channel::channel_8: return BLUE_VIDEO_OUTPUT_CHANNEL_8; 
default: return BLUE_VIDEO_OUTPUT_CHANNEL_1; } } struct bluefish_consumer { const int channel_index_; const configuration config_; spl::shared_ptr blue_ = create_blue(config_.device_index); spl::shared_ptr watchdog_bvc_ = create_blue(config_.device_index); std::mutex exception_mutex_; std::exception_ptr exception_; std::wstring model_name_; const core::video_format_desc format_desc_; std::mutex buffer_mutex_; std::condition_variable buffer_cond_; std::atomic scheduled_frames_completed_{0}; int field_count_; std::atomic abort_request_{false}; unsigned int mode_; // ie bf video mode / format bool interlaced_ = false; std::array all_frames_; tbb::concurrent_bounded_queue reserved_frames_; tbb::concurrent_bounded_queue live_frames_; std::atomic audio_frames_filled_{0}; blue_dma_buffer_ptr last_field_buf_ = nullptr; std::vector tmp_audio_buf_; unsigned int tmp_audio_buf_contains_samples = 0; std::shared_ptr dma_present_thread_; std::shared_ptr hardware_watchdog_thread_; std::atomic end_hardware_watchdog_thread_; unsigned int interrupts_to_wait_ = config_.watchdog_timeout; spl::shared_ptr graph_; caspar::timer tick_timer_; caspar::timer sync_timer_; bluefish_consumer(const bluefish_consumer&) = delete; bluefish_consumer& operator=(const bluefish_consumer&) = delete; bluefish_consumer(const configuration& config, const core::video_format_desc& format_desc, int channel_index) : channel_index_(channel_index) , config_(config) , format_desc_(format_desc) { // OK this is the Guts of it, lets see what we can do to get a compile working, and then some actual // functionality eh? 
graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f)); graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f)); graph_->set_color("flushed-frame", diagnostics::color(0.4f, 0.3f, 0.8f)); graph_->set_color("buffered-audio", diagnostics::color(0.9f, 0.9f, 0.5f)); graph_->set_color("buffered-video", diagnostics::color(0.2f, 0.9f, 0.9f)); graph_->set_text(print()); diagnostics::register_graph(graph_); reserved_frames_.set_capacity(BLUEFISH_MAX_SOFTWARE_BUFFERS); live_frames_.set_capacity(BLUEFISH_MAX_SOFTWARE_BUFFERS); // get BF video mode mode_ = get_bluefish_video_format(format_desc_.format); if (mode_ == VID_FMT_EXT_1080I_5000 || mode_ == VID_FMT_EXT_1080I_5994 || mode_ == VID_FMT_EXT_1080I_6000 || mode_ == VID_FMT_EXT_PAL || mode_ == VID_FMT_EXT_NTSC) { interlaced_ = true; } // Specify the video channel setup_hardware_output_channel(); // ie stream id model_name_ = get_card_desc(*blue_.get(), (int)config_.device_index); // disable the video output while we do all the config. 
disable_video_output(); // check if we need to set Multilink, and configure if required setup_multlink(); // Setting output Video mode if (blue_->set_card_property32(VIDEO_MODE_EXT_OUTPUT, mode_)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set videomode.")); // Select Update Mode for output if (blue_->set_card_property32(VIDEO_UPDATE_TYPE, UPD_FMT_FRAME)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set update type.")); setup_hardware_output_channel_routing(); // Select output memory format if (blue_->set_card_property32(VIDEO_MEMORY_FORMAT, MEM_FMT_ARGB_PC)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set memory format.")); // Select image orientation if (blue_->set_card_property32(VIDEO_IMAGE_ORIENTATION, ImageOrientation_Normal)) CASPAR_LOG(warning) << print() << L" Failed to set image orientation to normal."; // Select data range if (blue_->set_card_property32(VIDEO_RGB_DATA_RANGE, CGR_RANGE)) CASPAR_LOG(warning) << print() << L" Failed to set RGB data range to CGR."; // configure audio if (!config_.embedded_audio || (config_.hardware_keyer_value == hardware_downstream_keyer_mode::internal && config.keyer_audio_source == hardware_downstream_keyer_audio_source::SDIVideoInput)) { if (blue_->set_card_property32(EMBEDEDDED_AUDIO_OUTPUT, 0)) CASPAR_LOG(warning) << TEXT("BLUECARD ERROR: Failed to disable embedded audio."); CASPAR_LOG(info) << print() << TEXT(" Disabled embedded-audio."); } else { ULONG audio_value = blue_emb_audio_enable | blue_emb_audio_group1_enable; if (format_desc_.audio_channels > 4) audio_value |= blue_emb_audio_group2_enable; if (format_desc_.audio_channels > 8) audio_value |= blue_emb_audio_group3_enable; if (format_desc_.audio_channels > 12) audio_value |= blue_emb_audio_group4_enable; if (blue_->set_card_property32(EMBEDEDDED_AUDIO_OUTPUT, audio_value)) CASPAR_LOG(warning) << print() << TEXT(" Failed to enable embedded audio."); 
CASPAR_LOG(info) << print() << TEXT(" Enabled embedded-audio."); } if (blue_->set_card_property32(VIDEO_OUTPUT_ENGINE, VIDEO_ENGINE_PLAYBACK)) CASPAR_LOG(warning) << print() << TEXT(" Failed to set video engine."); if (is_epoch_card(*blue_) && is_epoch_neutron_1i2o_card(*blue_)) setup_hardware_downstream_keyer(config.hardware_keyer_value, config.keyer_audio_source); // ok here we create a bunch of Bluefish buffers, that contain video and encoded hanc.... // this is the software Q. / software buffers int n = 0; boost::range::generate( all_frames_, [&] { return std::make_shared(static_cast(format_desc_.size), n++); }); for (size_t i = 0; i < all_frames_.size(); i++) reserved_frames_.push(all_frames_[i]); tmp_audio_buf_.reserve(SIZE_TEMP_AUDIO_BUFFER); // start the thread if required. if (dma_present_thread_ == nullptr) { dma_present_thread_ = std::make_shared([this] { dma_present_thread_actual(); }); #if defined(_WIN32) HANDLE handle = (HANDLE)dma_present_thread_->native_handle(); SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST); #endif } configure_watchdog(); enable_video_output(); } ~bluefish_consumer() { try { abort_request_ = true; buffer_cond_.notify_all(); if (!end_hardware_watchdog_thread_) disable_watchdog(); disable_video_output(); watchdog_bvc_->detach(); blue_->detach(); if (dma_present_thread_) dma_present_thread_->join(); if (hardware_watchdog_thread_) hardware_watchdog_thread_->join(); } catch (...) 
{ CASPAR_LOG_CURRENT_EXCEPTION(); } } void watchdog_thread_actual() { watchdog_bvc_->attach(config_.device_index); EBlueVideoChannel out_vid_channel = get_bluesdk_videochannel_from_streamid(config_.device_stream); watchdog_bvc_->set_card_property32(DEFAULT_VIDEO_OUTPUT_CHANNEL, out_vid_channel); unsigned long fc = 0; unsigned int blue_prop = EPOCH_WATCHDOG_TIMER_SET_MACRO(enum_blue_app_watchdog_timer_keepalive, config_.watchdog_timeout); while (!end_hardware_watchdog_thread_) { watchdog_bvc_->wait_video_output_sync(UPD_FMT_FIELD, &fc); watchdog_bvc_->set_card_property32(EPOCH_APP_WATCHDOG_TIMER, blue_prop); } disable_watchdog(); watchdog_bvc_->detach(); } void configure_watchdog() { // First test if we even want to enable the watchdog, only on Ch 1 and if user has not explicitly set count to // 0, and only if card has at least 1 input sdi, else dont do anything unsigned int val = 0; blue_->get_card_property32(CARD_FEATURE_STREAM_INFO, &val); unsigned int num_input_streams = CARD_FEATURE_GET_SDI_INPUT_STREAM_COUNT(val); if ((interrupts_to_wait_ != 0u) && config_.device_stream == bluefish_hardware_output_channel::channel_1 && (num_input_streams != 0u)) { // check if it is already running unsigned int blue_prop = EPOCH_WATCHDOG_TIMER_SET_MACRO(enum_blue_app_watchdog_get_timer_activated_status, 0); blue_->get_card_property32(EPOCH_APP_WATCHDOG_TIMER, &blue_prop); if (EPOCH_WATCHDOG_TIMER_GET_VALUE_MACRO(blue_prop)) { // watchdog timer is running already, switch it off. 
blue_prop = EPOCH_WATCHDOG_TIMER_SET_MACRO(enum_blue_app_watchdog_timer_start_stop, (unsigned int)0); blue_->set_card_property32(EPOCH_APP_WATCHDOG_TIMER, blue_prop); } // Setting up the watchdog properties unsigned int watchdog_timer_gpo_port = 1; // GPO port to use: 0 = none, 1 = port A, 2 = port B blue_prop = EPOCH_WATCHDOG_TIMER_SET_MACRO(enum_blue_app_watchdog_enable_gpo_on_active, watchdog_timer_gpo_port); blue_->set_card_property32(EPOCH_APP_WATCHDOG_TIMER, blue_prop); if (interrupts_to_wait_ == 1) // using too low a value can cause instability on the watchdog so always make // sure we use a value of 2 or more... interrupts_to_wait_++; blue_prop = EPOCH_WATCHDOG_TIMER_SET_MACRO(enum_blue_app_watchdog_timer_start_stop, interrupts_to_wait_); blue_->set_card_property32(EPOCH_APP_WATCHDOG_TIMER, blue_prop); // start the thread if required. if (hardware_watchdog_thread_ == nullptr) { end_hardware_watchdog_thread_ = false; hardware_watchdog_thread_ = std::make_shared([this] { watchdog_thread_actual(); }); } } } void disable_watchdog() { end_hardware_watchdog_thread_ = true; unsigned int stop_value = 0; unsigned int blue_prop = EPOCH_WATCHDOG_TIMER_SET_MACRO(enum_blue_app_watchdog_timer_start_stop, stop_value); watchdog_bvc_->get_card_property32(EPOCH_APP_WATCHDOG_TIMER, &blue_prop); } void setup_hardware_output_channel() { // This function would be used to setup the logic video channel in the bluefish hardware EBlueVideoChannel out_vid_channel = get_bluesdk_videochannel_from_streamid(config_.device_stream); if (is_epoch_card(*blue_) || is_kronos_card(*blue_)) { if (out_vid_channel != BLUE_VIDEOCHANNEL_INVALID) { if (blue_->set_card_property32(DEFAULT_VIDEO_OUTPUT_CHANNEL, out_vid_channel)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(" Failed to set video stream.")); blue_->video_playback_stop(0, 0); } } } void setup_multlink() { // We only want to enable multi-link in specific scenarios, so lets check all those conditions first EBlueVideoChannel 
out_vid_channel = get_bluesdk_videochannel_from_streamid(config_.device_stream); if (is_kronos_card(*blue_) && (out_vid_channel == BLUE_VIDEO_OUTPUT_CHANNEL_1 || out_vid_channel == BLUE_VIDEO_OUTPUT_CHANNEL_5) && (config_.uhd_mode != uhd_output_option::disable_BVC_MultiLink) && (format_desc_.width > 2048)) { unsigned int val = mode_; blue_->set_multilink(config_.device_index, out_vid_channel); // Now lest test to see if our MultiLink enable instance can support the video Mode in Q. blue_->get_card_property32(IS_VIDEO_MODE_EXT_SUPPORTED_OUTPUT, &val); if (val) return; else { blue_->set_multilink(0, -1); CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(" Failed to set video stream.")); } } } void setup_hardware_output_channel_routing() { // This function would be used to setup the dual link and any other routing that would be required . if (is_epoch_card(*blue_) || is_kronos_card(*blue_)) { EBlueVideoChannel blue_video_output_channel = get_bluesdk_videochannel_from_streamid(config_.device_stream); EEpochRoutingElements src_element = (EEpochRoutingElements)0; EEpochRoutingElements dst_element = (EEpochRoutingElements)0; get_videooutput_channel_routing_info_from_streamid(config_.device_stream, src_element, dst_element); bool duallink_4224_enabled = false; if ((config_.device_stream == bluefish_hardware_output_channel::channel_1 || config_.device_stream == bluefish_hardware_output_channel::channel_3) && config_.hardware_keyer_value == hardware_downstream_keyer_mode::external || config_.hardware_keyer_value == hardware_downstream_keyer_mode::internal) { duallink_4224_enabled = true; } // Enable/Disable dual link output if (blue_->set_card_property32(VIDEO_DUAL_LINK_OUTPUT, duallink_4224_enabled)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to enable/disable dual link.")); if (!duallink_4224_enabled) { if (blue_->set_card_property32(VIDEO_DUAL_LINK_OUTPUT_SIGNAL_FORMAT_TYPE, Signal_FormatType_Independent_422)) 
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set dual link format type to 4:2:2.")); // ULONG routingValue = EPOCH_SET_ROUTING(src_element, dst_element, BLUE_CONNECTOR_PROP_SINGLE_LINK); // if (blue_->set_card_property32(MR2_ROUTING, routingValue)) // CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(" Failed to MR 2 routing.")); // If single link 422, but on second channel AND on Neutron we need to set Genlock to Aux. if (is_epoch_neutron_1i2o_card(*blue_)) { if (blue_video_output_channel == BLUE_VIDEO_OUTPUT_CHANNEL_B) { ULONG genlock_source = BlueGenlockAux; if (blue_->set_card_property32(VIDEO_GENLOCK_SIGNAL, genlock_source)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to set GenLock to Aux Input.")); } } if (is_epoch_neutron_3o_card(*blue_)) { if (blue_video_output_channel == BLUE_VIDEO_OUTPUT_CHANNEL_C) { ULONG genlock_source = BlueGenlockAux; if (blue_->set_card_property32(VIDEO_GENLOCK_SIGNAL, genlock_source)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to set GenLock to Aux Input.")); } else if (blue_video_output_channel == BLUE_VIDEO_OUTPUT_CHANNEL_B) { ULONG routing_value = EPOCH_SET_ROUTING(EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHB, EPOCH_DEST_SDI_OUTPUT_B, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1); if (blue_->set_card_property32(MR2_ROUTING, routing_value)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to MR 2 routing.")); } } } else // dual Link IS enabled, ie. 
4224 Fill and Key { if (blue_->set_card_property32(VIDEO_DUAL_LINK_OUTPUT_SIGNAL_FORMAT_TYPE, Signal_FormatType_4224)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set dual link format type to 4:2:2:4.")); if (blue_video_output_channel == BLUE_VIDEO_OUTPUT_CHANNEL_1) { ULONG routing_value = EPOCH_SET_ROUTING(EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHA, EPOCH_DEST_SDI_OUTPUT_A, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1); if (blue_->set_card_property32(MR2_ROUTING, routing_value)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to MR 2 routing.")); routing_value = EPOCH_SET_ROUTING(EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHA, EPOCH_DEST_SDI_OUTPUT_B, BLUE_CONNECTOR_PROP_DUALLINK_LINK_2); if (blue_->set_card_property32(MR2_ROUTING, routing_value)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to MR 2 routing.")); if (is_epoch_neutron_1i2o_card(*blue_)) // Neutron cards require setting the Genlock connector to // Aux to enable them to do Dual-Link { ULONG genLockSource = BlueGenlockAux; if (blue_->set_card_property32(VIDEO_GENLOCK_SIGNAL, genLockSource)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to set GenLock to Aux Input.")); } else if (is_epoch_neutron_3o_card(*blue_)) { if (blue_video_output_channel == BLUE_VIDEO_OUTPUT_CHANNEL_C) { ULONG genLockSource = BlueGenlockAux; if (blue_->set_card_property32(VIDEO_GENLOCK_SIGNAL, genLockSource)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to set GenLock to Aux Input.")); } } } else { // using channel C for 4224 on other configurations requires explicit routing if (blue_video_output_channel == BLUE_VIDEO_OUTPUT_CHANNEL_C) { ULONG routingValue = EPOCH_SET_ROUTING(EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHC, EPOCH_DEST_SDI_OUTPUT_C, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1); if (blue_->set_card_property32(MR2_ROUTING, routingValue)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to MR 2 routing.")); routingValue = 
EPOCH_SET_ROUTING(EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHC, EPOCH_DEST_SDI_OUTPUT_D, BLUE_CONNECTOR_PROP_DUALLINK_LINK_2); if (blue_->set_card_property32(MR2_ROUTING, routingValue)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to MR 2 routing.")); } } } } } void setup_hardware_downstream_keyer(hardware_downstream_keyer_mode keyer, hardware_downstream_keyer_audio_source audio_source) { unsigned int keyer_control_value = 0; if (keyer == hardware_downstream_keyer_mode::disable || keyer == hardware_downstream_keyer_mode::external) { keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_DISABLED(keyer_control_value); keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_DISABLE_OVER_BLACK(keyer_control_value); } else if (keyer == hardware_downstream_keyer_mode::internal) { unsigned int input_video_mode = 0; if (blue_->get_card_property32(VIDEO_MODE_EXT_INPUT, &input_video_mode)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(" Failed to get invalid video mode flag")); // The bluefish HW keyer is NOT going to pre-multiply the RGB with the A. keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_DATA_IS_PREMULTIPLIED(keyer_control_value); keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_ENABLED(keyer_control_value); if (blue_->get_card_property32(VIDEO_INPUT_SIGNAL_VIDEO_MODE, &input_video_mode)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(" Failed to get video input signal mode")); if (input_video_mode == VID_FMT_EXT_INVALID) keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_ENABLE_OVER_BLACK(keyer_control_value); else keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_DISABLE_OVER_BLACK(keyer_control_value); // lock to input if (blue_->set_card_property32( VIDEO_GENLOCK_SIGNAL, BlueSDI_A_BNC)) // todo: will need to adjust when we support keyer on all channels... 
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(" Failed to set the genlock to the input for the HW keyer")); } if (audio_source == hardware_downstream_keyer_audio_source::SDIVideoInput && keyer == hardware_downstream_keyer_mode::internal) keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_USE_INPUT_ANCILLARY(keyer_control_value); else if (audio_source == hardware_downstream_keyer_audio_source::VideoOutputChannel) keyer_control_value = VIDEO_ONBOARD_KEYER_SET_STATUS_USE_OUTPUT_ANCILLARY(keyer_control_value); if (blue_->set_card_property32(VIDEO_ONBOARD_KEYER, keyer_control_value)) CASPAR_LOG(error) << print() << TEXT(" Failed to set keyer control."); } void enable_video_output() { if (config_.device_stream == bluefish_hardware_output_channel::channel_1) blue_->set_card_property32(BYPASS_RELAY_A_ENABLE, 0); else if (config_.device_stream == bluefish_hardware_output_channel::channel_2) blue_->set_card_property32(BYPASS_RELAY_B_ENABLE, 0); if (blue_->set_card_property32(VIDEO_BLACKGENERATOR, 0)) CASPAR_LOG(error) << print() << TEXT(" Failed to disable video output."); } void disable_video_output() { blue_->video_playback_stop(0, 0); blue_->set_card_property32(VIDEO_DUAL_LINK_OUTPUT, 0); if (blue_->set_card_property32(VIDEO_BLACKGENERATOR, 1)) CASPAR_LOG(error) << print() << TEXT(" Failed to disable video output."); if (blue_->set_card_property32(EMBEDEDDED_AUDIO_OUTPUT, 0)) CASPAR_LOG(error) << print() << TEXT(" Failed to disable audio output."); } bool send(core::video_field field, core::const_frame frame) { // TODO - field alignment { std::lock_guard lock(exception_mutex_); if (exception_ != nullptr) { std::rethrow_exception(exception_); } } if (!frame) { return !abort_request_; } try { std::unique_lock lock(buffer_mutex_); copy_frame(frame); graph_->set_value("tick-time", static_cast(tick_timer_.elapsed() * format_desc_.fps * 0.5)); tick_timer_.restart(); } catch (...) 
{ CASPAR_LOG_CURRENT_EXCEPTION(); } return !abort_request_; } void dma_present_thread_actual() { /* Presenter loop: pops staged buffers from live_frames_, DMAs the image (plus the HANC block when embedded audio is enabled) into a card buffer, presents it, then recycles the buffer via reserved_frames_. Playback itself is started only after the first frame has been buffered (frames_to_buffer). */ bvc_wrapper wait_b; wait_b.attach(config_.device_index); EBlueVideoChannel out_vid_channel = get_bluesdk_videochannel_from_streamid(config_.device_stream); wait_b.set_card_property32(DEFAULT_VIDEO_OUTPUT_CHANNEL, out_vid_channel); int frames_to_buffer = 1; unsigned long buffer_id = 0; unsigned long underrun = 0; while (!abort_request_) { blue_dma_buffer_ptr buf = nullptr; if (live_frames_.try_pop(buf) && (blue_->video_playback_allocate(&buffer_id, &underrun) == 0)) { // Send and display if (config_.embedded_audio) { // Do video first, then do hanc DMA... blue_->system_buffer_write(const_cast(buf->image_data()), static_cast(buf->image_size()), BlueImage_HANC_DMABuffer(buffer_id, BLUE_DATA_IMAGE), 0); blue_->system_buffer_write(buf->hanc_data(), static_cast(buf->hanc_size()), BlueImage_HANC_DMABuffer(buffer_id, BLUE_DATA_HANC), 0); if (blue_->video_playback_present(BlueBuffer_Image_HANC(buffer_id), 1, 0, 0)) CASPAR_LOG(warning) << print() << TEXT(" video_playback_present failed."); } else { blue_->system_buffer_write(const_cast(buf->image_data()), static_cast(buf->image_size()), BlueImage_DMABuffer(buffer_id, BLUE_DATA_IMAGE), 0); if (blue_->video_playback_present(BlueBuffer_Image(buffer_id), 1, 0, 0)) CASPAR_LOG(warning) << print() << TEXT(" video_playback_present failed."); } ++scheduled_frames_completed_; reserved_frames_.push(buf); } { // Sync and Update timer unsigned long n_field = 0; wait_b.wait_video_output_sync(UPD_FMT_FRAME, &n_field); graph_->set_value("sync-time", sync_timer_.elapsed() * format_desc_.fps * 0.5); sync_timer_.restart(); } if (frames_to_buffer > 0) { frames_to_buffer--; if (frames_to_buffer == 0) { if (blue_->video_playback_start(0, 0)) CASPAR_LOG(warning) << print() << TEXT("Error video playback start failed"); } } } wait_b.detach(); blue_->video_playback_stop(0, 0); } void copy_frame(core::const_frame frame) { int audio_samples_for_next_frame =
// copy_frame(): stages one channel frame (video + optional embedded audio) into a reserved DMA buffer.
// Interlaced formats pair two consecutive fields into one buffer: field 1 copies the picture and
// accumulates its audio into tmp_audio_buf_; field 2 appends the remaining audio, encodes the HANC
// block and queues the buffer on live_frames_ for dma_present_thread_actual() to present.
// NOTE(review): several casts below (const_cast/static_cast/reinterpret_cast) appear to have lost
// their template arguments in this copy of the file - restore them from upstream before building.
blue_->get_num_audio_samples_for_frame(mode_, static_cast(audio_frames_filled_)); if (interlaced_) { if (!last_field_buf_) // field 1 { if (reserved_frames_.try_pop(last_field_buf_)) { // copy video data into holding buf void* dest = last_field_buf_->image_data(); if (frame.image_data(0).size()) { std::memcpy(dest, frame.image_data(0).begin(), frame.image_data(0).size()); } else std::memset(dest, 0, last_field_buf_->image_size()); // now copy Some of the Audio bytes that we need if (config_.embedded_audio) { auto audio_size = frame.audio_data().size() * 4; if (audio_size) { tmp_audio_buf_.insert( tmp_audio_buf_.end(), frame.audio_data().begin(), frame.audio_data().end()); } } } } else // field 2 { // we have already done the video... just grab the last bit of audio, encode and push to Q. if (config_.embedded_audio) { auto audio_size = frame.audio_data().size() * 4; if (audio_size) { tmp_audio_buf_.insert( tmp_audio_buf_.end(), frame.audio_data().begin(), frame.audio_data().end()); encode_hanc(reinterpret_cast(last_field_buf_->hanc_data()), reinterpret_cast(tmp_audio_buf_.data()), audio_samples_for_next_frame, static_cast(format_desc_.audio_channels)); ++audio_frames_filled_; } } // push to in use Q. live_frames_.push(last_field_buf_); last_field_buf_ = nullptr; tmp_audio_buf_.clear(); } } else { blue_dma_buffer_ptr buf = nullptr; // Copy to local buffers if (reserved_frames_.try_pop(buf)) { void* dest = buf->image_data(); if (frame.image_data(0).size()) { if (config_.uhd_mode == uhd_output_option::force_2si) { // Do the Square Division top 2si conversion here.
// Progressive path (continues below): copy the image straight in - or run the card's
// square-division to two-sample-interleave (2SI) conversion when uhd_mode forces 2SI -
// then encode embedded audio into the HANC buffer and queue the frame on live_frames_.
blue_->convert_sq_to_2si( (int)frame.width(), (int)frame.height(), (void*)frame.image_data(0).begin(), dest); } else std::memcpy(dest, frame.image_data(0).begin(), frame.image_data(0).size()); } else std::memset(dest, 0, buf->image_size()); // encode and copy hanc data if (config_.embedded_audio) { if (frame.audio_data().size()) { encode_hanc(reinterpret_cast(buf->hanc_data()), (void*)frame.audio_data().data(), audio_samples_for_next_frame, static_cast(format_desc_.audio_channels)); ++audio_frames_filled_; } } // push to in use Q. live_frames_.push(buf); } } // Sync unsigned long n_field = 0; if (interlaced_) blue_->wait_video_output_sync(UPD_FMT_FIELD, &n_field); else blue_->wait_video_output_sync(UPD_FMT_FRAME, &n_field); graph_->set_value("sync-time", sync_timer_.elapsed() * format_desc_.fps * 0.5); sync_timer_.restart(); } void encode_hanc(BLUE_U32* hanc_data, void* audio_data, int audio_samples, int audio_nchannels) { /* Builds the card's HANC (embedded audio) block from interleaved PCM: one audio group is enabled per 4 channels (groups 2-4 for >4/>8/>12 channels). AudioDBNArray is preset to -1 - presumably driver-managed DBN sequencing, confirm against the Bluefish SDK. */ const auto sample_type = AUDIO_CHANNEL_LITTLEENDIAN; auto emb_audio_flag = blue_emb_audio_enable | blue_emb_audio_group1_enable; if (audio_nchannels > 4) emb_audio_flag |= blue_emb_audio_group2_enable; if (audio_nchannels > 8) emb_audio_flag |= blue_emb_audio_group3_enable; if (audio_nchannels > 12) emb_audio_flag |= blue_emb_audio_group4_enable; hanc_stream_info_struct hanc_stream_info; std::memset(&hanc_stream_info, 0, sizeof(hanc_stream_info)); hanc_stream_info.AudioDBNArray[0] = -1; hanc_stream_info.AudioDBNArray[1] = -1; hanc_stream_info.AudioDBNArray[2] = -1; hanc_stream_info.AudioDBNArray[3] = -1; hanc_stream_info.hanc_data_ptr = hanc_data; hanc_stream_info.video_mode = get_bluefish_video_format(format_desc_.format); int card_type = CRD_INVALID; blue_->query_card_type(&card_type, config_.device_index); blue_->encode_hanc_frame( card_type, &hanc_stream_info, audio_data, audio_nchannels, audio_samples, sample_type, emb_audio_flag); } std::wstring print() const { return model_name_ + L" [" + std::to_wstring(channel_index_) + L"-" +
std::to_wstring(config_.device_index) + L"Stream: " + std::to_wstring(static_cast(config_.device_stream)) + L"|" + format_desc_.name + L"]"; } int64_t presentation_delay_millis() const { return 0; } }; struct bluefish_consumer_proxy : public core::frame_consumer { /* frame_consumer facade: owns the real bluefish_consumer and funnels construction, teardown and send() through its own executor_ thread, so the card is only ever touched from that one thread. */ const configuration config_; std::unique_ptr consumer_; core::video_format_desc format_desc_; executor executor_; public: explicit bluefish_consumer_proxy(const configuration& config) : config_(config) , executor_(L"bluefish_consumer[" + std::to_wstring(config.device_index) + L"]") { } ~bluefish_consumer_proxy() { executor_.invoke([=, this] { consumer_.reset(); }); } // frame_consumer void initialize(const core::video_format_desc& format_desc, const core::channel_info& channel_info, int port_index) override { format_desc_ = format_desc; executor_.invoke([=, this] { consumer_.reset(); consumer_.reset(new bluefish_consumer(config_, format_desc, channel_info.index)); }); } std::future send(core::video_field field, core::const_frame frame) override { return executor_.begin_invoke([=, this] { return consumer_->send(field, frame); }); } std::wstring print() const override { return consumer_ ?
// print() falls back to a generic label until initialize() has created the wrapped consumer.
// NOTE(review): template arguments (std::unique_ptr<...>, std::future<...>, spl::shared_ptr<...>,
// std::vector<...>) appear to have been stripped from this copy - restore from upstream.
consumer_->print() : L"[bluefish_consumer]"; } std::wstring name() const override { return L"bluefish"; } int index() const override { return 400 + config_.device_index; } bool has_synchronization_clock() const override { return true; } core::monitor::state state() const override { core::monitor::state state; state["bluefish/index"] = config_.device_index; state["bluefish/stream"] = static_cast(config_.device_stream); state["bluefish/embedded_audio"] = config_.embedded_audio; return state; } }; /* AMCP factory ("ADD <ch> BLUEFISH ..."): validates the BLUEFISH token, rejects non-8-bit channels, then fills a configuration from bare parameter tokens (see below). */ spl::shared_ptr create_consumer(const std::vector& params, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) { if (params.size() < 1 || !boost::iequals(params.at(0), L"BLUEFISH")) { return core::frame_consumer::empty(); } if (channel_info.depth != common::bit_depth::bit8) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Bluefish consumer only supports 8-bit color depth.")); configuration config; // const auto device_index = params.size() > 1 ?
std::stoi(params.at(1)) : 1; // const auto device_stream = contains_param(L"SDI-STREAM", params); // const auto embedded_audio = contains_param(L"EMBEDDED_AUDIO", params); // const auto keyer_option = contains_param(L"KEYER", params); // const auto keyer_audio_option = contains_param(L"INTERNAL-KEYER-AUDIO-SOURCE", params); config.device_stream = bluefish_hardware_output_channel::channel_1; if (contains_param(L"1", params)) config.device_stream = bluefish_hardware_output_channel::channel_1; else if (contains_param(L"2", params)) config.device_stream = bluefish_hardware_output_channel::channel_2; else if (contains_param(L"3", params)) config.device_stream = bluefish_hardware_output_channel::channel_3; else if (contains_param(L"4", params)) config.device_stream = bluefish_hardware_output_channel::channel_4; else if (contains_param(L"5", params)) config.device_stream = bluefish_hardware_output_channel::channel_5; else if (contains_param(L"6", params)) config.device_stream = bluefish_hardware_output_channel::channel_6; else if (contains_param(L"7", params)) config.device_stream = bluefish_hardware_output_channel::channel_7; else if (contains_param(L"8", params)) config.device_stream = bluefish_hardware_output_channel::channel_8; config.hardware_keyer_value = hardware_downstream_keyer_mode::disable; if (contains_param(L"DISABLED", params)) config.hardware_keyer_value = hardware_downstream_keyer_mode::disable; else if (contains_param(L"EXTERNAL", params)) config.hardware_keyer_value = hardware_downstream_keyer_mode::external; else if (contains_param(L"INTERNAL", params)) config.hardware_keyer_value = hardware_downstream_keyer_mode::internal; config.keyer_audio_source = hardware_downstream_keyer_audio_source::SDIVideoInput; if (contains_param(L"SDIVIDEOINPUT", params)) config.keyer_audio_source = hardware_downstream_keyer_audio_source::SDIVideoInput; else if (contains_param(L"VIDEOOUTPUTCHANNEL", params)) config.keyer_audio_source =
// Output channel (tokens "1".."8"), keyer mode (DISABLED/EXTERNAL/INTERNAL) and keyer audio source
// (SDIVIDEOINPUT/VIDEOOUTPUTCHANNEL) are all selected by bare tokens found anywhere in the
// parameter list via contains_param(); each group falls back to a default when no token matches.
// NOTE(review): the device-index parsing above is commented out, so this AMCP path never assigns
// config.device_index (the struct default is used) while the config-file path does - confirm intended.
hardware_downstream_keyer_audio_source::VideoOutputChannel; config.embedded_audio = contains_param(L"EMBEDDED_AUDIO", params); config.watchdog_timeout = 2; return spl::make_shared(config); } spl::shared_ptr create_preconfigured_consumer(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info) { configuration config; auto device_index = ptree.get(L"device", 1); config.device_index = device_index; if (channel_info.depth != common::bit_depth::bit8) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Bluefish consumer only supports 8-bit color depth.")); auto device_stream = ptree.get(L"sdi-stream", L"1"); if (device_stream == L"1") config.device_stream = bluefish_hardware_output_channel::channel_1; else if (device_stream == L"2") config.device_stream = bluefish_hardware_output_channel::channel_2; else if (device_stream == L"3") config.device_stream = bluefish_hardware_output_channel::channel_3; else if (device_stream == L"4") config.device_stream = bluefish_hardware_output_channel::channel_4; else if (device_stream == L"5") config.device_stream = bluefish_hardware_output_channel::channel_5; else if (device_stream == L"6") config.device_stream = bluefish_hardware_output_channel::channel_6; else if (device_stream == L"7") config.device_stream = bluefish_hardware_output_channel::channel_7; else if (device_stream == L"8") config.device_stream = bluefish_hardware_output_channel::channel_8; auto embedded_audio = ptree.get(L"embedded-audio", false); config.embedded_audio = embedded_audio; auto hardware_keyer_value = ptree.get(L"keyer", L"disabled"); if (hardware_keyer_value == L"disabled") config.hardware_keyer_value = hardware_downstream_keyer_mode::disable; else if (hardware_keyer_value == L"external") config.hardware_keyer_value = hardware_downstream_keyer_mode::external; else if (hardware_keyer_value == L"internal") config.hardware_keyer_value =
// Remaining preconfigured settings: keyer audio source, watchdog timeout and uhd-mode, where
// 0 = BVC multi-link disabled, 1 = auto UHD, 2 = force 2SI, 3 = force square division.
hardware_downstream_keyer_mode::internal; auto keyer_audio_source_value = ptree.get(L"internal-keyer-audio-source", L"videooutputchannel"); if (keyer_audio_source_value == L"videooutputchannel") config.keyer_audio_source = hardware_downstream_keyer_audio_source::VideoOutputChannel; else if (keyer_audio_source_value == L"sdivideoinput") config.keyer_audio_source = hardware_downstream_keyer_audio_source::SDIVideoInput; auto watchdog_timeout = ptree.get(L"watchdog", 2); config.watchdog_timeout = watchdog_timeout; auto uhd_mode = ptree.get(L"uhd-mode", 0); config.uhd_mode = uhd_output_option::disable_BVC_MultiLink; if (uhd_mode == 1) config.uhd_mode = uhd_output_option::auto_uhd; else if (uhd_mode == 2) config.uhd_mode = uhd_output_option::force_2si; else if (uhd_mode == 3) config.uhd_mode = uhd_output_option::force_square_division; return spl::make_shared(config); } }} // namespace caspar::bluefish ================================================ FILE: src/modules/bluefish/consumer/bluefish_consumer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
* * Author: Robert Nagy, ronag89@gmail.com * James Wise, james.wise@bluefish444.com */ #pragma once /* NOTE(review): the six #include targets on this line were stripped in this copy of the file - restore them from upstream (common/core headers, boost property_tree forward header, std headers). */ #include #include #include #include #include #include namespace caspar { namespace bluefish { /* Factory entry points for the Bluefish module: create_consumer() parses AMCP ADD parameters; create_preconfigured_consumer() reads a casparcg.config subtree. Both are implemented in bluefish_consumer.cpp. */ spl::shared_ptr create_consumer(const std::vector& params, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info); spl::shared_ptr create_preconfigured_consumer(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info); }} // namespace caspar::bluefish ================================================ FILE: src/modules/bluefish/interop/BlueDriver_p.h ================================================ /*/////////////////////////////////////////////////////////////////////////// // File: BlueDriver_p.h // Author: Tim Bragulla // // Description: Legacy definitions for usage with legacy Bluefish APIs // (BlueVelvet and BlueVelvetC) // // (C) Copyright 2017 by Bluefish Technologies Pty Ltd. All Rights Reserved.
///////////////////////////////////////////////////////////////////////////*/ #ifndef HG_BLUE_DRIVER_P_LEGACY_HG #define HG_BLUE_DRIVER_P_LEGACY_HG #include "BlueTypes.h" /*/////////////////////////////////////////////////////////////////////////// // L E G A C Y D E C L A R A T I O N S - BEGIN ///////////////////////////////////////////////////////////////////////////*/ #define BLUE_LITTLE_ENDIAN 0 #define BLUE_BIG_ENDIAN 1 #define BLUE_CARD_BUFFER_TYPE_OFFSET (12) #define BLUE_DMA_DATA_TYPE_OFFSET (16) /* DMA flag words pack: buffer id in bits 0-11, card buffer type in bits 12-15, DMA data type in bits 16-19. */ #define GetDMACardBufferId(value) ( value & 0x00000FFF) #define GetCardBufferType(value) ((value & 0x0000F000) >> BLUE_CARD_BUFFER_TYPE_OFFSET) #define GetDMADataType(value) ((value & 0x000F0000) >> BLUE_DMA_DATA_TYPE_OFFSET) /* FLAGS FOR DMA FUNCTION CALLS */ #define Blue_DMABuffer(CardBufferType, BufferId, DataType) ( (((ULONG)DataType&0xF)<<(ULONG)BLUE_DMA_DATA_TYPE_OFFSET)| \ ( CardBufferType<<(ULONG)BLUE_CARD_BUFFER_TYPE_OFFSET) | \ ( ((ULONG)BufferId&0xFFF)) | 0) #define BlueImage_VBI_DMABuffer(BufferId, DataType) ( (((ULONG)DataType&0xF)<<(ULONG)BLUE_DMA_DATA_TYPE_OFFSET)| \ ( BLUE_CARDBUFFER_IMAGE_VBI<<(ULONG)BLUE_CARD_BUFFER_TYPE_OFFSET) | \ ( ((ULONG)BufferId&0xFFF)) | 0) #define BlueImage_DMABuffer(BufferId, DataType) ( (((ULONG)DataType&0xF)<<(ULONG)BLUE_DMA_DATA_TYPE_OFFSET)| \ ( BLUE_CARDBUFFER_IMAGE<<(ULONG)BLUE_CARD_BUFFER_TYPE_OFFSET) | \ ( ((ULONG)BufferId&0xFFF)) | 0) #define BlueImage_VBI_HANC_DMABuffer(BufferId, DataType) ( (((ULONG)DataType&0xF)<<(ULONG)BLUE_DMA_DATA_TYPE_OFFSET)| \ ( BLUE_CARDBUFFER_IMAGE_VBI_HANC<<(ULONG)BLUE_CARD_BUFFER_TYPE_OFFSET) | \ ( ((ULONG)BufferId&0xFFF)) | 0) #define BlueImage_HANC_DMABuffer(BufferId, DataType) ( (((ULONG)DataType&0xF)<<(ULONG)BLUE_DMA_DATA_TYPE_OFFSET)| \ ( BLUE_CARDBUFFER_IMAGE_HANC<<(ULONG)BLUE_CARD_BUFFER_TYPE_OFFSET) | \ ( ((ULONG)BufferId&0xFFF)) | 0) /* FLAGS FOR CAPTURE AND PLAYBACK FUNCTION CALLS */ #define BlueBuffer(CardBufferType,BufferId) (((CardBufferType)<>8) #define
SET_ANALOG_AUDIO_ROUTINGCHANNEL(left,right) (((right & 0xFF)<<8)|(left & 0xFF)) #define SET_AUDIO_OUTPUT_ROUTINGCHANNEL(output_type,src_channel_id,_output_channel_id) ((1<<31)|((output_type&3)<<29)|((_output_channel_id &0x3F)<<23)|((src_channel_id & 0x3F)<<16)) #define GET_AUDIO_OUTPUT_SRC_CHANNEL_ROUTING(value) ((value>>16) & 0x3F) #define GET_AUDIO_OUTPUT_CHANNEL_ROUTING(value) ((value>>23) & 0x3F) #define GET_AUDIO_OUTPUT_TYPE_ROUTING(value) ((value & 0x60000000)>>29) /* AES_OUTPUT_ROUTING */ #define SET_AES_OUTPUT_ROUTING(OutputVideoChannel, AudioSrcChannel, AudioDstChannel) (((OutputVideoChannel & 0xFF) << 16) | ((AudioDstChannel & 0xFF) << 8) | (AudioSrcChannel & 0xFF)) #define GET_AES_OUTPUT_ROUTING_STREAM(value) ((value >> 16) & 0xFF) #define GET_AES_OUTPUT_ROUTING_DST_CHANNEL(value) ((value >> 8) & 0xFF) #define GET_AES_OUTPUT_ROUTING_SRC_CHANNEL(value) (value & 0xFF) /* MUTE_AES_OUTPUT_CHANNEL */ #define SET_MUTE_AES_OUTPUT_CHANNEL(AudioDstChannel, Mute) (((Mute & 0x1) << 31) | AudioDstChannel & 0xFF) #define AUDIO_INPUT_SOURCE_SELECT_FLAG (1<<16) #define AUDIO_INPUT_SOURCE_SELECT(SynchCount,AudioInputSource) (AUDIO_INPUT_SOURCE_SELECT_FLAG|(SynchCount)|(AudioInputSource<<17)) #define EPOCH_WATCHDOG_TIMER_SET_MACRO(prop, value) (prop|(value &0xFFFF)) #define EPOCH_WATCHDOG_TIMER_QUERY_MACRO(prop) (prop) #define EPOCH_WATCHDOG_TIMER_GET_VALUE_MACRO(value) (value&0xFFFF) #define EPOCH_RS422_PORT_FLAG_SET_MACRO(portid,value) ((portid&0x3)|(value<<3)) #define EPOCH_RS422_PORT_FLAG_GET_FLAG_MACRO(value) ((value>>3)&0xFFFF) #define EPOCH_RS422_PORT_FLAG_GET_PORTID_MACRO(value) (value&0x3) #define RS422_SERIALPORT_FLAG(timeout, port, RxFlushBuffer) (((unsigned long)(timeout)<<16)|(port & 0x3) | (RxFlushBuffer<<15)) #define RS422_SERIALPORT_FLAG2(timeout, port, RxFlushBuffer, RXIntWaitReturnOnAvailData) (((unsigned long)(timeout)<<16)|(port & 0x3) | (RxFlushBuffer<<15)|(RXIntWaitReturnOnAvailData<<14)) /* Epoch scaler not supported */ #define
SET_EPOCH_SCALER_MODE(scaler_id,video_mode) ((scaler_id << 16) | video_mode) #define GET_EPOCH_SCALER_MODE(value) (value & 0xFFFF) #define GET_EPOCH_SCALER_ID(value) ((value & 0xFFFF0000) >> 16) /* use these macros for retrieving the temp and fan speed on epoch range of cards. */ #define EPOCH_CORE_TEMP(value) (value & 0xFF) #define EPOCH_BOARD_TEMP(value) ((value>>16) & 0xFF) #define EPOCH_FAN_SPEED(value) ((value>>24) & 0xFF) /* Use these macros for doing the MR2 routing on epoch range of cards. MR2 routing can be controlled using the property MR_ROUTING. */ #define EPOCH_SET_ROUTING(routing_src,routing_dest,data_link_type) ((routing_src & 0xFF) | ((routing_dest & 0xFF)<<8) | ((data_link_type&0xFFFF)<<16)) #define EPOCH_ROUTING_GET_SRC_DATA(value) (value & 0xFF) #define EPOCH_ROUTING_GET_DEST_DATA(value) ((value>>8) & 0xFF) #define EPOCH_ROUTING_GET_LINK_TYPE_DATA(value) ((value>>16) & 0xFFFF) /* MACROS for card property BYPASS_RELAY_ENABLE */ #define BLUE_ENABLE_BYPASS_RELAY(RelayNumber) ((RelayNumber << 16) | 1) #define BLUE_DISABLE_BYPASS_RELAY(RelayNumber) ((RelayNumber << 16)) #define BLUE_GET_BYPASS_RELAY_SETTING(RelayNumber) (RelayNumber << 16) #define GPIO_TX_PORT_A (1) #define GPIO_TX_PORT_B (2) #define EPOCH_GPIO_TX(port,value) (port<<16|value) /* if want to set each of the GPO ports individually you should use this macro.
without the macro it would set both the GPO ports on the card */ #define VPEnableFieldCountTrigger ((BLUE_U64)1<<63) #define VPTriggerGetFieldCount(value) ((BLUE_U64)value & 0xFFFFFFFF) #define VIDEO_CAPS_INPUT_SDI (0x00000001) /**< Capable of input of SDI Video */ #define VIDEO_CAPS_OUTPUT_SDI (0x00000002) /**< Capable of output of SDI Video */ #define VIDEO_CAPS_INPUT_COMP (0x00000004) /**< Capable of capturing Composite Video input */ #define VIDEO_CAPS_OUTPUT_COMP (0x00000008) /**< Capable of capturing Composite Video output */ #define VIDEO_CAPS_INPUT_YUV (0x00000010) /**< Capable of capturing Component Video input */ #define VIDEO_CAPS_OUTPUT_YUV (0x00000020) /**< Capable of capturing Component Video output */ #define VIDEO_CAPS_INPUT_SVIDEO (0x00000040) /**< Capable of capturing SVideo input */ #define VIDEO_CAPS_OUTPUT_SVIDEO (0x00000080) /**< Capable of capturing SVideo output */ #define VIDEO_CAPS_GENLOCK (0x00000100) /**< Able to adjust Vert & Horiz timing */ #define VIDEO_CAPS_VERTICAL_FLIP (0x00000200) /**< Able to flip rasterisation */ #define VIDEO_CAPS_KEY_OUTPUT (0x00000400) /**< Video keying output capable */ #define VIDEO_CAPS_4444_OUTPUT (0x00000800) /**< Capable of outputting 4444 (dual link) */ #define VIDEO_CAPS_DUALLINK_INPUT (0x00001000) /**< Dual Link input */ #define VIDEO_CAPS_INTERNAL_KEYER (0x00002000) /**< Has got an internal Keyer */ #define VIDEO_CAPS_RGB_COLORSPACE_SDI_CONN (0x00004000) /**< Support RGB colorspace in on an SDI connector */ #define VIDEO_CAPS_HAS_PILLOR_BOX (0x00008000) /**< Has got support for pillor box */ #define VIDEO_CAPS_OUTPUT_RGB (0x00010000) /**< Has Analog RGB output connector */ #define VIDEO_CAPS_SCALED_RGB (0x00020000) /**< Can scale RGB colour space */ #define AUDIO_CAPS_PLAYBACK (0x00040000) /**< Has got audio output */ #define AUDIO_CAPS_CAPTURE (0x00080000) #define VIDEO_CAPS_DOWNCONVERTER (0x00100000) #define VIDEO_CAPS_DUALOUTPUT_422_IND_STREAM (0x00200000) /**< Specifies whether the card supports
Dual Independent 422 output streams */ #define VIDEO_CAPS_DUALINPUT_422_IND_STREAM (0x00400000) /**< Specifies whether the card supports Dual Independent 422 input streams */ #define VIDEO_CAPS_VBI_OUTPUT (0x00800000) /**< Specifies whether the card supports VBI output */ #define VIDEO_CAPS_VBI_INPUT (0x04000000) /**< Specifies whether the card supports VBI input */ #define VIDEO_CAPS_HANC_OUTPUT (0x02000000) #define VIDEO_CAPS_HANC_INPUT (0x04000000) /* NOTE(review): VIDEO_CAPS_VBI_INPUT and VIDEO_CAPS_HANC_INPUT both carry 0x04000000 in this copy - looks like a bit collision; confirm against the vendor SDK header. */ #define ANALOG_CHANNEL_0 MONO_CHANNEL_9 #define ANALOG_CHANNEL_1 MONO_CHANNEL_10 #define ANALOG_AUDIO_PAIR (ANALOG_CHANNEL_0 | ANALOG_CHANNEL_1) #define STEREO_PAIR_1 (MONO_CHANNEL_1 | MONO_CHANNEL_2) #define STEREO_PAIR_2 (MONO_CHANNEL_3 | MONO_CHANNEL_4) #define STEREO_PAIR_3 (MONO_CHANNEL_5 | MONO_CHANNEL_6) #define STEREO_PAIR_4 (MONO_CHANNEL_7 | MONO_CHANNEL_8) #define IGNORE_SYNC_WAIT_TIMEOUT_VALUE (0xFFFFFFFF) #define AUDIO_INPUT_SOURCE_EMB 0 #define AUDIO_INPUT_SOURCE_AES 1 /*/////////////////////////////////////////////////////////////////////////////////// //H E L P E R M A C R O S ///////////////////////////////////////////////////////////////////////////////////*/ /* the following macros are used with card property OVERRIDE_OUTPUT_VPID_DEFAULT */ #define OUTPUT_VPID_SET_ENABLED(value) ((value) |= 0x8000000000000000ULL ) #define OUTPUT_VPID_SET_DISABLED(value) ((value) &= ~(0x8000000000000000ULL)) #define OUTPUT_VPID_SET_BYTES(value, byte1, byte2, byte3, byte4) (value = ((value & 0xFFFFFFFF00000000ULL) | ((((BLUE_U64)(byte4 & 0xFF)) << 24) | ((byte3 & 0xFF) << 16) | ((byte2 & 0xFF) << 8) | (byte1 & 0xFF)))) #define OUTPUT_VPID_SET_SDI_CONNECTOR(value, outputconnector) (value = ((value & 0xFFFF0000FFFFFFFFULL) | ((outputconnector & 0xFFFFULL) << 32))) #define OUTPUT_VPID_GET_ENABLED(value) ((value) & 0x8000000000000000ULL ) #define OUTPUT_VPID_GET_SDI_CONNECTOR(value) ((value >> 32) & 0xFFFFULL) #define OUTPUT_VPID_GET_VPID_BYTE1(value) (value & 0xFFLL) #define OUTPUT_VPID_GET_VPID_BYTE2(value) ((value >> 8)
& 0xFFULL) #define OUTPUT_VPID_GET_VPID_BYTE3(value) ((value >> 16) & 0xFFULL) #define OUTPUT_VPID_GET_VPID_BYTE4(value) ((value >> 24) & 0xFFULL) /* the following macros are used with card property INTERLOCK_REFERENCE */ #define INTERLOCK_REFERENCE_GET_OUTPUT_ENABLED(value) ((value) & 0x01) #define INTERLOCK_REFERENCE_GET_INPUT_DETECTED(value) ((value >> 1) & 0x01) #define INTERLOCK_REFERENCE_GET_SLAVE_POSITION(value) ((value >> 2) & 0x1F) /* the following macros are used with card property CARD_FEATURE_STREAM_INFO */ #define CARD_FEATURE_GET_SDI_OUTPUT_STREAM_COUNT(value) ((value) & 0xF) #define CARD_FEATURE_GET_SDI_INPUT_STREAM_COUNT(value) ((value >> 4) & 0xF) #define CARD_FEATURE_GET_ASI_OUTPUT_STREAM_COUNT(value) ((value >> 8) & 0xF) #define CARD_FEATURE_GET_ASI_INPUT_STREAM_COUNT(value) ((value >> 12) & 0xF) #define CARD_FEATURE_GET_3G_SUPPORT(value) ((value >> 16) & 0xF) /* the following macros are used with card property CARD_FEATURE_CONNECTOR_INFO */ #define CARD_FEATURE_GET_SDI_OUTPUT_CONNECTOR_COUNT(value) ((value) & 0xF) #define CARD_FEATURE_GET_SDI_INPUT_CONNECTOR_COUNT(value) ((value >> 4) & 0xF) #define CARD_FEATURE_GET_AES_CONNECTOR_SUPPORT(value) ((value >> 8) & 0x1) #define CARD_FEATURE_GET_RS422_CONNECTOR_SUPPORT(value) ((value >> 9) & 0x1) #define CARD_FEATURE_GET_LTC_CONNECTOR_SUPPORT(value) ((value >> 10) & 0x1) #define CARD_FEATURE_GET_GPIO_CONNECTOR_SUPPORT(value) ((value >> 11) & 0x1) #define CARD_FEATURE_GET_HDMI_CONNECTOR_SUPPORT(value) ((value >> 12) & 0x1) #define CARD_FEATURE_GET_HDMI_OUTPUT_CONNECTOR_SUPPORT(value) ((value >> 12) & 0x1) #define CARD_FEATURE_GET_HDMI_INPUT_CONNECTOR_SUPPORT(value) ((value >> 13) & 0x1) /* the following macros are used with card property VIDEO_ONBOARD_KEYER */ #define VIDEO_ONBOARD_KEYER_GET_STATUS_ENABLED(value) ((value) & 0x1) #define VIDEO_ONBOARD_KEYER_GET_STATUS_OVER_BLACK(value) ((value) & 0x2) #define VIDEO_ONBOARD_KEYER_GET_STATUS_USE_INPUT_ANCILLARY(value) ((value) & 0x4) #define
VIDEO_ONBOARD_KEYER_GET_STATUS_DATA_IS_PREMULTIPLIED(value) ((value) & 0x8) #define VIDEO_ONBOARD_KEYER_SET_STATUS_ENABLED(value) (value |= 0x1) #define VIDEO_ONBOARD_KEYER_SET_STATUS_DISABLED(value) (value &= ~(0x1)) #define VIDEO_ONBOARD_KEYER_SET_STATUS_ENABLE_OVER_BLACK(value) (value |= 0x2) #define VIDEO_ONBOARD_KEYER_SET_STATUS_DISABLE_OVER_BLACK(value) (value &= ~(0x2)) #define VIDEO_ONBOARD_KEYER_SET_STATUS_USE_INPUT_ANCILLARY(value) (value |= 0x4) /* only use this setting when keying over valid input (input must also match output video mode), includes HANC and VANC */ #define VIDEO_ONBOARD_KEYER_SET_STATUS_USE_OUTPUT_ANCILLARY(value) (value &= ~(0x4)) #define VIDEO_ONBOARD_KEYER_SET_STATUS_DATA_IS_PREMULTIPLIED(value) (value |= 0x8) #define VIDEO_ONBOARD_KEYER_SET_STATUS_DATA_IS_NOT_PREMULTIPLIED(value) (value &= ~(0x8)) /* the following macros are used with card property EPOCH_HANC_INPUT_FLAGS */ #define HANC_FLAGS_IS_ARRI_RECORD_FLAG_SET(value) ((value) & 0x1) /* the following macros are used with card property EPOCH_RAW_VIDEO_INPUT_TYPE */ #define RAW_VIDEO_INPUT_TYPE_IS_10BIT (0x01) #define RAW_VIDEO_INPUT_TYPE_IS_12BIT (0x02) #define RAW_VIDEO_INPUT_TYPE_IS_WEISSCAM (0x10) #define RAW_VIDEO_INPUT_TYPE_IS_ARRI (0x20) /* the following macros are used with card property EPOCH_PCIE_CONFIG_INFO */ #define PCIE_CONFIG_INFO_GET_MAX_PAYLOAD_SIZE(value) ((value) & 0xFFFF) #define PCIE_CONFIG_INFO_GET_MAX_READREQUEST_SIZE(value) ((value >> 16) & 0xFFFF) /* the following macros are used with card property MISC_CONNECTOR_OUT */ #define MISC_CONNECTOR_OUT_SET_CONNECTOR_ID(value, ID) (value = (ID & 0x0000FFFF) << 16) #define MISC_CONNECTOR_OUT_SET_SIGNAL_TYPE_LTC(value) (value = (value & 0xFFFF0000) | 0x0002) #define MISC_CONNECTOR_OUT_SET_SIGNAL_TYPE_INTERLOCK(value) (value = (value & 0xFFFF0000) | 0x0004) #define MISC_CONNECTOR_OUT_SET_SIGNAL_TYPE_SPG(value) (value = (value & 0xFFFF0000) | 0x0008) #define MISC_CONNECTOR_OUT_IS_SIGNAL_TYPE_LTC(value) (value &
0x0002) #define MISC_CONNECTOR_OUT_IS_SIGNAL_TYPE_INTERLOCK(value) (value & 0x0004) #define MISC_CONNECTOR_OUT_IS_SIGNAL_TYPE_SPG(value) (value & 0x0008) /* the following macros are used with card property MISC_CONNECTOR_IN */ #define MISC_CONNECTOR_IN_SET_CONNECTOR_ID(value, ID) (value = (ID & 0xFFFF) << 16) #define MISC_CONNECTOR_IN_IS_SIGNAL_DETECTED(value) (value & 0x0001) #define MISC_CONNECTOR_IN_IS_SIGNAL_TYPE_LTC(value) (value & 0x0002) #define MISC_CONNECTOR_IN_IS_SIGNAL_TYPE_INTERLOCK(value) (value & 0x0004) typedef enum _EBlueCardProperty { VIDEO_DUAL_LINK_OUTPUT = 0, /**< Use this property to enable/diable cards dual link output property */ VIDEO_DUAL_LINK_INPUT = 1, /**< Use this property to enable/diable cards dual link input property */ VIDEO_DUAL_LINK_OUTPUT_SIGNAL_FORMAT_TYPE = 2, /**< Use this property to select signal format type that should be used when dual link output is enabled. Possible values this property can accept is defined in the enumerator EDualLinkSignalFormatType */ VIDEO_DUAL_LINK_INPUT_SIGNAL_FORMAT_TYPE = 3, /**< Use this property to select signal format type that should be used when dual link input is enabled. Possible values this property can accept is defined in the enumerator EDualLinkSignalFormatType */ VIDEO_OUTPUT_SIGNAL_COLOR_SPACE = 4, /**< Use this property to select color space of the signal when dual link output is set to use 4:4:4/4:4:4:4 signal format type. Possible values this property can accept is defined in the enumerator EConnectorSignalColorSpace */ VIDEO_INPUT_SIGNAL_COLOR_SPACE = 5, /**< Use this property to select color space of the signal when dual link input is set to use 4:4:4/4:4:4:4 signal format type. Possible values this property can accept is defined in the enumerator EConnectorSignalColorSpace */ VIDEO_MEMORY_FORMAT = 6, /**< Use this property to set the pixel format that should be used by video output channels. 
Possible values this property can accept is defined in the enumerator EMemoryFormat*/ VIDEO_MODE = 7, /**< Use this property to set the video mode that should be used by video output channels. Possible values this property can accept is defined in the enumerator EVideoMode*/ VIDEO_UPDATE_TYPE = 8, /**< Use this property to set the framestore update type that should be used by video output channels. Card can update video framestore at field/frame rate. Possible values this property can accept is defined in the enumerator EUpdateMethod */ VIDEO_ENGINE = 9, VIDEO_IMAGE_ORIENTATION = 10, /**< Use this property to set the image orientation of the video output framestore. This property must be set before frame is transferred to on card memory using DMA transfer functions(system_buffer_write_async). It is recommended to use vertical flipped image orientation only on RGB pixel formats. Possible values this property can accept is defined in the enumerator EImageOrientation */ VIDEO_USER_DEFINED_COLOR_MATRIX = 11, VIDEO_PREDEFINED_COLOR_MATRIX = 12, /* EPreDefinedColorSpaceMatrix */ VIDEO_RGB_DATA_RANGE = 13, /**< Use this property to set the data range of RGB pixel format, user can specify whether the RGB data is in either SMPTE or CGR range. Based on this information driver is decide which color matrix should be used. Possible values this property can accept is defined in the enumerator ERGBDataRange For SD cards this property will set the input and the output to the specified value. For Epoch/Create/SuperNova cards this property will only set the output to the specified value. 
For setting the input on Epoch/Create/SuperNova cards see EPOCH_VIDEO_INPUT_RGB_DATA_RANGE */ VIDEO_KEY_OVER_BLACK = 14, /**< DEPRECATED */ VIDEO_KEY_OVER_INPUT_SIGNAL = 15, /**< DEPRECATED */ VIDEO_SET_DOWN_CONVERTER_VIDEO_MODE = 16, /**< DEPRECATED */ VIDEO_LETTER_BOX = 17, /**< DEPRECATED */ VIDEO_PILLOR_BOX_LEFT = 18, /**< DEPRECATED */ VIDEO_PILLOR_BOX_RIGHT = 19, /**< DEPRECATED */ VIDEO_PILLOR_BOX_TOP = 20, /**< DEPRECATED */ VIDEO_PILLOR_BOX_BOTTOM = 21, /**< DEPRECATED */ VIDEO_SAFE_PICTURE = 22, /**< DEPRECATED */ VIDEO_SAFE_TITLE = 23, /**< DEPRECATED */ VIDEO_INPUT_SIGNAL_VIDEO_MODE = 24, /**< Use this property to retrieve the video input signal information on the default video input channel used by that SDK object. When calling SetCardProperty with a valid video mode on this property, the SDK will will use this video mode "Hint" if the card buffers are set up despite there being a valid input signal; the card buffers will be set up when calling one of these card properties: VIDEO_INPUT_MEMORY_FORMAT VIDEO_INPUT_UPDATE_TYPE VIDEO_INPUT_ENGINE Note: QueryCardProperty(VIDEO_INPUT_SIGNAL_VIDEO_MODE) will still return the actual video input signal */ VIDEO_COLOR_MATRIX_MODE = 25, VIDEO_OUTPUT_MAIN_LUT = 26, /**< DEPRECATED */ VIDEO_OUTPUT_AUX_LUT = 27, /**< DEPRECATED */ VIDEO_LTC = 28, /**< DEPRECATED; To retreive/ outputting LTC information you can use the HANC decoding and encoding functions. */ VIDEO_GPIO = 29, VIDEO_PLAYBACK_FIFO_STATUS = 30, /**< This property can be used to retrieve how many frames are buffered in the video playback fifo. */ RS422_RX_BUFFER_LENGTH = 31, RS422_RX_BUFFER_FLUSH = 32, VIDEO_INPUT_UPDATE_TYPE = 33, /**< Use this property to set the framestore update type that should be used by video input channels. Card can update video framestore at field/frame rate. 
Possible values this property can accept is defined in the enumerator EUpdateMethod */ VIDEO_INPUT_MEMORY_FORMAT = 34, /**< Use this property to set the pixel format that should be used by video input channels when it is capturing a frame from video input source. Possible values this property can accept is defined in the enumerator EMemoryFormat*/ VIDEO_GENLOCK_SIGNAL = 35, /**< Use this property to retrieve video signal of the reference source that is used by the card. This can also be used to select the reference signal source that should be used. See application note AN004_Genlock.pdf for more information */ AUDIO_OUTPUT_PROP = 36, /**< this can be used to route PCM audio data onto respective audio output connectors. */ AUDIO_CHANNEL_ROUTING = AUDIO_OUTPUT_PROP, AUDIO_INPUT_PROP = 37, /**< Use this property to select audio input source that should be used when doing an audio capture. Possible values this property can accept is defined in the enumerator Blue_Audio_Connector_Type. */ VIDEO_ENABLE_LETTERBOX = 38, /**< DEPRECATED */ VIDEO_DUALLINK_OUTPUT_INVERT_KEY_COLOR = 39, /**< this property is deprecated and no longer supported on epoch/create range of cards. */ VIDEO_DUALLINK_OUTPUT_DEFAULT_KEY_COLOR = 40, /**< this property is deprecated and no longer supported on epoch/create range of cards. */ VIDEO_BLACKGENERATOR = 41, /**< Use this property to control the black generator on the video output channel. */ VIDEO_INPUTFRAMESTORE_IMAGE_ORIENTATION = 42, VIDEO_INPUT_SOURCE_SELECTION = 43, /**< The video input source that should be used by the SDK default video input channel can be configured using this property. Possible values this property can accept is defined in the enumerator EBlueConnectorIdentifier. */ DEFAULT_VIDEO_OUTPUT_CHANNEL = 44, DEFAULT_VIDEO_INPUT_CHANNEL = 45, VIDEO_REFERENCE_SIGNAL_TIMING = 46, EMBEDEDDED_AUDIO_OUTPUT = 47, /**< the embedded audio output property can be configured using this property. 
Possible values this property can accept is defined in the enumerator EBlueEmbAudioOutput. */ EMBEDDED_AUDIO_OUTPUT = EMBEDEDDED_AUDIO_OUTPUT, VIDEO_PLAYBACK_FIFO_FREE_STATUS = 48, /**< this will return the number of free buffer in the fifo. If the video engine is framestore this will give you the number of buffers that the framestore mode can you use with that video output channel. */ VIDEO_IMAGE_WIDTH = 49, /**< selective output DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_IMAGE_HEIGHT = 50, /**< selective output DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_SELECTIVE_OUTPUT_DMA_DST_PITCH = VIDEO_IMAGE_WIDTH, /* pitch (bytes per line) of destination buffer (card memory) */ VIDEO_SELECTIVE_OUTPUT_DMA_SRC_LINES = VIDEO_IMAGE_HEIGHT, /* number of video lines to extract from source image (system memory)*/ VIDEO_SCALER_MODE = 51, AVAIL_AUDIO_INPUT_SAMPLE_COUNT = 52, /* DEPRECATED */ VIDEO_PLAYBACK_FIFO_ENGINE_STATUS = 53, /**< this will return the playback fifo status. The values returned by this property are defined in the enumerator BlueVideoFifoStatus. */ VIDEO_CAPTURE_FIFO_ENGINE_STATUS = 54, /**< this will return the capture fifo status. The values returned by this property are defined in the enumerator BlueVideoFifoStatus. */ VIDEO_2K_1556_PANSCAN = 55, /**< DEPRECATED */ VIDEO_OUTPUT_ENGINE = 56, /**< Use this property to set the video engine of the video output channels. Possible values this property can accept is defined in the enumerator EEngineMode */ VIDEO_INPUT_ENGINE = 57, /**< Use this property to set the video engine of the video input channels. Possible values this property can accept is defined in the enumerator EEngineMode */ BYPASS_RELAY_A_ENABLE = 58, /**< use this property to control the bypass relay on SDI A output. */ BYPASS_RELAY_B_ENABLE = 59, /**< use this property to control the bypass relay on SDI B output. 
*/ VIDEO_PREMULTIPLIER = 60, VIDEO_PLAYBACK_START_TRIGGER_POINT = 61, /**< Using this property you can instruct the driver to start the video playback fifo on a particular video output field count. Normally video playback fifo is started on the next video interrupt after the video_playback_start call. */ GENLOCK_TIMING = 62, VIDEO_IMAGE_PITCH = 63, /**< selective output DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_IMAGE_OFFSET = 64, /**< currently not used; selective output DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_SELECTIVE_OUTPUT_DMA_SRC_PITCH = VIDEO_IMAGE_PITCH, /* pitch (bytes per line) of source buffer (system memory) */ VIDEO_INPUT_IMAGE_WIDTH = 65, /**< selective input DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_INPUT_IMAGE_HEIGHT = 66, /**< selective input DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_INPUT_IMAGE_PITCH = 67, /**< selective input DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_INPUT_IMAGE_OFFSET = 68, /**< currently not used; selective input DMA: see application note AN008_SelectiveDMA.pdf for more details */ VIDEO_SELECTIVE_INPUT_DMA_SRC_PITCH = VIDEO_INPUT_IMAGE_WIDTH, /* pitch (bytes per line) of source buffer (card memory) */ VIDEO_SELECTIVE_INPUT_DMA_SRC_LINES = VIDEO_INPUT_IMAGE_HEIGHT, /* number of video lines to extract from source image (card memory) */ VIDEO_SELECTIVE_INPUT_DMA_DST_PITCH = VIDEO_INPUT_IMAGE_PITCH, /* pitch (bytes per line) of destination buffer (system memory) */ TIMECODE_RP188 = 69, /**< this property is deprecated and no longer supported on epoch/create range of cards.*/ BOARD_TEMPERATURE = 70, /** Use the macro's EPOCH_CORE_TEMP ,EPOCH_BOARD_TEMP and EPOCH_FAN_SPEED to retireive the respective values from the property. */ MR2_ROUTING = 71, /**< Use this property to control the MR2 functionality on epoch range of cards. Use the following macro with this property.
1) EPOCH_SET_ROUTING --> for setting the source, destination and link type of the routing connection, 2) EPOCH_ROUTING_GET_SRC_DATA --> for getting the routing source. The possible source and destination elements supported by the routing matrix are defined in the enumerator EEpochRoutingElements. */ SAVEAS_POWERUP_SETTINGS = 72, /**< DEPRECATED */ VIDEO_CAPTURE_AVAIL_BUFFER_COUNT = 73, /**< This property will return the number of captured frame avail in the fifo at present. If the video engine is framestore this will give you the number of buffers that the framestore mode can you use with that video input channel */ EPOCH_APP_WATCHDOG_TIMER = 74,/**< Use this property to control the application watchdog timer functionality. Possible values this property can accept is defined in the enumerator enum_blue_app_watchdog_timer_prop. */ EPOCH_RESET_VIDEO_INPUT_FIELDCOUNT = 75, /**< Use this property to reset the field count on both the video channels of the card. You can pass the value that should be used as starting fieldcount after the reset. This property can be used to keep track sync between left and right signal when you are capturing in stereoscopic mode. */ EPOCH_RS422_PORT_FLAGS = 76, /**< Use this property to set the master/slave property of the RS422 ports. Possible values this property can accept is defined in the enumerator enum_blue_rs422_port_flags. */ EPOCH_DVB_ASI_INPUT_TIMEOUT = 77, /**< Current DVB ASI input firmware does not support this property in hardware, this is a future addition. Use this property to set the timeout of the DVB ASI input stream. timeout is specified in milliseconds.If hardware did not get the required no of packets( specified using EPOCH_DVB_ASI_INPUT_LATENCY_PACKET_COUNT) within the period specified in the timeout, hardware would generate a video input interrupt and it would be safe to read the dvb asi packet from the card. 
*/ EPOCH_DVB_ASI_INPUT_PACKING_FORMAT = 78, /**< Use this property to specify the packing method that should be used when capturing DVB ASI packets. The possible packing methods are defined in the enumerator enum_blue_dvb_asi_packing_format. */ EPOCH_DVB_ASI_INPUT_LATENCY_PACKET_COUNT = 79, /**< Use this property to set how many asi packets should be captured by the card , before it notifies the driver of available data using video input interrupt. */ VIDEO_PLAYBACK_FIFO_CURRENT_FRAME_UNIQUEID = 80, /**< This property can be used to query the current unique id of the frame that is being displayed currently by the video output channel. This property is only useful in the context of video fifo.
You get a uniqueid when you present a frame using video_playback_present function. Alternative ways to get this information are
1) using blue_wait_video_sync_async , the member current_display_frame_uniqueid contains the same information
2) using wait_video_output_sync function on epoch cards, if the flag UPD_FMT_FLAG_RETURN_CURRENT_UNIQUEID is appended with either UPD_FMT_FRAME or UPD_FMT_FIELD , the return value of the function wait_video_output_sync would contain the current display frames uniqueid. */ EPOCH_DVB_ASI_INPUT_GET_PACKET_SIZE = 81, /**< use this property to get the size of each asi transport stream packet (whether it is 188 or 204.*/ EPOCH_DVB_ASI_INPUT_PACKET_COUNT = 82, /**< this property would give you the number of packets captured during the last interrupt time frame. For ASI interrupt is generated if hardware captured the requested number of packets or it hit the timeout value */ EPOCH_DVB_ASI_INPUT_LIVE_PACKET_COUNT = 83, /**< this property would give you the number of packets that is being captured during the current interrupt time frame. For ASI interrupt is generated when has hardware captured the requested number of packets specified using EPOCH_DVB_ASI_INPUT_LATENCY_PACKET_COUNT property. */ EPOCH_DVB_ASI_INPUT_AVAIL_PACKETS_IN_FIFO = 84, /**< This property would return the number of ASI packets that has been captured into card memory, that can be retrieved. This property is only valid when the video input channel is being used in FIFO modes. */ EPOCH_ROUTING_SOURCE_VIDEO_MODE = VIDEO_SCALER_MODE, /**< Use this property to change the video mode that scaler should be set to. USe the macro SET_EPOCH_SCALER_MODE when using this property, as this macro would allow you to select which one of the scaler blocks video mode should be updated. 
*/ EPOCH_AVAIL_VIDEO_SCALER_COUNT = 85, /**< DEPRECATED */ EPOCH_ENUM_AVAIL_VIDEO_SCALERS_ID = 86, /**< DEPRECATED */ EPOCH_ALLOCATE_VIDEO_SCALER = 87, /**< DEPRECATED */ EPOCH_RELEASE_VIDEO_SCALER = 88, /**< DEPRECATED */ EPOCH_DMA_CARDMEMORY_PITCH = 89, /**< DEPRECATED */ EPOCH_OUTPUT_CHANNEL_AV_OFFSET = 90, /**< DEPRECATED */ EPOCH_SCALER_CHANNEL_MUX_MODE = 91, /**< DEPRECATED */ EPOCH_INPUT_CHANNEL_AV_OFFSET = 92, /**< DEPRECATED */ EPOCH_AUDIOOUTPUT_MANUAL_UCZV_GENERATION = 93, /* ASI firmware only */ EPOCH_SAMPLE_RATE_CONVERTER_BYPASS = 94, /** bypasses the sample rate converter for AES audio; only turn on for Dolby-E support * pass in a flag to signal which audio stereo pair should be bypassed: * bit 0: AES channels 0 and 1 * bit 1: AES channels 2 and 3 * bit 2: AES channels 4 and 5 * bit 3: AES channels 6 and 7 * For example: bypass the sample rate converter for channels 0 to 3: flag = 0x3; */ EPOCH_GET_PRODUCT_ID = 95, /* returns the enum for the firmware type EEpochFirmwareProductID */ EPOCH_GENLOCK_IS_LOCKED = 96, EPOCH_DVB_ASI_OUTPUT_PACKET_COUNT = 97, /* ASI firmware only */ EPOCH_DVB_ASI_OUTPUT_BIT_RATE = 98, /* ASI firmware only */ EPOCH_DVB_ASI_DUPLICATE_OUTPUT_A = 99, /* ASI firmware only */ EPOCH_DVB_ASI_DUPLICATE_OUTPUT_B = 100, /* ASI firmware only */ EPOCH_SCALER_HORIZONTAL_FLIP = 101, /* see SideBySide_3D sample application */ EPOCH_CONNECTOR_DIRECTION = 102, /* deprecated / not supported */ EPOCH_AUDIOOUTPUT_VALIDITY_BITS = 103, /* ASI firmware only */ EPOCH_SIZEOF_DRIVER_ALLOCATED_MEMORY = 104, /**< DEPRECATED */ INVALID_VIDEO_MODE_FLAG = 105, /* returns the enum for VID_FMT_INVALID that this SDK/Driver was compiled with; it changed between 5.9.x.x and 5.10.x.x driver branch and has to be handled differently for each driver if the application wants to use the VID_FMT_INVALID flag and support both driver branches */ EPOCH_VIDEO_INPUT_VPID = 106, /* returns the VPID for the current video input signal. 
Input value is of type EBlueConnectorIdentifier */ EPOCH_LOW_LATENCY_DMA = 107, /**< DEPRECATED; use new feature EPOCH_SUBFIELD_INPUT_INTERRUPTS instead */ EPOCH_VIDEO_INPUT_RGB_DATA_RANGE = 108, EPOCH_DVB_ASI_OUTPUT_PACKET_SIZE = 109, /* firmware supports either 188 or 204 bytes per ASI packet; set to either enum_blue_dvb_asi_packet_size_188_bytes or enum_blue_dvb_asi_packet_size_204_bytes */ EPOCH_SUBFIELD_INPUT_INTERRUPTS = 110, /* similar to the EPOCH_LOW_LATENCY_DMA card feature, but this doesn't influence the DMA; it simply adds interrupts between the frame/field interrupts that trigger when a corresponding video chunk has been captured required minimum driver: 5.10.1.8*/ EPOCH_AUDIOOUTPUT_METADATA_SETTINGS = 111, /* Use the EAudioMetaDataSettings enumerator to change the audio output metadata settings */ EPOCH_HD_SDI_TRANSPORT = 112, /* output only: available modes are defined in the enum EHdSdiTransport; for inputs see EPOCH_HD_SDI_TRANSPORT_INPUT */ CARD_FEATURE_STREAM_INFO = 113, /* only supported from driver 5.10.2.x; info on how many in/out SDI/ASI streams are supported */ CARD_FEATURE_CONNECTOR_INFO = 114, /* only supported from driver 5.10.2.x; info on which connectors are supported: SDI in/out, AES, RS422, LTC, GPIO */ EPOCH_HANC_INPUT_FLAGS = 115, /* this property can be queried to test flags being set in the HANC space (e.g. 
HANC_FLAGS_IS_ARRI_RECORD_FLAG_SET) */ EPOCH_INPUT_VITC = 116, /* this property retrieves the current input VITC timecode; set .vt = VT_UI8 as this is a 64bit value; */ EPOCH_RAW_VIDEO_INPUT_TYPE = 117, /* specifies if the raw/bayer input is ARRI 10/12 bit or Weisscam; set to 0 to revert back to normal SDI mode */ EPOCH_PCIE_CONFIG_INFO = 118, /* only supported from driver 5.10.2.x; provides info on PCIE maximum payload size and maximum read request siize */ EPOCH_4K_QUADLINK_CHANNEL = 119, /* use this property to set the 4K quadrant number for the current channel in 4K output mode; quadrant numbers are 1 - 4 */ EXTERNAL_LTC_SOURCE_SELECTION = 120, /* use the enum EBlueExternalLtcSource to set the input source for the external LTC */ EPOCH_HD_SDI_TRANSPORT_INPUT = 121, /* can only be queried; return values are defined in the enum EHdSdiTransport. Input value is of type EBlueConnectorIdentifier */ CARD_CONNECTED_VIA_TB = 122, /* MAC only: use this to check if the Card is connected via ThunderBolt */ INTERLOCK_REFERENCE = 123, /* this feature is only supported on Epoch Neutron cards; check application note AN004_Genlock.pdf for more information */ VIDEO_ONBOARD_KEYER = 124, /* this property is currently only supported by Epoch Neutron cards; use the VIDEO_ONBOARD_KEYER_GET_STATUS macros for this property*/ EPOCH_OUTPUT_VITC_MANUAL_CONTROL = 125, /* Epoch Neutron only: this property enables the feature to allow output of a custom VITC timecode on a field by field basis (low frame rates only); for high frame rates the conventional way (using the HANC buffer) must be used */ EPOCH_OUTPUT_VITC = 126, /* Epoch Neutron only: this property sets the custom VITC timecode (64 bit value) on a field by field basis (for low frame rates only); set .vt = VT_UI8 as this is a 64bit value; */ EPOCH_INPUT_VITC_SOURCE = 127, /* this property selects the source for the card property EPOCH_INPUT_VITC for SD video modes; in SD video modes the VITC source can be either from VBI space or 
from RP188 packets; the default (value = 0) is set to RP188; setting this to 1 will select VBI space as the source for EPOCH_INPUT_VITC; set .vt = VT_UI4 */ TWO_SAMPLE_INTERLEAVE_OUTPUT = 128, /* enables two sample interleave mode for 4K video modes using two output channels; options are: 0 = turn feature off, 1 = turn feature on */ TWO_SAMPLE_INTERLEAVE_INPUT = 129, /* enables two sample interleave mode for 4K video modes using two input channels; options are: 0 = turn feature off, 1 = turn feature on */ BTC_TIMER = 130, /* BTC: Coordinated Bluefish Time; this timer has microsecond granularity and is started/reset when the driver starts; set .vt = VT_UI8 as this is a 64bit value; */ BFLOCK_SIGNAL_ENABLE = 131, /* S+ cards can generate a proprietary lock signal on the S+ connector (connector 0); options are 0 = turn off signal (connector 0 will be copy of SDI A output); 1 = turn on lock signal output; set .vt = VT_UI4 */ AES_OUTPUT_ROUTING = 132, /* set the stream source and source channels for the AES output; .vt = VT_UI4 */ MUTE_AES_OUTPUT_CHANNEL = 133, /* mute any of the AES output channels (0..7); to enable/disable mute use the SET_MUTE_AES_OUTPUT_CHANNEL macro; to query an AES output channels mute status set VT.ulVal to the AES output channel number (0..7) then call QueryCardProperty(); the return value will be 1 = muted or 0 = enabled; set .vt to VT_UI4 */ FORCE_SD_VBI_OUTPUT_BUFFER_TO_V210 = 134, /* this card property forces the VBI buffer to V210 memory format in SD video modes (default for HD video modes) so that it can handle 10 bit VANC packets. set 1 = force to V210 or 0 = follow video memory format (default); set .vt to VT_UI4; when changing this property the video output mode and video output engine need to be set again manually! */ EMBEDDED_AUDIO_INPUT_INFO = 135, /* this card property returns info on how which embedded audio input channels are available (channel mask for channels 1 - 16 in lower 16 bits). 
it also returns the data payload for each channel (1 - 16) in the upper 16 bits (0 = embedded audio, 1 = other (e.g. Dolby Digital)) */ OVERRIDE_OUTPUT_VPID_DEFAULT = 136, /* this card property should only be used for debugging purposes if the default VPID needs to be changed; it will override the output VPID that is set up by default depending on the video mode, pixel format and signal format type. this property takes a 64 bit value (set .vt to VT_UI8) and is defined as follows (there are helper MACROS defined in the MACROS section at the end of this header file): 7...0: Byte 1 of VPID 15...8: Byte 2 of VPID 23..16: Byte 3 of VPID 31..24: Byte 4 of VPID 47..32: SDI output connector (EBlueConnectorIdentifier) 62..48: reserved (set to 0) 63: enable/disable VPID output (0 = disabled, 1 = enabled) */ FORCE_SD_VBI_INPUT_BUFFER_TO_V210 = 137, /* this card property forces the VBI input buffer to V210 memory format in SD video modes (default for HD video modes) so that it can handle 10 bit VANC packets. set 1 = force to V210 or 0 = follow video memory format (default); set .vt to VT_UI4; when changing this property the video input engine needs to be set again manually! */ BYPASS_RELAY_ENABLE = 138, /* Enable/Disable the bypass relays; use MACROs BLUE_SET_ENABLE_BYPASS_RELAY(), BLUE_SET_DISABLE_BYPASS_RELAY() and BLUE_GET_BYPASS_RELAY_SETTING() to initialise the value (.vt = VT_UI4) */ VIDEO_MODE_EXT_OUTPUT = 139, /* Query or set the video mode for the output channel using the _EVideoModeExt enums. */ VIDEO_MODE_EXT_INPUT = 140, /* Query the current video mode on the input channel using the _EVideoModeExt enums. In case of no valid video mode present the input FIFOs can be set up for an expected video mode by setting this card property*/ IS_VIDEO_MODE_EXT_SUPPORTED_OUTPUT = 141, IS_VIDEO_MODE_EXT_SUPPORTED_INPUT = 142, MISC_CONNECTOR_OUT = 143, /* KRONOS and above: 32 bit value, query and set. 
Use MACROS MISC_CONNECTOR_OUT_***; supported connectors to set/query: BLUE_CONNECTOR_REF_OUT, BLUE_CONNECTOR_INTERLOCK_OUT */ MISC_CONNECTOR_IN = 144, /* KRONOS and above: 32 bit value, query only. Use MACROS MISC_CONNECTOR_IN_***; supported connectors to query: BLUE_CONNECTOR_REF_IN, BLUE_CONNECTOR_INTERLOCK_IN */ SUB_IMAGE_MAPPING = 145, /* Valid on input and output, but only has an effect on 4k/UHD and larger buffers. Use ESubImageMapping enum to specify value; default value is IMAGE_MAPPING_TWO_SAMPLE_INTERLEAVE. set to IMAGE_MAPPING_SQUARE_DIVISION to specify that the 4K/UHD buffers handled by this channel are in SD (Square Division) mode */ VIDEO_CARDPROPERTY_INVALID = 1000 }EBlueCardProperty; typedef enum _ESubImageMapping { IMAGE_MAPPING_INVALID = 0, IMAGE_MAPPING_TWO_SAMPLE_INTERLEAVE = 1, IMAGE_MAPPING_SQUARE_DIVISION = 2, }ESubImageMapping; typedef enum _EHdSdiTransport { HD_SDI_TRANSPORT_INVALID = 0, /* Invalid input signal */ HD_SDI_TRANSPORT_1_5G = 0x1, /* HD as 1.5G */ HD_SDI_TRANSPORT_3G_LEVEL_A = 0x2, /* 3G Level A */ HD_SDI_TRANSPORT_3G_LEVEL_B = 0x3, /* 3G Level B */ }EHdSdiTransport; typedef enum _EAudioMetaDataSettings { AUDIO_METADATA_KEEP_ALIVE = 0x1 /* When setting this bit for the EPOCH_AUDIOOUTPUT_METADATA_SETTINGS card property the audio meta data (like RP188 timecode will still be played out even after stopping audio playback; this is a static settings and only needs to be set once; it is channel based and can be changed for all output channels independently */ }EAudioMetaDataSettings; /* This enumerator is still supported, but deprecated */ /* Please use _EVideoModeExt instead and card properties VIDEO_MODE_EXT_OUTPUT and VIDEO_MODE_EXT_INPUT */ typedef enum _EVideoMode { VID_FMT_PAL = 0, VID_FMT_NTSC = 1, VID_FMT_576I_5000 = VID_FMT_PAL, VID_FMT_486I_5994 = VID_FMT_NTSC, VID_FMT_720P_5994 = 2, VID_FMT_720P_6000 = 3, VID_FMT_1080PSF_2397 = 4, VID_FMT_1080PSF_2400 = 5, VID_FMT_1080P_2397 = 6, VID_FMT_1080P_2400 = 7, VID_FMT_1080I_5000 = 
8, VID_FMT_1080I_5994 = 9, VID_FMT_1080I_6000 = 10, VID_FMT_1080P_2500 = 11, VID_FMT_1080P_2997 = 12, VID_FMT_1080P_3000 = 13, VID_FMT_HSDL_1498 = 14, VID_FMT_HSDL_1500 = 15, VID_FMT_720P_5000 = 16, VID_FMT_720P_2398 = 17, VID_FMT_720P_2400 = 18, VID_FMT_2048_1080PSF_2397 = 19, VID_FMT_2048_1080PSF_2400 = 20, VID_FMT_2048_1080P_2397 = 21, VID_FMT_2048_1080P_2400 = 22, VID_FMT_1080PSF_2500 = 23, VID_FMT_1080PSF_2997 = 24, VID_FMT_1080PSF_3000 = 25, VID_FMT_1080P_5000 = 26, VID_FMT_1080P_5994 = 27, VID_FMT_1080P_6000 = 28, VID_FMT_720P_2500 = 29, VID_FMT_720P_2997 = 30, VID_FMT_720P_3000 = 31, VID_FMT_DVB_ASI = 32, VID_FMT_2048_1080PSF_2500 = 33, VID_FMT_2048_1080PSF_2997 = 34, VID_FMT_2048_1080PSF_3000 = 35, VID_FMT_2048_1080P_2500 = 36, VID_FMT_2048_1080P_2997 = 37, VID_FMT_2048_1080P_3000 = 38, VID_FMT_2048_1080P_5000 = 39, VID_FMT_2048_1080P_5994 = 40, VID_FMT_2048_1080P_6000 = 41, VID_FMT_1080P_4800 = 42, VID_FMT_2048_1080P_4800 = 43, VID_FMT_1080P_4795 = 44, VID_FMT_2048_1080P_4795 = 45, VID_FMT_INVALID = 46 } EVideoMode; typedef enum _EVideoModeExt { VID_FMT_EXT_INVALID = 1024, VID_FMT_EXT_CUSTOM = 1025, VID_FMT_EXT_DVB_ASI = 1026, VID_FMT_EXT_576I_5000 = 1027, VID_FMT_EXT_486I_5994 = 1028, VID_FMT_EXT_720P_2398 = 1029, VID_FMT_EXT_720P_2400 = 1030, VID_FMT_EXT_720P_2500 = 1031, VID_FMT_EXT_720P_2997 = 1032, VID_FMT_EXT_720P_3000 = 1033, VID_FMT_EXT_720P_4795 = 1034, VID_FMT_EXT_720P_4800 = 1035, VID_FMT_EXT_720P_5000 = 1036, VID_FMT_EXT_720P_5994 = 1037, VID_FMT_EXT_720P_6000 = 1038, VID_FMT_EXT_1080I_5000 = 1039, VID_FMT_EXT_1080I_5994 = 1040, VID_FMT_EXT_1080I_6000 = 1041, VID_FMT_EXT_1080PSF_2398 = 1042, VID_FMT_EXT_1080PSF_2400 = 1043, VID_FMT_EXT_1080PSF_2500 = 1044, VID_FMT_EXT_1080PSF_2997 = 1045, VID_FMT_EXT_1080PSF_3000 = 1046, VID_FMT_EXT_1080P_2398 = 1047, VID_FMT_EXT_1080P_2400 = 1048, VID_FMT_EXT_1080P_2500 = 1049, VID_FMT_EXT_1080P_2997 = 1050, VID_FMT_EXT_1080P_3000 = 1051, VID_FMT_EXT_1080P_4795 = 1052, VID_FMT_EXT_1080P_4800 = 1053, 
VID_FMT_EXT_1080P_5000 = 1054, VID_FMT_EXT_1080P_5994 = 1055, VID_FMT_EXT_1080P_6000 = 1056, VID_FMT_EXT_2K_1080PSF_2398 = 1057, VID_FMT_EXT_2K_1080PSF_2400 = 1058, VID_FMT_EXT_2K_1080PSF_2500 = 1059, VID_FMT_EXT_2K_1080PSF_2997 = 1060, VID_FMT_EXT_2K_1080PSF_3000 = 1061, VID_FMT_EXT_2K_1080P_2398 = 1062, VID_FMT_EXT_2K_1080P_2400 = 1063, VID_FMT_EXT_2K_1080P_2500 = 1064, VID_FMT_EXT_2K_1080P_2997 = 1065, VID_FMT_EXT_2K_1080P_3000 = 1066, VID_FMT_EXT_2K_1080P_4795 = 1067, VID_FMT_EXT_2K_1080P_4800 = 1068, VID_FMT_EXT_2K_1080P_5000 = 1069, VID_FMT_EXT_2K_1080P_5994 = 1070, VID_FMT_EXT_2K_1080P_6000 = 1071, VID_FMT_EXT_2K_1556I_1499 = 1072, VID_FMT_EXT_2K_1556I_1500 = 1073, VID_FMT_EXT_2160P_2398 = 1074, VID_FMT_EXT_2160P_2400 = 1075, VID_FMT_EXT_2160P_2500 = 1076, VID_FMT_EXT_2160P_2997 = 1077, VID_FMT_EXT_2160P_3000 = 1078, VID_FMT_EXT_2160P_4795 = 1079, VID_FMT_EXT_2160P_4800 = 1080, VID_FMT_EXT_2160P_5000 = 1081, VID_FMT_EXT_2160P_5994 = 1082, VID_FMT_EXT_2160P_6000 = 1083, VID_FMT_EXT_4K_2160P_2398 = 1084, VID_FMT_EXT_4K_2160P_2400 = 1085, VID_FMT_EXT_4K_2160P_2500 = 1086, VID_FMT_EXT_4K_2160P_2997 = 1087, VID_FMT_EXT_4K_2160P_3000 = 1088, VID_FMT_EXT_4K_2160P_4795 = 1089, VID_FMT_EXT_4K_2160P_4800 = 1090, VID_FMT_EXT_4K_2160P_5000 = 1091, VID_FMT_EXT_4K_2160P_5994 = 1092, VID_FMT_EXT_4K_2160P_6000 = 1093, VID_FMT_EXT_4320P_2398 = 1094, VID_FMT_EXT_4320P_2400 = 1095, VID_FMT_EXT_4320P_2500 = 1096, VID_FMT_EXT_4320P_2997 = 1097, VID_FMT_EXT_4320P_3000 = 1098, VID_FMT_EXT_4320P_4795 = 1099, VID_FMT_EXT_4320P_4800 = 1100, VID_FMT_EXT_4320P_5000 = 1101, VID_FMT_EXT_4320P_5994 = 1102, VID_FMT_EXT_4320P_6000 = 1103, VID_FMT_EXT_8K_4320P_2398 = 1104, VID_FMT_EXT_8K_4320P_2400 = 1105, VID_FMT_EXT_8K_4320P_2500 = 1106, VID_FMT_EXT_8K_4320P_2997 = 1107, VID_FMT_EXT_8K_4320P_3000 = 1108, VID_FMT_EXT_8K_4320P_4795 = 1109, VID_FMT_EXT_8K_4320P_4800 = 1110, VID_FMT_EXT_8K_4320P_5000 = 1111, VID_FMT_EXT_8K_4320P_5994 = 1112, VID_FMT_EXT_8K_4320P_6000 = 1113, 
VID_FMT_EXT_LAST_ENTRY_V1 = VID_FMT_EXT_8K_4320P_6000, /* aliases */ VID_FMT_EXT_PAL = VID_FMT_EXT_576I_5000, VID_FMT_EXT_NTSC = VID_FMT_EXT_486I_5994, VID_FMT_EXT_HSDL_1499 = VID_FMT_EXT_2K_1556I_1499, VID_FMT_EXT_HSDL_1500 = VID_FMT_EXT_2K_1556I_1500, }EVideoModeExt; typedef enum _EBFLockSignalType { BFLOCK_SIGNAL_UNKNOWN = 0x1000, BFLOCK_SIGNAL_2398 = 0x1001, BFLOCK_SIGNAL_2400 = 0x1002, BFLOCK_SIGNAL_2500 = 0x1003, BFLOCK_SIGNAL_2997 = 0x1004, BFLOCK_SIGNAL_3000 = 0x1005, BFLOCK_SIGNAL_4795 = 0x1006, BFLOCK_SIGNAL_4800 = 0x1007, BFLOCK_SIGNAL_5000 = 0x1008, BFLOCK_SIGNAL_5994 = 0x1009, BFLOCK_SIGNAL_6000 = 0x100A, }EBFLockSignalType; typedef enum _EMemoryFormat { MEM_FMT_ARGB = 0, MEM_FMT_BV10 = 1, /* not supported */ MEM_FMT_BV8 = 2, MEM_FMT_YUVS = MEM_FMT_BV8, MEM_FMT_V210 = 3, MEM_FMT_RGBA = 4, MEM_FMT_CINEON_LITTLE_ENDIAN = 5, MEM_FMT_ARGB_PC = 6, MEM_FMT_BGRA = MEM_FMT_ARGB_PC, MEM_FMT_CINEON = 7, MEM_FMT_2VUY = 8, MEM_FMT_BGR = 9, MEM_FMT_BGR_16_16_16 = 10, MEM_FMT_BGR_48 = MEM_FMT_BGR_16_16_16, MEM_FMT_BGRA_16_16_16_16 = 11, MEM_FMT_BGRA_64 = MEM_FMT_BGRA_16_16_16_16, MEM_FMT_VUYA_4444 = 12, MEM_FMT_V216 = 13, MEM_FMT_Y210 = 14, MEM_FMT_Y216 = 15, MEM_FMT_RGB = 16, MEM_FMT_YUV_ALPHA = 17, MEM_FMT_RGB_16_16_16 = 18, MEM_FMT_RGB_48 = MEM_FMT_RGB_16_16_16, MEM_FMT_RGBA_16_16_16_16 = 19, MEM_FMT_RGBA_64 = MEM_FMT_RGBA_16_16_16_16, MEM_FMT_YCA8 = 20, MEM_FMT_CYA8 = 21, MEM_FMT_YUV_ALPHA_10 = 22, MEM_FMT_YCA10 = 23, MEM_FMT_CYA10 = 24, MEM_FMT_YAC10 = 25, MEM_FMT_CAY10 = 26, MEM_FMT_INVALID = 27 } EMemoryFormat; typedef enum _EUpdateMethod { UPD_FMT_FIELD = 0, UPD_FMT_FRAME = 1, UPD_FMT_FRAME_DISPLAY_FIELD1 = 2, UPD_FMT_FRAME_DISPLAY_FIELD2 = 3, UPD_FMT_INVALID = 4, UPD_FMT_FLAG_RETURN_CURRENT_UNIQUEID = 0x80000000, /* if this flag is used on epoch cards, function would return the unique id of the current frame as the return value. 
*/ } EUpdateMethod; typedef enum _EResoFormat { RES_FMT_NORMAL = 0, RES_FMT_HALF, RES_FMT_INVALID } EResoFormat; typedef enum _EEngineMode { VIDEO_ENGINE_FRAMESTORE = 0, /* Low latency mode for capture and playback; buffer cycling must be done by the user */ VIDEO_ENGINE_PLAYBACK = 1, /* deprecated; use VIDEO_ENGINE_DUPLEX instead */ VIDEO_ENGINE_CAPTURE = 2, /* FIFO mode for capturing Video/VANC/HANC and have driver do automatic playthrough on the output. output channel cannot be used for anything else */ VIDEO_ENGINE_PAGEFLIP = 3, /* deprecated/not supported */ VIDEO_ENGINE_DUPLEX = 4, /* FIFO mode for either capture or playback; capture and playback side can be used independently at the same time*/ VIDEO_ENGINE_INVALID = 5 } EEngineMode; typedef enum { BLUE_FIFO_CLOSED = 0, /**< Fifo has not been initialized*/ BLUE_FIFO_STARTING = 1, /**< Fifo is starting */ BLUE_FIFO_RUNNING = 2, /**< Fifo is running */ BLUE_FIFO_STOPPING = 3, /**< Fifo is in the process of stopping */ BLUE_FIFO_PASSIVE = 5, /**< Fifo is currently stopped or not active*/ BLUE_FIFO_STATUS_INVALID = 10 }BlueVideoFifoStatus; typedef enum { BLUE_FIFO_NULL_ATTRIBUTE = 0x0, BLUE_FIFO_ECHOPORT_ENABLED = 0x1, BLUE_FIFO_STEPMODE = 0x2, BLUE_FIFO_LOOPMODE = 0x4 }BlueVideoFifo_Attributes; typedef enum _ECardType { CRD_BLUEDEEP_LT = 0, /* not supported */ CRD_BLUEDEEP_SD = 1, /* not supported */ CRD_BLUEDEEP_AV = 2, /* not supported */ CRD_BLUEDEEP_IO = 3, /* not supported */ CRD_BLUEWILD_AV = 4, /* not supported */ CRD_IRIDIUM_HD = 5, /* not supported */ CRD_BLUEWILD_RT = 6, /* not supported */ CRD_BLUEWILD_HD = 7, /* not supported */ CRD_REDDEVIL = 8, /* not supported */ CRD_BLUEDEEP_HD = 9, /* not supported, but value used for CRD_BLUE_EPOCH_2K */ CRD_BLUEDEEP_HDS = 10, /* not supported */ CRD_BLUE_ENVY = 11, /* not supported */ CRD_BLUE_PRIDE = 12, /* not supported */ CRD_BLUE_GREED = 13, /* not supported */ CRD_BLUE_INGEST = 14, /* not supported */ CRD_BLUE_SD_DUALLINK = 15, /* not supported */ 
CRD_BLUE_CATALYST = 16, /* not supported */ CRD_BLUE_SD_DUALLINK_PRO = 17, /* not supported */ CRD_BLUE_SD_INGEST_PRO = 18, /* not supported */ CRD_BLUE_SD_DEEPBLUE_LITE_PRO = 19, /* not supported */ CRD_BLUE_SD_SINGLELINK_PRO = 20, /* not supported */ CRD_BLUE_SD_IRIDIUM_AV_PRO = 21, /* not supported */ CRD_BLUE_SD_FIDELITY = 22, /* not supported */ CRD_BLUE_SD_FOCUS = 23, /* not supported */ CRD_BLUE_SD_PRIME = 24, /* not supported */ CRD_BLUE_EPOCH_2K = 9, CRD_BLUE_EPOCH_2K_HORIZON = CRD_BLUE_EPOCH_2K, CRD_BLUE_EPOCH_2K_CORE = 25, CRD_BLUE_EPOCH_2K_ULTRA = 26, CRD_BLUE_EPOCH_HORIZON = 27, CRD_BLUE_EPOCH_CORE = 28, CRD_BLUE_EPOCH_ULTRA = 29, CRD_BLUE_CREATE_HD = 30, CRD_BLUE_CREATE_2K = 31, CRD_BLUE_CREATE_2K_ULTRA = 32, CRD_BLUE_CREATE_3D = CRD_BLUE_CREATE_2K, CRD_BLUE_CREATE_3D_ULTRA = CRD_BLUE_CREATE_2K_ULTRA, CRD_BLUE_SUPER_NOVA = 33, CRD_BLUE_SUPER_NOVA_S_PLUS = 34, CRD_BLUE_SUPER_NOVA_MICRO = 35, CRD_BLUE_NEUTRON = CRD_BLUE_SUPER_NOVA_MICRO, CRD_BLUE_EPOCH_CG = 36, CRD_BLUE_KRONOS_ELEKTRON = 37, CRD_BLUE_KRONOS_OPTIKOS = 38, CRD_BLUE_KRONOS_K8 = 39, CRD_INVALID = 500 } ECardType; typedef enum _EHDCardSubType { CRD_HD_FURY = 1, /* not supported */ CRD_HD_VENGENCE = 2, /* not supported */ CRD_HD_IRIDIUM_XP = 3, /* not supported */ CRD_HD_IRIDIUM = 4, /* not supported */ CRD_HD_LUST = 5, /* not supported */ CRD_HD_INVALID /* not supported */ }EHDCardSubType; enum EEpochFirmwareProductID { ORAC_FILMPOST_FIRMWARE_PRODUCTID = (0x01), /* Epoch (2K) Horizon/Core/Ultra, Create/Create3D/Create3D Ultra */ ORAC_BROADCAST_FIRMWARE_PRODUCTID = (0x02), /* Epoch (2K) Horizon/Core/Ultra, Create/Create3D/Create3D Ultra */ ORAC_ASI_FIRMWARE_PRODUCTID = (0x03), /* Epoch (2K) Horizon/Core/Ultra */ ORAC_4SDIINPUT_FIRMWARE_PRODUCTID = (0x04), /* Epoch Supernova/Supernova S+ */ ORAC_4SDIOUTPUT_FIRMWARE_PRODUCTID = (0x05), /* Epoch Supernova/Supernova S+ */ ORAC_2SDIINPUT_2SDIOUTPUT_FIRMWARE_PRODUCTID = (0x06), /* Epoch Supernova/Supernova S+ */ 
ORAC_1SDIINPUT_3SDIOUTPUT_FIRMWARE_PRODUCTID = (0x08), /* Epoch Supernova/Supernova S+, deprecated */ ORAC_INPUT_1SDI_1CHANNEL_OUTPUT_4SDI_3CHANNEL_FIRMWARE_PRODUCTID = (0x09), /* Epoch Supernova/Supernova S+ */ ORAC_INPUT_2SDI_2CHANNEL_OUTPUT_3SDI_2CHANNEL_FIRMWARE_PRODUCTID = (0x0A), /* Epoch Supernova/Supernova S+, deprecated */ ORAC_INPUT_3SDI_3CHANNEL_OUTPUT_1SDI_1CHANNEL_FIRMWARE_PRODUCTID = (0x0B), /* Epoch Supernova/Supernova S+ */ ORAC_BNC_ASI_FIRMWARE_PRODUCTID = (0x0C), /* Epoch Supernova/Supernova S+ */ ORAC_NEUTRON_2_IN_0_OUT_FIRMWARE_PRODUCTID = (0x0D), /* Epoch Neutron */ ORAC_NEUTRON_0_IN_2_OUT_FIRMWARE_PRODUCTID = (0x0E), /* Epoch Neutron */ ORAC_NEUTRON_1_IN_1_OUT_FIRMWARE_PRODUCTID = (0x0F), /* Epoch Neutron */ ORAC_NEUTRON_2_IN_0_OUT_SCALER_FIRMWARE_PRODUCTID = (0x10), /* Epoch Neutron */ ORAC_NEUTRON_0_IN_2_OUT_SCALER_FIRMWARE_PRODUCTID = (0x11), /* Epoch Neutron */ ORAC_NEUTRON_1_IN_1_OUT_SCALER_FIRMWARE_PRODUCTID = (0x12), /* Epoch Neutron */ ORAC_NEUTRON_ASI_FIRMWARE_PRODUCTID = (0x13), /* Epoch Neutron */ ORAC_INPUT_1SDI_1CHANNEL_OUTPUT_3SDI_3CHANNEL_FIRMWARE_PRODUCTID = (0x14), /* Epoch Supernova/Supernova S+ */ ORAC_NEUTRON_1_IN_2_OUT_FIRMWARE_PRODUCTID = (0x15), /* Epoch Neutron */ ORAC_NEUTRON_3_IN_0_OUT_FIRMWARE_PRODUCTID = (0x16), /* Epoch Neutron */ ORAC_NEUTRON_0_IN_3_OUT_FIRMWARE_PRODUCTID = (0x17), /* Epoch Neutron */ ORAC_INPUT_1SDI_1CHANNEL_OUTPUT_3SDI_2CHANNEL_FIRMWARE_PRODUCTID = (0x18), /* Epoch Supernova/Supernova S+ */ ORAC_NEUTRON_2_IN_1_OUT_FIRMWARE_PRODUCTID = (0x19), /* Epoch Neutron */ ZEUS_4_IN_4_OUT_FIRMWARE_PRODUCTID = (0x1A), /* Kronos Elektron/Optikos */ ZEUS_8_IN_FIRMWARE_PRODUCTID = (0x1B), /* Kronos Elektron/Optikos */ ZEUS_8_OUT_FIRMWARE_PRODUCTID = (0x1C), /* Kronos Elektron/Optikos */ ZEUS_RECOVERY_FIRMWARE_PRODUCTID = (0x1D), /* Kronos Elektron/Optikos */ }; typedef enum _EBlueLUTType { BLUE_MAIN_LUT_B_Pb = 0, BLUE_MAIN_LUT_G_Y = 1, BLUE_MAIN_LUT_R_Pr = 2, BLUE_AUX_LUT_B_Pb = 3, BLUE_AUX_LUT_G_Y = 4, 
BLUE_AUX_LUT_R_Pr = 5, }EBlueLUTType; typedef enum _EBlueConnectorIdentifier { BLUE_CONNECTOR_INVALID = -1, BLUE_CONNECTOR_BNC_A = 0, BLUE_CONNECTOR_BNC_B = 1, BLUE_CONNECTOR_BNC_C = 2, BLUE_CONNECTOR_BNC_D = 3, BLUE_CONNECTOR_BNC_E = 4, BLUE_CONNECTOR_BNC_F = 5, BLUE_CONNECTOR_GENLOCK = 6, BLUE_CONNECTOR_REF_IN = BLUE_CONNECTOR_GENLOCK, BLUE_CONNECTOR_REF_OUT = 7, BLUE_CONNECTOR_INTERLOCK_IN = 8, BLUE_CONNECTOR_INTERLOCK_OUT = 9, BLUE_CONNECTOR_ANALOG_VIDEO_1 = 100, BLUE_CONNECTOR_ANALOG_VIDEO_2 = 101, BLUE_CONNECTOR_ANALOG_VIDEO_3 = 102, BLUE_CONNECTOR_ANALOG_VIDEO_4 = 103, BLUE_CONNECTOR_ANALOG_VIDEO_5 = 104, BLUE_CONNECTOR_ANALOG_VIDEO_6 = 105, BLUE_CONNECTOR_DVID_1 = 200, BLUE_CONNECTOR_DVID_2 = 201, BLUE_CONNECTOR_DVID_3 = 202, BLUE_CONNECTOR_DVID_4 = 203, BLUE_CONNECTOR_DVID_5 = 204, BLUE_CONNECTOR_SDI_OUTPUT_A = BLUE_CONNECTOR_DVID_1, BLUE_CONNECTOR_SDI_OUTPUT_B = BLUE_CONNECTOR_DVID_2, BLUE_CONNECTOR_SDI_OUTPUT_C = 205, BLUE_CONNECTOR_SDI_OUTPUT_D = 206, BLUE_CONNECTOR_SDI_OUTPUT_E = 207, BLUE_CONNECTOR_SDI_OUTPUT_F = 208, BLUE_CONNECTOR_SDI_OUTPUT_G = 209, BLUE_CONNECTOR_SDI_OUTPUT_H = 210, BLUE_CONNECTOR_SDI_OUTPUT_1 = BLUE_CONNECTOR_SDI_OUTPUT_A, BLUE_CONNECTOR_SDI_OUTPUT_2 = BLUE_CONNECTOR_SDI_OUTPUT_B, BLUE_CONNECTOR_SDI_OUTPUT_3 = BLUE_CONNECTOR_SDI_OUTPUT_C, BLUE_CONNECTOR_SDI_OUTPUT_4 = BLUE_CONNECTOR_SDI_OUTPUT_D, BLUE_CONNECTOR_SDI_OUTPUT_5 = BLUE_CONNECTOR_SDI_OUTPUT_E, BLUE_CONNECTOR_SDI_OUTPUT_6 = BLUE_CONNECTOR_SDI_OUTPUT_F, BLUE_CONNECTOR_SDI_OUTPUT_7 = BLUE_CONNECTOR_SDI_OUTPUT_G, BLUE_CONNECTOR_SDI_OUTPUT_8 = BLUE_CONNECTOR_SDI_OUTPUT_H, BLUE_CONNECTOR_AES = 300, BLUE_CONNECTOR_ANALOG_AUDIO_1 = 301, BLUE_CONNECTOR_ANALOG_AUDIO_2 = 302, BLUE_CONNECTOR_DVID_6 = 303, BLUE_CONNECTOR_DVID_7 = 304, BLUE_CONNECTOR_SDI_INPUT_A = BLUE_CONNECTOR_DVID_3, BLUE_CONNECTOR_SDI_INPUT_B = BLUE_CONNECTOR_DVID_4, BLUE_CONNECTOR_SDI_INPUT_C = BLUE_CONNECTOR_DVID_6, BLUE_CONNECTOR_SDI_INPUT_D = BLUE_CONNECTOR_DVID_7, BLUE_CONNECTOR_SDI_INPUT_E = 305, 
BLUE_CONNECTOR_SDI_INPUT_F = 306, BLUE_CONNECTOR_SDI_INPUT_G = 307, BLUE_CONNECTOR_SDI_INPUT_H = 308, BLUE_CONNECTOR_SDI_INPUT_1 = BLUE_CONNECTOR_SDI_INPUT_A, BLUE_CONNECTOR_SDI_INPUT_2 = BLUE_CONNECTOR_SDI_INPUT_B, BLUE_CONNECTOR_SDI_INPUT_3 = BLUE_CONNECTOR_SDI_INPUT_C, BLUE_CONNECTOR_SDI_INPUT_4 = BLUE_CONNECTOR_SDI_INPUT_D, BLUE_CONNECTOR_SDI_INPUT_5 = BLUE_CONNECTOR_SDI_INPUT_E, BLUE_CONNECTOR_SDI_INPUT_6 = BLUE_CONNECTOR_SDI_INPUT_F, BLUE_CONNECTOR_SDI_INPUT_7 = BLUE_CONNECTOR_SDI_INPUT_G, BLUE_CONNECTOR_SDI_INPUT_8 = BLUE_CONNECTOR_SDI_INPUT_H, }EBlueConnectorIdentifier; typedef enum _EBlueConnectorSignalDirection { BLUE_CONNECTOR_SIGNAL_INVALID = -1, BLUE_CONNECTOR_SIGNAL_INPUT = 0, BLUE_CONNECTOR_SIGNAL_OUTPUT = 1, }EBlueConnectorSignalDirection; typedef enum _EBlueConnectorProperty { BLUE_INVALID_CONNECTOR_PROPERTY = -1, /* signal property */ BLUE_CONNECTOR_PROP_INPUT_SIGNAL = 0, BLUE_CONNECTOR_PROP_OUTPUT_SIGNAL = 1, /* Video output */ BLUE_CONNECTOR_PROP_SDI = 0, BLUE_CONNECTOR_PROP_YUV_Y = 1, BLUE_CONNECTOR_PROP_YUV_U = 2, BLUE_CONNECTOR_PROP_YUV_V = 3, BLUE_CONNECTOR_PROP_RGB_R = 4, BLUE_CONNECTOR_PROP_RGB_G = 5, BLUE_CONNECTOR_PROP_RGB_B = 6, BLUE_CONNECTOR_PROP_CVBS = 7, BLUE_CONNECTOR_PROP_SVIDEO_Y = 8, BLUE_CONNECTOR_PROP_SVIDEO_C = 9, /* Audio output */ BLUE_CONNECTOR_PROP_AUDIO_AES = 0x2000, BLUE_CONNECTOR_PROP_AUDIO_EMBEDDED = 0x2001, BLUE_CONNECTOR_PROP_AUDIO_ANALOG = 0x2002, BLUE_CONNECTOR_PROP_SINGLE_LINK = 0x3000, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1 = 0x3001, BLUE_CONNECTOR_PROP_DUALLINK_LINK_2 = 0x3002, BLUE_CONNECTOR_PROP_DUALLINK_LINK = 0x3003, BLUE_CONNECTOR_PROP_STEREO_MODE_SIDE_BY_SIDE = 0x3004, BLUE_CONNECTOR_PROP_STEREO_MODE_TOP_DOWN = 0x3005, BLUE_CONNECTOR_PROP_STEREO_MODE_LINE_BY_LINE = 0x3006, }EBlueConnectorProperty; typedef enum { BLUE_AUDIO_AES = 0, /** 8 channels of AES */ BLUE_AUDIO_ANALOG = 1, /** 2 channels of analog audio */ BLUE_AUDIO_SDIA = 2, /** deprecated, do not use */ BLUE_AUDIO_EMBEDDED = BLUE_AUDIO_SDIA, /** use 
BLUE_AUDIO_EMBEDDED for any embedded audio stream; the stream is associated with the SDK object (BlueVelvet4/BlueVelvetC) */ BLUE_AUDIO_SDIB = 3, /** deprecated, do not use */ BLUE_AUDIO_AES_PAIR0 = 4, /** deprecated, do not use */ BLUE_AUDIO_AES_PAIR1 = 5, /** deprecated, do not use */ BLUE_AUDIO_AES_PAIR2 = 6, /** deprecated, do not use */ BLUE_AUDIO_AES_PAIR3 = 7, /** deprecated, do not use */ BLUE_AUDIO_SDIC = 8, /** deprecated, do not use */ BLUE_AUDIO_SDID = 9, /** deprecated, do not use */ BLUE_AUDIO_SDIE = 10, /** deprecated, do not use */ BLUE_AUDIO_SDIF = 11, /** deprecated, do not use */ BLUE_AUDIO_SDIG = 12, /** deprecated, do not use */ BLUE_AUDIO_SDIH = 13, /** deprecated, do not use */ BLUE_AUDIO_INVALID = 100 } Blue_Audio_Connector_Type; typedef enum _EAudioRate { AUDIO_SAMPLE_RATE_48K = 48000, AUDIO_SAMPLE_RATE_96K = 96000, AUDIO_SAMPLE_RATE_UNKNOWN = -1 } EAudioRate; typedef enum _EDMADataType { BLUE_DATA_FRAME = 0, BLUE_DATA_IMAGE = BLUE_DATA_FRAME, BLUE_DATA_FIELD1 = 1, BLUE_DATA_FIELD2 = 2, /** deprecated, do not use */ BLUE_DATA_VBI = 3, BLUE_DATA_HANC = 4, BLUE_DATA_AUDIO_IN = 5, /** deprecated, do not use */ BLUE_DATA_AUDIO_OUT = 6, /** deprecated, do not use */ BLUE_DATA_FRAME_RDOM = 7, /** deprecated, do not use */ BLUE_DATA_FRAME_STEREO_LEFT = BLUE_DATA_FRAME, BLUE_DATA_FRAME_STEREO_RIGHT = 8, BLUE_DMADATA_INVALID = 9, // preferred aliases and new types BLUE_DMA_DATA_TYPE_IMAGE = BLUE_DATA_FRAME, BLUE_DMA_DATA_TYPE_IMAGE_FRAME = BLUE_DMA_DATA_TYPE_IMAGE, BLUE_DMA_DATA_TYPE_IMAGE_FIELD = BLUE_DATA_FIELD1, BLUE_DMA_DATA_TYPE_VANC = BLUE_DATA_VBI, BLUE_DMA_DATA_TYPE_VANC_A = BLUE_DMA_DATA_TYPE_VANC, BLUE_DMA_DATA_TYPE_VANC_B = 10, BLUE_DMA_DATA_TYPE_HANC_AUDIO = BLUE_DATA_HANC, BLUE_DMA_DATA_TYPE_HANC_RAW_A = 11, BLUE_DMA_DATA_TYPE_HANC_RAW_B = 12, }EDMADataType; typedef enum _EDMADirection { DMA_WRITE = 0, DMA_READ = 1, DMA_INVALID = 2 }EDMADirection; typedef enum _EBlueVideoAuxInfoType { BLUE_VIDEO_AUX_MEMFMT_CHANGE = 1, 
BLUE_VIDEO_AUX_UPDATE_LTC = 2, BLUE_VIDEO_AUX_UPDATE_GPIO = 4, BLUE_VIDEO_AUX_VIDFMT_CHANGE = 8, }EBlueVideoAuxInfoType; typedef enum _MatrixColType { COL_BLUE_PB = 0, COL_RED_PR = 1, COL_GREEN_Y = 2, COL_KEY = 3 }MatrixColType; /* This enumerator can be used to set the image orientation of the frame. */ typedef enum _EImageOrientation { ImageOrientation_Normal = 0, /* in this configuration frame is top to bottom and left to right */ ImageOrientation_VerticalFlip = 1, /* in this configuration frame is bottom to top and left to right */ ImageOrientation_Invalid = 2, }EImageOrientation; /* This enumerator defines the reference signal source that can be used with bluefish cards */ typedef enum _EBlueGenlockSource { BlueGenlockBNC = 0x00000000, /** Genlock is used as reference signal source */ BlueSDIBNC = 0x00010000, /** SDI input B is used as reference signal source */ BlueSDI_B_BNC = BlueSDIBNC, BlueSDI_A_BNC = 0x00020000, /** SDI input A is used as reference signal source */ BlueAnalog_BNC = 0x00040000, /** deprecated, not supported */ BlueSoftware = 0x00080000, BlueFreeRunning = BlueSoftware, /** free running, but phase can be software controlled */ BlueGenlockAux = 0x00100000, /** auxiliary genlock connector on Epoch Neutron cards */ BlueInterlock = 0x00200000, /** interlock connector on Epoch Neutron and Kronos cards */ /* aliases and new definitions */ BlueRefSrc_FreeRunning = BlueFreeRunning, BlueRefSrc_RefIn = BlueGenlockBNC, /* genlock signal on ref in BNC connector (Epoch/Kronos) */ BlueRefSrc_RefInAux = BlueGenlockAux, /* genlock signal on ref in AUX BNC connector (Epoch Neutron only) */ BlueRefSrc_InterlockIn = BlueInterlock, /* interlock signal on internal MMCX connector (Epoch/Kronos) */ BlueRefSrc_SdiInput1 = BlueSDI_A_BNC, BlueRefSrc_SdiInput2 = BlueSDI_B_BNC, BlueRefSrc_SdiInput3 = 0x01000000, BlueRefSrc_SdiInput4 = 0x01100000, BlueRefSrc_SdiInput5 = 0x01200000, BlueRefSrc_SdiInput6 = 0x01300000, BlueRefSrc_SdiInput7 = 0x01400000, 
BlueRefSrc_SdiInput8 = 0x01500000, }EBlueGenlockSource; typedef enum _EBlueVideoChannel { BLUE_VIDEOCHANNEL_A = 0, BLUE_VIDEOCHANNEL_B = 1, BLUE_VIDEOCHANNEL_C = 2, BLUE_VIDEOCHANNEL_D = 3, BLUE_VIDEOCHANNEL_E = 4, BLUE_VIDEOCHANNEL_F = 5, BLUE_VIDEOCHANNEL_G = 6, BLUE_VIDEOCHANNEL_H = 7, BLUE_VIDEOCHANNEL_I = 8, BLUE_VIDEOCHANNEL_J = 9, BLUE_VIDEOCHANNEL_K = 10, BLUE_VIDEOCHANNEL_L = 11, BLUE_VIDEOCHANNEL_M = 12, BLUE_VIDEOCHANNEL_N = 13, BLUE_VIDEOCHANNEL_O = 14, BLUE_VIDEOCHANNEL_P = 15, BLUE_VIDEO_OUTPUT_CHANNEL_A = BLUE_VIDEOCHANNEL_A, BLUE_VIDEO_OUTPUT_CHANNEL_B = BLUE_VIDEOCHANNEL_B, BLUE_VIDEO_INPUT_CHANNEL_A = BLUE_VIDEOCHANNEL_C, BLUE_VIDEO_INPUT_CHANNEL_B = BLUE_VIDEOCHANNEL_D, BLUE_VIDEO_INPUT_CHANNEL_C = BLUE_VIDEOCHANNEL_E, BLUE_VIDEO_INPUT_CHANNEL_D = BLUE_VIDEOCHANNEL_F, BLUE_VIDEO_OUTPUT_CHANNEL_C = BLUE_VIDEOCHANNEL_G, BLUE_VIDEO_OUTPUT_CHANNEL_D = BLUE_VIDEOCHANNEL_H, BLUE_VIDEO_OUTPUT_CHANNEL_E = BLUE_VIDEOCHANNEL_I, BLUE_VIDEO_OUTPUT_CHANNEL_F = BLUE_VIDEOCHANNEL_J, BLUE_VIDEO_OUTPUT_CHANNEL_G = BLUE_VIDEOCHANNEL_K, BLUE_VIDEO_OUTPUT_CHANNEL_H = BLUE_VIDEOCHANNEL_L, BLUE_VIDEO_INPUT_CHANNEL_E = BLUE_VIDEOCHANNEL_M, BLUE_VIDEO_INPUT_CHANNEL_F = BLUE_VIDEOCHANNEL_N, BLUE_VIDEO_INPUT_CHANNEL_G = BLUE_VIDEOCHANNEL_O, BLUE_VIDEO_INPUT_CHANNEL_H = BLUE_VIDEOCHANNEL_P, BLUE_VIDEO_INPUT_CHANNEL_1 = BLUE_VIDEO_INPUT_CHANNEL_A, BLUE_VIDEO_INPUT_CHANNEL_2 = BLUE_VIDEO_INPUT_CHANNEL_B, BLUE_VIDEO_INPUT_CHANNEL_3 = BLUE_VIDEO_INPUT_CHANNEL_C, BLUE_VIDEO_INPUT_CHANNEL_4 = BLUE_VIDEO_INPUT_CHANNEL_D, BLUE_VIDEO_INPUT_CHANNEL_5 = BLUE_VIDEO_INPUT_CHANNEL_E, BLUE_VIDEO_INPUT_CHANNEL_6 = BLUE_VIDEO_INPUT_CHANNEL_F, BLUE_VIDEO_INPUT_CHANNEL_7 = BLUE_VIDEO_INPUT_CHANNEL_G, BLUE_VIDEO_INPUT_CHANNEL_8 = BLUE_VIDEO_INPUT_CHANNEL_H, BLUE_VIDEO_OUTPUT_CHANNEL_1 = BLUE_VIDEO_OUTPUT_CHANNEL_A, BLUE_VIDEO_OUTPUT_CHANNEL_2 = BLUE_VIDEO_OUTPUT_CHANNEL_B, BLUE_VIDEO_OUTPUT_CHANNEL_3 = BLUE_VIDEO_OUTPUT_CHANNEL_C, BLUE_VIDEO_OUTPUT_CHANNEL_4 = BLUE_VIDEO_OUTPUT_CHANNEL_D, 
BLUE_VIDEO_OUTPUT_CHANNEL_5 = BLUE_VIDEO_OUTPUT_CHANNEL_E, BLUE_VIDEO_OUTPUT_CHANNEL_6 = BLUE_VIDEO_OUTPUT_CHANNEL_F, BLUE_VIDEO_OUTPUT_CHANNEL_7 = BLUE_VIDEO_OUTPUT_CHANNEL_G, BLUE_VIDEO_OUTPUT_CHANNEL_8 = BLUE_VIDEO_OUTPUT_CHANNEL_H, BLUE_VIDEOCHANNEL_INVALID = 30 }EBlueVideoChannel; typedef enum _EEpochRoutingElements { EPOCH_SRC_DEST_SCALER_0 = 0x1, EPOCH_SRC_DEST_SCALER_1 = 0x2, EPOCH_SRC_DEST_SCALER_2 = 0x3, EPOCH_SRC_DEST_SCALER_3 = 0x4, EPOCH_SRC_SDI_INPUT_A = 0x5, EPOCH_SRC_SDI_INPUT_B = 0x6, EPOCH_SRC_SDI_INPUT_C = 0x7, EPOCH_SRC_SDI_INPUT_D = 0x8, EPOCH_DEST_SDI_OUTPUT_A = 0x9, EPOCH_DEST_SDI_OUTPUT_B = 0xA, EPOCH_DEST_SDI_OUTPUT_C = 0xB, EPOCH_DEST_SDI_OUTPUT_D = 0xC, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHA = 0xD, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHB = 0xE, EPOCH_DEST_INPUT_MEM_INTERFACE_CHA = 0xF, EPOCH_DEST_INPUT_MEM_INTERFACE_CHB = 0x10, EPOCH_DEST_AES_ANALOG_AUDIO_OUTPUT = 0x11, EPOCH_SRC_AV_SIGNAL_GEN = 0x12, EPOCH_SRC_DEST_VPIO_SCALER_0 = 0x13, EPOCH_SRC_DEST_VPIO_SCALER_1 = 0x14, EPOCH_DEST_VARIVUE_HDMI = 0x15, EPOCH_DEST_INPUT_MEM_INTERFACE_CHC = 0x16, EPOCH_DEST_INPUT_MEM_INTERFACE_CHD = 0x17, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHC = 0x18, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHD = 0x19, EPOCH_SRC_SDI_INPUT_A_3GB_LINK_B = 0x1A, EPOCH_SRC_SDI_INPUT_B_3GB_LINK_B = 0x1B, EPOCH_SRC_SDI_INPUT_C_3GB_LINK_B = 0x1C, EPOCH_SRC_SDI_INPUT_D_3GB_LINK_B = 0x1D, EPOCH_DEST_SDI_OUTPUT_A_3GB_LINK_B = 0x1E, EPOCH_DEST_SDI_OUTPUT_B_3GB_LINK_B = 0x1F, EPOCH_DEST_SDI_OUTPUT_C_3GB_LINK_B = 0x20, EPOCH_DEST_SDI_OUTPUT_D_3GB_LINK_B = 0x21, EPOCH_DEST_HDMI_OUTPUT = 0x22, EPOCH_DEST_HDMI_OUTPUT_LINK_A = EPOCH_DEST_HDMI_OUTPUT, EPOCH_DEST_HDMI_OUTPUT_LINK_B = 0x23, EPOCH_SRC_SDI_INPUT_E = 0x24, EPOCH_SRC_SDI_INPUT_F = 0x25, EPOCH_SRC_SDI_INPUT_G = 0x26, EPOCH_SRC_SDI_INPUT_H = 0x27, EPOCH_SRC_SDI_INPUT_E_3GB_LINK_B = 0x28, EPOCH_SRC_SDI_INPUT_F_3GB_LINK_B = 0x29, EPOCH_SRC_SDI_INPUT_G_3GB_LINK_B = 0x2A, EPOCH_SRC_SDI_INPUT_H_3GB_LINK_B = 0x2B, EPOCH_DEST_SDI_OUTPUT_E = 0x2C, 
EPOCH_DEST_SDI_OUTPUT_F = 0x2D, EPOCH_DEST_SDI_OUTPUT_G = 0x2E, EPOCH_DEST_SDI_OUTPUT_H = 0x2F, EPOCH_DEST_SDI_OUTPUT_E_3GB_LINK_B = 0x30, EPOCH_DEST_SDI_OUTPUT_F_3GB_LINK_B = 0x31, EPOCH_DEST_SDI_OUTPUT_G_3GB_LINK_B = 0x32, EPOCH_DEST_SDI_OUTPUT_H_3GB_LINK_B = 0x33, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHE = 0x34, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHF = 0x35, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHG = 0x36, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHH = 0x37, EPOCH_DEST_INPUT_MEM_INTERFACE_CHE = 0x38, EPOCH_DEST_INPUT_MEM_INTERFACE_CHF = 0x39, EPOCH_DEST_INPUT_MEM_INTERFACE_CHG = 0x3C, EPOCH_DEST_INPUT_MEM_INTERFACE_CHH = 0x3D, /* aliases */ EPOCH_SRC_SDI_INPUT_1 = EPOCH_SRC_SDI_INPUT_A, EPOCH_SRC_SDI_INPUT_2 = EPOCH_SRC_SDI_INPUT_B, EPOCH_SRC_SDI_INPUT_3 = EPOCH_SRC_SDI_INPUT_C, EPOCH_SRC_SDI_INPUT_4 = EPOCH_SRC_SDI_INPUT_D, EPOCH_SRC_SDI_INPUT_5 = EPOCH_SRC_SDI_INPUT_E, EPOCH_SRC_SDI_INPUT_6 = EPOCH_SRC_SDI_INPUT_F, EPOCH_SRC_SDI_INPUT_7 = EPOCH_SRC_SDI_INPUT_G, EPOCH_SRC_SDI_INPUT_8 = EPOCH_SRC_SDI_INPUT_H, EPOCH_SRC_SDI_INPUT_A_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_A, EPOCH_SRC_SDI_INPUT_B_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_B, EPOCH_SRC_SDI_INPUT_C_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_C, EPOCH_SRC_SDI_INPUT_D_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_D, EPOCH_SRC_SDI_INPUT_E_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_E, EPOCH_SRC_SDI_INPUT_F_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_F, EPOCH_SRC_SDI_INPUT_G_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_G, EPOCH_SRC_SDI_INPUT_H_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_H, EPOCH_SRC_SDI_INPUT_1_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_A, EPOCH_SRC_SDI_INPUT_2_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_B, EPOCH_SRC_SDI_INPUT_3_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_C, EPOCH_SRC_SDI_INPUT_4_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_D, EPOCH_SRC_SDI_INPUT_5_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_E, EPOCH_SRC_SDI_INPUT_6_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_F, EPOCH_SRC_SDI_INPUT_7_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_G, EPOCH_SRC_SDI_INPUT_8_3GB_LINK_A = EPOCH_SRC_SDI_INPUT_H, EPOCH_SRC_SDI_INPUT_1_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_A_3GB_LINK_B, 
EPOCH_SRC_SDI_INPUT_2_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_B_3GB_LINK_B, EPOCH_SRC_SDI_INPUT_3_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_C_3GB_LINK_B, EPOCH_SRC_SDI_INPUT_4_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_D_3GB_LINK_B, EPOCH_SRC_SDI_INPUT_5_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_E_3GB_LINK_B, EPOCH_SRC_SDI_INPUT_6_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_F_3GB_LINK_B, EPOCH_SRC_SDI_INPUT_7_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_G_3GB_LINK_B, EPOCH_SRC_SDI_INPUT_8_3GB_LINK_B = EPOCH_SRC_SDI_INPUT_H_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_1 = EPOCH_DEST_SDI_OUTPUT_A, EPOCH_DEST_SDI_OUTPUT_2 = EPOCH_DEST_SDI_OUTPUT_B, EPOCH_DEST_SDI_OUTPUT_3 = EPOCH_DEST_SDI_OUTPUT_C, EPOCH_DEST_SDI_OUTPUT_4 = EPOCH_DEST_SDI_OUTPUT_D, EPOCH_DEST_SDI_OUTPUT_5 = EPOCH_DEST_SDI_OUTPUT_E, EPOCH_DEST_SDI_OUTPUT_6 = EPOCH_DEST_SDI_OUTPUT_F, EPOCH_DEST_SDI_OUTPUT_7 = EPOCH_DEST_SDI_OUTPUT_G, EPOCH_DEST_SDI_OUTPUT_8 = EPOCH_DEST_SDI_OUTPUT_H, EPOCH_DEST_SDI_OUTPUT_A_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_A, EPOCH_DEST_SDI_OUTPUT_B_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_B, EPOCH_DEST_SDI_OUTPUT_C_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_C, EPOCH_DEST_SDI_OUTPUT_D_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_D, EPOCH_DEST_SDI_OUTPUT_E_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_E, EPOCH_DEST_SDI_OUTPUT_F_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_F, EPOCH_DEST_SDI_OUTPUT_G_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_G, EPOCH_DEST_SDI_OUTPUT_H_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_H, EPOCH_DEST_SDI_OUTPUT_1_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_A, EPOCH_DEST_SDI_OUTPUT_2_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_B, EPOCH_DEST_SDI_OUTPUT_3_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_C, EPOCH_DEST_SDI_OUTPUT_4_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_D, EPOCH_DEST_SDI_OUTPUT_5_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_E, EPOCH_DEST_SDI_OUTPUT_6_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_F, EPOCH_DEST_SDI_OUTPUT_7_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_G, EPOCH_DEST_SDI_OUTPUT_8_3GB_LINK_A = EPOCH_DEST_SDI_OUTPUT_H, EPOCH_DEST_SDI_OUTPUT_1_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_A_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_2_3GB_LINK_B = 
EPOCH_DEST_SDI_OUTPUT_B_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_3_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_C_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_4_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_D_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_5_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_E_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_6_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_F_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_7_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_G_3GB_LINK_B, EPOCH_DEST_SDI_OUTPUT_8_3GB_LINK_B = EPOCH_DEST_SDI_OUTPUT_H_3GB_LINK_B, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH1 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHA, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH2 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHB, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH3 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHC, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH4 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHD, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH5 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHE, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH6 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHF, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH7 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHG, EPOCH_SRC_OUTPUT_MEM_INTERFACE_CH8 = EPOCH_SRC_OUTPUT_MEM_INTERFACE_CHH, EPOCH_DEST_INPUT_MEM_INTERFACE_CH1 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHA, EPOCH_DEST_INPUT_MEM_INTERFACE_CH2 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHB, EPOCH_DEST_INPUT_MEM_INTERFACE_CH3 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHC, EPOCH_DEST_INPUT_MEM_INTERFACE_CH4 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHD, EPOCH_DEST_INPUT_MEM_INTERFACE_CH5 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHE, EPOCH_DEST_INPUT_MEM_INTERFACE_CH6 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHF, EPOCH_DEST_INPUT_MEM_INTERFACE_CH7 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHG, EPOCH_DEST_INPUT_MEM_INTERFACE_CH8 = EPOCH_DEST_INPUT_MEM_INTERFACE_CHH, }EEpochRoutingElements; typedef enum _BlueAudioChannelDesc { MONO_FLAG = 0xC0000000, MONO_CHANNEL_1 = 0x00000001, MONO_CHANNEL_2 = 0x00000002, MONO_CHANNEL_3 = 0x00000004, MONO_CHANNEL_4 = 0x00000008, MONO_CHANNEL_5 = 0x00000010, MONO_CHANNEL_6 = 0x00000020, MONO_CHANNEL_7 = 0x00000040, MONO_CHANNEL_8 = 0x00000080, MONO_CHANNEL_9 = 0x00000100, /* to be 
used by analog audio output channels */
    MONO_CHANNEL_10 = 0x00000200, /* to be used by analog audio output channels */
    MONO_CHANNEL_11 = 0x00000400, /* actual channel 9 */
    MONO_CHANNEL_12 = 0x00000800, /* actual channel 10 */
    MONO_CHANNEL_13 = 0x00001000, /* actual channel 11 */
    MONO_CHANNEL_14 = 0x00002000, /* actual channel 12 */
    MONO_CHANNEL_15 = 0x00004000, /* actual channel 13 */
    MONO_CHANNEL_16 = 0x00008000, /* actual channel 14 */
    MONO_CHANNEL_17 = 0x00010000, /* actual channel 15 */
    MONO_CHANNEL_18 = 0x00020000  /* actual channel 16 */
}BlueAudioChannelDesc;

/* Use this enumerator to define the color space of the video signal on the SDI cable */
typedef enum _EConnectorSignalColorSpace
{
    RGB_ON_CONNECTOR = 0x00400000, /* Use this enumerator if the colorspace of video data on the SDI cable is RGB.
                                      When using dual link capture/playback, user can choose the color space of the data.
                                      In single link SDI the color space of the signal is always YUV */
    YUV_ON_CONNECTOR = 0           /* Use this enumerator if color space of video data on the SDI cable is YUV. */
}EConnectorSignalColorSpace;

/* Use this enumerator to define the data range of the RGB video frame data. */
typedef enum _ERGBDataRange
{
    CGR_RANGE = 0,  /* In this mode RGB data expected by the user (capture) or provided by the user (playback)
                       is in the range of 0-255 (8 bit) or 0-1023 (10 bit).
                       The driver uses this information to choose the appropriate YUV conversion matrices. */
    SMPTE_RANGE = 1 /* In this mode RGB data expected by the user (capture) or provided by the user (playback)
                       is in the range of 16-235 (8 bit) or 64-940 (10 bit).
                       The driver uses this information to choose the appropriate YUV conversion matrices.
*/ }ERGBDataRange; typedef enum _EPreDefinedColorSpaceMatrix { UNITY_MATRIX = 0, MATRIX_709_CGR = 1, MATRIX_RGB_TO_YUV_709_CGR = MATRIX_709_CGR, MATRIX_709 = 2, MATRIX_RGB_TO_YUV_709 = MATRIX_709, RGB_FULL_RGB_SMPTE = 3, MATRIX_601_CGR = 4, MATRIX_RGB_TO_YUV_601_CGR = MATRIX_601_CGR, MATRIX_601 = 5, MATRIX_RGB_TO_YUV_601 = MATRIX_601, MATRIX_VUYA = 6, UNITY_MATRIX_INPUT = 7, MATRIX_YUV_TO_RGB_709_CGR = 8, MATRIX_YUV_TO_RGB_709 = 9, RGB_SMPTE_RGB_FULL = 10, MATRIX_YUV_TO_RGB_601_CGR = 11, MATRIX_YUV_TO_RGB_601 = 12, MATRIX_USER_DEFINED = 13, } EPreDefinedColorSpaceMatrix; /* Use this enumerator for controlling the dual link functionality. */ typedef enum _EDualLinkSignalFormatType { Signal_FormatType_4224 = 0, /* sets the card to work in 4:2:2:4 mode */ Signal_FormatType_4444 = 1, /* sets the card to work in 4:4:4 10 bit dual link mode */ Signal_FormatType_444_10BitSDI = Signal_FormatType_4444, Signal_FormatType_444_12BitSDI = 0x4, /* sets the card to work in 4:4:4 12 bit dual link mode */ Signal_FormatType_Independent_422 = 0x2, Signal_FormatType_Key_Key = 0x8000 /* not used currently on epoch cards */ }EDualLinkSignalFormatType; enum ECardOperatingMode { CardOperatingMode_SingleLink = 0x0, CardOperatingMode_Independent_422 = CardOperatingMode_SingleLink, CardOperatingMode_DualLink = 0x1, CardOperatingMode_StereoScopic_422 = 0x3, CardOperatingMode_Dependent_422 = CardOperatingMode_StereoScopic_422, /* not used currently on epoch cards */ CardOperatingMode_DualLink_Dual3G = 0x4, }; typedef enum _blue_output_hanc_ioctl_enum { blue_get_output_hanc_buffer = 0, blue_put_output_hanc_buffer = 1, blue_get_valid_silent_hanc_data_status = 3, blue_set_valid_silent_hanc_data_status = 4, blue_start_output_fifo = 5, blue_stop_output_fifo = 6, blue_init_output_fifo = 7, blue_get_queues_info = 8, blue_get_output_fifo_info = blue_get_queues_info, blue_get_output_fifo_status = 9, blue_start_output_fifo_no_auto_turn_off = 10 /* this is used when we don't really use the FIFO, but 
handle audio playback ourselves in DirectShow; need to make sure that our HANC output FIFO
                                               doesn't turn off audio as there are never any HANC frames to be played */
}blue_output_hanc_ioctl_enum;

typedef enum _blue_input_hanc_ioctl_enum
{
    blue_get_input_hanc_buffer = 0,
    blue_start_input_fifo = 3,
    blue_stop_input_fifo = 4,
    blue_init_input_fifo = 5,
    blue_playthru_input_fifo = 6,
    blue_release_input_hanc_buffer = 7,
    blue_map_input_hanc_buffer = 8,
    blue_unmap_input_hanc_buffer = 9,
    blue_get_info_input_hanc_fifo = 10,
    blue_get_input_rp188 = 11,
    blue_get_input_fifo_status = 12,
}blue_input_hanc_ioctl_enum;

#define HANC_PLAYBACK_INIT     (0x00000001)
#define HANC_PLAYBACK_START    (0x00000002)
#define HANC_PLAYBACK_STOP     (0x00000004)
#define HANC_CAPTURE_INIT      (0x00000010)
#define HANC_CAPTURE_START     (0x00000020)
#define HANC_CAPTURE_STOP      (0x00000040)
#define HANC_CAPTURE_PLAYTHRU  (0x00000080)

typedef enum _EDMACardBufferType
{
    BLUE_CARDBUFFER_IMAGE = 0,
    BLUE_CARDBUFFER_IMAGE_VBI_HANC = 1,
    BLUE_CARDBUFFER_IMAGE_VBI = 2,
    BLUE_CARDBUFFER_AUDIO_OUT = 3,
    BLUE_CARDBUFFER_AUDIO_IN = 4,
    BLUE_CARDBUFFER_HANC = 5,
    BLUE_CARDBUFFER_IMAGE_HANC = 6,
}EDMACardBufferType;

enum enum_blue_dvb_asi_packing_format
{
    enum_blue_dvb_asi_packed_format = 1,                /* In this packing method the asi packets are stored as 188 or 204 bytes */
    enum_blue_dvb_asi_packed_format_with_timestamp = 2, /* In this packing method the asi packets are stored as (8+188) or (8+204) bytes.
                                                           The timestamp is stored at the beginning of the packet, using 8 bytes */
    enum_blue_dvb_asi_256byte_container_format = 3,
    enum_blue_dvb_asi_256byte_container_format_with_timestamp = 4
};

enum enum_blue_dvb_asi_packet_size
{
    enum_blue_dvb_asi_packet_size_188_bytes = 1,
    enum_blue_dvb_asi_packet_size_204_bytes = 2
};

typedef enum _blue_blackgenerator_status
{
    ENUM_BLACKGENERATOR_OFF = 0,         /* producing normal video output */
    ENUM_BLACKGENERATOR_ON = 1,          /* producing black video output */
    ENUM_BLACKGENERATOR_SDI_SYNC_OFF = 2 /* no valid SDI signal is coming
out of our SDI output connector; only available in Epoch ASI firmware */
}blue_blackgenerator_status;

typedef enum _EBlueExternalLtcSource
{
    EXT_LTC_SRC_BREAKOUT_HEADER = 0, /* default; header on the PCB board/Shield (Epoch only) */
    EXT_LTC_SRC_GENLOCK_BNC = 1,     /* Genlock input BNC connector (Epoch and Kronos) */
    EXT_LTC_SRC_INTERLOCK = 2,       /* Interlock input MMCX connector (Kronos only) */
    EXT_LTC_SRC_STEM_PORT = 3,       /* STEM port (Kronos only) */
}EBlueExternalLtcSource;

/**< use this enumerator for controlling emb audio output properties using the property EMBEDDED_AUDIO_OUTPUT. */
typedef enum _EBlueEmbAudioOutput
{
    blue_emb_audio_enable = 0x01,              /**< Switches off/on the whole HANC output from connectors associated with the channel */
    blue_auto_aes_to_emb_audio_encoder = 0x02, /**< control whether the auto aes to emb thread should be running or not */
    blue_emb_audio_group1_enable = 0x04,       /**< enables group1(ch 0 - 3) emb audio */
    blue_emb_audio_group2_enable = 0x08,       /**< enables group2(ch 4 - 7) emb audio */
    blue_emb_audio_group3_enable = 0x10,       /**< enables group3(ch 8 - 11) emb audio */
    blue_emb_audio_group4_enable = 0x20,       /**< enables group4(ch 12 - 15) emb audio */
    blue_enable_hanc_timestamp_pkt = 0x40
}EBlueEmbAudioOutput;

enum SerialPort_struct_flags
{
    SerialPort_Read = 1,
    SerialPort_Write = 2,
    SerialPort_TX_Queue_Status = 4,
    SerialPort_RX_Queue_Status = 8,
    SerialPort_RX_FlushBuffer = 16,
    SerialPort_RX_IntWait_Return_On_Data = 32,
};

/* Use these macros for controlling epoch application watch dog settings.
   The card property EPOCH_APP_WATCHDOG_TIMER can be used to control the
   watchdog timer functionality.
*/ enum enum_blue_app_watchdog_timer_prop { enum_blue_app_watchdog_timer_start_stop = (1 << 31), /* can be used to enable/disable timer */ enum_blue_app_watchdog_timer_keepalive = (1 << 30), /* can be used to reset the timer value */ enum_blue_app_watchdog_timer_get_present_time = (1 << 29), /* can query to get the value of the timer */ enum_blue_app_watchdog_get_timer_activated_status = (1 << 28), /* can query to get whether the timer has been activated */ enum_blue_app_watchdog_get_timer_start_stop_status = (1 << 27), /* can query whether the timer has been set. */ enum_blue_app_watchdog_enable_gpo_on_active = (1 << 26), /* using this enumerator you can tell the system that when */ /* application watchdog timer has expired whether a GPO output should be triggered or not. */ /* you can use also use this enumerator to select */ /* which GPO output should be triggered with this. to use GPO port A pass a value of */ /* GPIO_TX_PORT_A when this enumerator is used. */ enum_blue_hardware_watchdog_enable_gpo = (1 << 25) /* can be used to enable/disable GPO trigger when hardware watchdog timer has been triggered */ }; enum enum_blue_rs422_port_flags { enum_blue_rs422_port_set_as_slave = (1 << 0) /* If this flag is set the RS422 port would be set to slave mode. by default port is setup to work in master mode , where it would be acting as master in the transactions. 
*/ }; typedef enum { AUDIO_CHANNEL_LOOPING_OFF = 0x00000000, /**< deprecated not used any more */ AUDIO_CHANNEL_LOOPING = 0x00000001, /**< deprecated not used any more */ AUDIO_CHANNEL_LITTLEENDIAN = 0x00000000, /**< if the audio data is little endian this flag must be set*/ AUDIO_CHANNEL_BIGENDIAN = 0x00000002, /**< if the audio data is big endian this flag must be set*/ AUDIO_CHANNEL_OFFSET_IN_BYTES = 0x00000004, /**< deprecated not used any more */ AUDIO_CHANNEL_16BIT = 0x00000008, /**< if the audio channel bit depth is 16 bits this flag must be set*/ AUDIO_CHANNEL_BLIP_PENDING = 0x00000010, /**< deprecated not used any more */ AUDIO_CHANNEL_BLIP_COMPLETE = 0x00000020, /**< deprecated not used any more */ AUDIO_CHANNEL_SELECT_CHANNEL = 0x00000040, /**< deprecated not used any more */ AUDIO_CHANNEL_24BIT = 0x00000080 /**< if the audio channel bit depth is 24 bits this flag must be set*/ } EAudioFlags; /*/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////// S T R U C T S ////////////////////////////////////////////////////////////////////////////////////////////////////////////*/ #pragma pack(push, video_frame, 1) struct blue_videoframe_info { BLUE_U64 ltcTimeCode; unsigned long videochannel; unsigned long BufferId; unsigned long Count; unsigned long DroppedFrameCount; }; struct blue_videoframe_info_ex { BLUE_U64 ltcTimeCode; /* LTC timecode, not used */ unsigned long videochannel; /* the channel this frame was captured from */ long BufferId; /* this buffer contains the captured frame */ unsigned long Count; /* total captured frames */ unsigned long DroppedFrameCount; /* dropped frame count */ unsigned long nFrameTimeStamp; /* field count the frame was captured at */ unsigned long nVideoSignalType; /* video mode of this frame _EVideoMode */ unsigned int nASIPktCount; /* only for DVB-ASI; how many ASI packets are in this frame */ unsigned int nASIPktSize; /* only 
for DVB-ASI; how many bytes per packet */ unsigned int nAudioValidityBits; /* part of the channels status block for audio */ BLUE_U64 btcTimeStamp; /* Coordinated Bluefish Time timestamp */ unsigned char ucVideoModeLinkA; /* only used in 1.5G dual link mode */ unsigned char ucVideoModeLinkB; /* only used in 1.5G dual link mode */ unsigned long VideoModeExt; /* video mode of this frame _EVideoModeExt */ unsigned char pad[6]; /* not used */ }; typedef struct _AUXILLARY_VIDEO_INFO { BLUE_U32 video_channel_id; BLUE_U32 lVideoMode; BLUE_U32 lUniqueId; BLUE_U32 lInfoType; BLUE_U32 lMemFmt; BLUE_U32 lGpio; BLUE_U64 lLTC; }Auxillary_Video_Info; typedef struct { BLUE_S32 inputConnector; /* ANALOG_VIDEO_INPUT_CONNECTOR, EAnalogInputConnectorType */ BLUE_S32 inputPed; /* ANALOG_VIDEO_INPUT_PED, */ BLUE_S32 inputBrightness; /* ANALOG_VIDEO_INPUT_BRIGHTNESS, */ BLUE_S32 inputHue; /* ANALOG_VIDEO_INPUT_HUE, */ BLUE_S32 inputLumaGain; /* ANALOG_VIDEO_INPUT_LUMA_GAIN, */ BLUE_S32 inputChromaGain; /* ANALOG_VIDEO_INPUT_CHROMA_GAIN, */ BLUE_S32 inputAutoGain; /* ANALOG_VIDEO_INPUT_AUTO_GAIN, */ BLUE_S32 outputPed; /* ANALOG_VIDEO_OUTPUT_PED, */ BLUE_S32 outputBrightness; /* ANALOG_VIDEO_OUTPUT_BRIGHTNESS, */ BLUE_S32 outputHue; /* ANALOG_VIDEO_OUTPUT_HUE, */ BLUE_S32 outputYGain; /* ANALOG_VIDEO_OUTPUT_Y_GAIN, */ BLUE_S32 outputUGain; /* ANALOG_VIDEO_OUTPUT_U_GAIN, */ BLUE_S32 outputVGain; /* ANALOG_VIDEO_OUTPUT_V_GAIN, */ BLUE_S32 outputSharpness; /* ANALOG_VIDEO_OUTPUT_SHARPNESS, */ BLUE_S32 outputAutoGain; /* ANALOG_VIDEO_OUTPUT_AUTO_GAIN, */ BLUE_S32 outputSignalTypes; /* EAnalogConnectorSignalType */ }AnalogCardState; struct SerialPort_struct { unsigned char Buffer[64]; unsigned int nBufLength; unsigned int nSerialPortId; unsigned int bFlag; /* SerialPort_struct_flags */ unsigned short sTimeOut; }; struct blue_color_matrix_struct { BLUE_U32 VideoChannel; BLUE_U32 MatrixColumn; /* MatrixColType enumerator defines this */ double Coeff_B; double Coeff_R; double Coeff_G; double 
Coeff_K;
    double const_value;
};

typedef struct _blue_video_sync_struct
{
    BLUE_U32 sync_wait_type;                  /* field or frame (UPD_FMT_FIELD or UPD_FMT_FRAME) */
    BLUE_U32 video_channel;                   /* which video channel interrupt should the interrupt wait for,
                                                 e.g. BLUE_VIDEO_INPUT_CHANNEL_A, BLUE_VIDEO_OUTPUT_CHANNEL_A, etc. */
    BLUE_U32 timeout_video_msc;               /* field count when to return or IGNORE_SYNC_WAIT_TIMEOUT_VALUE to return at next field/frame sync */
    BLUE_U32 video_msc;                       /* current video msc (field count) */
    BLUE_U32 current_display_frame_id;        /* current buffer id which is being displayed */
    BLUE_U32 current_display_frame_uniqueid;  /* unique id associated with current buffer id which is being displayed;
                                                 this is only valid when using fifo modes. */
    BLUE_U16 subfield_interrupt;              /* subfield interrupt number; 0 == main frame sync */
    BLUE_U16 subfield_lines;                  /* number of lines of video captured at this subfield interrupt */
    BLUE_U64 btcTimeStamp;                    /* Coordinated Bluefish Time timestamp of field/frame which is currently being displayed */
    BLUE_U8 pad[12];
}blue_video_sync_struct;

typedef struct sync_options
{
    BLUE_U32 SyncWaitType;                /* required: field or frame (UPD_FMT_FIELD or UPD_FMT_FRAME) */
    BLUE_U32 VideoChannel;                /* required: */
    BLUE_U32 FieldCountTimeout;           /* field count when to return or IGNORE_SYNC_WAIT_TIMEOUT_VALUE to return at next
                                             field/frame sync ( not yet supported on linux) */
    BLUE_U32 FieldCount;                  /* Current FieldCount */
    BLUE_U64 BtcTimeStamp;                /* BTC at time of interrupt */
    BLUE_U32 SubFieldInterrupt;           /* returned value, input only. subfield interrupt number; 0 == main frame sync
                                             NOTE(review): "input only" contradicts "returned value" and the "output only"
                                             wording on the fields below - confirm intended direction against the SDK docs. */
    BLUE_U32 SubFieldLines;               /* returned value, input only. number of lines of video captured at this subfield interrupt */
    BLUE_U32 CurrentDisplayFrameID;       /* returned value, output only. current buffer id which is being displayed on the output */
    BLUE_U32 CurrentDisplayFrameUniqueID; /* returned value, output only.
unique id associated with current buffer id which is being displayed on the output */ BLUE_U64 pad[5]; }sync_options; typedef struct blue_external_ltc_input_sync_struct { BLUE_U64 TimeCodeValue; BLUE_U32 TimeCodeIsValid; BLUE_U8 pad[20]; }blue_external_ltc_input_sync_struct; struct blue_dma_request_struct { unsigned char* pBuffer; BLUE_U32 video_channel; BLUE_U32 BufferId; BLUE_U32 BufferDataType; BLUE_U32 FrameType; BLUE_U32 BufferSize; BLUE_U32 Offset; unsigned long BytesTransferred; unsigned char pad[64]; }; struct blue_1d_lookup_table_struct { BLUE_U32 nVideoChannel; BLUE_U32 nLUTId; BLUE_U16* pLUTData; BLUE_U32 nLUTElementCount; BLUE_U8 pad[256]; }; struct blue_multi_link_info_struct { BLUE_U32 Link1_Device; BLUE_U32 Link1_MemChannel; BLUE_U32 Link2_Device; BLUE_U32 Link2_MemChannel; BLUE_U32 Link3_Device; BLUE_U32 Link3_MemChannel; BLUE_U32 Link4_Device; BLUE_U32 Link4_MemChannel; BLUE_U32 Link5_Device; BLUE_U32 Link5_MemChannel; BLUE_U32 Link6_Device; BLUE_U32 Link6_MemChannel; BLUE_U32 Link7_Device; BLUE_U32 Link7_MemChannel; BLUE_U32 Link8_Device; BLUE_U32 Link8_MemChannel; BLUE_U32 Link9_Device; BLUE_U32 Link9_MemChannel; BLUE_U32 Link10_Device; BLUE_U32 Link10_MemChannel; BLUE_U32 Link11_Device; BLUE_U32 Link11_MemChannel; BLUE_U32 Link12_Device; BLUE_U32 Link12_MemChannel; BLUE_U32 Link13_Device; BLUE_U32 Link13_MemChannel; BLUE_U32 Link14_Device; BLUE_U32 Link14_MemChannel; BLUE_U32 Link15_Device; BLUE_U32 Link15_MemChannel; BLUE_U32 Link16_Device; BLUE_U32 Link16_MemChannel; BLUE_U32 InputControl; BLUE_U32 Padding[15]; }; struct hanc_stream_info_struct { BLUE_S32 AudioDBNArray[4]; /**< Contains the DBN values that should be used for each of the embedded audio groups*/ BLUE_S32 AudioChannelStatusBlock[4]; /**< channel status block information for each of the embedded audio group*/ BLUE_U32 flag_valid_time_code; /**< deprecated/unused flag; set to 0*/ BLUE_U64 time_code; /**< RP188 time code that was extracted from the HANC buffer or RP188 timecode 
which should be inserted into the HANC buffer*/ BLUE_U32* hanc_data_ptr; /**< Hanc Buffer which should be used as the source or destination for either extraction or insertion */ BLUE_U32 video_mode; /**< video mode which this hanc buffer which be used with. We need this information for do the required audio distribution especially NTSC */ BLUE_U64 ltc_time_code; BLUE_U64 sd_vitc_time_code; BLUE_U64 rp188_ltc_time_code; BLUE_U32 pad[126]; }; struct hanc_decode_struct { BLUE_VOID* audio_pcm_data_ptr; /* Buffer which would be used to store the extracted PCM audio data. Must be filled in by app before calling function. */ BLUE_U32 audio_ch_required_mask; /* Defines which audio channels should be extracted; Use enumerator BlueAudioChannelDesc to set up this mask. Must be filled in by app before calling function. */ BLUE_U32 type_of_sample_required; /* Defines sample characteristics: AUDIO_CHANNEL_16BIT: for 16 bit pcm data AUDIO_CHANNEL_24BIT: for 24 bit pcm data If neither AUDIO_CHANNEL_16BIT nor AUDIO_CHANNEL_24BIT are set 32 bit pcm data will be extracted Must be filled in by app before calling function. */ BLUE_U32 no_audio_samples; /* number of audio samples that have decoded the hanc buffer */ BLUE_U64 timecodes[7]; /* Only the first four elements are currently defined: hanc_decode_struct::timcodes[0] ---> RP188 VITC timecode hanc_decode_struct::timcodes[1] ---> RP188 LTC timecode hanc_decode_struct::timcodes[2] ---> SD VITC timecode hanc_decode_struct::timcodes[3] ---> External LTC timecode */ BLUE_VOID* raw_custom_anc_pkt_data_ptr; /* This buffer would contain the raw ANC packets that was found in the orac hanc buffer. this would contain any ANC packets that is not of type embedded audio and RP188 TC. Must be filled in by app before calling function. can be NULL */ BLUE_U32 sizeof_custom_anc_pkt_data_ptr; /* size of the ANC buffer array; Must be filled in by app before calling function. 
can be NULL */ BLUE_U32 avail_custom_anc_pkt_data_bytes; /* how many custom ANC packets has been decoded into raw_hanc_pkt_data_ptr; Must be filled in by app before calling function. can be NULL */ BLUE_U32 audio_input_source; /* Used to select the audio input source, whether it is AES or Embedded. Must be filled in by app before calling function. */ BLUE_U32 audio_temp_buffer[16]; /* deprecated/not used; Must be initialised to zero by app before first instantiating the function. */ BLUE_U32 audio_split_buffer_mask; /* deprecated/not used; Must be initialised to zero by app before first instantiating the function. */ BLUE_U32 max_expected_audio_sample_count; /* specify the maximum number of audio samples that the provided audio pcm buffer can contain. Must be filled in by app before calling function. */ BLUE_U32 pad[124]; }; typedef struct hardware_firmware_versions { BLUE_U32 FirmwareMajor; BLUE_U32 FirmwareMinor; BLUE_U32 FirmwareBuild; BLUE_U32 FirmwareLoadedFrom; BLUE_U32 CpldConfig; BLUE_U32 CpldStemPort; BLUE_U32 CpldIOExpander; BLUE_U32 HardwareBaseBoardFamily; BLUE_U32 HardwareBaseBoardModel; BLUE_U32 HardwareBaseBoardRevision; BLUE_U32 HardwareBaseBoardBuildMajor; BLUE_U32 HardwareBaseBoardBuildMinor; BLUE_U32 HardwareRegulatorBoardFamily; BLUE_U32 HardwareRegulatorBoardModel; BLUE_U32 HardwareRegulatorBoardRevision; BLUE_U32 HardwareRegulatorBoardBuildMajor; BLUE_U32 HardwareRegulatorBoardBuildMinor; BLUE_U32 Padding[12]; }hardware_firmware_versions; #pragma pack(pop, video_frame) /*/////////////////////////////////////////////////////////////////////////// // L E G A C Y D E C L A R A T I O N S - END ///////////////////////////////////////////////////////////////////////////*/ #endif /* HG_BLUE_DRIVER_P_LEGACY_HG */ ================================================ FILE: src/modules/bluefish/interop/BlueTypes.h ================================================ //////////////////////////////////////////////////////////////////////////// // File: BlueTypes.h 
// // Description: Declaration for Bluefish types // // (C) Copyright 2017 by Bluefish Technologies Pty Ltd. All Rights Reserved. //////////////////////////////////////////////////////////////////////////// #ifndef HG_BLUE_TYPES_HG #define HG_BLUE_TYPES_HG #define BLUE_TRUE 1 #define BLUE_FALSE 0 typedef int BERR; typedef int BErr; typedef long long BGERROR; typedef void BLUE_VOID; typedef int BLUE_BOOL; typedef unsigned char BLUE_U8; typedef unsigned short BLUE_U16; typedef unsigned int BLUE_U32; typedef unsigned long long BLUE_U64; typedef char BLUE_S8; typedef short BLUE_S16; typedef int BLUE_S32; typedef long long BLUE_S64; typedef BLUE_U8 BLUE_UINT8; typedef BLUE_U16 BLUE_UINT16; typedef BLUE_U32 BLUE_UINT32; typedef BLUE_U64 BLUE_UINT64; typedef BLUE_S8 BLUE_INT8; typedef BLUE_S16 BLUE_INT16; typedef BLUE_S32 BLUE_INT32; typedef BLUE_S64 BLUE_INT64; // TODO: maybe make a macro to do all this ifndef def etuff. #ifdef __linux__ #ifndef ULONG #define ULONG BLUE_U32 #endif #ifndef BOOLEAN #define BOOLEAN bool #endif #ifndef BOOL #define BOOL bool #endif #ifndef LPVOID #define LPVOID void* #endif #ifndef LPDWORD #define LPDWORD BLUE_U32* #endif #ifndef LPOVERLAPPED #define LPOVERLAPPED void* #endif typedef void* PVOID; typedef void* PVOID64; typedef BLUE_U64 LARGE_INTEGER; typedef BLUE_U32 DWORD; #endif #endif //#define HG_BLUE_TYPES_HG ================================================ FILE: src/modules/bluefish/interop/BlueVelvetCFuncPtr.h ================================================ /* Copyright (c) Bluefish444. All rights reserved. NOTE: Please add #define IMPLEMENTATION_BLUEVELVETC_FUNC_PTR before including this file so that all the SDK functions are defined in your project. 
If including this header files from multiple c/c++ files make sure that only one c/c++ file which includes this header file defines IMPLEMENTATION_BLUEVELVETC_FUNC_PTR #define IMPLEMENTATION_BLUEVELVETC_CONVERSION_FUNC_PTR before including this file so that all conversion functions are defined in your project. If including this header files from multiple c/c++ files make sure that only one c/c++ file which includes this header file defines IMPLEMENTATION_BLUEVELVETC_CONVERSION_FUNC_PTR */ #ifndef HG_BLUEVELVETC_FUNC_PTR #define HG_BLUEVELVETC_FUNC_PTR #if defined (_WIN32) #define GET_PROCADDR_FOR_FUNC(module, available, name) { name = (pFunc_##name)GetProcAddress(module, #name); if(!name) { *available = false; OutputDebugStringA(#name); break; } } #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #include #include "BlueDriver_p.h" #elif defined (__linux__) #define GET_PROCADDR_FOR_FUNC(module, available, name) { name = (pFunc_##name)dlsym(module, #name); if(!name) { *available = false; break; } } #include #define __cdecl #include "../../hal/BlueTypes.h" #include "../../hal/BlueDriver_p.h" #endif typedef void* BLUEVELVETC_HANDLE; typedef void* BFC_SYNC_INFO; #define BLUE_OK(a) (!a) /* Test for success of a method returning BErr */ #define BLUE_FAIL(a) (a) /* Test for failure of a method returning BErr */ /* FUNCTION PROTOTYPES FOR BLUEVELVETC */ #if defined(__cplusplus) extern "C" { #endif typedef const char* (__cdecl *pFunc_bfcGetVersion)(); typedef const wchar_t* (__cdecl *pFunc_bfcGetVersionW)(); typedef BLUEVELVETC_HANDLE(__cdecl *pFunc_bfcFactory)(); typedef BLUE_VOID(__cdecl *pFunc_bfcDestroy)(BLUEVELVETC_HANDLE pHandle); typedef BLUE_S32(__cdecl *pFunc_bfcEnumerate)(BLUEVELVETC_HANDLE pHandle, BLUE_S32* pDeviceCount); typedef BLUE_S32(__cdecl *pFunc_bfcQueryCardType)(BLUEVELVETC_HANDLE pHandle, BLUE_S32* pCardType, BLUE_S32 DeviceID); typedef BLUE_S32(__cdecl *pFunc_bfcAttach)(BLUEVELVETC_HANDLE pHandle, BLUE_S32 DeviceId); typedef 
BLUE_S32(__cdecl *pFunc_bfcSetMultiLinkMode)(BLUEVELVETC_HANDLE pHandle, blue_multi_link_info_struct* pMultiLinkInfo);
typedef BLUE_S32(__cdecl *pFunc_bfcQueryMultiLinkMode)(BLUEVELVETC_HANDLE pHandle, blue_multi_link_info_struct* pMultiLinkInfo);
typedef BLUE_S32(__cdecl *pFunc_bfcDetach)(BLUEVELVETC_HANDLE pHandle);

/* Card property get/set (32- and 64-bit variants). */
typedef BLUE_S32(__cdecl *pFunc_bfcQueryCardProperty32)(BLUEVELVETC_HANDLE pHandle, const BLUE_S32 Property, BLUE_U32* pValue32);
typedef BLUE_S32(__cdecl *pFunc_bfcSetCardProperty32)(BLUEVELVETC_HANDLE pHandle, const BLUE_S32 Property, const BLUE_U32 Value32);
typedef BLUE_S32(__cdecl *pFunc_bfcQueryCardProperty64)(BLUEVELVETC_HANDLE pHandle, const BLUE_S32 Property, BLUE_U64* pValue64);
typedef BLUE_S32(__cdecl *pFunc_bfcSetCardProperty64)(BLUEVELVETC_HANDLE pHandle, const BLUE_S32 Property, const BLUE_U64 Value64);
typedef BLUE_S32(__cdecl *pFunc_bfcGetCardSerialNumber)(BLUEVELVETC_HANDLE pHandle, BLUE_S8* pSerialNumber, BLUE_U32 StringSize);
typedef BLUE_S32(__cdecl *pFunc_bfcGetCardFwVersion)(BLUEVELVETC_HANDLE pHandle, BLUE_U32* pValue);
typedef BLUE_S32(__cdecl *pFunc_bfcGetCardHwFwVersions)(BLUEVELVETC_HANDLE pHandle, hardware_firmware_versions* pVersions);

/* Video sync waits. */
#if defined (_WIN32)
typedef BLUE_S32(__cdecl *pFunc_bfcWaitVideoSyncAsync)(BLUEVELVETC_HANDLE pHandle, OVERLAPPED* pOverlap, blue_video_sync_struct* pSyncData);
#endif
typedef BLUE_S32(__cdecl *pFunc_bfcWaitVideoSync)(BLUEVELVETC_HANDLE pHandle, sync_options* pSyncData, BFC_SYNC_INFO SyncInfo);
typedef BLUE_S32(__cdecl *pFunc_bfcWaitVideoInputSync)(BLUEVELVETC_HANDLE pHandle, unsigned long UpdateType, unsigned long* pFieldCount);
typedef BLUE_S32(__cdecl *pFunc_bfcWaitVideoOutputSync)(BLUEVELVETC_HANDLE pHandle, unsigned long UpdateType, unsigned long* pFieldCount);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoOutputCurrentFieldCount)(BLUEVELVETC_HANDLE pHandle, unsigned long* pFieldCount);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoInputCurrentFieldCount)(BLUEVELVETC_HANDLE pHandle, unsigned long* pFieldCount);

/* Capture / playback control. */
typedef BLUE_S32(__cdecl *pFunc_bfcVideoCaptureStart)(BLUEVELVETC_HANDLE pHandle);
typedef BLUE_S32(__cdecl *pFunc_bfcVideoCaptureStop)(BLUEVELVETC_HANDLE pHandle);
typedef BLUE_S32(__cdecl *pFunc_bfcVideoPlaybackStart)(BLUEVELVETC_HANDLE pHandle, BLUE_S32 Step, BLUE_S32 Loop);
typedef BLUE_S32(__cdecl *pFunc_bfcVideoPlaybackStop)(BLUEVELVETC_HANDLE pHandle, BLUE_S32 Wait, BLUE_S32 Flush);
typedef BLUE_S32(__cdecl *pFunc_bfcVideoPlaybackAllocate)(BLUEVELVETC_HANDLE pHandle, BLUE_VOID** pAddress, unsigned long* pBufferID, unsigned long* pUnderrun);
typedef BLUE_S32(__cdecl *pFunc_bfcVideoPlaybackPresent)(BLUEVELVETC_HANDLE pHandle, unsigned long* UniqueID, unsigned long BufferID, unsigned long Count, BLUE_S32 Keep, BLUE_S32 Odd);
typedef BLUE_S32(__cdecl *pFunc_bfcVideoPlaybackRelease)(BLUEVELVETC_HANDLE pHandle, unsigned long BufferID);
/* NOTE: different signatures per platform — Windows is async via OVERLAPPED. */
#if defined (_WIN32)
typedef BLUE_S32(__cdecl *pFunc_bfcGetCaptureVideoFrameInfoEx)(BLUEVELVETC_HANDLE pHandle, OVERLAPPED* pOverlap, struct blue_videoframe_info_ex* pVideoFrameInfo, BLUE_S32 iCompostLater, BLUE_U32* CaptureFifoSize);
#elif defined(__linux__)
typedef BLUE_S32(__cdecl *pFunc_bfcGetCaptureVideoFrameInfoEx)(BLUEVELVETC_HANDLE pHandle, struct blue_videoframe_info_ex* pVideoFrameInfo);
#endif
typedef BLUE_S32(__cdecl *pFunc_bfcRenderBufferCapture)(BLUEVELVETC_HANDLE pHandle, unsigned long BufferID);
typedef BLUE_S32(__cdecl *pFunc_bfcRenderBufferUpdate)(BLUEVELVETC_HANDLE pHandle, unsigned long BufferID);
typedef BLUE_S32(__cdecl *pFunc_bfcGetRenderBufferCount)(BLUEVELVETC_HANDLE pHandle, unsigned long* pCount);

/* HANC (ancillary data / embedded audio) encode/decode. */
typedef BLUE_S32(__cdecl *pFunc_bfcEncodeHancFrameEx)(BLUEVELVETC_HANDLE pHandle, BLUE_U32 CardType, struct hanc_stream_info_struct* pHancEncodeInfo, BLUE_VOID* pAudioBuffer, BLUE_U32 AudioChannels, BLUE_U32 AudioSamples, BLUE_U32 SampleType, BLUE_U32 AudioFlags);
typedef BLUE_S32(__cdecl *pFunc_bfcEncodeHancFrameWithUCZ)(BLUEVELVETC_HANDLE pHandle, BLUE_U32 CardType, struct hanc_stream_info_struct* pHancEncodeInfo, BLUE_VOID* pAudioBuffer, BLUE_U32 AudioChannels, BLUE_U32 AudioSamples, BLUE_U32 SampleType, BLUE_U8* pUCZBuffer);
typedef BLUE_S32(__cdecl *pFunc_bfcDecodeHancFrameEx)(BLUEVELVETC_HANDLE pHandle, BLUE_U32 CardType, BLUE_U32* pHancBuffer, struct hanc_decode_struct* pHancDecodeInfo);

/* System-buffer transfers (async on Windows, sync on Linux). */
#if defined(_WIN32)
typedef BLUE_S32(__cdecl *pFunc_bfcSystemBufferReadAsync)(BLUEVELVETC_HANDLE pHandle, unsigned char* pPixels, unsigned long ulSize, OVERLAPPED* pOverlap, unsigned long ulBufferID, unsigned long ulOffset);
typedef BLUE_S32(__cdecl *pFunc_bfcSystemBufferWriteAsync)(BLUEVELVETC_HANDLE pHandle, unsigned char* pPixels, unsigned long ulSize, OVERLAPPED* pOverlap, unsigned long ulBufferID, unsigned long ulOffset);
#elif defined(__linux__)
typedef BLUE_S32(__cdecl *pFunc_bfcSystemBufferRead)(BLUEVELVETC_HANDLE pHandle, unsigned char* pPixels, unsigned long ulSize, unsigned long ulBufferID, unsigned long ulOffset);
typedef BLUE_S32(__cdecl *pFunc_bfcSystemBufferWrite)(BLUEVELVETC_HANDLE pHandle, unsigned char* pPixels, unsigned long ulSize, unsigned long ulBufferID, unsigned long ulOffset);
#endif

/* Sync-info objects used with the async DMA and wait calls. */
typedef BFC_SYNC_INFO(__cdecl *pFunc_bfcSyncInfoCreate)(BLUEVELVETC_HANDLE pHandle);
typedef BLUE_S32(__cdecl *pFunc_bfcSyncInfoDelete)(BLUEVELVETC_HANDLE pHandle, BFC_SYNC_INFO SyncInfo);
typedef BLUE_S32(__cdecl *pFunc_bfcSyncInfoWait)(BLUEVELVETC_HANDLE pHandle, BFC_SYNC_INFO SyncInfo, const BLUE_U32 nTimeOutInMilliSec);
typedef BLUE_S32(__cdecl *pFunc_bfcSyncInfoWaitWithSyncOptions)(BLUEVELVETC_HANDLE pHandle, BFC_SYNC_INFO SyncInfo, sync_options* pSyncOptions, const BLUE_U32 TimeOutInMilliSec);
typedef BLUE_S32(__cdecl *pFunc_bfcDmaReadFromCardAsync)(BLUEVELVETC_HANDLE pHandle, BLUE_U8* pData, unsigned long Size, BFC_SYNC_INFO SyncInfo, unsigned long BufferID, unsigned long Offset);
typedef BLUE_S32(__cdecl *pFunc_bfcDmaWriteToCardAsync)(BLUEVELVETC_HANDLE pHandle, BLUE_U8* pData, unsigned long Size, BFC_SYNC_INFO SyncInfo, unsigned long BufferID, unsigned long Offset);

/* RS-422 serial port access. */
typedef BLUE_S32(__cdecl *pFunc_bfcSerialPortWaitForInputData)(BLUEVELVETC_HANDLE pHandle, BLUE_U32 PortFlags, BLUE_U32* pBufferLength);
typedef BLUE_S32(__cdecl *pFunc_bfcSerialPortRead)(BLUEVELVETC_HANDLE pHandle, BLUE_U32 nPortFlags, BLUE_U8* pBuffer, BLUE_U32 ReadLength);
typedef BLUE_S32(__cdecl *pFunc_bfcSerialPortWrite)(BLUEVELVETC_HANDLE pHandle, BLUE_U32 nPortFlags, BLUE_U8* pBuffer, BLUE_U32 WriteLength);
#if defined (_WIN32)
typedef BLUE_S32(__cdecl *pFunc_bfcGetReferenceClockPhaseSettings)(BLUEVELVETC_HANDLE pHandle, BLUE_U32* pHPhase, BLUE_U32* pVPhase, BLUE_U32* pHPhaseMax, BLUE_U32* pVPhaseMax);
typedef BLUE_S32(__cdecl *pFunc_bfcLoadOutputLUT1D)(BLUEVELVETC_HANDLE pHandle, struct blue_1d_lookup_table_struct* pLutData);
#endif

/* Video/VANC geometry helpers (pure functions of mode/format). */
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoWidth)(BLUE_S32 VideoMode, BLUE_U32* pWidth);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoHeight)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_U32* pHeight);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoBytesPerLineV2)(BLUE_S32 VideoMode, BLUE_S32 MemoryFormat, BLUE_U32* pBytesPerLine);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoBytesPerFrame)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pBytesPerFrame);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoBytesPerFrameGolden)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pGoldenBytes);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoInfo)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pWidth, BLUE_U32* pHeight, BLUE_U32* pBytesPerLine, BLUE_U32* pBytesPerFrame, BLUE_U32* pGoldenBytes);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVancWidth)(BLUE_S32 VideoMode, BLUE_U32* pWidth);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVancHeight)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_U32* pHeight);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVancBytesPerLineV2)(BLUE_S32 VideoMode, BLUE_S32 MemoryFormat, BLUE_U32* pBytesPerLine);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVancBytesPerFrame)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pBytesPerFrame);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVancInfo)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pWidth, BLUE_U32* pHeight, BLUE_U32* pBytesPerLine, BLUE_U32* pBytesPerFrame);
typedef BLUE_S32(__cdecl *pFunc_bfcGetBytesForGroupPixels)(EMemoryFormat MemoryFormat, BLUE_U32 VideoWidth, BLUE_U32* pVideoPitchBytes);
#if defined (_WIN32)
typedef BLUE_S32(__cdecl *pFunc_bfcGetWindowsDriverHandle)(BLUEVELVETC_HANDLE pHandle, HANDLE* pDriverHandle);
typedef BLUE_S32(__cdecl *pFunc_bfcSetDynamicMemoryFormatChange)(BLUEVELVETC_HANDLE pHandle, OVERLAPPED* pOverlap, BLUE_U32 nUniqueId, EMemoryFormat MemoryFormat);
#elif defined(__linux__)
typedef BLUE_S32(__cdecl *pFunc_bfcGetFileHandle)(BLUEVELVETC_HANDLE pHandle, BLUE_U32* pFileHandle);
#endif

/* BlueVelvetC utils functions */
typedef void*(__cdecl *pFunc_bfAlloc)(BLUE_U32 nMemorySize);
typedef void(__cdecl *pFunc_bfFree)(BLUE_U32 nMemSize, BLUE_VOID* pMemory);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForCardType)(const BLUE_S32 CardType);
typedef wchar_t*(__cdecl *pFunc_bfcUtilsGetWStringForCardType)(const BLUE_S32 CardType);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForBlueProductId)(const BLUE_U32 ProductId);
typedef wchar_t*(__cdecl *pFunc_bfcUtilsGetWStringForBlueProductId)(const BLUE_U32 ProductId);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForVideoMode)(const BLUE_U32 VideoMode);
typedef wchar_t*(__cdecl *pFunc_bfcUtilsGetWStringForVideoMode)(const BLUE_U32 VideoMode);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForMemoryFormat)(const BLUE_U32 MemoryFormat);
typedef wchar_t*(__cdecl *pFunc_bfcUtilsGetWStringForMemoryFormat)(const BLUE_U32 MemoryFormat);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForUpdateFormat)(const BLUE_U32 UpdateFormat);
typedef wchar_t*(__cdecl *pFunc_bfcUtilsGetWStringForUpdateFormat)(const BLUE_U32 UpdateFormat);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForVideoEngine)(const BLUE_U32 VideoEngine);
typedef wchar_t*(__cdecl *pFunc_bfcUtilsGetWStringForVideoEngine)(const BLUE_U32 VideoEngine);
typedef char*(__cdecl *pFunc_bfcUtilsGetStringForMr2Node)(const BLUE_U32 Mr2Node);
/* NOTE(review): the W variant below returns char* rather than wchar_t* — looks like a
   typo in the vendor header; confirm against the BlueVelvetC SDK before changing. */
typedef char*(__cdecl *pFunc_bfcUtilsGetWStringForMr2Node)(const BLUE_U32 Mr2Node);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetMR2Routing)(const BLUEVELVETC_HANDLE pHandle, BLUE_U32* pSrcNode, const BLUE_U32 DestNode, BLUE_U32* LinkType);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsSetMR2Routing)(const BLUEVELVETC_HANDLE pHandle, const BLUE_U32 SrcNode, const BLUE_U32 DestNode, const BLUE_U32 LinkType);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetAudioOutputRouting)(const BLUEVELVETC_HANDLE pHandle, const BLUE_U32 AudioConnectorType, BLUE_U32* pAudioSourceChannelId, BLUE_U32 AudioConnectorId);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsSetAudioOutputRouting)(const BLUEVELVETC_HANDLE pHandle, const BLUE_U32 AudioConnectorType, BLUE_U32 AudioSourceChannelId, BLUE_U32 AudioConnectorId);
typedef bool(__cdecl *pFunc_bfcUtilsIsVideoModeProgressive)(const BLUE_U32 VideoMode);
typedef bool(__cdecl *pFunc_bfcUtilsIsVideoMode1001Framerate)(const BLUE_U32 VideoMode);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetFpsForVideoMode)(const BLUE_U32 VideoMode);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetVideoModeExtForFrameInfo)(const BLUE_U32 Width, const BLUE_U32 Height, const BLUE_U32 Rate, const BLUE_U32 bIs1001, const BLUE_U32 bIsProgressive, BLUE_U32* pVideoModeExt);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetFrameInfoForVideoModeExt)(const BLUE_U32 VideoModeExt, BLUE_U32* pWidth, BLUE_U32* pHeight, BLUE_U32* pRate, BLUE_U32* pIs1001, BLUE_U32* pIsProgressive);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetAudioSamplesPerFrame)(const BLUE_U32 VideoMode, const BLUE_U32 FrameNo);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsInitVancBuffer)(BLUE_U32 nCardType, BLUE_U32 VideoMode, BLUE_U32 PixelsPerLine, BLUE_U32
nLinesPerFrame, BLUE_U32* pVancBuffer);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsVancPktExtract)(BLUE_U32 nCardType, BLUE_U32 nVancPktType, BLUE_U32* pSrcVancBuffer, BLUE_U32 nSrcVancBufferSize, BLUE_U32 PixelsPerLine, BLUE_U32 nVancPktDid, BLUE_U16* pVancPktSdid, BLUE_U16* pVancPktDataLength, BLUE_U16* pVancPktData, BLUE_U16* pVancPktLineNo);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsVancPktInsert)(BLUE_U32 nCardType, BLUE_U32 nVancPktType, BLUE_U32 nVancPktLineNumber, BLUE_U32* pVancPktBuffer, BLUE_U32 nVancPktBufferSize, BLUE_U32* pDestVancBuffer, BLUE_U32 PixelsPerLine);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsDecodeEia708bPkt)(BLUE_U32 CardType, BLUE_U16* pVancPacketData, BLUE_U16 PacketUdwCount, BLUE_U16 EiaPacketSubtype, BLUE_U8* pDecodedString);

/* deprecated */
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetVideoModeForFrameInfo)(const BLUE_U32 Width, const BLUE_U32 Height, const BLUE_U32 Rate, const BLUE_U32 bIs1001, const BLUE_U32 bIsProgressive, BLUE_U32* pVideoMode);
typedef BLUE_S32(__cdecl *pFunc_bfcUtilsGetFrameInfoForVideoMode)(const BLUE_U32 VideoMode, BLUE_U32* pWidth, BLUE_U32* pHeight, BLUE_U32* pRate, BLUE_U32* pIs1001, BLUE_U32* pIsProgressive);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVideoBytesPerLine)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pBytesPerLine);
typedef BLUE_S32(__cdecl *pFunc_bfcGetVancBytesPerLine)(BLUE_S32 VideoMode, BLUE_S32 UpdateMethod, BLUE_S32 MemoryFormat, BLUE_U32* pBytesPerLine);

#if defined(__cplusplus)
} // extern "C"
#endif

/* Global function-pointer definitions. Exactly one translation unit per binary
   must define IMPLEMENTATION_BLUEVELVETC_FUNC_PTR before including this header;
   the pointers are filled in by LoadFunctionPointers_BlueVelvetC() below. */
#ifdef IMPLEMENTATION_BLUEVELVETC_FUNC_PTR
pFunc_bfcGetVersion bfcGetVersion;
pFunc_bfcGetVersionW bfcGetVersionW;
pFunc_bfcFactory bfcFactory;
pFunc_bfcDestroy bfcDestroy;
pFunc_bfcEnumerate bfcEnumerate;
pFunc_bfcQueryCardType bfcQueryCardType;
pFunc_bfcAttach bfcAttach;
pFunc_bfcSetMultiLinkMode bfcSetMultiLinkMode;
pFunc_bfcQueryMultiLinkMode bfcQueryMultiLinkMode;
pFunc_bfcDetach bfcDetach;
pFunc_bfcQueryCardProperty32 bfcQueryCardProperty32;
pFunc_bfcQueryCardProperty64 bfcQueryCardProperty64;
pFunc_bfcSetCardProperty32 bfcSetCardProperty32;
pFunc_bfcSetCardProperty64 bfcSetCardProperty64;
pFunc_bfcGetCardSerialNumber bfcGetCardSerialNumber;
pFunc_bfcGetCardFwVersion bfcGetCardFwVersion;
pFunc_bfcGetCardHwFwVersions bfcGetCardHwFwVersions;
#if defined(_WIN32)
pFunc_bfcWaitVideoSyncAsync bfcWaitVideoSyncAsync;
#endif
pFunc_bfcWaitVideoSync bfcWaitVideoSync;
pFunc_bfcWaitVideoInputSync bfcWaitVideoInputSync;
pFunc_bfcWaitVideoOutputSync bfcWaitVideoOutputSync;
pFunc_bfcGetVideoOutputCurrentFieldCount bfcGetVideoOutputCurrentFieldCount;
pFunc_bfcGetVideoInputCurrentFieldCount bfcGetVideoInputCurrentFieldCount;
pFunc_bfcVideoCaptureStart bfcVideoCaptureStart;
pFunc_bfcVideoCaptureStop bfcVideoCaptureStop;
pFunc_bfcVideoPlaybackStart bfcVideoPlaybackStart;
pFunc_bfcVideoPlaybackStop bfcVideoPlaybackStop;
pFunc_bfcVideoPlaybackAllocate bfcVideoPlaybackAllocate;
pFunc_bfcVideoPlaybackPresent bfcVideoPlaybackPresent;
pFunc_bfcVideoPlaybackRelease bfcVideoPlaybackRelease;
pFunc_bfcGetCaptureVideoFrameInfoEx bfcGetCaptureVideoFrameInfoEx;
pFunc_bfcRenderBufferCapture bfcRenderBufferCapture;
pFunc_bfcRenderBufferUpdate bfcRenderBufferUpdate;
pFunc_bfcGetRenderBufferCount bfcGetRenderBufferCount;
pFunc_bfcEncodeHancFrameEx bfcEncodeHancFrameEx;
pFunc_bfcEncodeHancFrameWithUCZ bfcEncodeHancFrameWithUCZ;
pFunc_bfcDecodeHancFrameEx bfcDecodeHancFrameEx;
#if defined(_WIN32)
pFunc_bfcSystemBufferReadAsync bfcSystemBufferReadAsync;
pFunc_bfcSystemBufferWriteAsync bfcSystemBufferWriteAsync;
#elif defined(__linux__)
pFunc_bfcSystemBufferRead bfcSystemBufferRead;
pFunc_bfcSystemBufferWrite bfcSystemBufferWrite;
#endif
pFunc_bfcSyncInfoCreate bfcSyncInfoCreate;
pFunc_bfcSyncInfoDelete bfcSyncInfoDelete;
pFunc_bfcSyncInfoWait bfcSyncInfoWait;
pFunc_bfcSyncInfoWaitWithSyncOptions bfcSyncInfoWaitWithSyncOptions;
pFunc_bfcDmaReadFromCardAsync bfcDmaReadFromCardAsync;
pFunc_bfcDmaWriteToCardAsync bfcDmaWriteToCardAsync;
pFunc_bfcSerialPortWaitForInputData bfcSerialPortWaitForInputData;
pFunc_bfcSerialPortRead bfcSerialPortRead;
pFunc_bfcSerialPortWrite bfcSerialPortWrite;
#if defined(_WIN32)
pFunc_bfcGetReferenceClockPhaseSettings bfcGetReferenceClockPhaseSettings;
pFunc_bfcLoadOutputLUT1D bfcLoadOutputLUT1D;
#endif
pFunc_bfcGetVideoWidth bfcGetVideoWidth;
pFunc_bfcGetVideoHeight bfcGetVideoHeight;
pFunc_bfcGetVideoBytesPerLineV2 bfcGetVideoBytesPerLineV2;
pFunc_bfcGetVideoBytesPerFrame bfcGetVideoBytesPerFrame;
pFunc_bfcGetVideoBytesPerFrameGolden bfcGetVideoBytesPerFrameGolden;
pFunc_bfcGetVideoInfo bfcGetVideoInfo;
pFunc_bfcGetVancWidth bfcGetVancWidth;
pFunc_bfcGetVancHeight bfcGetVancHeight;
pFunc_bfcGetVancBytesPerLineV2 bfcGetVancBytesPerLineV2;
pFunc_bfcGetVancBytesPerFrame bfcGetVancBytesPerFrame;
pFunc_bfcGetVancInfo bfcGetVancInfo;
pFunc_bfcGetBytesForGroupPixels bfcGetBytesForGroupPixels;
#if defined(_WIN32)
pFunc_bfcGetWindowsDriverHandle bfcGetWindowsDriverHandle;
pFunc_bfcSetDynamicMemoryFormatChange bfcSetDynamicMemoryFormatChange;
#elif defined(__linux__)
pFunc_bfcGetFileHandle bfcGetFileHandle;
#endif
/* BlueVelvetC utils functions */
pFunc_bfAlloc bfAlloc;
pFunc_bfFree bfFree;
pFunc_bfcUtilsGetStringForCardType bfcUtilsGetStringForCardType;
pFunc_bfcUtilsGetWStringForCardType bfcUtilsGetWStringForCardType;
pFunc_bfcUtilsGetStringForBlueProductId bfcUtilsGetStringForBlueProductId;
pFunc_bfcUtilsGetWStringForBlueProductId bfcUtilsGetWStringForBlueProductId;
pFunc_bfcUtilsGetStringForVideoMode bfcUtilsGetStringForVideoMode;
pFunc_bfcUtilsGetWStringForVideoMode bfcUtilsGetWStringForVideoMode;
pFunc_bfcUtilsGetStringForMemoryFormat bfcUtilsGetStringForMemoryFormat;
pFunc_bfcUtilsGetWStringForMemoryFormat bfcUtilsGetWStringForMemoryFormat;
pFunc_bfcUtilsGetStringForUpdateFormat bfcUtilsGetStringForUpdateFormat;
pFunc_bfcUtilsGetWStringForUpdateFormat bfcUtilsGetWStringForUpdateFormat;
pFunc_bfcUtilsGetStringForVideoEngine bfcUtilsGetStringForVideoEngine;
pFunc_bfcUtilsGetWStringForVideoEngine bfcUtilsGetWStringForVideoEngine; pFunc_bfcUtilsGetStringForMr2Node bfcUtilsGetStringForMr2Node; pFunc_bfcUtilsGetWStringForMr2Node bfcUtilsGetWStringForMr2Node; pFunc_bfcUtilsGetMR2Routing bfcUtilsGetMR2Routing; pFunc_bfcUtilsSetMR2Routing bfcUtilsSetMR2Routing; pFunc_bfcUtilsGetAudioOutputRouting bfcUtilsGetAudioOutputRouting; pFunc_bfcUtilsSetAudioOutputRouting bfcUtilsSetAudioOutputRouting; pFunc_bfcUtilsIsVideoModeProgressive bfcUtilsIsVideoModeProgressive; pFunc_bfcUtilsIsVideoMode1001Framerate bfcUtilsIsVideoMode1001Framerate; pFunc_bfcUtilsGetFpsForVideoMode bfcUtilsGetFpsForVideoMode; pFunc_bfcUtilsGetVideoModeExtForFrameInfo bfcUtilsGetVideoModeExtForFrameInfo; pFunc_bfcUtilsGetFrameInfoForVideoModeExt bfcUtilsGetFrameInfoForVideoModeExt; pFunc_bfcUtilsGetAudioSamplesPerFrame bfcUtilsGetAudioSamplesPerFrame; pFunc_bfcUtilsInitVancBuffer bfcUtilsInitVancBuffer; pFunc_bfcUtilsVancPktExtract bfcUtilsVancPktExtract; pFunc_bfcUtilsVancPktInsert bfcUtilsVancPktInsert; pFunc_bfcUtilsDecodeEia708bPkt bfcUtilsDecodeEia708bPkt; /* deprecated start */ pFunc_bfcUtilsGetVideoModeForFrameInfo bfcUtilsGetVideoModeForFrameInfo; pFunc_bfcUtilsGetFrameInfoForVideoMode bfcUtilsGetFrameInfoForVideoMode; pFunc_bfcGetVideoBytesPerLine bfcGetVideoBytesPerLine; pFunc_bfcGetVancBytesPerLine bfcGetVancBytesPerLine; /* deprecated end */ bool LoadFunctionPointers_BlueVelvetC() { bool bAllFunctionsAvailable = false; #if defined(_WIN32) #if defined(_WIN64) #if defined(_DEBUG) HMODULE hLib = LoadLibraryExA("BlueVelvetC64_d.dll", NULL, 0); #else HMODULE hLib = LoadLibraryExA("BlueVelvetC64.dll", NULL, 0); #endif #else #if defined(_DEBUG) HMODULE hLib = LoadLibraryExA("BlueVelvetC_d.dll", NULL, 0); #else HMODULE hLib = LoadLibraryExA("BlueVelvetC.dll", NULL, 0); #endif #endif #endif #if defined(__linux__) void* hLib = dlopen("libBlueVelvetC64.so", RTLD_LAZY | RTLD_GLOBAL); #endif if(hLib) { bAllFunctionsAvailable = true; do { 
GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVersion); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVersionW); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcFactory); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcDestroy); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcEnumerate); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcQueryCardType); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcAttach); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSetMultiLinkMode); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcQueryMultiLinkMode); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcDetach); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcQueryCardProperty32); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcQueryCardProperty64); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSetCardProperty32); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSetCardProperty64); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetCardSerialNumber); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetCardFwVersion); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetCardHwFwVersions); #if defined(_WIN32) GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcWaitVideoSyncAsync); #endif GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcWaitVideoSync); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcWaitVideoInputSync); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcWaitVideoOutputSync); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoOutputCurrentFieldCount); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoInputCurrentFieldCount); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcVideoCaptureStart); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcVideoCaptureStop); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcVideoPlaybackStart); GET_PROCADDR_FOR_FUNC(hLib, 
&bAllFunctionsAvailable, bfcVideoPlaybackStop); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcVideoPlaybackAllocate); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcVideoPlaybackPresent); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcVideoPlaybackRelease); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetCaptureVideoFrameInfoEx); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcRenderBufferCapture); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcRenderBufferUpdate); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetRenderBufferCount); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcEncodeHancFrameEx); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcEncodeHancFrameWithUCZ); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcDecodeHancFrameEx); #if defined(_WIN32) GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSystemBufferReadAsync); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSystemBufferWriteAsync); #elif defined(__linux__) GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSystemBufferRead); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSystemBufferWrite); #endif GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSyncInfoCreate); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSyncInfoDelete); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSyncInfoWait); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSyncInfoWaitWithSyncOptions); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcDmaReadFromCardAsync); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcDmaWriteToCardAsync); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSerialPortWaitForInputData); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSerialPortRead); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSerialPortWrite); #if defined(_WIN32) GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, 
bfcGetReferenceClockPhaseSettings); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcLoadOutputLUT1D); #endif GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoWidth); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoHeight); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoBytesPerLineV2); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoBytesPerFrame); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoBytesPerFrameGolden); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoInfo); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVancWidth); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVancHeight); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVancBytesPerLineV2); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVancBytesPerFrame); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVancInfo); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetBytesForGroupPixels); #if defined(_WIN32) GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetWindowsDriverHandle); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcSetDynamicMemoryFormatChange); #elif defined(__linux__) GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetFileHandle); #endif GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfAlloc); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfFree); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetStringForCardType); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForCardType); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetStringForBlueProductId); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForBlueProductId); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetStringForVideoMode); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForVideoMode); GET_PROCADDR_FOR_FUNC(hLib, 
&bAllFunctionsAvailable, bfcUtilsGetStringForMemoryFormat); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForMemoryFormat); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetStringForUpdateFormat); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForUpdateFormat); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetStringForVideoEngine); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForVideoEngine); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetStringForMr2Node); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetWStringForMr2Node); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetMR2Routing); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsSetMR2Routing); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetAudioOutputRouting); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsSetAudioOutputRouting); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsIsVideoModeProgressive); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsIsVideoMode1001Framerate); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetFpsForVideoMode); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetVideoModeExtForFrameInfo); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetFrameInfoForVideoModeExt); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetAudioSamplesPerFrame); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsInitVancBuffer); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsVancPktExtract); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsVancPktInsert); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsDecodeEia708bPkt); /* deprecated start */ GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcUtilsGetVideoModeForFrameInfo); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, 
bfcUtilsGetFrameInfoForVideoMode);
        // Deprecated entry points are still resolved so older call sites keep working.
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVideoBytesPerLine);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcGetVancBytesPerLine);
        /* deprecated end */
        } while(0);
    }
    // True only when every GET_PROCADDR_FOR_FUNC above resolved its symbol.
    return bAllFunctionsAvailable;
}
#endif /* IMPLEMENTATION_BLUEVELVETC_FUNC_PTR */

// Opaque handle returned by bfcConversionFactory and consumed by all conversion calls.
typedef void* BFC_CONVERSION_HANDLE;

/* FUNCTION PROTOTYPES FOR BLUEVELVETC CONVERSION FUNCTIONS */
#if defined(__cplusplus)
extern "C" {
#endif

// Lifetime management for a conversion context.
typedef BFC_CONVERSION_HANDLE(__cdecl* pFunc_bfcConversionFactory)();
typedef BLUE_VOID(__cdecl* pFunc_bfcConversionDestroy)(BFC_CONVERSION_HANDLE pHandle);

// Worker-thread control for the conversion engine.
typedef BLUE_S32(__cdecl* pFunc_bfcConversionGetAvailableThreadCount)(BFC_CONVERSION_HANDLE pHandle, BLUE_S32* pMaxThreadCount, BLUE_U32* pCurrentThreadCount);
typedef BLUE_S32(__cdecl* pFunc_bfcConversionSetThreadCountLimit)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 MaxThreadCount);

// Square-division -> two-sample-interleave (TSI) converters, one per pixel format.
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_SquareDivisionToTsi_2VUY)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_SquareDivisionToTsi_ARGB32)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_SquareDivisionToTsi_BGR48)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_SquareDivisionToTsi_V210)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);

// TSI -> square-division converters (the reverse direction).
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_TsiToSquareDivision_2VUY)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_TsiToSquareDivision_V210)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_TsiToSquareDivision_RGB)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);

// 4K V210 quadrant realignment (SSE2-accelerated; whole-frame, no Width/Height).
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_Single4KV210ToAligned4KV210Quadrants_SSE2)(BFC_CONVERSION_HANDLE pHandle, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);

// Floating-point RGBA -> 16 bit integer converters.
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_HalfFloatRGBATo16bitRGBA)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_HalfFloatRGBATo16bitRGB)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_FloatRGBATo16bitRGBA)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);
typedef BLUE_S32(__cdecl* pFunc_bfcConvert_FloatRGBATo16bitRGB)(BFC_CONVERSION_HANDLE pHandle, BLUE_U32 Width, BLUE_U32 Height, BLUE_VOID* pSrcBuffer, BLUE_VOID* pDstBuffer);

#if defined(__cplusplus)
} // extern "C"
#endif

#ifdef IMPLEMENTATION_BLUEVELVETC_CONVERSION_FUNC_PTR
// Global function-pointer instances, populated by LoadFunctionPointers_BlueConversion().
pFunc_bfcConversionFactory bfcConversionFactory;
pFunc_bfcConversionDestroy bfcConversionDestroy;
pFunc_bfcConversionGetAvailableThreadCount bfcConversionGetAvailableThreadCount;
pFunc_bfcConversionSetThreadCountLimit bfcConversionSetThreadCountLimit;
pFunc_bfcConvert_SquareDivisionToTsi_2VUY bfcConvert_SquareDivisionToTsi_2VUY;
pFunc_bfcConvert_SquareDivisionToTsi_ARGB32 bfcConvert_SquareDivisionToTsi_ARGB32;
pFunc_bfcConvert_SquareDivisionToTsi_BGR48 bfcConvert_SquareDivisionToTsi_BGR48;
pFunc_bfcConvert_SquareDivisionToTsi_V210 bfcConvert_SquareDivisionToTsi_V210;
pFunc_bfcConvert_TsiToSquareDivision_2VUY bfcConvert_TsiToSquareDivision_2VUY;
pFunc_bfcConvert_TsiToSquareDivision_V210 bfcConvert_TsiToSquareDivision_V210;
pFunc_bfcConvert_TsiToSquareDivision_RGB bfcConvert_TsiToSquareDivision_RGB;
pFunc_bfcConvert_Single4KV210ToAligned4KV210Quadrants_SSE2 bfcConvert_Single4KV210ToAligned4KV210Quadrants_SSE2;
pFunc_bfcConvert_HalfFloatRGBATo16bitRGBA bfcConvert_HalfFloatRGBATo16bitRGBA;
pFunc_bfcConvert_HalfFloatRGBATo16bitRGB bfcConvert_HalfFloatRGBATo16bitRGB;
pFunc_bfcConvert_FloatRGBATo16bitRGBA bfcConvert_FloatRGBATo16bitRGBA;
pFunc_bfcConvert_FloatRGBATo16bitRGB bfcConvert_FloatRGBATo16bitRGB;

// Loads the BlueVelvetC DLL matching the build (32/64 bit, debug/release) and
// resolves every conversion entry point above. Returns true only if ALL symbols
// were found; on failure the globals that did resolve are still populated.
bool LoadFunctionPointers_BlueConversion()
{
    bool bAllFunctionsAvailable = false;
    // NOTE(review): hLib is only declared inside the _WIN32 branch but is used
    // unconditionally below - confirm this header is only compiled on Windows,
    // or that a non-Windows loader path exists elsewhere.
#if defined(_WIN32)
#if defined(_WIN64)
#if defined(_DEBUG)
    HMODULE hLib = LoadLibraryExA("BlueVelvetC64_d.dll", NULL, 0);
#else
    HMODULE hLib = LoadLibraryExA("BlueVelvetC64.dll", NULL, 0);
#endif
#else
#if defined(_DEBUG)
    HMODULE hLib = LoadLibraryExA("BlueVelvetC_d.dll", NULL, 0);
#else
    HMODULE hLib = LoadLibraryExA("BlueVelvetC.dll", NULL, 0);
#endif
#endif
#endif
    if(hLib)
    {
        // Assume success; GET_PROCADDR_FOR_FUNC clears the flag on any miss.
        bAllFunctionsAvailable = true;
        do
        {
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConversionFactory);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConversionDestroy);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConversionGetAvailableThreadCount);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConversionSetThreadCountLimit);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_SquareDivisionToTsi_2VUY);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_SquareDivisionToTsi_ARGB32);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_SquareDivisionToTsi_BGR48);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_SquareDivisionToTsi_V210);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_TsiToSquareDivision_2VUY);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_TsiToSquareDivision_V210);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_TsiToSquareDivision_RGB);
        GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_Single4KV210ToAligned4KV210Quadrants_SSE2);
GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_HalfFloatRGBATo16bitRGBA); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_HalfFloatRGBATo16bitRGB); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_FloatRGBATo16bitRGBA); GET_PROCADDR_FOR_FUNC(hLib, &bAllFunctionsAvailable, bfcConvert_FloatRGBATo16bitRGB); } while(0); } return bAllFunctionsAvailable; } #endif /* IMPLEMENTATION_BLUEVELVETC_CONVERSION_FUNC_PTR */ #endif /* HG_BLUEVELVETC_FUNC_PTR */ ================================================ FILE: src/modules/bluefish/producer/bluefish_producer.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author:Robert Nagy, ronag89@gmail.com * satchit puthenveetil * James Wise, james.wise@bluefish444.com */ #include "../StdAfx.h" #include "bluefish_producer.h" #include "../util/blue_velvet.h" #include "../util/memory.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern "C" { #include #include #include #include } using namespace caspar::ffmpeg; namespace caspar { namespace bluefish { static const size_t MAX_DECODED_AUDIO_BUFFER_SIZE = 2002 * 16; // max 2002 samples, 16 channels. 
unsigned int get_bluesdk_input_videochannel_from_streamid(int stream_id) { /*This function would return the corresponding EBlueVideoChannel from them stream_id argument */ switch (stream_id) { case 1: return BLUE_VIDEO_INPUT_CHANNEL_1; case 2: return BLUE_VIDEO_INPUT_CHANNEL_2; case 3: return BLUE_VIDEO_INPUT_CHANNEL_3; case 4: return BLUE_VIDEO_INPUT_CHANNEL_4; case 5: return BLUE_VIDEO_INPUT_CHANNEL_5; case 6: return BLUE_VIDEO_INPUT_CHANNEL_6; case 7: return BLUE_VIDEO_INPUT_CHANNEL_7; case 8: return BLUE_VIDEO_INPUT_CHANNEL_8; default: return BLUE_VIDEO_INPUT_CHANNEL_1; } } unsigned int extract_pcm_data_from_hanc(bvc_wrapper& blue, struct hanc_decode_struct* decode_struct, unsigned int card_type, unsigned int* src_hanc_buffer, unsigned int* pcm_audio_buffer, int audio_channels_to_extract) { decode_struct->audio_pcm_data_ptr = pcm_audio_buffer; decode_struct->type_of_sample_required = 0; // No flags indicates default of 32bit samples. decode_struct->max_expected_audio_sample_count = 2002; if (audio_channels_to_extract == 2) decode_struct->audio_ch_required_mask = MONO_CHANNEL_1 | MONO_CHANNEL_2; else if (audio_channels_to_extract == 8) decode_struct->audio_ch_required_mask = MONO_CHANNEL_1 | MONO_CHANNEL_2 | MONO_CHANNEL_3 | MONO_CHANNEL_4 | MONO_CHANNEL_5 | MONO_CHANNEL_6 | MONO_CHANNEL_7 | MONO_CHANNEL_8; else if (audio_channels_to_extract == 16) decode_struct->audio_ch_required_mask = MONO_CHANNEL_1 | MONO_CHANNEL_2 | MONO_CHANNEL_3 | MONO_CHANNEL_4 | MONO_CHANNEL_5 | MONO_CHANNEL_6 | MONO_CHANNEL_7 | MONO_CHANNEL_8 | MONO_CHANNEL_11 | MONO_CHANNEL_12 | MONO_CHANNEL_13 | MONO_CHANNEL_14 | MONO_CHANNEL_15 | MONO_CHANNEL_16 | MONO_CHANNEL_17 | MONO_CHANNEL_18; blue.decode_hanc_frame(card_type, src_hanc_buffer, decode_struct); return decode_struct->no_audio_samples; } bool is_video_format_interlaced(const core::video_format format) { bool interlaced = false; if (format == core::video_format::x1080i5000 || format == core::video_format::x1080i5994 || format == 
core::video_format::x1080i6000 || format == core::video_format::pal || format == core::video_format::ntsc)
        interlaced = true;
    return interlaced;
}

// True for the Bluefish video modes that are interlaced (PAL, NTSC, 1080i).
bool is_bluefish_format_interlaced(unsigned int vid_mode)
{
    bool interlaced = false;
    if (vid_mode == VID_FMT_EXT_PAL || vid_mode == VID_FMT_EXT_NTSC || vid_mode == VID_FMT_EXT_1080I_5000 ||
        vid_mode == VID_FMT_EXT_1080I_5994 || vid_mode == VID_FMT_EXT_1080I_6000)
        interlaced = true;
    return interlaced;
}

// Captures SDI video+embedded audio from a Bluefish card on a dedicated thread
// and hands completed frames to the channel through a small bounded queue.
struct bluefish_producer
{
    const int                           device_index_;   // 1-based card index
    const int                           stream_index_;   // 1-based SDI input on the card
    spl::shared_ptr blue_;                               // SDK wrapper for this card
    core::monitor::state                state_;          // OSC/monitor state, guarded by state_mutex_
    mutable std::mutex                  state_mutex_;
    spl::shared_ptr graph_;                              // diagnostics graph
    caspar::timer                       tick_timer_;
    caspar::timer                       processing_benchmark_timer_;
    std::vector                         audio_cadence_;  // samples-per-frame cadence for this mode
    const std::wstring                  model_name_ = std::wstring(L"bluefish");
    std::atomic_bool                    process_capture_ = true; // cleared by the destructor to stop the thread
    std::shared_ptr                     capture_thread_;
    std::array                          reserved_frames_; // ring of DMA buffers; front() is the one being consumed
    core::video_format_desc             format_desc_;         // format of the INPUT signal
    core::video_format_desc             channel_format_desc_; // format of the consuming channel
    unsigned int                        mode_;                // detected card video mode (VID_FMT_EXT_*)
    spl::shared_ptr                     frame_factory_;
    const core::video_format_repository format_repository_;
    std::vector                         conversion_buffer_;   // scratch for 2SI -> SQ conversion (uhd_mode_ == 2)
    tbb::concurrent_bounded_queue       frame_buffer_;        // capacity 2; producer/consumer hand-off
    std::exception_ptr                  exception_;           // rethrown into get_frame()
    // Capture ring bookkeeping: schedule -> capturing -> DMA-ready (max() == "none yet").
    ULONG                               schedule_capture_frame_id_   = 0;
    ULONG                               capturing_frame_id_          = std::numeric_limits::max();
    ULONG                               dma_ready_captured_frame_id_ = std::numeric_limits::max();
    struct hanc_decode_struct           hanc_decode_struct_;  // reused between HANC decodes
    std::vector                         decoded_audio_bytes_; // PCM scratch, MAX_DECODED_AUDIO_BUFFER_SIZE bytes
    unsigned int                        memory_format_on_card_;
    unsigned int                        sync_format_;          // UPD_FMT_FRAME, or UPD_FMT_FIELD for interlaced-to-interlaced
    bool                                first_frame_ = true;   // in field sync mode: toggles between the two fields
    int                                 frames_captured = 0;
    uint64_t                            capture_ts      = 0;   // card BTC timestamp used as PTS
    int                                 remainaing_audio_samples_ = 0; // samples carried to the second field
    int                                 uhd_mode_ = 0; // 0 -> Do Not Allow BVC-ML, 1 -> Auto ( ie. Native buffers will do default mode, or BVC will do
                                                       // SQ.), 2 -> Force 2SI, 3 -> Force SQ

    bluefish_producer(const bluefish_producer&) = delete;
    bluefish_producer& operator=(const bluefish_producer&) = delete;

    bluefish_producer(const core::video_format_desc&       format_desc,
                      int                                  device_index,
                      int                                  stream_index,
                      int                                  uhd_mode,
                      const spl::shared_ptr&               frame_factory,
                      const core::video_format_repository& format_repository)
        : device_index_(device_index)
        , stream_index_(stream_index)
        , blue_(create_blue(device_index))
        , model_name_(get_card_desc(*blue_, device_index))
        , channel_format_desc_(format_desc)
        , frame_factory_(frame_factory)
        , format_repository_(format_repository)
        , memory_format_on_card_(MEM_FMT_RGB)
        , sync_format_(UPD_FMT_FRAME)
        , uhd_mode_(uhd_mode)
    {
        mode_ = static_cast(VID_FMT_EXT_INVALID);
        frame_buffer_.set_capacity(2);

        graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
        graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
        graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
        graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
        graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));
        graph_->set_text(print());
        diagnostics::register_graph(graph_);

        // HANC decode state is plain C; zero it before first use.
        std::memset(&hanc_decode_struct_, 0, sizeof(hanc_decode_struct_));
        hanc_decode_struct_.audio_input_source = AUDIO_INPUT_SOURCE_EMB;
        decoded_audio_bytes_.resize(MAX_DECODED_AUDIO_BUFFER_SIZE);

        // Configure input connector and routing
        unsigned int bf_channel = get_bluesdk_input_videochannel_from_streamid(stream_index);
        if (blue_->set_card_property32(DEFAULT_VIDEO_INPUT_CHANNEL, bf_channel))
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set input channel."));
        if (configure_input_routing(bf_channel, true, uhd_mode_)) // to do: tofix pass rgba vs use actual dual link!!
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set input routing."));

        // Look for a valid input video mode
        blue_->get_card_property32(VIDEO_MODE_EXT_INPUT, &mode_);
        if (mode_ != VID_FMT_EXT_INVALID) {
            if (uhd_mode_ == 1 || uhd_mode_ == 2 || uhd_mode_ == 3) {
                // Enable BVC multi-link; the mode is re-read afterwards since
                // multilink can change the reported input format.
                blue_->set_multilink(device_index_, bf_channel);
            }
            blue_->get_card_property32(VIDEO_MODE_EXT_INPUT, &mode_);

            format_desc_ = get_format_desc(format_repository, *blue_, static_cast(mode_), static_cast(memory_format_on_card_));
            audio_cadence_ = format_desc_.audio_cadence;
            if (format_desc_.size == 0) {
                // Fall back to width * height * 3 bytes (RGB, 8 bit per component).
                CASPAR_LOG(warning) << print()
                                    << TEXT("Problem getting the size of video buffer from SDK, calculating instead.");
                format_desc_.size = format_desc_.width * format_desc_.height * 3;
            }

            // Select input memory format
            if (blue_->set_card_property32(VIDEO_INPUT_MEMORY_FORMAT, memory_format_on_card_))
                CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set input memory format."));

            // Select image orientation (non-fatal if unsupported).
            if (blue_->set_card_property32(VIDEO_INPUTFRAMESTORE_IMAGE_ORIENTATION, ImageOrientation_Normal))
                CASPAR_LOG(warning) << print() << L" Failed to set image orientation to normal.";

            // Select data range -
            // todo: confirm if we want to pass CGR or SMPTE range to caspar!!!!
            if (blue_->set_card_property32(EPOCH_VIDEO_INPUT_RGB_DATA_RANGE, CGR_RANGE))
                CASPAR_LOG(warning) << print() << L" Failed to set RGB data range to CGR.";

            // If we have an interlaced input AND and interlaced project then we need to handle the incoming frames
            // differently and sync on fields, instead of frame barriers
            if (is_video_format_interlaced(format_desc.format) && is_bluefish_format_interlaced(mode_))
                sync_format_ = UPD_FMT_FIELD;

            // Select Update Mode for input
            // HERE: this *might need to be sync format, but currently we will leave as Frame UPD
            if (blue_->set_card_property32(VIDEO_INPUT_UPDATE_TYPE, UPD_FMT_FRAME))
                CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to set input update type."));

            // Generate dma buffers
            int n = 0;
            boost::range::generate(reserved_frames_, [&] { return std::make_shared(static_cast(format_desc_.size), n++); });

            // Allocate a single UHD buffer for conversion Buffer if we need it! .
            if (uhd_mode_ == 2) {
                conversion_buffer_.resize(static_cast(format_desc_.size));
            }

            // Set Video Engine (non-fatal if unsupported).
            if (blue_->set_card_property32(VIDEO_INPUT_ENGINE, VIDEO_ENGINE_FRAMESTORE))
                CASPAR_LOG(warning) << print() << TEXT(" Failed to set video engine.");

            // Start the capture loop last, once the card is fully configured.
            capture_thread_ = std::make_shared([this] { capture_thread_actual(); });
        }
    }

    // Programs the card's MR2 routing matrix so that the selected SDI input
    // connector(s) feed the matching memory interface. Returns the SDK status
    // (0 on success, non-zero on failure), which the constructor treats as fatal.
    int configure_input_routing(const unsigned int bf_channel, bool dual_link, int uhd_mode)
    {
        unsigned int routing_value   = 0;
        unsigned int routing_value_b = 0;

        /*This function would return the corresponding EBlueVideoChannel from the device output channel*/
        switch (bf_channel) {
        case BLUE_VIDEO_INPUT_CHANNEL_1:
            if (dual_link) {
                routing_value = EPOCH_SET_ROUTING(
                    EPOCH_SRC_SDI_INPUT_1, EPOCH_DEST_INPUT_MEM_INTERFACE_CH1, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1);
                routing_value_b = EPOCH_SET_ROUTING(
                    EPOCH_SRC_SDI_INPUT_2, EPOCH_DEST_INPUT_MEM_INTERFACE_CH1, BLUE_CONNECTOR_PROP_DUALLINK_LINK_2);
            } else
                routing_value = EPOCH_SET_ROUTING(
                    EPOCH_SRC_SDI_INPUT_1, EPOCH_DEST_INPUT_MEM_INTERFACE_CH1,
BLUE_CONNECTOR_PROP_SINGLE_LINK); break; case BLUE_VIDEO_INPUT_CHANNEL_2: if (dual_link) { routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_2, EPOCH_DEST_INPUT_MEM_INTERFACE_CH2, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1); routing_value_b = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_3, EPOCH_DEST_INPUT_MEM_INTERFACE_CH2, BLUE_CONNECTOR_PROP_DUALLINK_LINK_2); } else routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_2, EPOCH_DEST_INPUT_MEM_INTERFACE_CH2, BLUE_CONNECTOR_PROP_SINGLE_LINK); break; case BLUE_VIDEO_INPUT_CHANNEL_3: if (dual_link) { routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_3, EPOCH_DEST_INPUT_MEM_INTERFACE_CH3, BLUE_CONNECTOR_PROP_DUALLINK_LINK_1); routing_value_b = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_4, EPOCH_DEST_INPUT_MEM_INTERFACE_CH3, BLUE_CONNECTOR_PROP_DUALLINK_LINK_2); } else routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_3, EPOCH_DEST_INPUT_MEM_INTERFACE_CH3, BLUE_CONNECTOR_PROP_SINGLE_LINK); break; case BLUE_VIDEO_INPUT_CHANNEL_4: routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_4, EPOCH_DEST_INPUT_MEM_INTERFACE_CH4, BLUE_CONNECTOR_PROP_SINGLE_LINK); break; default: routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_1, EPOCH_DEST_INPUT_MEM_INTERFACE_CH1, BLUE_CONNECTOR_PROP_SINGLE_LINK); break; } if (dual_link) { blue_->set_card_property32(MR2_ROUTING, routing_value); return blue_->set_card_property32(MR2_ROUTING, routing_value_b); } if (((uhd_mode == 1) || (uhd_mode == 2) || (uhd_mode == 3)) && ((bf_channel == BLUE_VIDEO_INPUT_CHANNEL_1) || (bf_channel == BLUE_VIDEO_INPUT_CHANNEL_5))) { // Configure the input routing for the first 4 input channels if (bf_channel == BLUE_VIDEO_INPUT_CHANNEL_1) { routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_1, EPOCH_DEST_INPUT_MEM_INTERFACE_CH1, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_2, EPOCH_DEST_INPUT_MEM_INTERFACE_CH2, BLUE_CONNECTOR_PROP_SINGLE_LINK); 
return blue_->set_card_property32(MR2_ROUTING, routing_value); routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_3, EPOCH_DEST_INPUT_MEM_INTERFACE_CH3, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_4, EPOCH_DEST_INPUT_MEM_INTERFACE_CH4, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); } else { routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_5, EPOCH_DEST_INPUT_MEM_INTERFACE_CH5, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_6, EPOCH_DEST_INPUT_MEM_INTERFACE_CH6, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_7, EPOCH_DEST_INPUT_MEM_INTERFACE_CH7, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); routing_value = EPOCH_SET_ROUTING( EPOCH_SRC_SDI_INPUT_8, EPOCH_DEST_INPUT_MEM_INTERFACE_CH8, BLUE_CONNECTOR_PROP_SINGLE_LINK); return blue_->set_card_property32(MR2_ROUTING, routing_value); } } else return blue_->set_card_property32(MR2_ROUTING, routing_value); } void schedule_capture() { if (sync_format_ == UPD_FMT_FRAME || (sync_format_ == UPD_FMT_FIELD && !first_frame_)) { blue_->render_buffer_capture(BlueBuffer_Image_HANC(schedule_capture_frame_id_)); dma_ready_captured_frame_id_ = capturing_frame_id_; capturing_frame_id_ = schedule_capture_frame_id_; schedule_capture_frame_id_ = (schedule_capture_frame_id_ + 1) % 4; } } void get_capture_time() { blue_->get_card_property64(BTC_TIMER, &capture_ts); } HRESULT process_data() { caspar::timer frame_timer; // Get info for source video mode unsigned int width = 0; unsigned int height = 0; unsigned int rate = 0; unsigned int is_1001 = 0; unsigned int is_progressive = 0; unsigned int image_size = 0; 
        blue_->get_frame_info_for_video_mode(mode_, &width, &height, &rate, &is_1001, &is_progressive);
        blue_->get_bytes_per_frame(static_cast(mode_), static_cast(memory_format_on_card_), static_cast(sync_format_), &image_size);
        // rate is an integer frame rate; 1001-style modes (e.g. 59.94) need the /1.001 correction.
        double fps = rate;
        if (is_1001 != 0u)
            fps = static_cast(rate) * 1000 / 1001;

        // Publish monitor state and graph values on every exit path, success or not.
        CASPAR_SCOPE_EXIT
        {
            {
                std::lock_guard lock(state_mutex_);
                state_["file/name"]              = model_name_;
                state_["file/path"]              = device_index_;
                state_["file/video/width"]       = width;
                state_["file/video/height"]      = height;
                state_["file/audio/sample-rate"] = format_desc_.audio_sample_rate;
                state_["file/audio/channels"]    = format_desc_.audio_channels;
                state_["file/fps"]               = fps;
                state_["profiler/time"]          = {frame_timer.elapsed(), fps};
                state_["buffer"]                 = {frame_buffer_.size(), frame_buffer_.capacity()};
            }
            graph_->set_value("frame-time", frame_timer.elapsed() * fps / format_desc_.field_count * 0.5);
            graph_->set_value("output-buffer", static_cast(frame_buffer_.size()) / static_cast(frame_buffer_.capacity()));
        };

        try {
            graph_->set_value("tick-time", tick_timer_.elapsed() * fps * 0.5);
            tick_timer_.restart();
            {
                auto src_video = alloc_frame();
                auto src_audio = alloc_frame();

                // video: the card delivers packed RGB (3 bytes per pixel, see linesize below).
                src_video->format           = AV_PIX_FMT_RGB24;
                src_video->width            = width;
                src_video->height           = height;
                src_video->interlaced_frame = !is_progressive;
                src_video->top_field_first  = height != 486; // NTSC (486 active lines) is bottom-field-first
                src_video->key_frame        = 1;
                // src_video->display_picture_number = frames_captured;
                src_video->pts = capture_ts;

                void* video_bytes = reserved_frames_.front()->image_data();
                if (reserved_frames_.front() && video_bytes) {
                    src_video->data[0]     = reinterpret_cast(reserved_frames_.front()->image_data());
                    src_video->linesize[0] = static_cast(width * 3); // image_size / height);
                }

                // Audio
                src_audio->format = AV_SAMPLE_FMT_S32;
                av_channel_layout_default(&src_audio->ch_layout, format_desc_.audio_channels);
                src_audio->sample_rate = format_desc_.audio_sample_rate;
                src_audio->nb_samples  = 0;
                int samples_decoded    = 0;

                // NOTE(review): unclear whether embedded audio arrives with the first
                // field or only with the second - confirm against the SDK docs.
                // In field mode the HANC buffer is decoded once, on the first field.
                if (sync_format_ == UPD_FMT_FRAME || (sync_format_ == UPD_FMT_FIELD && first_frame_)) {
                    void* audio_bytes = nullptr;
                    auto  hanc_buffer = reinterpret_cast(reserved_frames_.front()->hanc_data());
                    if (hanc_buffer) {
                        int card_type = CRD_INVALID;
                        blue_->query_card_type(&card_type, device_index_);
                        // no_extracted_pcm_samples counts MONO samples across all channels.
                        auto no_extracted_pcm_samples = extract_pcm_data_from_hanc(*blue_, &hanc_decode_struct_, card_type, reinterpret_cast(hanc_buffer), reinterpret_cast(&decoded_audio_bytes_[0]), format_desc_.audio_channels);

                        audio_bytes           = reinterpret_cast(&decoded_audio_bytes_[0]);
                        samples_decoded       = no_extracted_pcm_samples / format_desc_.audio_channels;
                        src_audio->nb_samples = samples_decoded;
                        src_audio->data[0]    = reinterpret_cast(audio_bytes);
                        src_audio->linesize[0] = src_audio->nb_samples * format_desc_.audio_channels * av_get_bytes_per_sample(static_cast(src_audio->format));
                        src_audio->pts = capture_ts;
                    }
                }

                if (sync_format_ == UPD_FMT_FIELD) {
                    // since we provide an entire frame for each field in interlaced modes, we need to adjust the
                    // src_audio: first field gets the first half of the samples, second field the remainder.
                    if (first_frame_) {
                        remainaing_audio_samples_ = src_audio->nb_samples - src_audio->nb_samples / 2;
                        src_audio->nb_samples     = src_audio->nb_samples / 2;
                    } else {
                        auto audio_bytes = reinterpret_cast(&decoded_audio_bytes_[0]);
                        if (audio_bytes) {
                            src_audio->nb_samples = remainaing_audio_samples_;
                            // NOTE(review): the offset is computed from the REMAINING sample
                            // count (4 bytes/sample * channels). For an odd total this skips one
                            // sample versus offsetting by the first field's consumed count - confirm.
                            int bytes_left        = remainaing_audio_samples_ * 4 * format_desc_.audio_channels;
                            src_audio->data[0]    = audio_bytes + bytes_left;
                            src_audio->linesize[0] = bytes_left;
                            remainaing_audio_samples_ = 0;
                        }
                    }
                }

                // NOTE(review): this guard reads `size() <= width*height*3` but the memcpy
                // below writes exactly width*height*3 bytes into conversion_buffer_ - a
                // strictly smaller buffer would overflow; the comparison looks inverted
                // (expected `>=`). It only holds today because the ctor sizes the buffer
                // to format_desc_.size - confirm and fix upstream.
                if (uhd_mode_ == 2 && conversion_buffer_.size() <= (width * height * 3)) {
                    // Do additional processing required to handle a 2SI input
                    memcpy(&conversion_buffer_[0], reserved_frames_.front()->image_data(), (width * height * 3));
                    blue_->convert_2si_to_sq(width, height, &conversion_buffer_[0], reserved_frames_.front()->image_data());
                }

                // pass to caspar
                auto frame = core::draw_frame(make_frame(this, *frame_factory_, src_video,
src_audio));
                // The queue holds at most 2 frames; if the channel is not consuming fast
                // enough, drop the oldest frame so capture never stalls the card.
                if (!frame_buffer_.try_push(frame)) {
                    core::draw_frame dummy;
                    frame_buffer_.try_pop(dummy);
                    frame_buffer_.try_push(frame);
                    graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
                    CASPAR_LOG(warning) << print() << TEXT(" ERROR dropped frame.");
                }
                // Advance the audio cadence once per full frame (i.e. on the second field
                // in field-synced mode).
                if (sync_format_ == UPD_FMT_FRAME || (sync_format_ == UPD_FMT_FIELD && !first_frame_))
                    boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
            }
        } catch (...) {
            // Store for rethrow in get_frame(); the capture loop keeps running.
            exception_ = std::current_exception();
            return E_FAIL;
        }

        return S_OK;
    }

    // DMAs the most recently completed capture buffer (image, and HANC when due)
    // from the card into reserved_frames_.front(). Returns false if the reserved
    // frame has no image storage; exceptions are stored in exception_.
    bool grab_frame_from_bluefishcard()
    {
        try {
            if (reserved_frames_.front()->image_data()) {
                if (sync_format_ == UPD_FMT_FIELD && first_frame_) {
                    // Field sync: the whole frame is DMA'd on the first field...
                    blue_->system_buffer_read(const_cast(reserved_frames_.front()->image_data()),
                                              static_cast(reserved_frames_.front()->image_size()),
                                              BlueImage_HANC_DMABuffer(dma_ready_captured_frame_id_, BLUE_DATA_FRAME),
                                              0);
                } else if (sync_format_ == UPD_FMT_FRAME) {
                    blue_->system_buffer_read(const_cast(reserved_frames_.front()->image_data()),
                                              static_cast(reserved_frames_.front()->image_size()),
                                              BlueImage_HANC_DMABuffer(dma_ready_captured_frame_id_, BLUE_DATA_IMAGE),
                                              0);
                }
            } else {
                CASPAR_LOG(warning) << print() << TEXT(" NO image data in reserved frames list.");
                return false;
            }

            // ...and the HANC (embedded audio) is DMA'd on the second field, or on every
            // frame in frame-synced mode.
            if (sync_format_ == UPD_FMT_FRAME || (sync_format_ == UPD_FMT_FIELD && !first_frame_)) {
                if (reserved_frames_.front()->hanc_data()) {
                    blue_->system_buffer_read(const_cast(reserved_frames_.front()->hanc_data()),
                                              static_cast(reserved_frames_.front()->hanc_size()),
                                              BlueImage_HANC_DMABuffer(dma_ready_captured_frame_id_, BLUE_DATA_HANC),
                                              0);
                    // CASPAR_LOG(warning) << print() << TEXT(" Hanc DMA Buf ID: ") << dma_ready_captured_frame_id_;
                }
            }
        } catch (...)
        {
            exception_ = std::current_exception();
        }
        return true;
    }

    // Capture loop, run on capture_thread_. Each iteration: schedule the next
    // capture, wait for the input sync, DMA the last completed buffer, and hand
    // the result to process_data(). Exits when process_capture_ is cleared.
    void capture_thread_actual()
    {
        ULONG        current_field_count        = 0;
        ULONG        last_field_count           = 0;
        ULONG        start_field_count          = 0;
        unsigned int current_input_video_signal = VID_FMT_EXT_INVALID;

        last_field_count  = current_field_count;
        start_field_count = current_field_count;
        // Align to a frame boundary before entering the loop.
        blue_->wait_video_input_sync(UPD_FMT_FRAME, ¤t_field_count);

        while (process_capture_) {
            // tell the card to capture another frame at the next interrupt
            schedule_capture();
            blue_->wait_video_input_sync((EUpdateMethod)sync_format_, ¤t_field_count);
            get_capture_time();

            // Each frame advances the field count by 2; a larger gap means we missed interrupts.
            if (last_field_count + 3 < current_field_count)
                CASPAR_LOG(warning) << L"Error: dropped " << (current_field_count - last_field_count - 2) / 2
                                    << L" frames" << L"Current " << current_field_count << L" Old "
                                    << last_field_count;
            last_field_count = current_field_count;

            blue_->get_card_property32(VIDEO_MODE_EXT_INPUT, ¤t_input_video_signal);
            if (current_input_video_signal != VID_FMT_EXT_INVALID &&
                dma_ready_captured_frame_id_ != std::numeric_limits::max()) {
                // DoneID is now what ScheduleID was at the last iteration when we called
                // render_buffer_capture(ScheduleID) we just checked if the video signal for the buffer "DoneID" was
                // valid while it was capturing so we can DMA the buffer DMA the frame from the card to our buffer
                if (grab_frame_from_bluefishcard())
                    process_data();
                processing_benchmark_timer_.restart();
            }

            // Move to the next reserved frame once per full frame (second field in field mode).
            if (sync_format_ == UPD_FMT_FRAME || (sync_format_ == UPD_FMT_FIELD && !first_frame_))
                boost::range::rotate(reserved_frames_, std::begin(reserved_frames_) + 1);

            frames_captured++;
            if (sync_format_ == UPD_FMT_FIELD)
                first_frame_ = !first_frame_;
        }
    }

    ~bluefish_producer()
    {
        try {
            process_capture_ = false;
            if (capture_thread_) {
                // Give the loop ~one frame (40 ms) to observe process_capture_ == false.
                // NOTE(review): Sleep() is the Win32 API - confirm a portable equivalent
                // is provided for the Linux build of this module.
                Sleep(41);
                capture_thread_->join();
            }
            blue_->detach();
        } catch (...)
        {
            CASPAR_LOG_CURRENT_EXCEPTION();
        }
    }

    // Pops the next captured frame for the channel; returns an empty frame (and a
    // "late-frame" diagnostic) when capture has not produced one yet. Rethrows any
    // exception raised on the capture thread.
    core::draw_frame get_frame(const core::video_field field)
    {
        if (exception_ != nullptr)
            std::rethrow_exception(exception_);

        // TODO - field
        core::draw_frame frame;
        if (!frame_buffer_.try_pop(frame)) {
            graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
        }
        return frame;
    }

    bool is_ready() { return !frame_buffer_.empty(); }

    std::wstring print() const
    {
        return model_name_ + L" [" + std::to_wstring(device_index_) + L"|" + format_desc_.name + L"]";
    }

    core::monitor::state state() const
    {
        std::lock_guard lock(state_mutex_);
        return state_;
    }
};

// Thin frame_producer facade: owns a bluefish_producer and marshals its
// construction/destruction onto a dedicated executor thread.
class bluefish_producer_proxy : public core::frame_producer
{
    std::unique_ptr producer_;
    const uint32_t  length_;
    executor        executor_;

  public:
    explicit bluefish_producer_proxy(const core::video_format_desc&       format_desc,
                                     const spl::shared_ptr&               frame_factory,
                                     const core::video_format_repository& format_repository,
                                     int                                  device_index,
                                     int                                  stream_index,
                                     int                                  uhd_mode,
                                     uint32_t                             length)
        : length_(length)
        , executor_(L"bluefish_producer[" + std::to_wstring(device_index) + L"]")
    {
        auto ctx = core::diagnostics::call_context::for_thread();
        // Construct on the executor so card initialisation never blocks the caller's thread.
        executor_.invoke([=, this] {
            core::diagnostics::call_context::for_thread() = ctx;
            producer_.reset(new bluefish_producer(
                format_desc, device_index, stream_index, uhd_mode, frame_factory, format_repository));
        });
    }

    ~bluefish_producer_proxy()
    {
        // Destroy on the same thread that created it.
        executor_.invoke([=, this] { producer_.reset(); });
    }

    core::monitor::state state() const override { return producer_->state(); }

    // frame_producer
    core::draw_frame receive_impl(const core::video_field field, int nb_samples) override
    {
        return producer_->get_frame(field);
    }

    core::draw_frame first_frame(const core::video_field field) override { return receive_impl(field, 0); }

    core::draw_frame last_frame(const core::video_field field) override
    {
        return core::draw_frame::still(producer_->get_frame(field));
    }

    bool is_ready() override { return producer_->is_ready(); }

    uint32_t nb_frames() const override { return length_; }
std::wstring print() const override { return producer_->print(); } std::wstring name() const override { return L"bluefish"; } }; spl::shared_ptr create_producer(const core::frame_producer_dependencies& dependencies, const std::vector& params) { if (params.empty() || !boost::iequals(params.at(0), "bluefish")) return core::frame_producer::empty(); auto device_index = get_param(L"DEVICE", params, -1); if (device_index == -1) device_index = std::stoi(params.at(1)); auto stream_index = get_param(L"SDI-STREAM", params, -1); if (stream_index == -1) stream_index = 1; auto uhd_mode = get_param(L"UHD-MODE", params, -1); if (uhd_mode == -1) uhd_mode = 0; auto length = get_param(L"LENGTH", params, std::numeric_limits::max()); auto in_format_desc = dependencies.format_repository.find(get_param(L"FORMAT", params, L"INVALID")); return spl::make_shared(dependencies.format_desc, dependencies.frame_factory, dependencies.format_repository, device_index, stream_index, uhd_mode, length); } }} // namespace caspar::bluefish ================================================ FILE: src/modules/bluefish/producer/bluefish_producer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com * satchit puthenveetil * James Wise, james.wise@bluefish444.com */ #pragma once #include #include #include namespace caspar { namespace bluefish { spl::shared_ptr create_producer(const core::frame_producer_dependencies& dependencies, const std::vector& params); }} // namespace caspar::bluefish ================================================ FILE: src/modules/bluefish/util/blue_velvet.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #include "blue_velvet.h" #include "../StdAfx.h" #include #define IMPLEMENTATION_BLUEVELVETC_FUNC_PTR #define IMPLEMENTATION_BLUEVELVETC_CONVERSION_FUNC_PTR #include "../interop/BlueVelvetCFuncPtr.h" namespace caspar { namespace bluefish { bvc_wrapper::bvc_wrapper() { if (LoadFunctionPointers_BlueVelvetC()) bvc_ = std::shared_ptr(bfcFactory(), bfcDestroy); if (LoadFunctionPointers_BlueConversion()) bvc_conv_ = std::shared_ptr(bfcConversionFactory(), bfcConversionDestroy); if (!bvc_ || !bvc_conv_) CASPAR_THROW_EXCEPTION(not_supported() << msg_info( "Bluefish drivers not found.\nDriver not installed?\nMinimum Version is V6.3.0.2")); } const char* bvc_wrapper::get_version() { return bfcGetVersion(); } BLUE_U32 bvc_wrapper::attach(int iDeviceId) { return bfcAttach(bvc_.get(), iDeviceId); } BLUE_U32 bvc_wrapper::detach() { return bfcDetach(bvc_.get()); } BLUE_U32 bvc_wrapper::set_multilink(const int iDeviceID, const int memChannel) { if (memChannel == -1) return (BLUE_U32)bfcSetMultiLinkMode(bvc_.get(), NULL); // ie. 
detach from multiLink blue_multi_link_info_struct attach_info = {}; attach_info.InputControl = 1; attach_info.Link1_Device = iDeviceID; attach_info.Link2_Device = iDeviceID; attach_info.Link3_Device = iDeviceID; attach_info.Link4_Device = iDeviceID; if (memChannel == BLUE_VIDEO_INPUT_CHANNEL_1) { attach_info.Link1_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_1; attach_info.Link2_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_2; attach_info.Link3_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_3; attach_info.Link4_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_4; } else if (memChannel == BLUE_VIDEO_INPUT_CHANNEL_5) { attach_info.Link5_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_5; attach_info.Link6_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_6; attach_info.Link7_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_7; attach_info.Link8_MemChannel = BLUE_VIDEO_INPUT_CHANNEL_8; } else if (memChannel == BLUE_VIDEO_OUTPUT_CHANNEL_1) { attach_info.Link1_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_1; attach_info.Link2_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_2; attach_info.Link3_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_3; attach_info.Link4_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_4; } else if (memChannel == BLUE_VIDEO_OUTPUT_CHANNEL_5) { attach_info.Link5_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_5; attach_info.Link6_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_6; attach_info.Link7_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_7; attach_info.Link8_MemChannel = BLUE_VIDEO_OUTPUT_CHANNEL_8; } return (BLUE_U32)bfcSetMultiLinkMode(bvc_.get(), &attach_info); } BLUE_U32 bvc_wrapper::get_card_property32(const int iProperty, unsigned int* nValue) { return (BLUE_U32)bfcQueryCardProperty32(bvc_.get(), iProperty, nValue); } BLUE_U32 bvc_wrapper::set_card_property32(const int iProperty, const unsigned int nValue) { return bfcSetCardProperty32(bvc_.get(), iProperty, nValue); } BLUE_U32 bvc_wrapper::get_card_property64(const int iProperty, unsigned long long* nValue) { return (BLUE_U32)bfcQueryCardProperty64(bvc_.get(), iProperty, nValue); } BLUE_U32 
bvc_wrapper::set_card_property64(const int iProperty, const unsigned long long nValue) { return bfcSetCardProperty64(bvc_.get(), iProperty, nValue); } BLUE_U32 bvc_wrapper::enumerate(int* iDevices) { return bfcEnumerate(bvc_.get(), iDevices); } BLUE_U32 bvc_wrapper::query_card_type(int* iCardType, int iDeviceID) { return bfcQueryCardType(bvc_.get(), iCardType, iDeviceID); } BLUE_U32 bvc_wrapper::system_buffer_write(unsigned char* pPixels, unsigned long ulSize, unsigned long ulBufferID, unsigned long ulOffset) { return bfcSystemBufferWriteAsync(bvc_.get(), pPixels, ulSize, nullptr, ulBufferID, ulOffset); } BLUE_U32 bvc_wrapper::system_buffer_read(unsigned char* pPixels, unsigned long ulSize, unsigned long ulBufferID, unsigned long ulOffset) { #if 1 return bfcSystemBufferReadAsync(bvc_.get(), pPixels, ulSize, nullptr, ulBufferID, ulOffset); #else BFC_SYNC_INFO bsi = bfcSyncInfoCreate(bvc_.get()); BLUE_U32 retVal = 0; retVal = bfcDmaReadFromCardAsync(bvc_.get(), pPixels, ulSize, bsi, ulBufferID, ulOffset); int wrv = bfcSyncInfoWait(bvc_.get(), bsi, 20); int x = wrv + 0; x++; bfcSyncInfoDelete(bvc_.get(), bsi); return retVal; #endif } BLUE_U32 bvc_wrapper::video_playback_stop(int iWait, int iFlush) { return bfcVideoPlaybackStop(bvc_.get(), iWait, iFlush); } BLUE_U32 bvc_wrapper::video_playback_start(int step, int loop) { return bfcVideoPlaybackStart(bvc_.get(), step, loop); } BLUE_U32 bvc_wrapper::video_playback_allocate(unsigned long* buffer_id, unsigned long* underrun) { void* unused = nullptr; return bfcVideoPlaybackAllocate(bvc_.get(), &unused, buffer_id, underrun); } BLUE_U32 bvc_wrapper::video_playback_present(unsigned long buffer_id, unsigned long count, unsigned long keep, unsigned long odd) { unsigned long unique_id; return bfcVideoPlaybackPresent(bvc_.get(), &unique_id, buffer_id, count, keep, odd); } BLUE_U32 bvc_wrapper::wait_video_output_sync(unsigned long ulUpdateType, unsigned long* ulFieldCount) { return bfcWaitVideoOutputSync(bvc_.get(), ulUpdateType, 
ulFieldCount); } BLUE_U32 bvc_wrapper::wait_video_input_sync(unsigned long ulUpdateType, unsigned long* ulFieldCount) { return bfcWaitVideoInputSync(bvc_.get(), ulUpdateType, ulFieldCount); } BLUE_U32 bvc_wrapper::render_buffer_update(unsigned long ulBufferID) { return bfcRenderBufferUpdate(bvc_.get(), ulBufferID); } BLUE_U32 bvc_wrapper::render_buffer_capture(unsigned long ulBufferID) { return bfcRenderBufferCapture(bvc_.get(), ulBufferID); } BLUE_U32 bvc_wrapper::encode_hanc_frame(unsigned int nCardType, hanc_stream_info_struct* pHancEncodeInfo, void* pAudioBuffer, unsigned int nAudioChannels, unsigned int nAudioSamples, unsigned int nSampleType, unsigned int nAudioFlags) { return bfcEncodeHancFrameEx(bvc_.get(), CRD_BLUE_NEUTRON, pHancEncodeInfo, pAudioBuffer, nAudioChannels, nAudioSamples, nSampleType, nAudioFlags); } BLUE_U32 bvc_wrapper::decode_hanc_frame(unsigned int nCardType, unsigned int* pHancBuffer, hanc_decode_struct* pHancDecodeInfo) { return bfcDecodeHancFrameEx(bvc_.get(), CRD_BLUE_NEUTRON, pHancBuffer, pHancDecodeInfo); } BLUE_U32 bvc_wrapper::get_frame_info_for_video_mode(const unsigned int nVideoModeExt, unsigned int* nWidth, unsigned int* nHeight, unsigned int* nRate, unsigned int* bIs1001, unsigned int* bIsProgressive) { return bfcUtilsGetFrameInfoForVideoModeExt(nVideoModeExt, nWidth, nHeight, nRate, bIs1001, bIsProgressive); } BLUE_U32 bvc_wrapper::get_bytes_per_frame(EVideoModeExt nVideoModeExt, EMemoryFormat nMemoryFormat, EUpdateMethod nUpdateMethod, unsigned int* nBytesPerFrame) { return bfcGetVideoBytesPerFrame(nVideoModeExt, nUpdateMethod, nMemoryFormat, nBytesPerFrame); } std::string bvc_wrapper::get_string_for_card_type(unsigned int nCardType) { return bfcUtilsGetStringForCardType(nCardType); } std::wstring bvc_wrapper::get_wstring_for_video_mode(unsigned int nVideoModeExt) { std::wstring mode_desc; switch (nVideoModeExt) { case VID_FMT_EXT_PAL: mode_desc = L"pal"; break; case VID_FMT_EXT_NTSC: mode_desc = L"ntsc"; break; case 
VID_FMT_EXT_720P_2398: mode_desc = L"720p23"; break; case VID_FMT_EXT_720P_2400: mode_desc = L"720p24"; break; case VID_FMT_EXT_720P_2500: mode_desc = L"720p25"; break; case VID_FMT_EXT_720P_5000: mode_desc = L"720p50"; break; case VID_FMT_EXT_720P_2997: mode_desc = L"720p29"; break; case VID_FMT_EXT_720P_5994: mode_desc = L"720p59"; break; case VID_FMT_EXT_720P_3000: mode_desc = L"720p30"; break; case VID_FMT_EXT_720P_6000: mode_desc = L"720p60"; break; case VID_FMT_EXT_1080P_2398: mode_desc = L"1080p23"; break; case VID_FMT_EXT_1080P_2400: mode_desc = L"1080p24"; break; case VID_FMT_EXT_1080I_5000: mode_desc = L"1080i50"; break; case VID_FMT_1080I_5994: mode_desc = L"1080i59"; break; case VID_FMT_EXT_1080I_6000: mode_desc = L"1080i60"; break; case VID_FMT_EXT_1080P_2500: mode_desc = L"1080p25"; break; case VID_FMT_EXT_1080P_2997: mode_desc = L"1080p29"; break; case VID_FMT_EXT_1080P_3000: mode_desc = L"1080p30"; break; case VID_FMT_EXT_1080P_5000: mode_desc = L"1080p50"; break; case VID_FMT_EXT_1080P_5994: mode_desc = L"1080p59"; break; case VID_FMT_EXT_1080P_6000: mode_desc = L"1080p60"; break; case VID_FMT_EXT_2160P_2398: mode_desc = L"2160p23"; break; case VID_FMT_EXT_2160P_2400: mode_desc = L"2160p24"; break; case VID_FMT_EXT_2160P_2500: mode_desc = L"2160p25"; break; case VID_FMT_EXT_2160P_2997: mode_desc = L"2160p29"; break; case VID_FMT_EXT_2160P_3000: mode_desc = L"2160p30"; break; case VID_FMT_EXT_2160P_5000: mode_desc = L"2160p50"; break; case VID_FMT_EXT_2160P_5994: mode_desc = L"2160p59"; break; case VID_FMT_EXT_2160P_6000: mode_desc = L"2160p60"; break; default: mode_desc = L"invalid"; break; } return mode_desc; } int bvc_wrapper::get_num_audio_samples_for_frame(const BLUE_U32 nVideoModeExt, const BLUE_U32 nFrameNo) { return bfcUtilsGetAudioSamplesPerFrame(nVideoModeExt, nFrameNo); } BLUE_U32 bvc_wrapper::convert_2si_to_sq(const BLUE_U32 Width, const BLUE_U32 Height, BLUE_VOID* pSrc, BLUE_VOID* pDst) { return 
bfcConvert_TsiToSquareDivision_RGB(bvc_conv_.get(), Width, Height, pSrc, pDst); } BLUE_U32 bvc_wrapper::convert_sq_to_2si(const BLUE_U32 Width, const BLUE_U32 Height, BLUE_VOID* pSrc, BLUE_VOID* pDst) { return bfcConvert_SquareDivisionToTsi_ARGB32(bvc_conv_.get(), Width, Height, pSrc, pDst); } EVideoModeExt vid_fmt_from_video_format(const core::video_format& fmt) { switch (fmt) { case core::video_format::pal: return VID_FMT_EXT_PAL; case core::video_format::ntsc: return VID_FMT_EXT_NTSC; case core::video_format::x576p2500: return VID_FMT_EXT_INVALID; // not supported case core::video_format::x720p2398: return VID_FMT_EXT_720P_2398; case core::video_format::x720p2400: return VID_FMT_EXT_720P_2400; case core::video_format::x720p2500: return VID_FMT_EXT_720P_2500; case core::video_format::x720p5000: return VID_FMT_EXT_720P_5000; case core::video_format::x720p2997: return VID_FMT_EXT_720P_2997; case core::video_format::x720p5994: return VID_FMT_EXT_720P_5994; case core::video_format::x720p3000: return VID_FMT_EXT_720P_3000; case core::video_format::x720p6000: return VID_FMT_EXT_720P_6000; case core::video_format::x1080p2398: return VID_FMT_EXT_1080P_2398; case core::video_format::x1080p2400: return VID_FMT_EXT_1080P_2400; case core::video_format::x1080i5000: return VID_FMT_EXT_1080I_5000; case core::video_format::x1080i5994: return VID_FMT_EXT_1080I_5994; case core::video_format::x1080i6000: return VID_FMT_EXT_1080I_6000; case core::video_format::x1080p2500: return VID_FMT_EXT_1080P_2500; case core::video_format::x1080p2997: return VID_FMT_EXT_1080P_2997; case core::video_format::x1080p3000: return VID_FMT_EXT_1080P_3000; case core::video_format::x1080p5000: return VID_FMT_EXT_1080P_5000; case core::video_format::x1080p5994: return VID_FMT_EXT_1080P_5994; case core::video_format::x1080p6000: return VID_FMT_EXT_1080P_6000; case core::video_format::x2160p2398: return VID_FMT_EXT_2160P_2398; case core::video_format::x2160p2400: return VID_FMT_EXT_2160P_2400; case 
core::video_format::x2160p2500: return VID_FMT_EXT_2160P_2500; case core::video_format::x2160p2997: return VID_FMT_EXT_2160P_2997; case core::video_format::x2160p3000: return VID_FMT_EXT_2160P_3000; case core::video_format::x2160p5000: return VID_FMT_EXT_2160P_5000; case core::video_format::x2160p5994: return VID_FMT_EXT_2160P_5994; case core::video_format::x2160p6000: return VID_FMT_EXT_2160P_6000; default: return VID_FMT_EXT_INVALID; } } bool is_epoch_card(bvc_wrapper& blue) { int device_id = 1; int card_type = 0; blue.query_card_type(&card_type, device_id); switch (card_type) { case CRD_BLUE_EPOCH_HORIZON: case CRD_BLUE_EPOCH_CORE: case CRD_BLUE_EPOCH_ULTRA: case CRD_BLUE_EPOCH_2K_HORIZON: case CRD_BLUE_EPOCH_2K_CORE: case CRD_BLUE_EPOCH_2K_ULTRA: case CRD_BLUE_CREATE_HD: case CRD_BLUE_CREATE_2K: case CRD_BLUE_CREATE_2K_ULTRA: case CRD_BLUE_SUPER_NOVA: case CRD_BLUE_SUPER_NOVA_S_PLUS: case CRD_BLUE_NEUTRON: case CRD_BLUE_EPOCH_CG: return true; default: return false; } } bool is_kronos_card(bvc_wrapper& blue) { int device_id = 1; int card_type = 0; blue.query_card_type(&card_type, device_id); switch (card_type) { case CRD_BLUE_KRONOS_ELEKTRON: case CRD_BLUE_KRONOS_OPTIKOS: case CRD_BLUE_KRONOS_K8: return true; default: return false; } } bool is_epoch_neutron_1i2o_card(bvc_wrapper& blue) { BLUE_U32 val = 0; blue.get_card_property32(EPOCH_GET_PRODUCT_ID, &val); if (val == ORAC_NEUTRON_1_IN_2_OUT_FIRMWARE_PRODUCTID) return true; return false; } bool is_epoch_neutron_3o_card(bvc_wrapper& blue) { BLUE_U32 val = 0; blue.get_card_property32(EPOCH_GET_PRODUCT_ID, &val); if (val == ORAC_NEUTRON_0_IN_3_OUT_FIRMWARE_PRODUCTID) return true; return false; } std::wstring get_card_desc(bvc_wrapper blue, int device_id) { std::wstring card_desc; int card_type = 0; blue.query_card_type(&card_type, device_id); switch (card_type) { case CRD_BLUE_EPOCH_2K_CORE: card_desc = L"Bluefish Epoch 2K Core"; break; case CRD_BLUE_EPOCH_2K_ULTRA: card_desc = L"Bluefish Epoch 2K Ultra"; break; 
case CRD_BLUE_EPOCH_HORIZON: card_desc = L"Bluefish Epoch Horizon"; break; case CRD_BLUE_EPOCH_CORE: card_desc = L"Blufishe Epoch Core"; break; case CRD_BLUE_EPOCH_ULTRA: card_desc = L"Bluefish Epoch Ultra"; break; case CRD_BLUE_CREATE_HD: card_desc = L"Bluefish Create HD"; break; case CRD_BLUE_CREATE_2K: card_desc = L"Bluefish Create 2K"; break; case CRD_BLUE_CREATE_2K_ULTRA: card_desc = L"Bluefish Create 2K Ultra"; break; case CRD_BLUE_SUPER_NOVA: card_desc = L"Bluefish SuperNova"; break; case CRD_BLUE_SUPER_NOVA_S_PLUS: card_desc = L"Bluefish SuperNova s+"; break; case CRD_BLUE_NEUTRON: card_desc = L"Bluefish Neutron 4k"; break; case CRD_BLUE_EPOCH_CG: card_desc = L"Bluefish Epoch CG"; break; case CRD_BLUE_KRONOS_ELEKTRON: card_desc = L"Bluefish Kronos Elektron"; break; case CRD_BLUE_KRONOS_OPTIKOS: card_desc = L"Bluefish Kronos Optikos"; break; case CRD_BLUE_KRONOS_K8: card_desc = L"Bluefish Kronos K8"; break; default: card_desc = L"Unknown"; } return card_desc; } EVideoModeExt get_video_mode(bvc_wrapper& blue, const core::video_format_desc& format_desc) { EVideoModeExt vid_fmt_ext = VID_FMT_EXT_INVALID; if ((format_desc.width <= 2048) || is_kronos_card(blue)) vid_fmt_ext = vid_fmt_from_video_format(format_desc.format); if (vid_fmt_ext == VID_FMT_EXT_INVALID) CASPAR_THROW_EXCEPTION(not_supported() << msg_info(L"video-mode not supported: " + format_desc.name)); return vid_fmt_ext; } spl::shared_ptr create_blue() { auto pWrap = new bvc_wrapper(); return spl::shared_ptr(pWrap); } spl::shared_ptr create_blue(int device_index) { auto blue = create_blue(); if (blue->attach(device_index)) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to attach device.")); return blue; } core::video_format_desc get_format_desc(const core::video_format_repository& format_repository, bvc_wrapper& blue, EVideoModeExt vid_fmt_ext, EMemoryFormat mem_fmt) { core::video_format_desc fmt; unsigned int width, height, duration = 0, time_scale = 0, rate = 0, bIs1001 = 0, 
is_progressive = 0, size = 0; std::vector audio_cadence; int field_count = 1; blue.get_frame_info_for_video_mode(vid_fmt_ext, &width, &height, &rate, &bIs1001, &is_progressive); CASPAR_LOG(warning) << L"vfe is " << vid_fmt_ext << L" w: " << width << L" h:" << height << L" rate:" << rate << L"is1001 " << bIs1001 << L"isProgressive " << is_progressive; blue.get_bytes_per_frame(vid_fmt_ext, mem_fmt, UPD_FMT_FRAME, &size); CASPAR_LOG(warning) << L"vfe is " << vid_fmt_ext << L" size is: " << size; switch (vid_fmt_ext) { case VID_FMT_EXT_NTSC: case VID_FMT_EXT_1080I_5994: duration = 30000; time_scale = 1001; audio_cadence = {1601, 1602, 1601, 1602, 1602}; break; case VID_FMT_EXT_2K_1080P_2500: case VID_FMT_EXT_2K_1080PSF_2500: case VID_FMT_EXT_576I_5000: case VID_FMT_EXT_1080P_2500: case VID_FMT_EXT_1080I_5000: case VID_FMT_EXT_1080PSF_2500: case VID_FMT_EXT_720P_2500: case VID_FMT_EXT_2160P_2500: duration = 25000; time_scale = 1000; audio_cadence = {1920, 1920, 1920, 1920, 1920}; break; case VID_FMT_EXT_720P_5994: case VID_FMT_EXT_2K_1080P_5994: case VID_FMT_EXT_1080P_5994: case VID_FMT_EXT_2160P_5994: duration = 60000; time_scale = 1001; audio_cadence = {801, 800, 801, 800, 800}; break; case VID_FMT_EXT_1080P_6000: case VID_FMT_EXT_2K_1080P_6000: case VID_FMT_EXT_720P_6000: case VID_FMT_EXT_2160P_6000: duration = 60000; time_scale = 1000; audio_cadence = {801, 800, 801, 800, 800}; break; case VID_FMT_EXT_1080PSF_2398: case VID_FMT_EXT_1080P_2398: case VID_FMT_EXT_720P_2398: case VID_FMT_EXT_2K_1080PSF_2398: case VID_FMT_EXT_2K_1080P_2398: case VID_FMT_EXT_2160P_2398: duration = 24000; time_scale = 1000; audio_cadence = {2002, 2002, 2002, 2002, 2002}; break; case VID_FMT_EXT_1080PSF_2400: case VID_FMT_EXT_1080P_2400: case VID_FMT_EXT_720P_2400: case VID_FMT_EXT_2K_1080PSF_2400: case VID_FMT_EXT_2K_1080P_2400: case VID_FMT_EXT_2160P_2400: duration = 24000; time_scale = 1000; audio_cadence = {2000, 2000, 2000, 2000, 2000}; break; case VID_FMT_EXT_1080I_6000: case 
VID_FMT_EXT_1080PSF_3000: duration = 30000; time_scale = 1000; audio_cadence = {1600, 1600, 1600, 1600, 1600}; break; case VID_FMT_EXT_720P_2997: case VID_FMT_EXT_1080P_2997: case VID_FMT_EXT_2K_1080PSF_2997: case VID_FMT_EXT_2K_1080P_2997: case VID_FMT_EXT_1080PSF_2997: case VID_FMT_EXT_2160P_2997: duration = 30000; time_scale = 1001; audio_cadence = {1602, 1601, 1602, 1601, 1602}; break; case VID_FMT_EXT_720P_3000: case VID_FMT_EXT_1080P_3000: case VID_FMT_EXT_2K_1080PSF_3000: case VID_FMT_EXT_2K_1080P_3000: case VID_FMT_EXT_2160P_3000: duration = 30000; time_scale = 1001; audio_cadence = {1600, 1600, 1600, 1600, 1600}; break; case VID_FMT_EXT_720P_5000: case VID_FMT_EXT_1080P_5000: case VID_FMT_EXT_2K_1080P_5000: case VID_FMT_EXT_2160P_5000: audio_cadence = {960, 960, 960, 960, 960}; duration = 50000; time_scale = 1000; break; } if (is_progressive == 0u) field_count = 2; fmt.field_count = field_count; fmt = format_repository.find_format(get_caspar_video_format(vid_fmt_ext)); fmt.size = size; fmt.audio_cadence = std::move(audio_cadence); fmt.name = blue.get_wstring_for_video_mode(vid_fmt_ext); return fmt; } std::wstring get_sdi_inputs(bvc_wrapper& blue) { BLUE_U32 val = 0; blue.get_card_property32(CARD_FEATURE_CONNECTOR_INFO, &val); int connectors = CARD_FEATURE_GET_SDI_INPUT_CONNECTOR_COUNT(val); return std::to_wstring(connectors); } std::wstring get_sdi_outputs(bvc_wrapper& blue) { BLUE_U32 val = 0; blue.get_card_property32(CARD_FEATURE_CONNECTOR_INFO, &val); int connectors = CARD_FEATURE_GET_SDI_OUTPUT_CONNECTOR_COUNT(val); return std::to_wstring(connectors); } EVideoModeExt get_bluefish_video_format(core::video_format fmt) { // TODO: add support for UHD 4K formats switch (fmt) { case core::video_format::pal: return VID_FMT_EXT_PAL; case core::video_format::ntsc: return VID_FMT_EXT_NTSC; case core::video_format::x720p2398: return VID_FMT_EXT_720P_2398; case core::video_format::x720p2400: return VID_FMT_EXT_720P_2400; case core::video_format::x720p2500: return 
VID_FMT_EXT_720P_2500; case core::video_format::x720p2997: return VID_FMT_EXT_720P_2997; case core::video_format::x720p3000: return VID_FMT_EXT_720P_3000; case core::video_format::x720p5000: return VID_FMT_EXT_720P_5000; case core::video_format::x720p5994: return VID_FMT_EXT_720P_5994; case core::video_format::x720p6000: return VID_FMT_EXT_720P_6000; case core::video_format::x1080i5000: return VID_FMT_EXT_1080I_5000; case core::video_format::x1080i5994: return VID_FMT_EXT_1080I_5994; case core::video_format::x1080i6000: return VID_FMT_EXT_1080I_6000; case core::video_format::x1080p2398: return VID_FMT_EXT_1080P_2398; case core::video_format::x1080p2400: return VID_FMT_EXT_1080P_2400; case core::video_format::x1080p2500: return VID_FMT_EXT_1080P_2500; case core::video_format::x1080p2997: return VID_FMT_EXT_1080P_2997; case core::video_format::x1080p3000: return VID_FMT_EXT_1080P_3000; case core::video_format::x1080p5000: return VID_FMT_EXT_1080P_5000; case core::video_format::x1080p5994: return VID_FMT_EXT_1080P_5994; case core::video_format::x1080p6000: return VID_FMT_EXT_1080P_6000; case core::video_format::x2160p2398: return VID_FMT_EXT_2160P_2398; case core::video_format::x2160p2400: return VID_FMT_EXT_2160P_2400; case core::video_format::x2160p2500: return VID_FMT_EXT_2160P_2500; case core::video_format::x2160p2997: return VID_FMT_EXT_2160P_2997; case core::video_format::x2160p3000: return VID_FMT_EXT_2160P_3000; case core::video_format::x2160p5000: return VID_FMT_EXT_2160P_5000; case core::video_format::x2160p5994: return VID_FMT_EXT_2160P_5994; case core::video_format::x2160p6000: return VID_FMT_EXT_2160P_6000; default: return VID_FMT_EXT_INVALID; } } static core::video_format get_caspar_video_format(EVideoModeExt fmt) { switch (fmt) { case VID_FMT_EXT_PAL: return core::video_format::pal; case VID_FMT_EXT_NTSC: return core::video_format::ntsc; case VID_FMT_EXT_720P_2398: return core::video_format::x720p2398; case VID_FMT_EXT_720P_2400: return 
core::video_format::x720p2400; case VID_FMT_EXT_720P_2500: return core::video_format::x720p2500; case VID_FMT_EXT_720P_5000: return core::video_format::x720p5000; case VID_FMT_EXT_720P_2997: return core::video_format::x720p2997; case VID_FMT_EXT_720P_5994: return core::video_format::x720p5994; case VID_FMT_EXT_720P_3000: return core::video_format::x720p3000; case VID_FMT_EXT_720P_6000: return core::video_format::x720p6000; case VID_FMT_EXT_1080P_2398: case VID_FMT_EXT_1080PSF_2398: return core::video_format::x1080p2398; case VID_FMT_EXT_1080P_2400: case VID_FMT_EXT_1080PSF_2400: return core::video_format::x1080p2400; case VID_FMT_EXT_1080I_5000: return core::video_format::x1080i5000; case VID_FMT_EXT_1080I_5994: return core::video_format::x1080i5994; case VID_FMT_EXT_1080I_6000: return core::video_format::x1080i6000; case VID_FMT_EXT_1080P_2500: case VID_FMT_EXT_1080PSF_2500: return core::video_format::x1080p2500; case VID_FMT_EXT_1080P_2997: case VID_FMT_EXT_1080PSF_2997: return core::video_format::x1080p2997; case VID_FMT_EXT_1080P_3000: case VID_FMT_EXT_1080PSF_3000: return core::video_format::x1080p3000; case VID_FMT_EXT_1080P_5000: return core::video_format::x1080p5000; case VID_FMT_EXT_1080P_5994: return core::video_format::x1080p5994; case VID_FMT_EXT_1080P_6000: return core::video_format::x1080p6000; case VID_FMT_EXT_2160P_2398: return core::video_format::x2160p2398; case VID_FMT_EXT_2160P_2400: return core::video_format::x2160p2400; case VID_FMT_EXT_2160P_2500: return core::video_format::x2160p2500; case VID_FMT_EXT_2160P_2997: return core::video_format::x2160p2997; case VID_FMT_EXT_2160P_3000: return core::video_format::x2160p3000; case VID_FMT_EXT_2160P_5000: return core::video_format::x2160p5000; case VID_FMT_EXT_2160P_5994: return core::video_format::x2160p5994; case VID_FMT_EXT_2160P_6000: return core::video_format::x2160p6000; default: return core::video_format::invalid; } } }} // namespace caspar::bluefish 
================================================
FILE: src/modules/bluefish/util/blue_velvet.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

// NOTE(review): the five header names below were stripped by text extraction
// (angle-bracket contents lost) -- restore from the original file.
#include
#include
#include
#include
#include

#include "../interop/BlueDriver_p.h"

namespace caspar { namespace bluefish {

// Thin wrapper around the Bluefish "Blue Velvet" C SDK.  The member functions
// forward to entry points resolved from the vendor factory at construction
// time; return values are the SDK's BLUE_U32 status codes.
class bvc_wrapper
{
  public:
    bvc_wrapper(); // bfcFactory + function pointer lookups

    const char* get_version();

    // Device discovery / selection.
    BLUE_U32 enumerate(int* iDevices);
    BLUE_U32 query_card_type(int* iCardType, int iDeviceID);
    BLUE_U32 attach(int iDeviceId);
    BLUE_U32 detach();
    BLUE_U32 set_multilink(const int iDeviceID, const int memChannel);

    // Card property access (32- and 64-bit variants).
    BLUE_U32 get_card_property32(const int iProperty, unsigned int* nValue);
    BLUE_U32 set_card_property32(const int iProperty, const unsigned int nValue);
    BLUE_U32 get_card_property64(const int iProperty, unsigned long long* nValue);
    BLUE_U32 set_card_property64(const int iProperty, const unsigned long long nValue);

    // DMA transfer of pixel data to/from driver-side buffers.
    BLUE_U32 system_buffer_write(unsigned char* pPixels, unsigned long ulSize, unsigned long ulBufferID, unsigned long ulOffset);
    BLUE_U32 system_buffer_read(unsigned char* pPixels, unsigned long ulSize, unsigned long ulBufferID, unsigned long ulOffset);

    // Playback pipeline control.
    BLUE_U32 video_playback_stop(int iWait, int iFlush);
    BLUE_U32 video_playback_start(int wait, int loop);
    BLUE_U32 video_playback_allocate(unsigned long* buffer_id, unsigned long* underrun);
    BLUE_U32 video_playback_present(unsigned long buffer_id, unsigned long count, unsigned long keep, unsigned long odd);

    // Blocking waits for output/input vertical sync.
    BLUE_U32 wait_video_output_sync(unsigned long ulUpdateType, unsigned long* ulFieldCount);
    BLUE_U32 wait_video_input_sync(unsigned long ulUpdateType, unsigned long* ulFieldCount);

    BLUE_U32 render_buffer_update(unsigned long ulBufferID);
    BLUE_U32 render_buffer_capture(unsigned long ulBufferID);

    // HANC (horizontal ancillary / embedded audio) encode and decode.
    BLUE_U32 encode_hanc_frame(unsigned int nCardType, struct hanc_stream_info_struct* pHancEncodeInfo, void* pAudioBuffer, unsigned int nAudioChannels, unsigned int nAudioSamples, unsigned int nSampleType, unsigned int nAudioFlags);
    BLUE_U32 decode_hanc_frame(unsigned int nCardType, unsigned int* pHancBuffer, struct hanc_decode_struct* pHancDecodeInfo);

    // Video-mode geometry / sizing helpers.
    BLUE_U32 get_frame_info_for_video_mode(const unsigned int nVideoModeExt, unsigned int* nWidth, unsigned int* nHeight, unsigned int* nRate, unsigned int* bIs1001, unsigned int* bIsProgressive);
    BLUE_U32 get_bytes_per_frame(EVideoModeExt nVideoMode, EMemoryFormat nMemoryFormat, EUpdateMethod nUpdateMethod, unsigned int* nBytesPerFrame);

    std::string  get_string_for_card_type(unsigned int nCardType);
    std::wstring get_wstring_for_video_mode(unsigned int nVideoMode);

    int get_num_audio_samples_for_frame(const BLUE_U32 nVideoMode, const BLUE_U32 nFrameNo);

    // UHD Conversion functions...
    BLUE_U32 convert_2si_to_sq(const BLUE_U32 Width, const BLUE_U32 Height, BLUE_VOID* pSrc, BLUE_VOID* pDst);
    BLUE_U32 convert_sq_to_2si(const BLUE_U32 Width, const BLUE_U32 Height, BLUE_VOID* pSrc, BLUE_VOID* pDst);

  private:
    // NOTE(review): template arguments stripped by extraction -- presumably
    // std::shared_ptr<...> handles onto the SDK factory objects.
    std::shared_ptr bvc_;
    std::shared_ptr bvc_conv_;
};

// NOTE(review): spl::shared_ptr template arguments stripped by extraction.
spl::shared_ptr create_blue();
spl::shared_ptr create_blue(int device_index);

core::video_format_desc get_format_desc(const core::video_format_repository& format_repository, bvc_wrapper& blue, EVideoModeExt vid_fmt, EMemoryFormat mem_fmt);

// Card-family predicates used to special-case routing/feature setup.
bool is_epoch_card(bvc_wrapper& blue);
bool is_kronos_card(bvc_wrapper& blue);
bool is_epoch_neutron_1i2o_card(bvc_wrapper& blue);
bool is_epoch_neutron_3o_card(bvc_wrapper& blue);

std::wstring get_card_desc(bvc_wrapper blue, int device_id);
std::wstring get_sdi_inputs(bvc_wrapper& blue);
std::wstring get_sdi_outputs(bvc_wrapper& blue);

// Mapping between caspar and Bluefish video-format enumerations.
// NOTE(review): `static` on a header-declared free function gives it internal
// linkage per including TU -- confirm this is intentional.
EVideoModeExt get_bluefish_video_format(core::video_format fmt);
static core::video_format get_caspar_video_format(EVideoModeExt fmt);

}} // namespace caspar::bluefish

================================================
FILE: src/modules/bluefish/util/memory.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
* * Author: Robert Nagy, ronag89@gmail.com James Wise, james.wise@bluefish444.com */ #pragma once #include #include #include namespace caspar { namespace bluefish { static const size_t MAX_HANC_BUFFER_SIZE = 256 * 1024; static const size_t MAX_VBI_BUFFER_SIZE = 36 * 1920 * 4; struct blue_dma_buffer { public: blue_dma_buffer(int image_size, int id) : id_(id) , image_size_(image_size) , hanc_size_(MAX_HANC_BUFFER_SIZE) , image_buffer_(image_size_) , hanc_buffer_(hanc_size_) { } int id() const { return id_; } PBYTE image_data() { return image_buffer_.data(); } PBYTE hanc_data() { return hanc_buffer_.data(); } size_t image_size() const { return image_size_; } size_t hanc_size() const { return hanc_size_; } private: int id_; size_t image_size_; size_t hanc_size_; std::vector> image_buffer_; std::vector> hanc_buffer_; }; using blue_dma_buffer_ptr = std::shared_ptr; }} // namespace caspar::bluefish ================================================ FILE: src/modules/decklink/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 3.28) project (decklink) set(SOURCES consumer/decklink_consumer.cpp consumer/decklink_consumer.h consumer/v210_strategies.cpp consumer/sdr_bgra_strategy.cpp consumer/format_strategy.h consumer/config.cpp consumer/config.h consumer/monitor.cpp consumer/monitor.h consumer/vanc.cpp consumer/vanc.h consumer/vanc_scte104_strategy.cpp consumer/vanc_op47_strategy.cpp producer/decklink_producer.cpp producer/decklink_producer.h util/util.h decklink.cpp decklink.h decklink_api.h StdAfx.h ) if (MSVC) list(APPEND SOURCES interop/DeckLinkAPI_i.c interop/DeckLinkAPI.h interop/DeckLinkAPIVersion.h ) else() list(APPEND SOURCES linux_interop/DeckLinkAPIConfiguration.h linux_interop/DeckLinkAPIDeckControl.h linux_interop/DeckLinkAPIDispatch.cpp linux_interop/DeckLinkAPIModes.h linux_interop/DeckLinkAPI_v10_2.h linux_interop/DeckLinkAPI_v7_3.h linux_interop/DeckLinkAPI_v7_9.h linux_interop/DeckLinkAPI_v8_1.h 
linux_interop/DeckLinkAPI_v9_9.h linux_interop/LinuxCOM.h linux_interop/DeckLinkAPIConfiguration_v10_2.h linux_interop/DeckLinkAPIDiscovery.h linux_interop/DeckLinkAPI.h linux_interop/DeckLinkAPITypes.h linux_interop/DeckLinkAPI_v7_1.h linux_interop/DeckLinkAPI_v7_6.h linux_interop/DeckLinkAPI_v8_0.h linux_interop/DeckLinkAPI_v9_2.h linux_interop/DeckLinkAPIVersion.h ) endif () casparcg_add_module_project(decklink SOURCES ${SOURCES} INIT_FUNCTION "decklink::init" ) target_include_directories(decklink PRIVATE .. ${FFMPEG_INCLUDE_PATH} ) target_precompile_headers(decklink PRIVATE "StdAfx.h") set_target_properties(decklink PROPERTIES FOLDER modules) source_group(sources ./*) source_group(sources\\consumer consumer/*) source_group(sources\\interop interop/*) source_group(sources\\producer producer/*) source_group(sources\\util util/*) if (MSVC) target_link_libraries(decklink PRIVATE ffmpeg ) else() target_link_libraries(decklink PRIVATE ffmpeg dl ) endif () ================================================ FILE: src/modules/decklink/StdAfx.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Robert Nagy, ronag89@gmail.com */ #ifdef __cplusplus #if defined _DEBUG && defined _MSC_VER #include #endif #define NOMINMAX #define WIN32_LEAN_AND_MEAN #if defined(_MSC_VER) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern "C" { #define __STDC_CONSTANT_MACROS #define __STDC_LIMIT_MACROS #include } #if defined(_MSC_VER) #include #include #include #endif #include #include #include #include #include #include #endif #if defined(_MSC_VER) #include #include #endif ================================================ FILE: src/modules/decklink/consumer/config.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . 
* * Author: Julian Waller, julian@superfly.tv */ #include "config.h" #include #include #ifdef WIN32 #include #define CHECK_INSTRUCTION_SUPPORT(a, v) (__check_arch_support((a), (v)) || __check_isa_support((a), (v))) #endif namespace caspar { namespace decklink { port_configuration parse_output_config(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository) { port_configuration port_config; port_config.device_index = ptree.get(L"device", static_cast(-1)); port_config.key_only = ptree.get(L"key-only", port_config.key_only); auto format_desc_str = ptree.get(L"video-mode", L""); if (!format_desc_str.empty()) { auto format_desc = format_repository.find(format_desc_str); if (format_desc.format == core::video_format::invalid || format_desc.format == core::video_format::custom) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Invalid video-mode: " + format_desc_str)); port_config.format = format_desc; } auto subregion_tree = ptree.get_child_optional(L"subregion"); if (subregion_tree) { port_config.src_x = subregion_tree->get(L"src-x", port_config.src_x); port_config.src_y = subregion_tree->get(L"src-y", port_config.src_y); port_config.dest_x = subregion_tree->get(L"dest-x", port_config.dest_x); port_config.dest_y = subregion_tree->get(L"dest-y", port_config.dest_y); port_config.region_w = subregion_tree->get(L"width", port_config.region_w); port_config.region_h = subregion_tree->get(L"height", port_config.region_h); } return port_config; } vanc_configuration parse_vanc_config(const boost::property_tree::wptree& vanc_tree) { vanc_configuration vanc_config; vanc_config.enable = true; vanc_config.op47_line = vanc_tree.get(L"op47-line", vanc_config.op47_line); vanc_config.op47_line_field2 = vanc_tree.get(L"op47-line-field2", vanc_config.op47_line_field2); vanc_config.enable_op47 = vanc_config.op47_line > 0; vanc_config.op42_sd_line = vanc_tree.get(L"op42-sd-line", vanc_config.op42_sd_line); vanc_config.scte104_line = 
vanc_tree.get(L"scte104-line", vanc_config.scte104_line); vanc_config.enable_scte104 = vanc_config.scte104_line > 0; vanc_config.op47_dummy_header = vanc_tree.get(L"op47-dummy-header", L""); return vanc_config; }; core::color_space get_color_space(const std::wstring& str) { auto color_space_str = boost::to_lower_copy(str); if (color_space_str == L"bt709") return core::color_space::bt709; else if (color_space_str == L"bt2020") return core::color_space::bt2020; else if (color_space_str == L"bt601") return core::color_space::bt601; CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Invalid decklink color-space, must be bt601, bt709 or bt2020")); } configuration parse_xml_config(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository, const core::channel_info& channel_info) { configuration config; auto duplex = ptree.get(L"duplex", L"default"); if (duplex == L"full") { config.duplex = configuration::duplex_t::full_duplex; } else if (duplex == L"half") { config.duplex = configuration::duplex_t::half_duplex; } auto latency = ptree.get(L"latency", L"default"); if (latency == L"low") { config.latency = configuration::latency_t::low_latency; } else if (latency == L"normal") { config.latency = configuration::latency_t::normal_latency; } auto wait_for_reference = ptree.get(L"wait-for-reference", L"auto"); if (wait_for_reference == L"disable" || wait_for_reference == L"disabled") { config.wait_for_reference = configuration::wait_for_reference_t::disabled; } else if (wait_for_reference == L"enable" || wait_for_reference == L"enabled") { config.wait_for_reference = configuration::wait_for_reference_t::enabled; } else { config.wait_for_reference = configuration::wait_for_reference_t::automatic; } config.wait_for_reference_duration = ptree.get(L"wait-for-reference-duration", config.wait_for_reference_duration); { auto is_8bit = channel_info.depth == common::bit_depth::bit8; auto default_pixel_format = is_8bit ? 
L"rgba" : L"yuv";

        auto pixel_format = ptree.get(L"pixel-format", default_pixel_format);
        if (pixel_format == L"yuv") {
            config.pixel_format = configuration::pixel_format_t::yuv;
        } else if (pixel_format == L"rgba") {
            config.pixel_format = configuration::pixel_format_t::rgba;
        } else {
            CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Invalid pixel format, must be rgba or yuv"));
        }

        // rgba output is only implemented for 8-bit channels.
        if (channel_info.depth != common::bit_depth::bit8 &&
            config.pixel_format == configuration::pixel_format_t::rgba) {
            CASPAR_THROW_EXCEPTION(user_error()
                                   << msg_info(L"The decklink consumer only supports rgba output on 8-bit channels"));
        }

        // yuv packing requires 256-bit vector (AVX2) support; fail early when
        // the cpu lacks it rather than at playout time.
        if (config.pixel_format != configuration::pixel_format_t::rgba) {
#ifdef WIN32
            if (!CHECK_INSTRUCTION_SUPPORT(__IA_SUPPORT_VECTOR256, 0)) {
#elif defined(__x86_64__) || defined(__i386__)
            if (!__builtin_cpu_supports("avx2")) {
#else
            if (false) {
#endif
                CASPAR_THROW_EXCEPTION(user_error()
                                       << msg_info(L"Your cpu does not support the features needed for yuv output"));
            }
        }
    }

    config.primary = parse_output_config(ptree, format_repository);
    // parse_output_config returns -1 when no <device> was given; default to 1.
    if (config.primary.device_index == -1)
        config.primary.device_index = 1;

    auto keyer = ptree.get(L"keyer", L"default");
    if (keyer == L"external") {
        config.keyer = configuration::keyer_t::external_keyer;
    } else if (keyer == L"internal") {
        config.keyer = configuration::keyer_t::internal_keyer;
    } else if (keyer == L"external_separate_device") {
        // The key signal is played out on a second device mirroring the fill;
        // by default that is the next device index.
        config.keyer = configuration::keyer_t::external_keyer;

        auto key_config         = config.primary; // Copy the primary config
        key_config.device_index = ptree.get(L"key-device", static_cast(0));
        if (key_config.device_index == 0) {
            key_config.device_index = config.primary.device_index + 1;
        }
        key_config.key_only = true;
        config.secondaries.push_back(key_config);
    }

    config.embedded_audio    = ptree.get(L"embedded-audio", config.embedded_audio);
    config.base_buffer_depth = ptree.get(L"buffer-depth", config.base_buffer_depth);

    if (ptree.get_child_optional(L"ports")) {
        for (auto& xml_port : ptree | witerate_children(L"ports") | welement_context_iteration) {
            ptree_verify_element_name(xml_port, L"port");
            port_configuration port_config = parse_output_config(xml_port.second, format_repository);
            config.secondaries.push_back(port_config);
        }
    }

    // Channel default unless explicitly overridden.
    config.color_space   = channel_info.default_color_space;
    auto color_space_str = ptree.get(L"color-space", L"");
    if (!color_space_str.empty())
        config.color_space = get_color_space(color_space_str);

    auto hdr_metadata = ptree.get_child_optional(L"hdr-metadata");
    if (hdr_metadata) {
        config.hdr_meta.min_dml  = hdr_metadata->get(L"min-dml", config.hdr_meta.min_dml);
        config.hdr_meta.max_dml  = hdr_metadata->get(L"max-dml", config.hdr_meta.max_dml);
        config.hdr_meta.max_fall = hdr_metadata->get(L"max-fall", config.hdr_meta.max_fall);
        config.hdr_meta.max_cll  = hdr_metadata->get(L"max-cll", config.hdr_meta.max_cll);
    }

    auto vanc = ptree.get_child_optional(L"vanc");
    if (vanc) {
        config.vanc = parse_vanc_config(vanc.get());
    }

    return config;
}

// Build a consumer configuration from AMCP "ADD ... DECKLINK ..." parameters.
// NOTE(review): format_repository is currently unused here.
configuration parse_amcp_config(const std::vector&                   params,
                                const core::video_format_repository& format_repository,
                                const core::channel_info&            channel_info)
{
    configuration config;

    if (params.size() > 1)
        config.primary.device_index = std::stoll(params.at(1));

    if (contains_param(L"INTERNAL_KEY", params)) {
        config.keyer = configuration::keyer_t::internal_keyer;
    } else if (contains_param(L"EXTERNAL_KEY", params)) {
        config.keyer = configuration::keyer_t::external_keyer;
    } else {
        config.keyer = configuration::keyer_t::default_keyer;
    }

    if (contains_param(L"FULL_DUPLEX", params)) {
        config.duplex = configuration::duplex_t::full_duplex;
    } else if (contains_param(L"HALF_DUPLEX", params)) {
        config.duplex = configuration::duplex_t::half_duplex;
    }

    if (contains_param(L"LOW_LATENCY", params)) {
        config.latency = configuration::latency_t::low_latency;
    }

    config.embedded_audio   = contains_param(L"EMBEDDED_AUDIO", params);
    config.primary.key_only = contains_param(L"KEY_ONLY", params);
    config.color_space =
channel_info.default_color_space;

    return config;
}

}} // namespace caspar::decklink

================================================
FILE: src/modules/decklink/consumer/config.h
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Julian Waller, julian@superfly.tv
 */

#pragma once

// NOTE(review): the four header names below were stripped by text extraction.
#include
#include
#include
#include

namespace caspar { namespace decklink {

// Settings for one physical decklink output (the primary or a secondary port).
struct port_configuration
{
    int64_t device_index = 1; // Either an index, or a persistent id
    bool    key_only     = false;

    // Optional per-port format override; invalid means "use channel format".
    core::video_format_desc format;

    // Optional subregion of the channel raster to present on this port.
    int src_x    = 0;
    int src_y    = 0;
    int dest_x   = 0;
    int dest_y   = 0;
    int region_w = 0;
    int region_h = 0;

    [[nodiscard]] bool has_subregion_geometry() const
    {
        return src_x != 0 || src_y != 0 || region_w != 0 || region_h != 0 || dest_x != 0 || dest_y != 0;
    }
};

// VANC (vertical ancillary data) insertion settings; see parse_vanc_config.
struct vanc_configuration
{
    bool enable         = false;
    bool enable_op47    = false;
    bool enable_scte104 = false;

    uint32_t op47_line        = 0;
    uint32_t op42_sd_line     = 21;
    uint32_t op47_line_field2 = 0;
    uint32_t scte104_line     = 0;

    std::wstring op47_dummy_header;
};

// HDR mastering-display and light-level metadata defaults (cd/m2).
struct hdr_meta_configuration
{
    float min_dml  = 0.005f;
    float max_dml  = 1000.0f;
    float max_fall = 100.0f;
    float max_cll  = 1000.0f;
};

// Full decklink consumer configuration, produced by parse_xml_config or
// parse_amcp_config and consumed by decklink_consumer.
struct configuration
{
    enum class keyer_t
    {
        internal_keyer,
        external_keyer,
        default_keyer = external_keyer
    };

    enum class duplex_t
    {
        none,
        half_duplex,
        full_duplex,
        default_duplex = none
    };

    enum class latency_t
    {
        low_latency,
        normal_latency,
        default_latency = normal_latency
    };

    enum class wait_for_reference_t
    {
        automatic,
        enabled,
        disabled,
    };

    enum class pixel_format_t
    {
        rgba,
        yuv,
    };

    bool                 embedded_audio              = false;
    keyer_t              keyer                       = keyer_t::default_keyer;
    duplex_t             duplex                      = duplex_t::default_duplex;
    latency_t            latency                     = latency_t::default_latency;
    wait_for_reference_t wait_for_reference          = wait_for_reference_t::automatic;
    int                  wait_for_reference_duration = 10; // seconds
    int                  base_buffer_depth           = 3;
    bool                 hdr                         = false;
    pixel_format_t       pixel_format                = pixel_format_t::rgba;

    port_configuration primary;
    // NOTE(review): vector element type stripped by extraction (secondaries
    // are filled with port_configuration values in config.cpp).
    std::vector secondaries;

    core::color_space      color_space = core::color_space::bt709;
    hdr_meta_configuration hdr_meta;
    vanc_configuration     vanc;

    // Effective scheduling depth: base depth plus one frame for normal
    // latency and one for embedded audio.
    [[nodiscard]] int buffer_depth() const
    {
        return base_buffer_depth + (latency == latency_t::low_latency ? 0 : 1) +
               (embedded_audio ? 1 : 0); // TODO: Do we need this?
    }

    // int key_device_index() const { return key_device_idx == 0 ? device_index + 1 : key_device_idx; }
};

configuration parse_xml_config(const boost::property_tree::wptree&  ptree,
                               const core::video_format_repository& format_repository,
                               const core::channel_info&            channel_info);
configuration parse_amcp_config(const std::vector&                   params,
                                const core::video_format_repository& format_repository,
                                const core::channel_info&            channel_info);

}} // namespace caspar::decklink

================================================
FILE: src/modules/decklink/consumer/decklink_consumer.cpp
================================================
/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
* * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com * Author: Julian Waller, julian@superfly.tv */ #include "../StdAfx.h" #include "common/os/thread.h" #include "config.h" #include "decklink_consumer.h" #include "format_strategy.h" #include "monitor.h" #include "vanc.h" #include "../util/util.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace caspar { namespace decklink { template void set_latency(const com_iface_ptr& config, configuration::latency_t latency, const std::wstring& print) { if (latency == configuration::latency_t::low_latency) { config->SetFlag(bmdDeckLinkConfigLowLatencyVideoOutput, true); CASPAR_LOG(info) << print << L" Enabled low-latency mode."; } else if (latency == configuration::latency_t::normal_latency) { config->SetFlag(bmdDeckLinkConfigLowLatencyVideoOutput, false); CASPAR_LOG(info) << print << L" Disabled low-latency mode."; } } com_ptr get_display_mode(const com_iface_ptr& device, core::video_format fmt, BMDPixelFormat pix_fmt, BMDSupportedVideoModeFlags flag) { auto format = get_decklink_video_format(fmt); IDeckLinkDisplayMode* m = nullptr; IDeckLinkDisplayModeIterator* iter; if (SUCCEEDED(device->GetDisplayModeIterator(&iter))) { auto iterator = wrap_raw(iter, true); while (SUCCEEDED(iterator->Next(&m)) && m != nullptr && m->GetDisplayMode() != format) { m->Release(); } } if (!m) CASPAR_THROW_EXCEPTION(user_error() << msg_info("Device could not find requested video-format: " + std::to_string(format))); com_ptr mode = wrap_raw(m, true); BMDDisplayMode 
actualMode = bmdModeUnknown; BOOL supported = false; auto displayMode = mode->GetDisplayMode(); if (FAILED(device->DoesSupportVideoMode(bmdVideoConnectionUnspecified, displayMode, pix_fmt, bmdNoVideoOutputConversion, flag, &actualMode, &supported))) CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(L"Could not determine whether device supports requested video format: " + get_mode_name(mode))); else if (!supported) CASPAR_LOG(info) << L"Device may not support video-format: " << get_mode_name(mode); else if (actualMode != bmdModeUnknown && actualMode != displayMode) CASPAR_LOG(warning) << L"Device supports video-format with conversion: " << get_mode_name(mode); return mode; } void set_duplex(const com_iface_ptr& attributes, const com_iface_ptr& config, configuration::duplex_t duplex, const std::wstring& print) { BOOL supportsDuplexModeConfiguration; if (FAILED( attributes->GetFlag(static_cast(BMDDeckLinkSupportsDuplexModeConfiguration_v10_11), &supportsDuplexModeConfiguration))) { CASPAR_LOG(error) << print << L" Failed to set duplex mode, unable to check if card supports duplex mode setting."; } if (!supportsDuplexModeConfiguration) { CASPAR_LOG(warning) << print << L" This device does not support setting the duplex mode."; return; } std::map config_map{ {configuration::duplex_t::full_duplex, bmdDuplexModeFull_v10_11}, {configuration::duplex_t::half_duplex, bmdDuplexModeHalf_v10_11}, }; auto duplex_mode = config_map[duplex]; if (FAILED( config->SetInt(static_cast(bmdDeckLinkConfigDuplexMode_v10_11), duplex_mode))) { CASPAR_LOG(error) << print << L" Unable to set duplex mode."; return; } CASPAR_LOG(info) << print << L" Duplex mode set."; } void set_keyer(const com_iface_ptr& attributes, const com_iface_ptr& output, const com_iface_ptr& decklink_keyer, configuration::keyer_t keyer, BMDDisplayMode display_mode, BMDPixelFormat pixel_format, const std::wstring& print) { if (keyer == configuration::keyer_t::internal_keyer || keyer == 
configuration::keyer_t::external_keyer) { BMDDisplayMode actualMode = bmdModeUnknown; BOOL supported = FALSE; if (SUCCEEDED(output->DoesSupportVideoMode(bmdVideoConnectionUnspecified, display_mode, pixel_format, bmdNoVideoOutputConversion, bmdSupportedVideoModeKeying, &actualMode, &supported)) && !supported) { CASPAR_LOG(warning) << print << L" Keying is not supported by this device for the current video mode. Disabling keyer."; decklink_keyer->Disable(); return; } } if (keyer == configuration::keyer_t::internal_keyer) { BOOL value = true; if (SUCCEEDED(attributes->GetFlag(BMDDeckLinkSupportsInternalKeying, &value)) && !value) CASPAR_LOG(error) << print << L" Failed to enable internal keyer."; else if (FAILED(decklink_keyer->Enable(FALSE))) CASPAR_LOG(error) << print << L" Failed to enable internal keyer."; else if (FAILED(decklink_keyer->SetLevel(255))) CASPAR_LOG(error) << print << L" Failed to set key-level to max."; else CASPAR_LOG(info) << print << L" Enabled internal keyer."; } else if (keyer == configuration::keyer_t::external_keyer) { BOOL value = true; if (SUCCEEDED(attributes->GetFlag(BMDDeckLinkSupportsExternalKeying, &value)) && !value) CASPAR_LOG(error) << print << L" Failed to enable external keyer."; else if (FAILED(decklink_keyer->Enable(TRUE))) CASPAR_LOG(error) << print << L" Failed to enable external keyer."; else if (FAILED(decklink_keyer->SetLevel(255))) CASPAR_LOG(error) << print << L" Failed to set key-level to max."; else CASPAR_LOG(info) << print << L" Enabled external keyer."; } } core::video_format_desc get_decklink_format(const port_configuration& config, const core::video_format_desc& fallback_format_desc) { if (config.format.format != core::video_format::invalid && config.format.format != fallback_format_desc.format) { if (config.format.format != core::video_format::invalid && config.format.format != core::video_format::custom && config.format.framerate * config.format.field_count == fallback_format_desc.framerate * 
fallback_format_desc.field_count && config.format.duration == fallback_format_desc.duration) { return config.format; } } if (fallback_format_desc.format == core::video_format::invalid || fallback_format_desc.format == core::video_format::custom) CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Decklink does not support the channel format")); return fallback_format_desc; } spl::shared_ptr create_format_strategy(const configuration& config) { if (config.hdr) { return create_hdr_v210_strategy(config.color_space); } else { return config.pixel_format == configuration::pixel_format_t::yuv ? create_sdr_v210_strategy(config.color_space) : create_sdr_bgra_strategy(); } } enum EOTF { SDR = 0, HDR = 1, PQ = 2, HLG = 3 }; struct ChromaticityCoordinates { double RedX; double RedY; double GreenX; double GreenY; double BlueX; double BlueY; double WhiteX; double WhiteY; }; const auto REC_709 = ChromaticityCoordinates{0.640, 0.330, 0.300, 0.600, 0.150, 0.060, 0.3127, 0.3290}; const auto REC_2020 = ChromaticityCoordinates{0.708, 0.292, 0.170, 0.797, 0.131, 0.046, 0.3127, 0.3290}; class decklink_frame : public IDeckLinkVideoFrame , public IDeckLinkVideoFrameMetadataExtensions { core::video_format_desc format_desc_; std::shared_ptr data_; std::atomic ref_count_{0}; int nb_samples_; const bool hdr_; core::color_space color_space_; hdr_meta_configuration hdr_metadata_; BMDFrameFlags flags_; BMDPixelFormat pix_fmt_; int row_bytes_; com_ptr vanc_; public: decklink_frame(std::shared_ptr data, core::video_format_desc format_desc, int nb_samples, bool hdr, BMDPixelFormat pix_fmt, int row_bytes, bool vanc, core::color_space color_space, const hdr_meta_configuration& hdr_metadata) : format_desc_(std::move(format_desc)) , data_(std::move(data)) , nb_samples_(nb_samples) , hdr_(hdr) , pix_fmt_(pix_fmt) , row_bytes_(row_bytes) , color_space_(color_space) , hdr_metadata_(hdr_metadata) , flags_(hdr ? bmdFrameFlagDefault | bmdFrameContainsHDRMetadata : bmdFrameFlagDefault) , vanc_(vanc ? 
create_ancillary_packets() : nullptr) { } // IUnknown HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID* ppv) override { if (ppv == nullptr) return E_INVALIDARG; REFIID iunknown = IID_IUnknown; if (std::memcmp(&iid, &iunknown, sizeof(REFIID)) == 0) { *ppv = this; AddRef(); } else if (std::memcmp(&iid, &IID_IDeckLinkVideoFrame, sizeof(REFIID)) == 0) { *ppv = static_cast(this); AddRef(); } else if (hdr_ && std::memcmp(&iid, &IID_IDeckLinkVideoFrameMetadataExtensions, sizeof(REFIID)) == 0) { *ppv = static_cast(this); AddRef(); } else if (vanc_ && std::memcmp(&iid, &IID_IDeckLinkVideoFrameAncillaryPackets, sizeof(REFIID)) == 0) { auto raw = get_raw(vanc_); raw->AddRef(); *ppv = raw; } else { *ppv = nullptr; return E_NOINTERFACE; } return S_OK; } ULONG STDMETHODCALLTYPE AddRef() override { return ++ref_count_; } ULONG STDMETHODCALLTYPE Release() override { if (--ref_count_ == 0) { delete this; return 0; } return ref_count_; } // IDecklinkVideoFrame long STDMETHODCALLTYPE GetWidth() override { return static_cast(format_desc_.width); } long STDMETHODCALLTYPE GetHeight() override { return static_cast(format_desc_.height); } long STDMETHODCALLTYPE GetRowBytes() override { return static_cast(row_bytes_); } BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat() override { return pix_fmt_; } BMDFrameFlags STDMETHODCALLTYPE GetFlags() override { return flags_; } HRESULT STDMETHODCALLTYPE GetBytes(void** buffer) override { *buffer = data_.get(); return S_OK; } HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode** timecode) override { return S_FALSE; } HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary** ancillary) override { return S_FALSE; } [[nodiscard]] int nb_samples() const { return nb_samples_; } // IDeckLinkVideoFrameMetadataExtensions HRESULT STDMETHODCALLTYPE GetInt(BMDDeckLinkFrameMetadataID metadataID, int64_t* value) override { HRESULT result = S_OK; switch (metadataID) { case 
bmdDeckLinkFrameMetadataHDRElectroOpticalTransferFunc: *value = EOTF::HLG; break; case bmdDeckLinkFrameMetadataColorspace: *value = (color_space_ == core::color_space::bt2020) ? bmdColorspaceRec2020 : bmdColorspaceRec709; break; default: value = nullptr; result = E_INVALIDARG; } return result; } HRESULT STDMETHODCALLTYPE GetFloat(BMDDeckLinkFrameMetadataID metadataID, double* value) override { const auto color_space = (color_space_ == core::color_space::bt2020) ? &REC_2020 : &REC_709; HRESULT result = S_OK; switch (metadataID) { case bmdDeckLinkFrameMetadataHDRDisplayPrimariesRedX: *value = color_space->RedX; break; case bmdDeckLinkFrameMetadataHDRDisplayPrimariesRedY: *value = color_space->RedY; break; case bmdDeckLinkFrameMetadataHDRDisplayPrimariesGreenX: *value = color_space->GreenX; break; case bmdDeckLinkFrameMetadataHDRDisplayPrimariesGreenY: *value = color_space->GreenY; break; case bmdDeckLinkFrameMetadataHDRDisplayPrimariesBlueX: *value = color_space->BlueX; break; case bmdDeckLinkFrameMetadataHDRDisplayPrimariesBlueY: *value = color_space->BlueY; break; case bmdDeckLinkFrameMetadataHDRWhitePointX: *value = color_space->WhiteX; break; case bmdDeckLinkFrameMetadataHDRWhitePointY: *value = color_space->WhiteY; break; case bmdDeckLinkFrameMetadataHDRMaxDisplayMasteringLuminance: *value = hdr_metadata_.max_dml; break; case bmdDeckLinkFrameMetadataHDRMinDisplayMasteringLuminance: *value = hdr_metadata_.min_dml; break; case bmdDeckLinkFrameMetadataHDRMaximumContentLightLevel: *value = hdr_metadata_.max_cll; break; case bmdDeckLinkFrameMetadataHDRMaximumFrameAverageLightLevel: *value = hdr_metadata_.max_fall; break; default: value = nullptr; result = E_INVALIDARG; } return result; } HRESULT STDMETHODCALLTYPE GetFlag(BMDDeckLinkFrameMetadataID, BOOL* value) override { // Not expecting GetFlag *value = false; return E_INVALIDARG; } HRESULT STDMETHODCALLTYPE GetString(BMDDeckLinkFrameMetadataID, String* value) override { // Not expecting GetString *value = nullptr; 
return E_INVALIDARG;
    }

    HRESULT STDMETHODCALLTYPE GetBytes(BMDDeckLinkFrameMetadataID metadataID, void* buffer, uint32_t* bufferSize) override
    {
        // Byte-typed metadata is never served; report "no data".
        *bufferSize = 0;
        return E_INVALIDARG;
    }
};

// NOTE(review): extraction artifact throughout this chunk — all angle-bracket
// contents (template arguments and <...> include targets) were stripped, e.g.
// `com_ptr` below was presumably `com_ptr<IDeckLink>` and `template void`
// presumably `template <typename Print>`. Restore from upstream before compiling.
//
// Drives one additional ("secondary") DeckLink output that mirrors the primary
// consumer's channel, possibly in a different video format. It converts each
// frame (or field pair) with its own format strategy and schedules it at the
// display time dictated by the primary output, optionally joined to the
// hardware playback sync group so all outputs start genlocked.
struct decklink_secondary_port final : public IDeckLinkVideoOutputCallback
{
    const configuration      config_;
    const port_configuration output_config_;
    spl::shared_ptr format_strategy_;
    com_ptr decklink_            = get_device(output_config_.device_index);
    com_iface_ptr output_        = iface_cast(decklink_);
    com_iface_ptr keyer_         = iface_cast(decklink_, true);
    com_iface_ptr attributes_    = iface_cast(decklink_);
    com_iface_ptr configuration_ = iface_cast(decklink_);
    // 0 means "not part of a hardware playback sync group".
    int device_sync_group_;
    // Interlaced formats arrive one field at a time; the first field is parked
    // here until its pair arrives (see schedule_frame).
    std::optional first_field_;
    const std::wstring model_name_ = get_model_name(decklink_);
    // long long video_scheduled_ = 0;
    const core::video_format_desc channel_format_desc_;
    const core::video_format_desc decklink_format_desc_;
    com_ptr mode_ = get_display_mode(output_, decklink_format_desc_.format, format_strategy_->get_pixel_format(), bmdSupportedVideoModeDefault);

    decklink_secondary_port(const configuration&           config,
                            port_configuration             output_config,
                            core::video_format_desc        channel_format_desc,
                            const core::video_format_desc& main_decklink_format_desc,
                            const std::wstring&            print,
                            int                            device_sync_group)
        : config_(config)
        , output_config_(std::move(output_config))
        , format_strategy_(create_format_strategy(config))
        , device_sync_group_(device_sync_group)
        , channel_format_desc_(std::move(channel_format_desc))
        , decklink_format_desc_(get_decklink_format(output_config_, main_decklink_format_desc))
    {
        // A port running a different format cannot be genlocked with the group.
        if (main_decklink_format_desc.format != decklink_format_desc_.format) {
            CASPAR_LOG(info) << print << L" Disabling sync group for output with different format.";
            device_sync_group_ = 0;
        }

        if (config.duplex != configuration::duplex_t::default_duplex) {
            set_duplex(iface_cast(decklink_), iface_cast(decklink_), config.duplex, print);
        }

        set_latency(configuration_, config.latency, print);
        set_keyer(attributes_, output_, keyer_,
                  config.keyer, mode_->GetDisplayMode(), format_strategy_->get_pixel_format(), print);

        if (device_sync_group_ > 0 &&
            FAILED(configuration_->SetInt(bmdDeckLinkConfigPlaybackGroup, device_sync_group_))) {
            CASPAR_LOG(error) << print << L" Failed to enable sync group.";
            device_sync_group_ = 0;
        } else {
            CASPAR_LOG(trace) << print << L" Joined sync group " << device_sync_group;
        }

        // NOTE(review): message says "key playback" — looks like stale wording
        // from an older key/fill pair implementation; confirm upstream.
        if (FAILED(output_->SetScheduledFrameCompletionCallback(this)))
            CASPAR_THROW_EXCEPTION(caspar_exception()
                                   << msg_info(print + L" Failed to set key playback completion callback.")
                                   << boost::errinfo_api_function("SetScheduledFrameCompletionCallback"));
    }

    ~decklink_secondary_port()
    {
        if (output_) {
            // In a sync group the primary stops playback for the whole group,
            // so only stop here when running stand-alone.
            if (device_sync_group_ == 0) {
                output_->StopScheduledPlayback(0, nullptr, 0);
            }
            output_->DisableVideoOutput();
        }
    }

    [[nodiscard]] std::wstring print() const
    {
        return model_name_ + L" [" + std::to_wstring(output_config_.device_index) + L"|" +
               decklink_format_desc_.name + L"]";
    }

    template void enable_video(const Print& print)
    {
        if (FAILED(output_->EnableVideoOutput(mode_->GetDisplayMode(), device_sync_group_ > 0 ?
                                              bmdVideoOutputSynchronizeToPlaybackGroup : bmdVideoOutputFlagDefault)))
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable secondary video output."));
    }

    template void start_playback(const Print& print)
    {
        // When part of a sync group, starting the primary starts this port too.
        if (device_sync_group_ == 0) {
            if (FAILED(output_->StartScheduledPlayback(0, decklink_format_desc_.time_scale, 1.0))) {
                CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to schedule secondary playback."));
            }
        }
    }

    // Called by the primary consumer's completion callback with each frame (or
    // field) and the display time it should be presented at.
    void schedule_frame(core::const_frame frame, BMDTimeValue display_time)
    {
        bool isInterlaced = decklink_format_desc_.field_count != 1;
        if (isInterlaced && !first_field_.has_value()) {
            // If this is interlaced it needs a pair of frames at a time
            first_field_ = frame;
            return;
        }

        // Figure out which frame is which
        core::const_frame frame1;
        core::const_frame frame2;
        if (isInterlaced) {
            frame1       = *first_field_;
            first_field_ = {};
            frame2       = frame;
        } else {
            frame1 = frame;
        }

        auto image_data = format_strategy_->convert_frame_for_port(
            channel_format_desc_, decklink_format_desc_, output_config_, frame1, frame2, mode_->GetFieldDominance());

        schedule_next_video(image_data, 0, display_time);
    }

    void schedule_next_video(std::shared_ptr image_data, int nb_samples, BMDTimeValue display_time)
    {
        auto packed_frame = wrap_raw(
            new decklink_frame(std::move(image_data),
                               decklink_format_desc_,
                               nb_samples,
                               config_.hdr,
                               format_strategy_->get_pixel_format(),
                               format_strategy_->get_row_bytes(decklink_format_desc_.width),
                               false,
                               config_.color_space,
                               config_.hdr_meta));
        if (FAILED(output_->ScheduleVideoFrame(get_raw(packed_frame), display_time, decklink_format_desc_.duration, decklink_format_desc_.time_scale))) {
            // NOTE(review): says "primary" but this is a secondary port — the
            // message looks copy-pasted from decklink_consumer; confirm upstream.
            CASPAR_LOG(error) << print() << L" Failed to schedule primary video.";
        }

        // video_scheduled_ += decklink_format_desc_.duration;
    }

    // Minimal COM plumbing: lifetime is managed by the owning consumer, so
    // ref-counting is a no-op and no other interfaces are exposed.
    HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*) override { return E_NOINTERFACE; }
    ULONG STDMETHODCALLTYPE   AddRef() override { return 1; }
    ULONG STDMETHODCALLTYPE   Release() override {
return 1;
    }

    HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped() override { return S_OK; }

    HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame* completed_frame, BMDOutputFrameCompletionResult result) override
    {
        // Let the primary callback keep the pace, so no scheduling here.
        return S_OK;
    }
};

// The primary DeckLink output for a channel. Owns the device, an internal
// frame queue fed by send(), and any secondary ports; all scheduling is driven
// from the driver's ScheduledFrameCompleted callback.
// NOTE(review): template arguments were stripped from this chunk by extraction
// (e.g. `std::queue` was presumably `std::queue<core::const_frame>`); restore
// from upstream before compiling.
struct decklink_consumer final : public IDeckLinkVideoOutputCallback
{
    const int           channel_index_;
    const configuration config_;
    spl::shared_ptr format_strategy_;

    com_ptr decklink_            = get_device(config_.primary.device_index);
    com_iface_ptr output_        = iface_cast(decklink_);
    com_iface_ptr configuration_ = iface_cast(decklink_);
    com_iface_ptr keyer_         = iface_cast(decklink_, true);
    com_iface_ptr attributes_    = iface_cast(decklink_);

    // Exceptions thrown on the driver callback thread are parked here and
    // rethrown on the caller's thread in send().
    std::mutex         exception_mutex_;
    std::exception_ptr exception_;

    const std::wstring model_name_ = get_model_name(decklink_);

    const core::video_format_desc channel_format_desc_;
    const core::video_format_desc decklink_format_desc_;

    // Bounded hand-off queue between the channel (send) and the completion
    // callback (pop); interlaced channels hold one entry per field.
    std::mutex              buffer_mutex_;
    std::condition_variable buffer_cond_;
    std::queue buffer_;
    int buffer_capacity_   = channel_format_desc_.field_count;
    const int buffer_size_ = config_.buffer_depth(); // Minimum buffer-size 3.

    long long video_scheduled_ = 0;
    long long audio_scheduled_ = 0;

    boost::circular_buffer> audio_container_{static_cast(buffer_size_ + 1)};

    spl::shared_ptr graph_;
    caspar::timer   tick_timer_;
    reference_signal_detector reference_signal_detector_{output_};
    // std::atomic scheduled_frames_completed_{0};

    std::vector> secondary_port_contexts_;
    int device_sync_group_ = 0;

    com_ptr mode_ = get_display_mode(output_, decklink_format_desc_.format, format_strategy_->get_pixel_format(), bmdSupportedVideoModeDefault);

    std::atomic abort_request_{false};

    std::shared_ptr vanc_;

  public:
    // Opens the device, configures duplex/keyer/latency, creates the secondary
    // ports, prerolls buffer_size_ frames of blank video (and silence), then
    // starts scheduled playback.
    decklink_consumer(const configuration& config, core::video_format_desc channel_format_desc, int channel_index)
        : channel_index_(channel_index)
        , config_(config)
        , format_strategy_(create_format_strategy(config))
        , channel_format_desc_(std::move(channel_format_desc))
        , decklink_format_desc_(get_decklink_format(config.primary, channel_format_desc_))
    {
        graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
        graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
        graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
        graph_->set_color("flushed-frame", diagnostics::color(0.4f, 0.3f, 0.8f));
        graph_->set_color("buffered-audio", diagnostics::color(0.9f, 0.9f, 0.5f));
        graph_->set_color("buffered-video", diagnostics::color(0.2f, 0.9f, 0.9f));

        if (config.duplex != configuration::duplex_t::default_duplex) {
            set_duplex(iface_cast(decklink_), iface_cast(decklink_), config.duplex, print());
        }

        /*
        if (key_context_) {
            graph_->set_color("key-offset", diagnostics::color(1.0f, 0.0f, 0.0f));
        }
        */

        graph_->set_text(print());
        diagnostics::register_graph(graph_);

        // If there are additional ports devices, then enable the sync group
        if (!config.secondaries.empty()) {
            // A unique id is needed for this group, this is simpler than a random number
            device_sync_group_ = static_cast(config.primary.device_index);
            if (FAILED(configuration_->SetInt(bmdDeckLinkConfigPlaybackGroup, device_sync_group_))) {
                device_sync_group_ = 0;
                CASPAR_LOG(error) << print() << L" Failed to enable sync group.";
            } else {
                CASPAR_LOG(debug) << print() << L" Enabled sync group: " << device_sync_group_;
            }
        }

        // create the secondary ports
        for (auto& secondary_port_config : config_.secondaries) {
            secondary_port_contexts_.push_back(std::make_unique(
                config, secondary_port_config, channel_format_desc_, decklink_format_desc_, print(), device_sync_group_));
        }

        if (config.vanc.enable) {
            BOOL flag = TRUE;
            attributes_->GetFlag(BMDDeckLinkVANCRequires10BitYUVVideoFrames, &flag);
            if (flag) {
                CASPAR_LOG(warning) << print()
                                    << L" DeckLink hardware only supports VANC when the active picture and ancillary "
                                       L"data are both 10-bit YUV pixel format.";
            } else {
                CASPAR_LOG(info) << print() << L" DeckLink hardware supports VANC.";
                vanc_ = create_vanc(config.vanc);
            }
        }

        enable_video();

        if (config.embedded_audio) {
            enable_audio();
        }

        set_latency(configuration_, config.latency, print());
        set_keyer(attributes_, output_, keyer_, config.keyer, mode_->GetDisplayMode(), format_strategy_->get_pixel_format(), print());

        if (config.hdr) {
            BOOL flag = FALSE;
            if (SUCCEEDED(attributes_->GetFlag(BMDDeckLinkSupportsHDRMetadata, &flag)) && !flag)
                CASPAR_LOG(error) << print() << L" Device does not support HDR metadata.";
            if (SUCCEEDED(attributes_->GetFlag(BMDDeckLinkSupportsColorspaceMetadata, &flag)) && !flag)
                CASPAR_LOG(warning) << print() << L" Device does not support colorspace metadata.";
        }

        if (config.embedded_audio) {
            output_->BeginAudioPreroll();
        }

        // Preroll buffer_size_ frames so the driver has work queued before
        // playback starts (see the vendor mail at the end of this file).
        for (int n = 0; n < buffer_size_; ++n) {
            auto nb_samples = decklink_format_desc_.audio_cadence[n % decklink_format_desc_.audio_cadence.size()] *
                              decklink_format_desc_.field_count;
            if (config.embedded_audio) {
                schedule_next_audio(std::vector(nb_samples * decklink_format_desc_.audio_channels), nb_samples);
            }

            std::shared_ptr image_data = format_strategy_->allocate_frame_data(decklink_format_desc_);
            schedule_next_video(image_data, nb_samples, video_scheduled_);
            for (auto& context : secondary_port_contexts_) {
                context->schedule_next_video(image_data, 0, video_scheduled_);
            }
            video_scheduled_ += decklink_format_desc_.duration;
        }

        if (config.embedded_audio) {
            output_->EndAudioPreroll();
        }

        wait_for_reference_lock();
        start_playback();
    }

    ~decklink_consumer()
    {
        abort_request_ = true;
        buffer_cond_.notify_all();

        if (output_ != nullptr) {
            output_->StopScheduledPlayback(0, nullptr, 0);
            if (config_.embedded_audio) {
                output_->DisableAudioOutput();
            }
            output_->DisableVideoOutput();
        }

        secondary_port_contexts_.clear();
    }

    // Blocks (up to the configured duration) until the device reports a locked
    // genlock reference; logs and falls through on timeout or missing hardware
    // support.
    void wait_for_reference_lock()
    {
        if (config_.wait_for_reference_duration == 0 ||
            config_.wait_for_reference == configuration::wait_for_reference_t::disabled) {
            // Wait disabled
            return;
        }

        if (config_.wait_for_reference == configuration::wait_for_reference_t::automatic && device_sync_group_ == 0) {
            // Wait is not necessary
            return;
        }

        CASPAR_LOG(info) << print() << L" Reference signal: waiting for lock";

        // When using the sync group we need a reference lock before starting playback, otherwise the outputs will not
        // be locked correctly and will be out of sync
        auto wait_end = std::chrono::system_clock::now() + std::chrono::seconds(config_.wait_for_reference_duration);
        while (std::chrono::system_clock::now() < wait_end) {
            BMDReferenceStatus reference_status;
            if (output_->GetReferenceStatus(&reference_status) != S_OK) {
                CASPAR_LOG(error) << print() << L" Reference signal: failed while querying status";
                break;
            }

            if (reference_status & bmdReferenceNotSupportedByHardware) {
                CASPAR_LOG(info) << print() << L" Reference signal: not supported by hardware.";
                break;
            } else if (reference_status & bmdReferenceLocked) {
                CASPAR_LOG(info) << print() << L" Reference signal: locked";

                // TODO - is this necessary? This is to give it a chance to stabilise before continuing
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
                return;
            }

            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }

        CASPAR_LOG(warning) << print() << L" Reference signal: unable to acquire lock";
    }

    void enable_audio()
    {
        if (FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz,
                                              bmdAudioSampleType32bitInteger,
                                              decklink_format_desc_.audio_channels,
                                              bmdAudioOutputStreamTimestamped))) {
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable audio output."));
        }

        CASPAR_LOG(info) << print() << L" Enabled embedded-audio.";
    }

    void enable_video()
    {
        BMDVideoOutputFlags output_flags = bmdVideoOutputFlagDefault;
        if (device_sync_group_ > 0) {
            output_flags = static_cast(output_flags | bmdVideoOutputSynchronizeToPlaybackGroup);
        }
        if (vanc_) {
            output_flags = static_cast(output_flags | bmdVideoOutputVANC);
        }

        if (FAILED(output_->EnableVideoOutput(mode_->GetDisplayMode(), output_flags))) {
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable primary video output."));
        }

        if (FAILED(output_->SetScheduledFrameCompletionCallback(this))) {
            CASPAR_THROW_EXCEPTION(caspar_exception()
                                   << msg_info(print() + L" Failed to set primary playback completion callback.")
                                   << boost::errinfo_api_function("SetScheduledFrameCompletionCallback"));
        }

        for (auto& context : secondary_port_contexts_) {
            context->enable_video([this]() { return print(); });
        }
    }

    void start_playback()
    {
        if (FAILED(output_->StartScheduledPlayback(0, decklink_format_desc_.time_scale, 1.0))) {
            CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to schedule primary playback."));
        }

        // Secondaries in the sync group are started implicitly by the call above.
        for (auto& context : secondary_port_contexts_) {
            context->start_playback([this]() { return print(); });
        }
    }

    // Minimal COM plumbing — lifetime is managed by decklink_consumer_proxy.
    HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*) override { return E_NOINTERFACE; }
    ULONG STDMETHODCALLTYPE   AddRef() override { return 1; }
    ULONG STDMETHODCALLTYPE   Release() override
    {
        return 1;
    }

    HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped() override
    {
        CASPAR_LOG(info) << print() << L" Scheduled playback has stopped.";
        return S_OK;
    }

    // Driver callback — the pacing heart of the consumer: pops the next frame
    // (or field pair), schedules primary video/audio and fans frames out to the
    // secondary ports in parallel.
    HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame* completed_frame,
                                                      BMDOutputFrameCompletionResult result) override
    {
        thread_local auto priority_set = false;
        if (!priority_set) {
            priority_set = true;
            // set_thread_realtime_priority();
            set_thread_name(L"decklink_consumer[" + std::to_wstring(config_.primary.device_index) + L"]-ScheduledFrameCompleted");
        }

        try {
            auto tick_time = tick_timer_.elapsed() * decklink_format_desc_.hz * 0.5;
            graph_->set_value("tick-time", tick_time);
            tick_timer_.restart();

            reference_signal_detector_.detect_change([this]() { return print(); });

            auto dframe = reinterpret_cast(completed_frame);
            // ++scheduled_frames_completed_;

            /*
            if (key_context_) {
                graph_->set_value(
                    "key-offset",
                    static_cast(scheduled_frames_completed_ - key_context_->scheduled_frames_completed_) * 0.1 + 0.5);
            }
            */

            /**
             * TODO - track how the secondaries are doing by comparing IDeckLinkOutput::GetScheduledStreamTime
             */

            if (result == bmdOutputFrameDisplayedLate) {
                graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
                // Jump the timeline forward so subsequent frames stop being late.
                video_scheduled_ += decklink_format_desc_.duration;
                audio_scheduled_ += dframe->nb_samples();
            } else if (result == bmdOutputFrameDropped) {
                graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
            } else if (result == bmdOutputFrameFlushed) {
                graph_->set_tag(diagnostics::tag_severity::WARNING, "flushed-frame");
            }

            {
                UINT32 buffered;
                output_->GetBufferedVideoFrameCount(&buffered);
                graph_->set_value("buffered-video", static_cast(buffered) / config_.buffer_depth());

                if (config_.embedded_audio) {
                    output_->GetBufferedAudioSampleFrameCount(&buffered);
                    graph_->set_value("buffered-audio",
                                      static_cast(buffered) /
                                          (decklink_format_desc_.audio_cadence[0] * decklink_format_desc_.field_count *
                                           config_.buffer_depth()));
                }
            }

            core::const_frame frame1 = pop();
            core::const_frame frame2;

            bool isInterlaced = mode_->GetFieldDominance() != bmdProgressiveFrame;
            if (mode_->GetFieldDominance() != bmdProgressiveFrame) {
                // If the main is not progressive, then pop the second frame
                frame2 = pop();
            }

            if (abort_request_)
                return E_FAIL;

            BMDTimeValue video_display_time = video_scheduled_;
            video_scheduled_ += decklink_format_desc_.duration;

            std::vector audio_data;
            if (config_.embedded_audio) {
                audio_data.insert(audio_data.end(), frame1.audio_data().begin(), frame1.audio_data().end());
                if (isInterlaced) {
                    audio_data.insert(audio_data.end(), frame2.audio_data().begin(), frame2.audio_data().end());
                }
            }

            // TODO: is this reliable?
            const int nb_samples = static_cast(audio_data.size()) / decklink_format_desc_.audio_channels;

            // Schedule video
            // Index -1 is the primary port; 0..N-1 are the secondaries.
            tbb::parallel_for(-1, static_cast(secondary_port_contexts_.size()), [&](int i) {
                if (i == -1) {
                    // Primary port
                    std::shared_ptr image_data = format_strategy_->convert_frame_for_port(channel_format_desc_, decklink_format_desc_, config_.primary, frame1, frame2, mode_->GetFieldDominance());
                    schedule_next_video(image_data, nb_samples, video_display_time);
                    if (config_.embedded_audio) {
                        schedule_next_audio(std::move(audio_data), nb_samples);
                    }
                } else {
                    // Send frame to secondary ports
                    auto& context = secondary_port_contexts_[i];
                    context->schedule_frame(frame1, video_display_time);
                    if (isInterlaced) {
                        context->schedule_frame(frame2, video_display_time);
                    }
                    if (config_.embedded_audio) {
                        // TODO - audio for secondaries?
                    }
                }
            });
        } catch (...) {
            std::lock_guard lock(exception_mutex_);
            exception_ = std::current_exception();
            return E_FAIL;
        }

        return S_OK;
    }

    // Blocks until a frame is queued by send() (or shutdown is requested).
    core::const_frame pop()
    {
        core::const_frame frame;
        {
            std::unique_lock lock(buffer_mutex_);
            buffer_cond_.wait(lock, [&] { return !buffer_.empty() || abort_request_; });
            if (!abort_request_) {
                frame = buffer_.front();
                buffer_.pop();
            }
        }
        buffer_cond_.notify_all();
        return frame;
    }

    void schedule_next_audio(std::vector audio, int nb_samples)
    {
        // TODO (refactor) does ScheduleAudioSamples copy data?
        // The container keeps the buffer alive while the driver consumes it.
        audio_container_.push_back(std::move(audio));

        if (FAILED(output_->ScheduleAudioSamples(audio_container_.back().data(),
                                                 nb_samples,
                                                 audio_scheduled_,
                                                 decklink_format_desc_.audio_sample_rate,
                                                 nullptr))) {
            CASPAR_LOG(error) << print() << L" Failed to schedule audio.";
        }

        audio_scheduled_ += nb_samples;
        // TODO - what if there are too many/few samples in this frame?
    }

    void schedule_next_video(std::shared_ptr image_data, int nb_samples, BMDTimeValue display_time)
    {
        auto fmt        = format_strategy_->get_pixel_format();
        auto row_bytes  = format_strategy_->get_row_bytes(decklink_format_desc_.width);
        auto fill_frame = wrap_raw(new decklink_frame(std::move(image_data), decklink_format_desc_, nb_samples, config_.hdr, fmt, row_bytes, config_.vanc.enable, config_.color_space, config_.hdr_meta));

        // Attach any pending VANC packets (both fields when interlaced).
        if (vanc_ && vanc_->has_data()) {
            auto ancillary_packets = iface_cast(fill_frame);
            auto packets           = vanc_->pop_packets();
            for (auto& packet : packets) {
                if (FAILED(ancillary_packets->AttachPacket(get_raw(packet)))) {
                    CASPAR_LOG(error) << print() << L" Failed to add ancillary packet.";
                }
            }

            bool isInterlaced = mode_->GetFieldDominance() != bmdProgressiveFrame;
            if (isInterlaced) {
                auto field2_packets = vanc_->pop_packets(true);
                for (auto& packet : field2_packets) {
                    if (FAILED(ancillary_packets->AttachPacket(get_raw(packet)))) {
                        CASPAR_LOG(error) << print() << L" Failed to add ancillary packet.";
                    }
                }
            }
        }

        if (FAILED(output_->ScheduleVideoFrame(
                get_raw(fill_frame), display_time,
decklink_format_desc_.duration, decklink_format_desc_.time_scale))) {
            CASPAR_LOG(error) << print() << L" Failed to schedule primary video.";
        }
    }

    // Queues a frame from the channel; blocks when the internal buffer is full.
    // Returns false once shutdown has been requested. Also rethrows any
    // exception captured on the driver callback thread.
    bool send(core::video_field field, core::const_frame frame)
    {
        {
            std::lock_guard lock(exception_mutex_);
            if (exception_ != nullptr) {
                std::rethrow_exception(exception_);
            }
        }

        if (frame) {
            std::unique_lock lock(buffer_mutex_);
            if (field != core::video_field::b) {
                // Always push a field2, as we have supplied field1
                buffer_cond_.wait(lock, [&] { return buffer_.size() < buffer_capacity_ || abort_request_; });
            }
            buffer_.push(std::move(frame));
        }
        buffer_cond_.notify_all();
        return !abort_request_;
    }

    // Handles CALL commands by forwarding them to the VANC provider.
    // NOTE(review): vanc_ is only created when VANC is enabled and supported,
    // yet it is dereferenced unconditionally here — the catch(...) masks the
    // resulting failure, but an explicit null check would be clearer; confirm
    // against upstream.
    bool call(const std::vector& params)
    {
        try {
            bool result = vanc_->try_push_data(params);
            if (!result) {
                CASPAR_LOG(warning) << print() << L" Unknown command: " << (params.empty() ? L"N/A" : params[0]);
            }
            return result;
        } catch (...) {
            CASPAR_LOG(warning) << print() << L" Failed to apply: " << (params.empty() ? L"N/A" : params[0]);
        }
        return false;
    }

    [[nodiscard]] std::wstring print() const
    {
        std::wstringstream buffer;
        buffer << model_name_ << L" [" + std::to_wstring(channel_index_) << L"-"
               << std::to_wstring(config_.primary.device_index) << L"|" << decklink_format_desc_.name << L"]";
        for (auto& context : secondary_port_contexts_) {
            buffer << L" && " + context->print();
        }
        return buffer.str();
    }
};

// frame_consumer facade: owns the real decklink_consumer and marshals all
// calls onto a dedicated executor thread (which also holds the COM apartment).
struct decklink_consumer_proxy : public core::frame_consumer
{
    const configuration config_;
    std::unique_ptr consumer_;
    core::video_format_desc format_desc_;
    executor executor_;

  public:
    explicit decklink_consumer_proxy(const configuration& config)
        : config_(config)
        , executor_(L"decklink_consumer[" + std::to_wstring(config.primary.device_index) + L"]")
    {
        executor_.begin_invoke([=] { com_initialize(); });
    }

    ~decklink_consumer_proxy() override
    {
        // Destroy the consumer on the executor thread, then tear down COM.
        executor_.invoke([=, this] {
            set_thread_realtime_priority();
            consumer_.reset();
            com_uninitialize();
        });
    }

    // (Re)creates the consumer for a (possibly new) channel format.
    void initialize(const core::video_format_desc& format_desc, const core::channel_info& channel_info, int port_index) override
    {
        format_desc_ = format_desc;
        executor_.invoke([=, this] {
            consumer_.reset();
            consumer_ = std::make_unique(config_, format_desc, channel_info.index);
        });
    }

    std::future send(core::video_field field, core::const_frame frame) override
    {
        return executor_.begin_invoke([=, this] { return consumer_->send(field, frame); });
    }

    std::future call(const std::vector& params) override
    {
        return executor_.begin_invoke([=, this] { return consumer_->call(params); });
    }

    [[nodiscard]] std::wstring print() const override { return consumer_ ? consumer_->print() : L"[decklink_consumer]"; }

    [[nodiscard]] std::wstring name() const override { return L"decklink"; }

    [[nodiscard]] int index() const override { return 300 + static_cast(config_.primary.device_index); }

    [[nodiscard]] bool has_synchronization_clock() const override { return true; }

    [[nodiscard]] core::monitor::state state() const override { return get_state_for_config(config_, format_desc_); }
};

// AMCP factory: `ADD <ch> DECKLINK <device> ...`.
spl::shared_ptr create_consumer(const std::vector& params,
                                const core::video_format_repository& format_repository,
                                const std::vector>& channels,
                                const core::channel_info& channel_info)
{
    if (params.empty() || !boost::iequals(params.at(0), L"DECKLINK")) {
        return core::frame_consumer::empty();
    }

    configuration config = parse_amcp_config(params, format_repository, channel_info);

    // HDR output follows the channel's bit depth.
    config.hdr = (channel_info.depth != common::bit_depth::bit8);
    if (config.hdr && config.primary.key_only) {
        CASPAR_THROW_EXCEPTION(caspar_exception()
                               << msg_info("Decklink consumer does not support hdr in combination with key only"));
    }

    return spl::make_shared(config);
}

// casparcg.config factory (preconfigured <decklink> element).
spl::shared_ptr create_preconfigured_consumer(const boost::property_tree::wptree& ptree,
                                              const core::video_format_repository& format_repository,
                                              const std::vector>& channels,
                                              const core::channel_info& channel_info)
{
    configuration config = parse_xml_config(ptree, format_repository, channel_info);

    config.hdr = (channel_info.depth != common::bit_depth::bit8);
    if (config.hdr &&
config.primary.key_only) { CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Decklink consumer does not support hdr in combination with key only")); } return spl::make_shared(config); } }} // namespace caspar::decklink /* ############################################################################## Pre-rolling Mail: 2011-05-09 Yoshan BMD Developer Support developer@blackmagic-design.com ----------------------------------------------------------------------------- Thanks for your inquiry. The minimum number of frames that you can preroll for scheduled playback is three frames for video and four frames for audio. As you mentioned if you preroll less frames then playback will not start or playback will be very sporadic. From our experience with Media Express, we recommended that at least seven frames are prerolled for smooth playback. Regarding the bmdDeckLinkConfigLowLatencyVideoOutput flag: There can be around 3 frames worth of latency on scheduled output. When the bmdDeckLinkConfigLowLatencyVideoOutput flag is used this latency is reduced or removed for scheduled playback. If the DisplayVideoFrameSync() method is used, the bmdDeckLinkConfigLowLatencyVideoOutput setting will guarantee that the provided frame will be output as soon the previous frame output has been completed. ################################################################################ */ /* ############################################################################## Async DMA Transfer without redundant copying Mail: 2011-05-10 Yoshan BMD Developer Support developer@blackmagic-design.com ----------------------------------------------------------------------------- Thanks for your inquiry. You could try subclassing IDeckLinkMutableVideoFrame and providing a pointer to your video buffer when GetBytes() is called. This may help to keep copying to a minimum. 
Please ensure that the pixel format is in bmdFormat10BitYUV, otherwise the DeckLink API / driver will have to colourspace convert which may result in additional copying. ################################################################################ */ ================================================ FILE: src/modules/decklink/consumer/decklink_consumer.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Robert Nagy, ronag89@gmail.com */ #pragma once #include #include #include #include #include #include namespace caspar { namespace decklink { spl::shared_ptr create_consumer(const std::vector& params, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info); spl::shared_ptr create_preconfigured_consumer(const boost::property_tree::wptree& ptree, const core::video_format_repository& format_repository, const std::vector>& channels, const core::channel_info& channel_info); }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/format_strategy.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
*
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Niklas P Andersson, niklas@nxtedition.com
 */

#pragma once

#include "../StdAfx.h"

#include "config.h"

#include "../decklink_api.h"

// NOTE(review): the targets of the three <...> includes below were stripped by
// extraction; restore them from the upstream file.
#include
#include
#include

namespace caspar { namespace decklink {

// Abstraction over the pixel-format-specific work of the DeckLink consumer:
// each strategy knows its BMD pixel format and row pitch, how to allocate a
// frame buffer, and how to convert a channel frame (or field pair) into that
// buffer for a given port. Non-copyable; construct via the factories below.
class format_strategy
{
  protected:
    format_strategy() = default;

  public:
    format_strategy& operator=(const format_strategy&) = delete;
    virtual ~format_strategy() = default;
    format_strategy(const format_strategy&) = delete;

    // The BMDPixelFormat this strategy produces for the card.
    virtual BMDPixelFormat get_pixel_format() = 0;
    // Bytes per row for a frame of the given width in this pixel format.
    virtual int get_row_bytes(int width) = 0;

    // Allocates an (uninitialised) frame buffer sized for format_desc.
    // NOTE(review): the shared_ptr element type was stripped by extraction.
    virtual std::shared_ptr allocate_frame_data(const core::video_format_desc& format_desc) = 0;

    // Converts frame1 (and, for interlaced field dominance, frame2) from the
    // channel format into a newly allocated buffer for this port, honouring
    // the port's key-only/sub-region configuration.
    virtual std::shared_ptr convert_frame_for_port(const core::video_format_desc& channel_format_desc,
                                                   const core::video_format_desc& decklink_format_desc,
                                                   const port_configuration&      config,
                                                   const core::const_frame&       frame1,
                                                   const core::const_frame&       frame2,
                                                   BMDFieldDominance              field_dominance) = 0;
};

// Factories for the available strategies (return-type template argument
// stripped by extraction — presumably spl::shared_ptr<format_strategy>).
spl::shared_ptr create_sdr_bgra_strategy();
spl::shared_ptr create_sdr_v210_strategy(core::color_space colorspace);
spl::shared_ptr create_hdr_v210_strategy(core::color_space colorspace);

}} // namespace caspar::decklink

================================================
FILE: src/modules/decklink/consumer/monitor.cpp
================================================

/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Julian Waller, julian@superfly.tv */ #include "monitor.h" namespace caspar { namespace decklink { core::monitor::state get_state_for_port(const port_configuration& port_config, const core::video_format_desc& channel_format) { core::monitor::state state; state["index"] = port_config.device_index; state["key-only"] = port_config.key_only; if (port_config.format.format == core::video_format::invalid) { state["video-mode"] = channel_format.name; } else { state["video-mode"] = port_config.format.name; } if (port_config.has_subregion_geometry()) { state["subregion/src-x"] = port_config.src_x; state["subregion/src-y"] = port_config.src_y; state["subregion/src-x"] = port_config.dest_x; state["subregion/dest-y"] = port_config.dest_y; state["subregion/width"] = port_config.region_w; state["subregion/height"] = port_config.region_h; } return state; } core::monitor::state get_state_for_config(const configuration& config, const core::video_format_desc& channel_format) { core::monitor::state state; state["decklink"] = get_state_for_port(config.primary, channel_format); state["decklink/embedded-audio"] = config.embedded_audio; state["decklink/buffer-depth"] = config.base_buffer_depth; if (config.keyer == configuration::keyer_t::external_keyer) { state["decklink/keyer"] = std::wstring(L"external"); } else if (config.keyer == configuration::keyer_t::internal_keyer) { 
state["decklink/keyer"] = std::wstring(L"internal"); } if (config.latency == configuration::latency_t::low_latency) { state["decklink/latency"] = std::wstring(L"low"); } else if (config.latency == configuration::latency_t::normal_latency) { state["decklink/latency"] = std::wstring(L"normal"); } int index = 0; for (auto& port_config : config.secondaries) { state["decklink/ports"][index++] = get_state_for_port(port_config, channel_format); } return state; } }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/monitor.h ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Julian Waller, julian@superfly.tv */ #pragma once #include "config.h" #include namespace caspar { namespace decklink { core::monitor::state get_state_for_config(const configuration& config, const core::video_format_desc& channel_format); }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/sdr_bgra_strategy.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). 
* * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Julian Waller, julian@superfly.tv * Author: Niklas Andersson, niklas@nxtedition.com */ #include "../StdAfx.h" #include "format_strategy.h" #include #include #include namespace caspar { namespace decklink { std::shared_ptr convert_to_key_only(const std::shared_ptr& image_data, std::size_t byte_count) { auto key_data = create_aligned_buffer(byte_count); aligned_memshfl(key_data.get(), image_data.get(), byte_count, 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303); return key_data; } class sdr_bgra_strategy : public format_strategy , std::enable_shared_from_this { public: sdr_bgra_strategy() = default; BMDPixelFormat get_pixel_format() override { return bmdFormat8BitBGRA; } int get_row_bytes(int width) override { return width * 4; } std::shared_ptr allocate_frame_data(const core::video_format_desc& format_desc) override { return create_aligned_buffer(format_desc.size, 64); } std::shared_ptr convert_frame_for_port(const core::video_format_desc& channel_format_desc, const core::video_format_desc& decklink_format_desc, const port_configuration& config, const core::const_frame& frame1, const core::const_frame& frame2, BMDFieldDominance field_dominance) override { std::shared_ptr image_data = allocate_frame_data(decklink_format_desc); if (field_dominance != bmdProgressiveFrame) { convert_frame(channel_format_desc, decklink_format_desc, config, image_data, field_dominance == 
bmdUpperFieldFirst, frame1); convert_frame(channel_format_desc, decklink_format_desc, config, image_data, field_dominance != bmdUpperFieldFirst, frame2); } else { convert_frame(channel_format_desc, decklink_format_desc, config, image_data, true, frame1); } if (config.key_only) { image_data = convert_to_key_only(image_data, decklink_format_desc.size); } return image_data; } private: void convert_frame(const core::video_format_desc& channel_format_desc, const core::video_format_desc& decklink_format_desc, const port_configuration& config, std::shared_ptr& image_data, bool topField, const core::const_frame& frame) { // No point copying an empty frame if (!frame) return; int firstLine = topField ? 0 : 1; if (channel_format_desc.format == decklink_format_desc.format && config.src_x == 0 && config.src_y == 0 && config.region_w == 0 && config.region_h == 0 && config.dest_x == 0 && config.dest_y == 0) { // Fast path size_t byte_count_line = (size_t)decklink_format_desc.width * 4; for (int y = firstLine; y < decklink_format_desc.height; y += decklink_format_desc.field_count) { std::memcpy(reinterpret_cast(image_data.get()) + (long long)y * byte_count_line, frame.image_data(0).data() + (long long)y * byte_count_line, byte_count_line); } } else { // Take a sub-region // Some repetetive numbers size_t byte_count_dest_line = (size_t)decklink_format_desc.width * 4; size_t byte_count_src_line = (size_t)channel_format_desc.width * 4; size_t byte_offset_src_line = std::max(0, (config.src_x * 4)); size_t byte_offset_dest_line = std::max(0, (config.dest_x * 4)); int y_skip_src_lines = std::max(0, config.src_y); int y_skip_dest_lines = std::max(0, config.dest_y); size_t byte_copy_per_line = std::min(byte_count_src_line - byte_offset_src_line, byte_count_dest_line - byte_offset_dest_line); if (config.region_w > 0) // If the user chose a width, respect that byte_copy_per_line = std::min(byte_copy_per_line, (size_t)config.region_w * 4); size_t byte_pad_end_of_line = std::max( 
((size_t)decklink_format_desc.width * 4) - byte_copy_per_line - byte_offset_dest_line, (size_t)0); int copy_line_count = std::min(channel_format_desc.height - y_skip_src_lines, decklink_format_desc.height - y_skip_dest_lines); if (config.region_h > 0) // If the user chose a height, respect that copy_line_count = std::min(copy_line_count, config.region_h); int max_y_content = y_skip_dest_lines + std::min(copy_line_count, channel_format_desc.height); for (int y = firstLine; y < y_skip_dest_lines; y += decklink_format_desc.field_count) { // Fill the line with black std::memset( reinterpret_cast(image_data.get()) + (byte_count_dest_line * y), 0, byte_count_dest_line); } int firstFillLine = y_skip_dest_lines; if (decklink_format_desc.field_count != 1 && firstFillLine % 2 != firstLine) firstFillLine += 1; for (int y = firstFillLine; y < max_y_content; y += decklink_format_desc.field_count) { auto line_start_ptr = reinterpret_cast(image_data.get()) + (long long)y * byte_count_dest_line; auto line_content_ptr = line_start_ptr + byte_offset_dest_line; // Fill the start with black if (byte_offset_dest_line > 0) { std::memset(line_start_ptr, 0, byte_offset_dest_line); } // Copy the pixels long long src_y = y + y_skip_src_lines - y_skip_dest_lines; std::memcpy(line_content_ptr, frame.image_data(0).data() + src_y * byte_count_src_line + byte_offset_src_line, byte_copy_per_line); // Fill the end with black if (byte_pad_end_of_line > 0) { std::memset(line_content_ptr + byte_copy_per_line, 0, byte_pad_end_of_line); } } // Calculate the first line number to fill with black if (decklink_format_desc.field_count != 1 && max_y_content % 2 != firstLine) max_y_content += 1; for (int y = max_y_content; y < decklink_format_desc.height; y += decklink_format_desc.field_count) { // Fill the line with black std::memset( reinterpret_cast(image_data.get()) + (byte_count_dest_line * y), 0, byte_count_dest_line); } } } }; spl::shared_ptr create_sdr_bgra_strategy() { return spl::make_shared(); } }} 
// namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/v210_strategies.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CasparCG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with CasparCG. If not, see . * * Author: Niklas Andersson, niklas@nxtedition.com */ #include "../StdAfx.h" #if !defined(WIN32) && (defined(__x86_64__) || defined(__i386__)) // Force this file to compile with avx2, as it has been crafted with intrinsics that require it. 
#pragma GCC target("avx2") #endif #ifdef USE_SIMDE #define SIMDE_ENABLE_NATIVE_ALIASES #include #endif #include #include "format_strategy.h" #include #include #include namespace caspar { namespace decklink { std::vector create_int_matrix(const std::vector& matrix) { static const float LumaRangeWidth = 876.f * (1024.f / 1023.f); static const float ChromaRangeWidth = 896.f * (1024.f / 1023.f); std::vector color_matrix_f(matrix); color_matrix_f[0] *= LumaRangeWidth; color_matrix_f[1] *= LumaRangeWidth; color_matrix_f[2] *= LumaRangeWidth; color_matrix_f[3] *= ChromaRangeWidth; color_matrix_f[4] *= ChromaRangeWidth; color_matrix_f[5] *= ChromaRangeWidth; color_matrix_f[6] *= ChromaRangeWidth; color_matrix_f[7] *= ChromaRangeWidth; color_matrix_f[8] *= ChromaRangeWidth; std::vector int_matrix(color_matrix_f.size()); transform(color_matrix_f.cbegin(), color_matrix_f.cend(), int_matrix.begin(), [](const float& f) { return (int32_t)round(f * 1024.f); }); return int_matrix; }; inline void rgb_to_yuv_avx2(__m256i pixel_pairs[4], const std::vector& color_matrix, __m256i* luma_out, __m256i* chroma_out) { /* COMPUTE LUMA */ { __m256i y_coeff = _mm256_broadcastsi128_si256(_mm_set_epi32(0, color_matrix[2], color_matrix[1], color_matrix[0])); __m256i y_offset = _mm256_set1_epi32(64 << 20); // Multiply by y-coefficients __m256i y4[4]; for (int i = 0; i < 4; i++) { y4[i] = _mm256_mullo_epi32(pixel_pairs[i], y_coeff); } // sum products __m256i y2_sum0123 = _mm256_hadd_epi32(y4[0], y4[1]); __m256i y2_sum4567 = _mm256_hadd_epi32(y4[2], y4[3]); __m256i y_sum01452367 = _mm256_hadd_epi32(y2_sum0123, y2_sum4567); *luma_out = _mm256_srli_epi32(_mm256_add_epi32(y_sum01452367, y_offset), 20); // add offset and shift down to 10 bit precision } /* COMPUTE CHROMA */ { __m256i cb_coeff = _mm256_broadcastsi128_si256(_mm_set_epi32(0, color_matrix[5], color_matrix[4], color_matrix[3])); __m256i cr_coeff = _mm256_broadcastsi128_si256(_mm_set_epi32(0, color_matrix[8], color_matrix[7], 
color_matrix[6])); __m256i c_offset = _mm256_set1_epi32((1025) << 19); // Multiply by cb-coefficients __m256i cbcr4[4]; // 0 = cb02, 1 = cr02, 2 = cb46, 3 = cr46 for (int i = 0; i < 2; i++) { cbcr4[i * 2] = _mm256_mullo_epi32(pixel_pairs[i * 2], cb_coeff); cbcr4[i * 2 + 1] = _mm256_mullo_epi32(pixel_pairs[i * 2], cr_coeff); } // sum products __m256i cbcr_sum02 = _mm256_hadd_epi32(cbcr4[1], cbcr4[0]); __m256i cbcr_sum46 = _mm256_hadd_epi32(cbcr4[3], cbcr4[2]); __m256i cbcr_sum_0426 = _mm256_hadd_epi32(cbcr_sum02, cbcr_sum46); *chroma_out = _mm256_srli_epi32(_mm256_add_epi32(cbcr_sum_0426, c_offset), 20); // add offset and shift down to 10 bit precision } } inline void pack_v210_avx2(__m256i luma[6], __m256i chroma[6], __m128i** v210_dest) { __m256i luma_16bit[3]; __m256i chroma_16bit[3]; __m256i offsets = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); for (int i = 0; i < 3; i++) { auto y16 = _mm256_packus_epi32(luma[i * 2], luma[i * 2 + 1]); auto cbcr16 = _mm256_packus_epi32(chroma[i * 2], chroma[i * 2 + 1]); // cbcr0 cbcr4 cbcr8 cbcr12 // cbcr2 cbcr6 cbcr10 cbcr14 luma_16bit[i] = _mm256_permutevar8x32_epi32(y16, offsets); // layout 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 chroma_16bit[i] = _mm256_permutevar8x32_epi32(cbcr16, offsets); // cbcr0 cbcr2 cbcr4 cbcr6 cbcr8 cbcr10 cbcr12 cbcr14 } __m128i chroma_mult = _mm_set_epi16(0, 0, 4, 16, 1, 4, 16, 1); __m128i chroma_shuf = _mm_set_epi8(-1, 11, 10, -1, 9, 8, 7, 6, -1, 5, 4, -1, 3, 2, 1, 0); __m128i luma_mult = _mm_set_epi16(0, 0, 16, 1, 4, 16, 1, 4); __m128i luma_shuf = _mm_set_epi8(11, 10, 9, 8, -1, 7, 6, -1, 5, 4, 3, 2, -1, 1, 0, -1); uint16_t* luma_ptr = reinterpret_cast(luma_16bit); uint16_t* chroma_ptr = reinterpret_cast(chroma_16bit); for (int i = 0; i < 8; ++i) { __m128i luma_values = _mm_loadu_si128(reinterpret_cast<__m128i*>(luma_ptr)); __m128i chroma_values = _mm_loadu_si128(reinterpret_cast<__m128i*>(chroma_ptr)); __m128i luma_packed = _mm_mullo_epi16(luma_values, luma_mult); __m128i chroma_packed = 
_mm_mullo_epi16(chroma_values, chroma_mult); luma_packed = _mm_shuffle_epi8(luma_packed, luma_shuf); chroma_packed = _mm_shuffle_epi8(chroma_packed, chroma_shuf); auto res = _mm_or_si128(luma_packed, chroma_packed); _mm_store_si128((*v210_dest)++, res); luma_ptr += 6; chroma_ptr += 6; } } template struct ARGBPixel { T R; T G; T B; T A; }; template void pack_v210(const ARGBPixel* src, const std::vector& color_matrix, uint32_t* dest, int num_pixels) { auto write_v210 = [dest, index = 0, shift = 0](uint32_t val) mutable { dest[index] |= ((val & 0x3FF) << shift); shift += 10; if (shift >= 30) { index++; shift = 0; } }; for (int x = 0; x < num_pixels; ++x, ++src) { uint32_t r, g, b; if constexpr (std::is_same()) { r = src->R >> 6; g = src->G >> 6; b = src->B >> 6; } else if constexpr (std::is_same()) { r = src->R << 2; g = src->G << 2; b = src->B << 2; } if (x % 2 == 0) { // Compute Cr uint32_t v = 1025 << 19; v += (int32_t)(color_matrix[6] * static_cast(r) + color_matrix[7] * static_cast(g) + color_matrix[8] * static_cast(b)); v >>= 20; write_v210(v); } // Compute Y uint32_t luma = 64 << 20; luma += (int32_t)(color_matrix[0] * static_cast(r) + color_matrix[1] * static_cast(g) + color_matrix[2] * static_cast(b)); luma >>= 20; write_v210(luma); if (x % 2 == 0) { // Compute Cb uint32_t u = 1025 << 19; u += (int32_t)(color_matrix[3] * static_cast(r) + color_matrix[4] * static_cast(g) + color_matrix[5] * static_cast(b)); u >>= 20; write_v210(u); } } } class v210_strategy : public format_strategy , std::enable_shared_from_this { std::vector bt709{0.212639005871510, 0.715168678767756, 0.072192315360734, -0.114592177555732, -0.385407822444268, 0.5, 0.5, -0.454155517037873, -0.045844482962127}; std::vector bt2020{0.262700212011267, 0.677998071518871, 0.059301716469862, -0.139630430187157, -0.360369569812843, 0.5, 0.5, -0.459784529009814, -0.040215470990186}; std::vector color_matrix; __m128i black_batch; uint8_t bpc; public: explicit v210_strategy(core::color_space color_space, 
uint8_t bpc) : color_matrix(create_int_matrix(color_space == core::color_space::bt2020 ? bt2020 : bt709)) , bpc(bpc) { // setup black batch (6 pixels of black, encoded as v210) ARGBPixel<> black[6]; memset(black, 0, sizeof(black)); memset(&black_batch, 0, sizeof(__m128i)); pack_v210(black, color_matrix, reinterpret_cast(&black_batch), 6); } BMDPixelFormat get_pixel_format() override { return bmdFormat10BitYUV; } int get_row_bytes(int width) override { return ((width + 47) / 48) * 128; } std::shared_ptr allocate_frame_data(const core::video_format_desc& format_desc) override { auto size = get_row_bytes(format_desc.width) * format_desc.height; return create_aligned_buffer(size, 128); } std::shared_ptr convert_frame_for_port(const core::video_format_desc& channel_format_desc, const core::video_format_desc& decklink_format_desc, const port_configuration& config, const core::const_frame& frame1, const core::const_frame& frame2, BMDFieldDominance field_dominance) override { return bpc == 1 ? do_convert_frame_for_port( channel_format_desc, decklink_format_desc, config, frame1, frame2, field_dominance) : do_convert_frame_for_port( channel_format_desc, decklink_format_desc, config, frame1, frame2, field_dominance); } private: // Fill count 6-pixel groups with black inline void fill_black_groups(__m128i*& dest, int count) const { for (int i = 0; i < count; ++i) { _mm_storeu_si128(dest++, black_batch); } } // Convert 48 pixels using AVX2 SIMD template inline void convert_48_pixels_avx2(const ARGBPixel*& src, __m128i*& dest) const { const __m256i* pixeldata = reinterpret_cast(src); __m256i luma[6]; __m256i chroma[6]; __m256i zero = _mm256_setzero_si256(); for (int packet_index = 0; packet_index < 6; packet_index++) { __m256i p0123, p4567; if constexpr (std::is_same()) { p0123 = _mm256_loadu_si256(pixeldata + packet_index * 2); p4567 = _mm256_loadu_si256(pixeldata + packet_index * 2 + 1); p0123 = _mm256_srli_epi16(p0123, 6); p4567 = _mm256_srli_epi16(p4567, 6); } else if 
constexpr (std::is_same()) { auto p01234567 = _mm256_loadu_si256(pixeldata + packet_index); auto p01452367 = _mm256_permute4x64_epi64(p01234567, 0b11011000); p0123 = _mm256_unpacklo_epi8(p01452367, zero); p4567 = _mm256_unpackhi_epi8(p01452367, zero); p0123 = _mm256_slli_epi16(p0123, 2); p4567 = _mm256_slli_epi16(p4567, 2); } else { static_assert(!std::is_same(), "Unsupported template type for v210 conversion"); } __m256i pixel_pairs[4]; pixel_pairs[0] = _mm256_unpacklo_epi16(p0123, zero); pixel_pairs[1] = _mm256_unpackhi_epi16(p0123, zero); pixel_pairs[2] = _mm256_unpacklo_epi16(p4567, zero); pixel_pairs[3] = _mm256_unpackhi_epi16(p4567, zero); rgb_to_yuv_avx2(pixel_pairs, color_matrix, &luma[packet_index], &chroma[packet_index]); } pack_v210_avx2(luma, chroma, &dest); src += 48; } // Convert remaining pixels (less than 48) using scalar code template inline void convert_remaining_pixels(const ARGBPixel*& src, __m128i*& dest, int pixel_count) const { int full_6pixel_groups = pixel_count / 6; memset(dest, 0, sizeof(__m128i) * ((pixel_count + 5) / 6)); if (full_6pixel_groups > 0) { int pixels_in_groups = full_6pixel_groups * 6; pack_v210(src, color_matrix, reinterpret_cast(dest), pixels_in_groups); dest += full_6pixel_groups; src += pixels_in_groups; pixel_count -= pixels_in_groups; } // Handle final partial packet (pad with black) if (pixel_count > 0) { ARGBPixel pixels[6]; memset(pixels, 0, sizeof(pixels)); memcpy(pixels, src, pixel_count * sizeof(ARGBPixel)); pack_v210(pixels, color_matrix, reinterpret_cast(dest), 6); dest++; src += pixel_count; } } template std::shared_ptr do_convert_frame_for_port(const core::video_format_desc& channel_format_desc, const core::video_format_desc& decklink_format_desc, const port_configuration& config, const core::const_frame& frame1, const core::const_frame& frame2, BMDFieldDominance field_dominance) { std::shared_ptr image_data = allocate_frame_data(decklink_format_desc); if (field_dominance != bmdProgressiveFrame) { 
convert_frame(channel_format_desc, decklink_format_desc, config, image_data, field_dominance == bmdUpperFieldFirst, frame1); convert_frame(channel_format_desc, decklink_format_desc, config, image_data, field_dominance != bmdUpperFieldFirst, frame2); } else { convert_frame(channel_format_desc, decklink_format_desc, config, image_data, true, frame1); } if (config.key_only) { // TODO: Add support for hdr frames } return image_data; } template void convert_frame(const core::video_format_desc& channel_format_desc, const core::video_format_desc& decklink_format_desc, const port_configuration& config, std::shared_ptr& image_data, bool topField, const core::const_frame& frame) { if (!frame) return; int firstLine = topField ? 0 : 1; size_t dest_line_bytes = get_row_bytes(decklink_format_desc.width); int black_groups_per_row = static_cast(dest_line_bytes / sizeof(__m128i)); // Calculate effective region dimensions int region_w = config.region_w > 0 ? config.region_w : channel_format_desc.width - config.src_x; int region_h = config.region_h > 0 ? 
config.region_h : channel_format_desc.height - config.src_y; // Clamp to available source pixels region_w = std::min(region_w, channel_format_desc.width - config.src_x); region_h = std::min(region_h, channel_format_desc.height - config.src_y); // Clamp to destination dimensions int pixels_to_copy = std::min(region_w, decklink_format_desc.width - config.dest_x); int lines_to_copy = std::min(region_h, decklink_format_desc.height - config.dest_y); int max_y_content = config.dest_y + lines_to_copy; // Calculate dest_x alignment for v210 (6 pixels per group) int black_groups_start = config.dest_x / 6; int partial_black_pixels = config.dest_x - black_groups_start * 6; const int NUM_THREADS = 6; auto rows_per_thread = decklink_format_desc.height / NUM_THREADS; tbb::parallel_for(0, NUM_THREADS, [&](int thread_index) { auto start_y = firstLine + thread_index * rows_per_thread; auto end_y = (thread_index + 1) * rows_per_thread; for (uint64_t y = start_y; y < end_y; y += decklink_format_desc.field_count) { auto dest_row = reinterpret_cast(image_data.get()) + y * dest_line_bytes; __m128i* v210_dest = reinterpret_cast<__m128i*>(dest_row); // Check if this row is outside the content region if (y < config.dest_y || y >= max_y_content) { fill_black_groups(v210_dest, black_groups_per_row); continue; } const uint64_t src_y = y - config.dest_y + config.src_y; // Fill the start of the row with black (complete 6-pixel groups) fill_black_groups(v210_dest, black_groups_start); int content_pixels_written = 0; // Handle partial black group at start (if dest_x is not aligned to 6 pixels) if (partial_black_pixels > 0) { ARGBPixel pixels[6]; memset(pixels, 0, sizeof(pixels)); memset(v210_dest, 0, sizeof(__m128i)); int content_in_packet = std::min(6 - partial_black_pixels, pixels_to_copy); auto src = reinterpret_cast*>(frame.image_data(0).data()) + (src_y * channel_format_desc.width + config.src_x); for (int i = 0; i < content_in_packet; ++i) { pixels[partial_black_pixels + i] = src[i]; } 
pack_v210(pixels, color_matrix, reinterpret_cast(v210_dest), 6); v210_dest++; content_pixels_written = content_in_packet; } // Pack the main content pixels int remaining_content = pixels_to_copy - content_pixels_written; if (remaining_content > 0) { auto src = reinterpret_cast*>(frame.image_data(0).data()) + (src_y * channel_format_desc.width + config.src_x + content_pixels_written); // Process 48-pixel batches with AVX2 int fullspeed_batches = remaining_content / 48; for (int batch = 0; batch < fullspeed_batches; ++batch) { convert_48_pixels_avx2(src, v210_dest); } // Process remaining content pixels (less than 48) int rest_content = remaining_content - fullspeed_batches * 48; if (rest_content > 0) { convert_remaining_pixels(src, v210_dest, rest_content); } } // Fill the rest of the row with black auto bytes_written = reinterpret_cast(v210_dest) - dest_row; auto padding_bytes = dest_line_bytes - bytes_written; auto padding_groups = static_cast(padding_bytes / sizeof(black_batch)); fill_black_groups(v210_dest, padding_groups); } }); } }; spl::shared_ptr create_sdr_v210_strategy(core::color_space color_space) { return spl::make_shared(color_space, static_cast(1)); } spl::shared_ptr create_hdr_v210_strategy(core::color_space color_space) { return spl::make_shared(color_space, static_cast(2)); } }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/vanc.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Niklas Andersson, niklas.andersson@nxtedition.com
 */
#include "vanc.h"

#include

namespace caspar { namespace decklink {

// COM wrapper exposing a single vanc_packet to the DeckLink API as an
// IDeckLinkAncillaryPacket. Lifetime is reference-counted: the object
// deletes itself when Release() drops the count to zero.
class decklink_vanc_packet : public IDeckLinkAncillaryPacket
{
    // NOTE(review): template argument (presumably ULONG) was lost in extraction.
    std::atomic ref_count_{0};
    vanc_packet pkt_;

  public:
    explicit decklink_vanc_packet(vanc_packet pkt)
        : pkt_(pkt)
    {
    }

    // IUnknown -- no other interfaces are exposed, so QueryInterface always fails.
    HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*) override { return E_NOINTERFACE; }
    ULONG STDMETHODCALLTYPE AddRef() override { return ++ref_count_; }
    ULONG STDMETHODCALLTYPE Release() override
    {
        if (--ref_count_ == 0) {
            delete this;
            return 0;
        }
        return ref_count_;
    }

    // IDeckLinkAncillaryPacket
    // Only the 8-bit packet format is supported; other formats get E_NOTIMPL.
    // NOTE(review): *size is written without a null check -- confirm DeckLink
    // always passes a valid size pointer.
    HRESULT STDMETHODCALLTYPE GetBytes(BMDAncillaryPacketFormat format, const void** data, unsigned int* size) override
    {
        if (format == bmdAncillaryPacketFormatUInt8) {
            if (data) {
                *data = pkt_.data.data();
            }
            *size = static_cast(pkt_.data.size());
            return S_OK;
        }
        return E_NOTIMPL;
    }
    unsigned char STDMETHODCALLTYPE GetDID(void) override { return pkt_.did; }
    unsigned char STDMETHODCALLTYPE GetSDID(void) override { return pkt_.sdid; }
    unsigned int STDMETHODCALLTYPE GetLineNumber(void) override { return pkt_.line_number; }
    unsigned char STDMETHODCALLTYPE GetDataStreamIndex(void) override { return 0; }
};

// Instantiates one strategy per VANC feature enabled in the configuration
// (SCTE-104 triggers and/or OP-47 subtitles).
decklink_vanc::decklink_vanc(const vanc_configuration& config)
{
    if (config.enable_scte104) {
        strategies_.push_back(create_scte104_strategy(config.scte104_line));
    }
    if (config.enable_op47) {
        strategies_.push_back(create_op47_strategy(
            config.op47_line, config.op47_line_field2, config.op42_sd_line, config.op47_dummy_header));
    }
}

// Drains one packet from every strategy that currently has data.
// Packets with DID 0 (strategy produced nothing for this field) are dropped;
// failures are logged per-strategy and do not abort the loop.
std::vector> decklink_vanc::pop_packets(bool field2)
{
    std::vector> packets;
    for (auto& strategy : strategies_) {
        if (strategy->has_data()) {
            try {
                auto packet = wrap_raw(new decklink_vanc_packet(strategy->pop_packet(field2)));
                if (packet->GetDID() != 0)
                    packets.push_back(packet);
            } catch (const std::exception& e) {
                CASPAR_LOG(error) << "Failed to pop " << strategy->get_name() << " VANC packet: " << e.what();
            } catch (...) {
                CASPAR_LOG(error) << "Failed to pop " << strategy->get_name() << " VANC packet.";
            }
        }
    }
    return packets;
}

// True if any configured strategy has queued data.
bool decklink_vanc::has_data() const
{
    return std::any_of(strategies_.begin(), strategies_.end(), [](auto& strategy) { return strategy->has_data(); });
}

// Routes AMCP-style parameters to the strategy whose name matches params[0]
// (case-insensitive). Returns false if no strategy matched or params are short.
bool decklink_vanc::try_push_data(const std::vector& params)
{
    if (params.size() < 2) {
        CASPAR_LOG(error) << " Not enough parameters to apply VANC data.";
        return false;
    }
    for (auto& strategy : strategies_) {
        if (boost::iequals(params.at(0), strategy->get_name())) {
            return strategy->try_push_data(params);
        }
    }
    return false;
}

std::shared_ptr create_vanc(const vanc_configuration& config) { return std::make_shared(config); }

}} // namespace caspar::decklink

================================================
FILE: src/modules/decklink/consumer/vanc.h
================================================

/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
* * Author: Niklas Andersson, niklas.andersson@nxtedition.com */ #pragma once #include "../decklink_api.h" #include "config.h" #include namespace caspar { namespace decklink { struct vanc_packet { uint8_t did; uint8_t sdid; uint32_t line_number; std::vector data; }; class decklink_vanc_strategy { public: virtual ~decklink_vanc_strategy() noexcept = default; virtual bool has_data() const = 0; virtual vanc_packet pop_packet(bool field2) = 0; virtual bool try_push_data(const std::vector& params) = 0; virtual const std::wstring& get_name() const = 0; }; class decklink_vanc { std::vector> strategies_; public: explicit decklink_vanc(const vanc_configuration& config); bool has_data() const; std::vector> pop_packets(bool field2 = false); bool try_push_data(const std::vector& params); }; std::shared_ptr create_op47_strategy(uint32_t line_number, uint32_t line_number_2, uint32_t sd_line, const std::wstring& dummy_header); std::shared_ptr create_scte104_strategy(uint32_t line_number); std::shared_ptr create_vanc(const vanc_configuration& config); }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/vanc_op47_strategy.cpp ================================================ #include "vanc.h" #include #include #include #include #include #include #include #include #include namespace caspar { namespace decklink { const uint8_t OP47_DID = 0x43; const uint8_t OP47_SDID = 0x02; class vanc_op47_strategy : public decklink_vanc_strategy { private: static const std::wstring Name; mutable std::mutex mutex_; uint32_t line_number_; uint32_t line_number_2_; uint8_t sd_line_; uint16_t counter_; std::vector dummy_header_; std::queue> packets_; public: vanc_op47_strategy(uint32_t line_number, uint32_t line_number_2, uint32_t sd_line, const std::wstring& dummy_header) : line_number_(line_number) , line_number_2_(line_number_2) , sd_line_(static_cast(sd_line & 0xFF)) , counter_(1) , dummy_header_(dummy_header.empty() ? 
std::vector() : base64_decode(dummy_header)) { } virtual bool has_data() const override { std::lock_guard lock(mutex_); return !packets_.empty() || !dummy_header_.empty(); } virtual vanc_packet pop_packet(bool field2) override { std::lock_guard lock(mutex_); if (field2 && line_number_2_ == 0) { return {0, 0, 0, {}}; } if (packets_.empty()) { return {OP47_DID, OP47_SDID, field2 ? line_number_2_ : line_number_, sdp_encode(dummy_header_, field2)}; } auto packet = packets_.front(); packets_.pop(); return {OP47_DID, OP47_SDID, field2 ? line_number_2_ : line_number_, sdp_encode(packet, field2)}; } virtual bool try_push_data(const std::vector& params) override { std::lock_guard lock(mutex_); try { for (size_t index = 1; index < params.size(); ++index) { packets_.push(base64_decode(params.at(index))); } } catch (const boost::bad_lexical_cast& e) { CASPAR_LOG(error) << "Failed to parse OP47 parameters: " << e.what(); return false; } return true; } virtual const std::wstring& get_name() const override { return vanc_op47_strategy::Name; } private: std::vector sdp_encode(const std::vector& packet, bool field2) { if (packet.size() != 45) { throw std::runtime_error("Invalid packet size for OP47: " + std::to_string(packet.size())); } // The following is based on the specification "Free TV Australia Operational Practice OP- 47" std::vector result(58); result[0] = 0x51; // identifier result[1] = 0x15; // identifier result[2] = static_cast(result.size()); // size of the packet result[3] = 0x02; // format-code result[4] = (sd_line_ & 0x1F) | (field2 ? 
0x00 : 0x80); // VBI packet descriptor (odd field) result[5] = 0; // VBI packet descriptor (not used) result[6] = 0; // VBI packet descriptor (not used) result[7] = 0; // VBI packet descriptor (not used) result[8] = 0; // VBI packet descriptor (not used) memcpy(result.data() + 9, packet.data(), packet.size()); result[54] = 0x74; // footer id result[55] = (counter_ & 0xFF00) >> 8; // footer sequence counter result[56] = counter_ & 0x00FF; // footer sequence counter result[57] = 0x0; // SPD checksum, will be set when calculated auto sum = accumulate(result.begin(), result.end(), (uint8_t)0); result[57] = ~sum + 1; counter_++; // this is rolling over at 65535 by design return result; } std::vector base64_decode(const std::wstring& encoded) { std::vector buffer(encoded.size()); std::use_facet>(std::locale()) .narrow(encoded.data(), encoded.data() + encoded.size(), '?', buffer.data()); auto str = std::string(buffer.data(), buffer.size()); return caspar::from_base64(str); } }; const std::wstring vanc_op47_strategy::Name = L"OP47"; std::shared_ptr create_op47_strategy(uint32_t line_number, uint32_t line_number_2, uint32_t sd_line, const std::wstring& dummy_header) { return std::make_shared(line_number, line_number_2, sd_line, dummy_header); } }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/consumer/vanc_scte104_strategy.cpp ================================================ #include "vanc.h" #include #include namespace caspar { namespace decklink { const uint8_t SCTE104_DID = 0x41; const uint8_t SCTE104_SDID = 0x07; class vanc_scte104_strategy : public decklink_vanc_strategy { static const std::wstring Name; mutable std::mutex mutex_; uint32_t line_number_; std::vector payload_ = {}; public: explicit vanc_scte104_strategy(uint32_t line_number) : line_number_(line_number) { } virtual bool has_data() const override { std::lock_guard lock(mutex_); return !payload_.empty(); } virtual vanc_packet pop_packet(bool 
field2) override { if (field2) { // If field2 is requested, we do not return any data. return {0, 0, 0, {}}; } { std::lock_guard lock(mutex_); // If we have a payload, return it as a vanc_packet. if (payload_.size() > 0) { auto data = std::vector(payload_.begin(), payload_.end()); vanc_packet pkt{SCTE104_DID, SCTE104_SDID, line_number_, data}; payload_.clear(); return pkt; } } // If we have no data, return an empty vanc_packet. return {0, 0, 0, {}}; } virtual bool try_push_data(const std::vector& params) override { try { if (params.size() == 2) { std::lock_guard lock(mutex_); // try to parse the payload as a base64 encoded raw SCTE-104 packet. auto base64_payload = params.at(1); payload_ = base64_decode(base64_payload); } } catch (const std::exception& e) { CASPAR_LOG(error) << "Failed to parse SCTE 104 parameters: " << e.what(); return false; } return true; } virtual const std::wstring& get_name() const override { return vanc_scte104_strategy::Name; } private: std::vector base64_decode(const std::wstring& encoded) { std::vector buffer(encoded.size()); std::use_facet>(std::locale()) .narrow(encoded.data(), encoded.data() + encoded.size(), '?', buffer.data()); auto str = std::string(buffer.data(), buffer.size()); return caspar::from_base64(str); } }; const std::wstring vanc_scte104_strategy::Name = L"SCTE104"; std::shared_ptr create_scte104_strategy(uint32_t line_number) { return std::make_shared(line_number); } }} // namespace caspar::decklink ================================================ FILE: src/modules/decklink/decklink.cpp ================================================ /* * Copyright (c) 2011 Sveriges Television AB * * This file is part of CasparCG (www.casparcg.com). * * CasparCG is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
*
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "StdAfx.h"

#include "decklink.h"

#include "util/util.h"

#include "consumer/decklink_consumer.h"
#include "producer/decklink_producer.h"

// NOTE(review): the targets of the two includes below were stripped during
// extraction (core module-dependency and logging headers seem likely — confirm).
#include
#include

#include "decklink_api.h"

namespace caspar { namespace decklink {

// Queries the installed DeckLink driver/API version. Returns L"Not found" when
// the DeckLink iterator cannot be created (e.g. no driver installed).
std::wstring get_version()
{
    std::wstring ver = L"Not found";
    struct co_init init; // COM is initialized for the duration of this call
    try {
        ver = decklink::version(create_iterator());
    } catch (...) {
    }
    return ver;
}

// Enumerates attached DeckLink devices as display strings of the form
// "<model> [<1-based index>] (<persistent id>)". Best-effort: any failure ends
// enumeration and whatever was collected so far is returned.
std::vector device_list()
{
    std::vector devices;

    struct co_init init;
    try {
        auto pDecklinkIterator = create_iterator();
        IDeckLink* decklink;
        for (int n = 1; pDecklinkIterator->Next(&decklink) == S_OK; ++n) {
            auto decklink_com = wrap_raw(decklink);
            auto attributes   = iface_cast(decklink_com);

            int64_t id = 0;
            attributes->GetInt(BMDDeckLinkPersistentID, &id);

            devices.push_back(get_model_name(decklink) + L" [" + std::to_wstring(n) + L"] (" + std::to_wstring(id) +
                              L")");
            // Releases the reference obtained from Next(); wrap_raw presumably
            // holds its own reference — confirm against util/util.h.
            decklink->Release();
        }
    } catch (...)
    {
    }

    return devices;
}

// Module entry point: registers the DeckLink consumer/producer factories with
// the dependency registries and logs the devices found at startup.
void init(const core::module_dependencies& dependencies)
{
    dependencies.consumer_registry->register_consumer_factory(L"Decklink Consumer", create_consumer);
    dependencies.consumer_registry->register_preconfigured_consumer_factory(L"decklink", create_preconfigured_consumer);
    dependencies.producer_registry->register_producer_factory(L"Decklink Producer", create_producer);

    auto devices = device_list();
    if (!devices.empty()) {
        CASPAR_LOG(info) << L"Decklink devices found:";
        for (const auto& device : devices) {
            CASPAR_LOG(info) << L" - " << device;
        }
    }
}

}} // namespace caspar::decklink

================================================ FILE: src/modules/decklink/decklink.h ================================================

/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#pragma once

// NOTE(review): include target stripped during extraction (the header declaring
// core::module_dependencies seems likely — confirm).
#include

namespace caspar { namespace decklink {

// Returns the DeckLink driver/API version string (implemented in decklink.cpp).
std::wstring get_version();

// Registers this module's consumer and producer factories.
void init(const core::module_dependencies& dependencies);

}} // namespace caspar::decklink

================================================ FILE: src/modules/decklink/decklink_api.h ================================================

/*
 * Copyright (c) 2011 Sveriges Television AB
 *
 * This file is part of CasparCG (www.casparcg.com).
*
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see .
 *
 * Author: Helge Norberg, helge.norberg@svt.se
 */

#pragma once

// NOTE(review): include target stripped during extraction — confirm.
#include

// MSVC / Windows branch: the DeckLink SDK is consumed through COM (BSTR strings,
// ATL smart pointers); a non-MSVC branch presumably follows after this chunk.
#if defined(_MSC_VER)

// NOTE(review): the targets of the two bare includes below were stripped during
// extraction (comutil/ATL headers seem likely — confirm).
#include
#include "interop/DeckLinkAPI.h"
#include

namespace caspar { namespace decklink {

// On Windows the SDK's string type is a COM BSTR.
using String = BSTR;
using UINT32 = unsigned int;

// Converts a BSTR to std::wstring; bstr_t is constructed with fCopy == false,
// which presumably takes ownership of (and frees) the BSTR — confirm against
// the _bstr_t documentation.
static std::wstring to_string(String bstr_string) { return static_cast(bstr_t(bstr_string, false)); }

static void com_initialize() { ::CoInitialize(nullptr); }

static void com_uninitialize() { ::CoUninitialize(); }

// RAII scope guard pairing CoInitialize with CoUninitialize.
struct co_init
{
    co_init() { ::CoInitialize(nullptr); }
    ~co_init() { ::CoUninitialize(); }
};

// NOTE(review): the template parameter lists below were stripped during
// extraction (likely "template <typename T>" with CComPtr<T> / CComQIPtr<T>
// — confirm).
template
using com_ptr = CComPtr;

template
using com_iface_ptr = CComQIPtr;

// (next definition continues past this chunk)
template